repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (classes, 991 values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (classes, 15 values)
---|---|---|---|---|---|
factorlibre/OCB | addons/crm_partner_assign/crm_partner_assign.py | 174 | 11357 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import random
from openerp.addons.base_geolocalize.models.res_partner import geo_find, geo_query_address
from openerp.osv import osv
from openerp.osv import fields
class res_partner_grade(osv.osv):
_order = 'sequence'
_name = 'res.partner.grade'
_columns = {
'sequence': fields.integer('Sequence'),
'active': fields.boolean('Active'),
'name': fields.char('Grade Name'),
'partner_weight': fields.integer('Grade Weight',
help="Gives the probability to assign a lead to this partner. (0 means no assignation.)"),
}
_defaults = {
'active': lambda *args: 1,
'partner_weight':1
}
class res_partner_activation(osv.osv):
_name = 'res.partner.activation'
_order = 'sequence'
_columns = {
'sequence' : fields.integer('Sequence'),
'name' : fields.char('Name', required=True),
}
class res_partner(osv.osv):
_inherit = "res.partner"
_columns = {
'partner_weight': fields.integer('Grade Weight',
help="Gives the probability to assign a lead to this partner. (0 means no assignation.)"),
'opportunity_assigned_ids': fields.one2many('crm.lead', 'partner_assigned_id',\
'Assigned Opportunities'),
'grade_id': fields.many2one('res.partner.grade', 'Grade'),
'activation' : fields.many2one('res.partner.activation', 'Activation', select=1),
'date_partnership' : fields.date('Partnership Date'),
'date_review' : fields.date('Latest Partner Review'),
'date_review_next' : fields.date('Next Partner Review'),
# customer implementation
'assigned_partner_id': fields.many2one(
'res.partner', 'Implemented by',
),
'implemented_partner_ids': fields.one2many(
'res.partner', 'assigned_partner_id',
string='Implementation References',
),
}
_defaults = {
'partner_weight': lambda *args: 0
}
def onchange_grade_id(self, cr, uid, ids, grade_id, context=None):
res = {'value' :{'partner_weight':0}}
if grade_id:
partner_grade = self.pool.get('res.partner.grade').browse(cr, uid, grade_id)
res['value']['partner_weight'] = partner_grade.partner_weight
return res
class crm_lead(osv.osv):
_inherit = "crm.lead"
_columns = {
'partner_latitude': fields.float('Geo Latitude', digits=(16, 5)),
'partner_longitude': fields.float('Geo Longitude', digits=(16, 5)),
'partner_assigned_id': fields.many2one('res.partner', 'Assigned Partner',track_visibility='onchange' , help="Partner this case has been forwarded/assigned to.", select=True),
'date_assign': fields.date('Assignation Date', help="Last date this case was forwarded/assigned to a partner"),
}
def _merge_data(self, cr, uid, ids, oldest, fields, context=None):
fields += ['partner_latitude', 'partner_longitude', 'partner_assigned_id', 'date_assign']
return super(crm_lead, self)._merge_data(cr, uid, ids, oldest, fields, context=context)
def onchange_assign_id(self, cr, uid, ids, partner_assigned_id, context=None):
"""This function updates the "assignation date" automatically, when manually assign a partner in the geo assign tab
"""
if not partner_assigned_id:
return {'value':{'date_assign': False}}
else:
partners = self.pool.get('res.partner').browse(cr, uid, [partner_assigned_id], context=context)
user_id = partners[0] and partners[0].user_id.id or False
return {'value':
{'date_assign': fields.date.context_today(self,cr,uid,context=context),
'user_id' : user_id}
}
def action_assign_partner(self, cr, uid, ids, context=None):
return self.assign_partner(cr, uid, ids, partner_id=False, context=context)
def assign_partner(self, cr, uid, ids, partner_id=False, context=None):
partner_ids = {}
res = False
res_partner = self.pool.get('res.partner')
if not partner_id:
partner_ids = self.search_geo_partner(cr, uid, ids, context=context)
for lead in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = partner_ids.get(lead.id, False)
if not partner_id:
continue
self.assign_geo_localize(cr, uid, [lead.id], lead.partner_latitude, lead.partner_longitude, context=context)
partner = res_partner.browse(cr, uid, partner_id, context=context)
if partner.user_id:
salesteam_id = partner.section_id and partner.section_id.id or False
for lead_id in ids:
self.allocate_salesman(cr, uid, [lead_id], [partner.user_id.id], team_id=salesteam_id, context=context)
self.write(cr, uid, [lead.id], {'date_assign': fields.date.context_today(self,cr,uid,context=context), 'partner_assigned_id': partner_id}, context=context)
return res
def assign_geo_localize(self, cr, uid, ids, latitude=False, longitude=False, context=None):
if latitude and longitude:
self.write(cr, uid, ids, {
'partner_latitude': latitude,
'partner_longitude': longitude
}, context=context)
return True
        # Don't pass context to browse()! We need the country name in English below
for lead in self.browse(cr, uid, ids):
if lead.partner_latitude and lead.partner_longitude:
continue
if lead.country_id:
result = geo_find(geo_query_address(street=lead.street,
zip=lead.zip,
city=lead.city,
state=lead.state_id.name,
country=lead.country_id.name))
if result:
self.write(cr, uid, [lead.id], {
'partner_latitude': result[0],
'partner_longitude': result[1]
}, context=context)
return True
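    # Illustrative only: geo_query_address builds a single address string and
    # geo_find (both imported from base_geolocalize above) resolves it to a
    # (latitude, longitude) tuple, or None when geocoding fails, e.g.:
    # geo_find(geo_query_address(city='Brussels', country='Belgium'))
    # -> approximately (50.85, 4.35), depending on the geocoding service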
def search_geo_partner(self, cr, uid, ids, context=None):
res_partner = self.pool.get('res.partner')
res_partner_ids = {}
self.assign_geo_localize(cr, uid, ids, context=context)
for lead in self.browse(cr, uid, ids, context=context):
partner_ids = []
if not lead.country_id:
continue
latitude = lead.partner_latitude
longitude = lead.partner_longitude
if latitude and longitude:
# 1. first way: in the same country, small area
partner_ids = res_partner.search(cr, uid, [
('partner_weight', '>', 0),
('partner_latitude', '>', latitude - 2), ('partner_latitude', '<', latitude + 2),
('partner_longitude', '>', longitude - 1.5), ('partner_longitude', '<', longitude + 1.5),
('country_id', '=', lead.country_id.id),
], context=context)
# 2. second way: in the same country, big area
if not partner_ids:
partner_ids = res_partner.search(cr, uid, [
('partner_weight', '>', 0),
('partner_latitude', '>', latitude - 4), ('partner_latitude', '<', latitude + 4),
('partner_longitude', '>', longitude - 3), ('partner_longitude', '<' , longitude + 3),
('country_id', '=', lead.country_id.id),
], context=context)
# 3. third way: in the same country, extra large area
if not partner_ids:
partner_ids = res_partner.search(cr, uid, [
('partner_weight','>', 0),
('partner_latitude','>', latitude - 8), ('partner_latitude','<', latitude + 8),
('partner_longitude','>', longitude - 8), ('partner_longitude','<', longitude + 8),
('country_id', '=', lead.country_id.id),
], context=context)
                # 4. fourth way: anywhere in same country
if not partner_ids:
# still haven't found any, let's take all partners in the country!
partner_ids = res_partner.search(cr, uid, [
('partner_weight', '>', 0),
('country_id', '=', lead.country_id.id),
], context=context)
                # 5. fifth way: closest partner whatsoever, just to have at least one result
if not partner_ids:
# warning: point() type takes (longitude, latitude) as parameters in this order!
cr.execute("""SELECT id, distance
FROM (select id, (point(partner_longitude, partner_latitude) <-> point(%s,%s)) AS distance FROM res_partner
WHERE active
AND partner_longitude is not null
AND partner_latitude is not null
AND partner_weight > 0) AS d
ORDER BY distance LIMIT 1""", (longitude, latitude))
res = cr.dictfetchone()
if res:
partner_ids.append(res['id'])
total_weight = 0
toassign = []
for partner in res_partner.browse(cr, uid, partner_ids, context=context):
total_weight += partner.partner_weight
toassign.append( (partner.id, total_weight) )
random.shuffle(toassign) # avoid always giving the leads to the first ones in db natural order!
nearest_weight = random.randint(0, total_weight)
for partner_id, weight in toassign:
if nearest_weight <= weight:
res_partner_ids[lead.id] = partner_id
break
return res_partner_ids
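# Standalone sketch (illustrative, not called by this module) of a textbook
# cumulative-weight random pick over (partner_id, weight) pairs; note that the
# method above shuffles toassign *after* computing cumulative totals, so its
# selection probabilities differ slightly from this version.
def _weighted_pick_example(weighted_partners):
    """weighted_partners: list of (partner_id, weight) tuples, weight > 0."""
    total_weight = 0
    cumulative = []
    for partner_id, weight in weighted_partners:
        total_weight += weight
        cumulative.append((partner_id, total_weight))
    if not total_weight:
        return False
    threshold = random.randint(0, total_weight)
    for partner_id, cum_weight in cumulative:
        if threshold <= cum_weight:
            return partner_id
    return False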
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RandyLowery/erpnext | erpnext/stock/stock_ledger.py | 1 | 17296 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.utils import cint, flt, cstr, now
from erpnext.stock.utils import get_valuation_method
import json
# future reposting
class NegativeStockError(frappe.ValidationError): pass
_exceptions = frappe.local('stockledger_exceptions')
# _exceptions = []
def make_sl_entries(sl_entries, is_amended=None, allow_negative_stock=False, via_landed_cost_voucher=False):
if sl_entries:
from erpnext.stock.utils import update_bin
		cancel = sl_entries[0].get("is_cancelled") == "Yes"
if cancel:
set_as_cancel(sl_entries[0].get('voucher_no'), sl_entries[0].get('voucher_type'))
for sle in sl_entries:
sle_id = None
if sle.get('is_cancelled') == 'Yes':
sle['actual_qty'] = -flt(sle['actual_qty'])
if sle.get("actual_qty") or sle.get("voucher_type")=="Stock Reconciliation":
sle_id = make_entry(sle, allow_negative_stock, via_landed_cost_voucher)
args = sle.copy()
args.update({
"sle_id": sle_id,
"is_amended": is_amended
})
update_bin(args, allow_negative_stock, via_landed_cost_voucher)
if cancel:
delete_cancelled_entry(sl_entries[0].get('voucher_type'), sl_entries[0].get('voucher_no'))
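# NOTE: make_sl_entries above calls this as set_as_cancel(voucher_no,
# voucher_type); the parameter names below are swapped relative to that call,
# but the SQL bind order compensates, so the correct values still reach the
# correct columns.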
def set_as_cancel(voucher_type, voucher_no):
frappe.db.sql("""update `tabStock Ledger Entry` set is_cancelled='Yes',
modified=%s, modified_by=%s
where voucher_no=%s and voucher_type=%s""",
(now(), frappe.session.user, voucher_type, voucher_no))
def make_entry(args, allow_negative_stock=False, via_landed_cost_voucher=False):
args.update({"doctype": "Stock Ledger Entry"})
sle = frappe.get_doc(args)
sle.flags.ignore_permissions = 1
sle.allow_negative_stock=allow_negative_stock
sle.via_landed_cost_voucher = via_landed_cost_voucher
sle.insert()
sle.submit()
return sle.name
def delete_cancelled_entry(voucher_type, voucher_no):
frappe.db.sql("""delete from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
class update_entries_after(object):
"""
	update valuation rate and qty after transaction
from the current time-bucket onwards
:param args: args as dict
args = {
"item_code": "ABC",
"warehouse": "XYZ",
"posting_date": "2012-12-12",
"posting_time": "12:00"
}
"""
def __init__(self, args, allow_zero_rate=False, allow_negative_stock=None, via_landed_cost_voucher=False, verbose=1):
from frappe.model.meta import get_field_precision
self.exceptions = []
self.verbose = verbose
self.allow_zero_rate = allow_zero_rate
self.allow_negative_stock = allow_negative_stock
self.via_landed_cost_voucher = via_landed_cost_voucher
if not self.allow_negative_stock:
self.allow_negative_stock = cint(frappe.db.get_single_value("Stock Settings",
"allow_negative_stock"))
self.args = args
for key, value in args.iteritems():
setattr(self, key, value)
self.previous_sle = self.get_sle_before_datetime()
self.previous_sle = self.previous_sle[0] if self.previous_sle else frappe._dict()
for key in ("qty_after_transaction", "valuation_rate", "stock_value"):
setattr(self, key, flt(self.previous_sle.get(key)))
self.company = frappe.db.get_value("Warehouse", self.warehouse, "company")
self.precision = get_field_precision(frappe.get_meta("Stock Ledger Entry").get_field("stock_value"),
currency=frappe.db.get_value("Company", self.company, "default_currency", cache=True))
self.prev_stock_value = self.previous_sle.stock_value or 0.0
self.stock_queue = json.loads(self.previous_sle.stock_queue or "[]")
self.valuation_method = get_valuation_method(self.item_code)
self.stock_value_difference = 0.0
self.build()
def build(self):
# includes current entry!
entries_to_fix = self.get_sle_after_datetime()
for sle in entries_to_fix:
self.process_sle(sle)
if self.exceptions:
self.raise_exceptions()
self.update_bin()
def update_bin(self):
# update bin
bin_name = frappe.db.get_value("Bin", {
"item_code": self.item_code,
"warehouse": self.warehouse
})
if not bin_name:
bin_doc = frappe.get_doc({
"doctype": "Bin",
"item_code": self.item_code,
"warehouse": self.warehouse
})
bin_doc.insert(ignore_permissions=True)
else:
bin_doc = frappe.get_doc("Bin", bin_name)
bin_doc.update({
"valuation_rate": self.valuation_rate,
"actual_qty": self.qty_after_transaction,
"stock_value": self.stock_value
})
bin_doc.flags.via_stock_ledger_entry = True
bin_doc.save(ignore_permissions=True)
def process_sle(self, sle):
if (sle.serial_no and not self.via_landed_cost_voucher) or not cint(self.allow_negative_stock):
# validate negative stock for serialized items, fifo valuation
# or when negative stock is not allowed for moving average
if not self.validate_negative_stock(sle):
self.qty_after_transaction += flt(sle.actual_qty)
return
if sle.serial_no:
self.get_serialized_values(sle)
self.qty_after_transaction += flt(sle.actual_qty)
self.stock_value = flt(self.qty_after_transaction) * flt(self.valuation_rate)
else:
if sle.voucher_type=="Stock Reconciliation":
# assert
self.valuation_rate = sle.valuation_rate
self.qty_after_transaction = sle.qty_after_transaction
self.stock_queue = [[self.qty_after_transaction, self.valuation_rate]]
self.stock_value = flt(self.qty_after_transaction) * flt(self.valuation_rate)
else:
if self.valuation_method == "Moving Average":
self.get_moving_average_values(sle)
self.qty_after_transaction += flt(sle.actual_qty)
self.stock_value = flt(self.qty_after_transaction) * flt(self.valuation_rate)
else:
self.get_fifo_values(sle)
self.qty_after_transaction += flt(sle.actual_qty)
self.stock_value = sum((flt(batch[0]) * flt(batch[1]) for batch in self.stock_queue))
# rounding as per precision
self.stock_value = flt(self.stock_value, self.precision)
stock_value_difference = self.stock_value - self.prev_stock_value
self.prev_stock_value = self.stock_value
# update current sle
sle.qty_after_transaction = self.qty_after_transaction
sle.valuation_rate = self.valuation_rate
sle.stock_value = self.stock_value
sle.stock_queue = json.dumps(self.stock_queue)
sle.stock_value_difference = stock_value_difference
sle.doctype="Stock Ledger Entry"
frappe.get_doc(sle).db_update()
def validate_negative_stock(self, sle):
"""
validate negative stock for entries current datetime onwards
will not consider cancelled entries
"""
diff = self.qty_after_transaction + flt(sle.actual_qty)
if diff < 0 and abs(diff) > 0.0001:
# negative stock!
exc = sle.copy().update({"diff": diff})
self.exceptions.append(exc)
return False
else:
return True
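	# Worked example (illustrative): with qty_after_transaction = 3 and
	# sle.actual_qty = -5, diff = -2, so the entry is recorded as an
	# exception instead of silently writing negative stock.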
def get_serialized_values(self, sle):
incoming_rate = flt(sle.incoming_rate)
actual_qty = flt(sle.actual_qty)
serial_no = cstr(sle.serial_no).split("\n")
if incoming_rate < 0:
# wrong incoming rate
incoming_rate = self.valuation_rate
stock_value_change = 0
if incoming_rate:
stock_value_change = actual_qty * incoming_rate
elif actual_qty < 0:
# In case of delivery/stock issue, get average purchase rate
# of serial nos of current entry
stock_value_change = -1 * flt(frappe.db.sql("""select sum(purchase_rate)
from `tabSerial No` where name in (%s)""" % (", ".join(["%s"]*len(serial_no))),
tuple(serial_no))[0][0])
new_stock_qty = self.qty_after_transaction + actual_qty
if new_stock_qty > 0:
new_stock_value = (self.qty_after_transaction * self.valuation_rate) + stock_value_change
if new_stock_value > 0:
# calculate new valuation rate only if stock value is positive
# else it remains the same as that of previous entry
self.valuation_rate = new_stock_value / new_stock_qty
def get_moving_average_values(self, sle):
actual_qty = flt(sle.actual_qty)
new_stock_qty = flt(self.qty_after_transaction) + actual_qty
if new_stock_qty >= 0:
if actual_qty > 0:
if flt(self.qty_after_transaction) <= 0:
self.valuation_rate = sle.incoming_rate
else:
new_stock_value = (self.qty_after_transaction * self.valuation_rate) + \
(actual_qty * sle.incoming_rate)
self.valuation_rate = new_stock_value / new_stock_qty
elif sle.outgoing_rate:
if new_stock_qty:
new_stock_value = (self.qty_after_transaction * self.valuation_rate) + \
(actual_qty * sle.outgoing_rate)
self.valuation_rate = new_stock_value / new_stock_qty
else:
self.valuation_rate = sle.outgoing_rate
else:
if flt(self.qty_after_transaction) >= 0 and sle.outgoing_rate:
self.valuation_rate = sle.outgoing_rate
if not self.valuation_rate and actual_qty > 0:
self.valuation_rate = sle.incoming_rate
# Get valuation rate from previous SLE or Item master, if item is not a sample item
if not self.valuation_rate and sle.voucher_detail_no:
is_sample_item = self.check_if_sample_item(sle.voucher_type, sle.voucher_detail_no)
if not is_sample_item:
self.valuation_rate = get_valuation_rate(sle.item_code, sle.warehouse,
sle.voucher_type, sle.voucher_no, self.allow_zero_rate,
currency=erpnext.get_company_currency(sle.company))
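	# Worked example (illustrative): 10 units on hand at valuation_rate 100
	# and 5 units received at incoming_rate 130 give
	# (10*100 + 5*130) / 15 = 110.0 as the new valuation_rate.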
def get_fifo_values(self, sle):
incoming_rate = flt(sle.incoming_rate)
actual_qty = flt(sle.actual_qty)
outgoing_rate = flt(sle.outgoing_rate)
if actual_qty > 0:
if not self.stock_queue:
self.stock_queue.append([0, 0])
			# last row has the same rate, just update the qty
if self.stock_queue[-1][1]==incoming_rate:
self.stock_queue[-1][0] += actual_qty
else:
if self.stock_queue[-1][0] > 0:
self.stock_queue.append([actual_qty, incoming_rate])
else:
qty = self.stock_queue[-1][0] + actual_qty
self.stock_queue[-1] = [qty, incoming_rate]
else:
qty_to_pop = abs(actual_qty)
while qty_to_pop:
if not self.stock_queue:
# Get valuation rate from last sle if exists or from valuation rate field in item master
is_sample_item = self.check_if_sample_item(sle.voucher_type, sle.voucher_detail_no)
if not is_sample_item:
_rate = get_valuation_rate(sle.item_code, sle.warehouse,
sle.voucher_type, sle.voucher_no, self.allow_zero_rate,
currency=erpnext.get_company_currency(sle.company))
else:
_rate = 0
self.stock_queue.append([0, _rate])
index = None
if outgoing_rate > 0:
# Find the entry where rate matched with outgoing rate
for i, v in enumerate(self.stock_queue):
if v[1] == outgoing_rate:
index = i
break
# If no entry found with outgoing rate, collapse stack
					if index is None:
new_stock_value = sum((d[0]*d[1] for d in self.stock_queue)) - qty_to_pop*outgoing_rate
new_stock_qty = sum((d[0] for d in self.stock_queue)) - qty_to_pop
self.stock_queue = [[new_stock_qty, new_stock_value/new_stock_qty if new_stock_qty > 0 else outgoing_rate]]
break
else:
index = 0
# select first batch or the batch with same rate
batch = self.stock_queue[index]
if qty_to_pop >= batch[0]:
# consume current batch
qty_to_pop = qty_to_pop - batch[0]
self.stock_queue.pop(index)
if not self.stock_queue and qty_to_pop:
# stock finished, qty still remains to be withdrawn
# negative stock, keep in as a negative batch
self.stock_queue.append([-qty_to_pop, outgoing_rate or batch[1]])
break
else:
# qty found in current batch
# consume it and exit
batch[0] = batch[0] - qty_to_pop
qty_to_pop = 0
stock_value = sum((flt(batch[0]) * flt(batch[1]) for batch in self.stock_queue))
stock_qty = sum((flt(batch[0]) for batch in self.stock_queue))
if stock_qty:
self.valuation_rate = stock_value / flt(stock_qty)
if not self.stock_queue:
self.stock_queue.append([0, sle.incoming_rate or sle.outgoing_rate or self.valuation_rate])
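	# Worked example of the queue above (illustrative): with
	# stock_queue = [[10, 100.0], [5, 130.0]], issuing 12 units consumes the
	# 10 @ 100.0 batch and 2 units of the 5 @ 130.0 batch, leaving
	# [[3, 130.0]] and a valuation_rate of 130.0.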
def check_if_sample_item(self, voucher_type, voucher_detail_no):
ref_item_dt = voucher_type + (" Detail" if voucher_type == "Stock Entry" else " Item")
return frappe.db.get_value(ref_item_dt, voucher_detail_no, "is_sample_item")
def get_sle_before_datetime(self):
"""get previous stock ledger entry before current time-bucket"""
return get_stock_ledger_entries(self.args, "<", "desc", "limit 1", for_update=False)
def get_sle_after_datetime(self):
"""get Stock Ledger Entries after a particular datetime, for reposting"""
return get_stock_ledger_entries(self.previous_sle or frappe._dict({
"item_code": self.args.get("item_code"), "warehouse": self.args.get("warehouse") }),
">", "asc", for_update=True)
def raise_exceptions(self):
deficiency = min(e["diff"] for e in self.exceptions)
if ((self.exceptions[0]["voucher_type"], self.exceptions[0]["voucher_no"]) in
frappe.local.flags.currently_saving):
msg = _("{0} units of {1} needed in {2} to complete this transaction.").format(
abs(deficiency), frappe.get_desk_link('Item', self.item_code),
frappe.get_desk_link('Warehouse', self.warehouse))
else:
msg = _("{0} units of {1} needed in {2} on {3} {4} for {5} to complete this transaction.").format(
abs(deficiency), frappe.get_desk_link('Item', self.item_code),
frappe.get_desk_link('Warehouse', self.warehouse),
self.exceptions[0]["posting_date"], self.exceptions[0]["posting_time"],
frappe.get_desk_link(self.exceptions[0]["voucher_type"], self.exceptions[0]["voucher_no"]))
if self.verbose:
			frappe.throw(msg, NegativeStockError, title='Insufficient Stock')
else:
raise NegativeStockError, msg
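# Illustrative usage (not executed here): reposting valuation for one
# item/warehouse from a given datetime onwards, with the args shape from the
# class docstring above:
# update_entries_after({
# 	"item_code": "ABC",
# 	"warehouse": "XYZ",
# 	"posting_date": "2012-12-12",
# 	"posting_time": "12:00",
# })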
def get_previous_sle(args, for_update=False):
"""
get the last sle on or before the current time-bucket,
to get actual qty before transaction, this function
is called from various transaction like stock entry, reco etc
args = {
"item_code": "ABC",
"warehouse": "XYZ",
"posting_date": "2012-12-12",
"posting_time": "12:00",
"sle": "name of reference Stock Ledger Entry"
}
"""
args["name"] = args.get("sle", None) or ""
sle = get_stock_ledger_entries(args, "<=", "desc", "limit 1", for_update=for_update)
return sle and sle[0] or {}
def get_stock_ledger_entries(previous_sle, operator=None, order="desc", limit=None, for_update=False, debug=False):
"""get stock ledger entries filtered by specific posting datetime conditions"""
conditions = "timestamp(posting_date, posting_time) {0} timestamp(%(posting_date)s, %(posting_time)s)".format(operator)
if not previous_sle.get("posting_date"):
previous_sle["posting_date"] = "1900-01-01"
if not previous_sle.get("posting_time"):
previous_sle["posting_time"] = "00:00"
if operator in (">", "<=") and previous_sle.get("name"):
conditions += " and name!=%(name)s"
return frappe.db.sql("""select *, timestamp(posting_date, posting_time) as "timestamp" from `tabStock Ledger Entry`
where item_code = %%(item_code)s
and warehouse = %%(warehouse)s
and ifnull(is_cancelled, 'No')='No'
and %(conditions)s
order by timestamp(posting_date, posting_time) %(order)s, name %(order)s
%(limit)s %(for_update)s""" % {
"conditions": conditions,
"limit": limit or "",
"for_update": for_update and "for update" or "",
"order": order
}, previous_sle, as_dict=1, debug=debug)
def get_valuation_rate(item_code, warehouse, voucher_type, voucher_no,
allow_zero_rate=False, currency=None):
# Get valuation rate from last sle for the same item and warehouse
last_valuation_rate = frappe.db.sql("""select valuation_rate
from `tabStock Ledger Entry`
where item_code = %s and warehouse = %s
and valuation_rate > 0
order by posting_date desc, posting_time desc, name desc limit 1""", (item_code, warehouse))
if not last_valuation_rate:
# Get valuation rate from last sle for the item against any warehouse
last_valuation_rate = frappe.db.sql("""select valuation_rate
from `tabStock Ledger Entry`
where item_code = %s and valuation_rate > 0
order by posting_date desc, posting_time desc, name desc limit 1""", item_code)
valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0
if not valuation_rate:
		# If negative stock is allowed and the item was delivered without any
		# incoming entry, the system does not find any SLE, so take the
		# valuation rate from the Item master
valuation_rate = frappe.db.get_value("Item", item_code, "valuation_rate")
if not valuation_rate:
# try in price list
valuation_rate = frappe.db.get_value('Item Price',
dict(item_code=item_code, buying=1, currency=currency), 'price_list_rate')
if not allow_zero_rate and not valuation_rate \
and cint(frappe.db.get_value("Accounts Settings", None, "auto_accounting_for_stock")):
frappe.local.message_log = []
frappe.throw(_("Valuation rate not found for the Item {0}, which is required to do accounting entries for {1} {2}. If the item is transacting as a sample item in the {1}, please mention that in the {1} Item table. Otherwise, please create an incoming stock transaction for the item or mention valuation rate in the Item record, and then try submiting/cancelling this entry").format(item_code, voucher_type, voucher_no))
return valuation_rate
| gpl-3.0 |
AMOboxTV/AMOBox.LegoBuild | plugin.video.specto/resources/lib/resolvers/googlepicasa.py | 23 | 2529 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,json
from resources.lib.libraries import client
def resolve(url):
try:
id = re.compile('#(\d*)').findall(url)[0]
result = client.request(url)
result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
result = json.loads(result)['feed']['entry']
if len(result) > 1: result = [i for i in result if str(id) in i['streamIds'][0]][0]
elif len(result) == 1: result = result[0]
result = result['media']['content']
result = [i['url'] for i in result if 'video' in i['type']]
result = sum([tag(i) for i in result], [])
url = []
try: url += [[i for i in result if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in result if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in result if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
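# Example (illustrative): per the itag table above, an itag=37 stream maps to
# 1080p, e.g. tag('http://example.com/video?itag=37')
# -> [{'quality': '1080p', 'url': 'http://example.com/video?itag=37'}]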
| gpl-2.0 |
anhhoangiot/people_recognition_pi | fscognitive/commons/process_parallel.py | 1 | 1048 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-10-08
# @Author : Anh Hoang (anhhoang.work.mail@gmail.com)
# @Project : FSCognitive
# @Version : 1.0
from multiprocessing import Process
class ProcessParallel(object):
"""
To Process the functions parallely
"""
def __init__(self, *jobs):
"""
"""
self.jobs = jobs
self.processes = []
def fork_processes(self):
"""
        Creates the process objects for the given function delegates.
"""
for job in self.jobs:
proc = Process(target=job)
self.processes.append(proc)
def fork_threads(self):
for job in self.jobs:
self.processes.append(job)
def start_all(self):
"""
        Starts all the function processes together.
"""
for proc in self.processes:
proc.start()
def join_all(self):
"""
        Waits until all the functions have executed.
"""
for proc in self.processes:
proc.join()
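# Example usage (illustrative; job_one and job_two are placeholder callables):
# if __name__ == '__main__':
#     runner = ProcessParallel(job_one, job_two)
#     runner.fork_processes()
#     runner.start_all()
#     runner.join_all()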
| mit |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/numpy/ctypeslib.py | 34 | 14636 | """
============================
``ctypes`` Utility Functions
============================
See Also
---------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
Our result type, an ndarray that must be of type double, be 1-dimensional
and is C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #doctest: +SKIP
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
from __future__ import division, absolute_import, print_function
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
import sys, os
from numpy import integer, ndarray, dtype as _dtype, deprecate, array
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
def _dummy(*args, **kwds):
"""
Dummy object that raises an ImportError if ctypes is not available.
Raises
------
ImportError
If ctypes is not available.
"""
raise ImportError("ctypes is not available.")
ctypes_load_library = _dummy
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
from numpy import intp as c_intp
_ndptr_base = object
else:
import numpy.core._internal as nic
c_intp = nic._getintp_ctype()
del nic
_ndptr_base = ctypes.c_void_p
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
"""
It is possible to load a library using
>>> lib = ctypes.cdll[<full_path_name>]
But there are cross-platform considerations, such as library file extensions,
plus the fact Windows will just load the first library it finds with that name.
Numpy supplies the load_library function as a convenience.
Parameters
----------
libname : str
Name of the library, which can have 'lib' as a prefix,
but without an extension.
loader_path : str
Where the library can be found.
Returns
-------
ctypes.cdll[libpath] : library object
A ctypes library object
Raises
------
OSError
If there is no library with the expected extension, or the
library is defective and cannot be loaded.
"""
if ctypes.__version__ < '1.0.1':
import warnings
warnings.warn("All features of ctypes interface may not work " \
"with ctypes < 1.0.1")
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
# default to libname.[so|pyd]. Sometimes, these files are built
# erroneously on non-linux platforms.
from numpy.distutils.misc_util import get_shared_lib_extension
so_ext = get_shared_lib_extension()
libname_ext = [libname + so_ext]
# mac, windows and linux >= py3.2 shared library and loadable
# module have different extensions so try both
so_ext2 = get_shared_lib_extension(is_python_ext=True)
if not so_ext2 == so_ext:
libname_ext.insert(0, libname + so_ext2)
else:
libname_ext = [libname]
loader_path = os.path.abspath(loader_path)
if not os.path.isdir(loader_path):
libdir = os.path.dirname(loader_path)
else:
libdir = loader_path
for ln in libname_ext:
libpath = os.path.join(libdir, ln)
if os.path.exists(libpath):
try:
return ctypes.cdll[libpath]
except OSError:
## defective lib file
raise
## if no successful return in the libname_ext loop:
raise OSError("no file with expected extension")
ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
'load_library')
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
'OWNDATA', 'UPDATEIFCOPY']
def _flags_fromnum(num):
res = []
for key in _flagnames:
value = _flagdict[key]
if (num & value):
res.append(key)
return res
class _ndptr(_ndptr_base):
def _check_retval_(self):
"""This method is called when this class is used as the .restype
        attribute for a shared-library function. It constructs a numpy
array from a void pointer."""
return array(self)
@property
def __array_interface__(self):
return {'descr': self._dtype_.descr,
'__ref': self,
'strides': None,
'shape': self._shape_,
'version': 3,
'typestr': self._dtype_.descr[0][1],
'data': (self.value, False),
}
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
raise TypeError("argument must be an ndarray")
if cls._dtype_ is not None \
and obj.dtype != cls._dtype_:
raise TypeError("array must have data type %s" % cls._dtype_)
if cls._ndim_ is not None \
and obj.ndim != cls._ndim_:
raise TypeError("array must have %d dimension(s)" % cls._ndim_)
if cls._shape_ is not None \
and obj.shape != cls._shape_:
raise TypeError("array must have shape %s" % str(cls._shape_))
if cls._flags_ is not None \
and ((obj.flags.num & cls._flags_) != cls._flags_):
raise TypeError("array must have flags %s" %
_flags_fromnum(cls._flags_))
return obj.ctypes
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
flags : str or tuple of str
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- UPDATEIFCOPY / U
Returns
-------
klass : ndpointer type object
        A type object, which is an ``_ndptr`` instance containing
dtype, ndim, shape and flags information.
Raises
------
TypeError
If a given array does not satisfy the specified restrictions.
Examples
--------
>>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
... #doctest: +SKIP
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
... #doctest: +SKIP
"""
if dtype is not None:
dtype = _dtype(dtype)
num = None
if flags is not None:
if isinstance(flags, str):
flags = flags.split(',')
elif isinstance(flags, (int, integer)):
num = flags
flags = _flags_fromnum(num)
elif isinstance(flags, flagsobj):
num = flags.num
flags = _flags_fromnum(num)
if num is None:
try:
flags = [x.strip().upper() for x in flags]
except:
raise TypeError("invalid flags specification")
num = _num_fromflags(flags)
try:
return _pointer_type_cache[(dtype, ndim, shape, num)]
except KeyError:
pass
if dtype is None:
name = 'any'
elif dtype.names:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
try:
strshape = [str(x) for x in shape]
except TypeError:
strshape = [str(shape)]
shape = (shape,)
shape = tuple(shape)
name += "_"+"x".join(strshape)
if flags is not None:
name += "_"+"_".join(flags)
else:
flags = []
klass = type("ndpointer_%s"%name, (_ndptr,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
    # cache under the same composite key used for the lookup above
    _pointer_type_cache[(dtype, ndim, shape, num)] = klass
return klass
if ctypes is not None:
ct = ctypes
################################################################
# simple types
# maps the numpy typecodes like '<f8' to simple ctypes types like
# c_double. Filled in by prep_simple.
_typecodes = {}
def prep_simple(simple_type, dtype):
"""Given a ctypes simple type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: simple_type.__array_interface__
except AttributeError: pass
else: return
typestr = _dtype(dtype).str
_typecodes[typestr] = simple_type
def __array_interface__(self):
return {'descr': [('', typestr)],
'__ref': self,
'strides': None,
'shape': (),
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
simple_type.__array_interface__ = property(__array_interface__)
simple_types = [
((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
((ct.c_float, ct.c_double), "f"),
]
    # Prep the numerical ctypes types:
for types, code in simple_types:
for tp in types:
prep_simple(tp, "%c%d" % (code, ct.sizeof(tp)))
################################################################
# array types
_ARRAY_TYPE = type(ct.c_int * 1)
def prep_array(array_type):
"""Given a ctypes array type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: array_type.__array_interface__
except AttributeError: pass
else: return
shape = []
ob = array_type
while type(ob) is _ARRAY_TYPE:
shape.append(ob._length_)
ob = ob._type_
shape = tuple(shape)
ai = ob().__array_interface__
descr = ai['descr']
typestr = ai['typestr']
def __array_interface__(self):
return {'descr': descr,
'__ref': self,
'strides': None,
'shape': shape,
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
array_type.__array_interface__ = property(__array_interface__)
def prep_pointer(pointer_obj, shape):
"""Given a ctypes pointer object, construct and
attach an __array_interface__ property to it if it does not
yet have one.
"""
try: pointer_obj.__array_interface__
except AttributeError: pass
else: return
contents = pointer_obj.contents
dtype = _dtype(type(contents))
inter = {'version': 3,
'typestr': dtype.str,
'data': (ct.addressof(contents), False),
'shape': shape}
pointer_obj.__array_interface__ = inter
################################################################
# public functions
def as_array(obj, shape=None):
"""Create a numpy array from a ctypes array or a ctypes POINTER.
The numpy array shares the memory with the ctypes object.
        The shape parameter must be given when converting from a ctypes POINTER.
        The shape parameter is ignored when converting from a ctypes array.
"""
tp = type(obj)
try: tp.__array_interface__
except AttributeError:
if hasattr(obj, 'contents'):
prep_pointer(obj, shape)
else:
prep_array(tp)
return array(obj, copy=False)
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
tp = _typecodes[ai["typestr"]]
for dim in ai["shape"][::-1]:
tp = tp * dim
result = tp.from_address(addr)
result.__keep = ai
return result
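    # Round-trip sketch (illustrative): as_ctypes and as_array share a buffer.
    # >>> a = np.zeros((2, 3), dtype=np.float64)
    # >>> c_arr = np.ctypeslib.as_ctypes(a)   # 2x3 ctypes array of c_double
    # >>> b = np.ctypeslib.as_array(c_arr)    # wraps the same memory, no copy
    # >>> b[0, 0] = 42.0; a[0, 0]             # -> 42.0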
| mit |
wnoc-drexel/gem5-stable | src/arch/alpha/AlphaISA.py | 61 | 2377 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class AlphaISA(SimObject):
type = 'AlphaISA'
cxx_class = 'AlphaISA::ISA'
cxx_header = "arch/alpha/isa.hh"
system = Param.System(Parent.any, "System this ISA object belongs to")
| bsd-3-clause |
enthought/python-analytics | python_analytics/tests/test_tracker.py | 1 | 3916 | from __future__ import absolute_import, unicode_literals
import unittest
import uuid
from mock import patch
import requests
import responses
import six
from six import PY2, binary_type
from six.moves.urllib import parse
from ..events import Event
from ..tracker import _AnalyticsHandler, Tracker
from ..utils import get_user_agent
def _decode_qs(item):
if isinstance(item, binary_type):
return item.decode('utf-8')
elif isinstance(item, list):
return [_decode_qs(sub_item) for sub_item in item]
elif isinstance(item, dict):
return {_decode_qs(key): _decode_qs(value)
for key, value in item.items()}
return item
class TestAnalyticsHandler(unittest.TestCase):
if PY2:
assertRegex = unittest.TestCase.assertRegexpMatches
def test_default_user_agent(self):
# Given
handler = _AnalyticsHandler()
# Then
user_agent = handler._session.headers['User-Agent']
self.assertRegex(user_agent, r'^python-analytics/')
self.assertEqual(user_agent, get_user_agent(None))
def test_override_user_agent(self):
# Given
session = requests.Session()
session.headers['User-Agent'] = 'MyAgent/1.0'
handler = _AnalyticsHandler(session=session)
# Then
user_agent = handler._session.headers['User-Agent']
self.assertRegex(
user_agent, r'^python-analytics/[^ ]+ MyAgent/1.0')
self.assertEqual(user_agent, get_user_agent('MyAgent/1.0'))
@responses.activate
def test_encode_unicode(self):
# Given
responses.add(
responses.POST,
_AnalyticsHandler.target,
status=200,
)
key = '\N{GREEK SMALL LETTER MU}'
value = '\N{GREEK SMALL LETTER PI}'
handler = _AnalyticsHandler()
data = {key: value}
# When
handler.send(data)
# Then
self.assertEqual(len(responses.calls), 1)
call, = responses.calls
request, response = call
sent_encoded = request.body
sent_decoded = _decode_qs(parse.parse_qs(sent_encoded))
self.assertEqual(sent_decoded, {key: [value]})
@responses.activate
def test_send_analytics(self):
# Given
responses.add(
responses.POST,
_AnalyticsHandler.target,
status=200,
)
uid = str(uuid.uuid4())
expected = {
'v': ['1'],
'tid': ['GA-ID'],
'cid': [uid],
}
handler = _AnalyticsHandler()
data = {
'v': 1,
'tid': 'GA-ID',
'cid': uid,
}
# When
handler.send(data)
# Then
self.assertEqual(len(responses.calls), 1)
call, = responses.calls
request, response = call
sent_data = parse.parse_qs(request.body)
self.assertEqual(sent_data, expected)
class TestTracker(unittest.TestCase):
maxDiff = None
@patch('uuid.uuid4')
@responses.activate
def test_tracker(self, uuid4):
# Given
responses.add(
responses.POST,
_AnalyticsHandler.target,
status=200,
)
my_uuid = 'my-uuid'
uuid4.return_value = my_uuid
category = 'category'
action = 'action'
tracker = Tracker('GA-ID')
event = Event(category=category, action=action)
expected = {
'v': ['1'],
'tid': ['GA-ID'],
'cid': [my_uuid],
't': ['event'],
'ec': [category],
'ea': [action],
}
# When
tracker.send(event)
# Then
self.assertEqual(len(responses.calls), 1)
call, = responses.calls
request, response = call
sent_data = parse.parse_qs(request.body)
self.assertEqual(sent_data, expected)
| bsd-3-clause |
iagapov/ocelot | demos/optics/ex7.py | 2 | 1987 | '''
example07 -- crystal -- dynamical diffraction (ocelot.optics.bragg)
'''
from ocelot.optics.utils import *
def save_filter(filt, f_name):
f= open(f_name,'w')
for i in xrange( len(filt.ev)):
f.write(str(filt.ev[i]) + '\t' + str(np.abs(filt.tr[i])**2) + '\t' + str(np.abs(filt.ref[i])**2) + '\n')
E_ev = 10000
ref_idx = (2,2,0)
thickness = 5 * mum
cr1 = Crystal(r=[0,0,0*cm], size=[5*cm,5*cm,thickness], no=[0,0,-1], id="cr1")
cr1.lattice = CrystalLattice('Si')
#cr1.psi_n = -(pi/2. - 54.7356*(pi/180.0)) #input angle psi_n according to Authier
cr1.psi_n = -pi/2. #input angle psi_n according to Authier (symmetric reflection, Si)
r = Ray(r0=[0,0.0,-0.5], k=[0,0.0,1])
r.lamb = 2 * pi * hbar * c / E_ev
print('wavelength', r.lamb)
w1 = read_signal(file_name='data/pulse_9kev_20fs.txt', npad =10, E_ref = E_ev)
plt.figure()
plot_signal(w1)
#plt.figure()
f_test = get_crystal_filter(cryst=cr1, ray=r, nk=3000, ref_idx = ref_idx)
filt = get_crystal_filter(cr1, r, ref_idx = ref_idx, k = w1.freq_k)
#save_filter(f_test, 'C400_8000ev_filter.txt')
plot_filters(filt, f_test)
plot_filters(filt, f_test, param='ref')
fig=plt.figure()
plot_spec_filt(w1, filt, ax=fig.add_subplot(111))
cr1.filter = filt
i1=np.sum(w1.sp*np.conj(w1.sp))*(w1.freq_ev[1] - w1.freq_ev[0])
def transform_field(cr, wave):
print('transforming field')
wave.sp = wave.sp * cr.filter.tr
wave.sp_ref = wave.sp * cr.filter.ref
wave.f = np.fft.ifft(wave.sp)
fig = plt.figure()
plt.grid(True)
ax = fig.add_subplot(111)
plt.plot(w1.t, np.abs(w1.f))
transform_field(cr1, w1)
i2 = np.sum(w1.sp*np.conj(w1.sp))*(w1.freq_ev[1] - w1.freq_ev[0])
i3 = np.sum(w1.sp_ref*np.conj(w1.sp_ref))*(w1.freq_ev[1] - w1.freq_ev[0])
print('transmission (%)', 100*np.real(i2/i1), 'reflection (%)', 100*np.real(i3/i1))
plt.plot(w1.t, np.abs(w1.f))
ax.set_yscale('log')
plt.figure(), plt.grid(True)
plt.plot(w1.freq_ev, np.abs(w1.sp))
plt.show()
| gpl-3.0 |
warner/python-spake2 | src/spake2/ed25519_basic.py | 1 | 12317 | import binascii, hashlib, itertools
from .groups import expand_arbitrary_element_seed
Q = 2**255 - 19
L = 2**252 + 27742317777372353535851937790883648493
def inv(x):
return pow(x, Q-2, Q)
d = -121665 * inv(121666)
I = pow(2,(Q-1)//4,Q)
def xrecover(y):
xx = (y*y-1) * inv(d*y*y+1)
x = pow(xx,(Q+3)//8,Q)
if (x*x - xx) % Q != 0: x = (x*I) % Q
if x % 2 != 0: x = Q-x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % Q,By % Q]
# Extended Coordinates: x=X/Z, y=Y/Z, x*y=T/Z
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
def xform_affine_to_extended(pt):
(x, y) = pt
return (x%Q, y%Q, 1, (x*y)%Q) # (X,Y,Z,T)
def xform_extended_to_affine(pt):
(x, y, z, _) = pt
return ((x*inv(z))%Q, (y*inv(z))%Q)
def double_element(pt): # extended->extended
# dbl-2008-hwcd
(X1, Y1, Z1, _) = pt
A = (X1*X1)
B = (Y1*Y1)
C = (2*Z1*Z1)
D = (-A) % Q
J = (X1+Y1) % Q
E = (J*J-A-B) % Q
G = (D+B) % Q
F = (G-C) % Q
H = (D-B) % Q
X3 = (E*F) % Q
Y3 = (G*H) % Q
Z3 = (F*G) % Q
T3 = (E*H) % Q
return (X3, Y3, Z3, T3)
def add_elements(pt1, pt2): # extended->extended
# add-2008-hwcd-3 . Slightly slower than add-2008-hwcd-4, but -3 is
# unified, so it's safe for general-purpose addition
(X1, Y1, Z1, T1) = pt1
(X2, Y2, Z2, T2) = pt2
A = ((Y1-X1)*(Y2-X2)) % Q
B = ((Y1+X1)*(Y2+X2)) % Q
C = T1*(2*d)*T2 % Q
D = Z1*2*Z2 % Q
E = (B-A) % Q
F = (D-C) % Q
G = (D+C) % Q
H = (B+A) % Q
X3 = (E*F) % Q
Y3 = (G*H) % Q
T3 = (E*H) % Q
Z3 = (F*G) % Q
return (X3, Y3, Z3, T3)
def scalarmult_element_safe_slow(pt, n):
# this form is slightly slower, but tolerates arbitrary points, including
# those which are not in the main 1*L subgroup. This includes points of
# order 1 (the neutral element Zero), 2, 4, and 8.
assert n >= 0
if n==0:
return xform_affine_to_extended((0,1))
_ = double_element(scalarmult_element_safe_slow(pt, n>>1))
return add_elements(_, pt) if n&1 else _
def _add_elements_nonunfied(pt1, pt2): # extended->extended
# add-2008-hwcd-4 : NOT unified, only for pt1!=pt2. About 10% faster than
# the (unified) add-2008-hwcd-3, and safe to use inside scalarmult if you
# aren't using points of order 1/2/4/8
(X1, Y1, Z1, T1) = pt1
(X2, Y2, Z2, T2) = pt2
A = ((Y1-X1)*(Y2+X2)) % Q
B = ((Y1+X1)*(Y2-X2)) % Q
C = (Z1*2*T2) % Q
D = (T1*2*Z2) % Q
E = (D+C) % Q
F = (B-A) % Q
G = (B+A) % Q
H = (D-C) % Q
X3 = (E*F) % Q
Y3 = (G*H) % Q
Z3 = (F*G) % Q
T3 = (E*H) % Q
return (X3, Y3, Z3, T3)
def scalarmult_element(pt, n): # extended->extended
# This form only works properly when given points that are a member of
# the main 1*L subgroup. It will give incorrect answers when called with
# the points of order 1/2/4/8, including point Zero. (it will also work
# properly when given points of order 2*L/4*L/8*L)
assert n >= 0
if n==0:
return xform_affine_to_extended((0,1))
_ = double_element(scalarmult_element(pt, n>>1))
return _add_elements_nonunfied(_, pt) if n&1 else _
# points are encoded as 32-bytes little-endian, b255 is sign, b2b1b0 are 0
def encodepoint(P):
x = P[0]
y = P[1]
# MSB of output equals x.b0 (=x&1)
# rest of output is little-endian y
assert 0 <= y < (1<<255) # always < 0x7fff..ff
if x & 1:
y += 1<<255
return binascii.unhexlify(("%064x" % y).encode("ascii"))[::-1]
def isoncurve(P):
x = P[0]
y = P[1]
return (-x*x + y*y - 1 - d*x*x*y*y) % Q == 0
class NotOnCurve(Exception):
pass
def decodepoint(s):
unclamped = int(binascii.hexlify(s[:32][::-1]), 16)
clamp = (1 << 255) - 1
y = unclamped & clamp # clear MSB
x = xrecover(y)
if bool(x & 1) != bool(unclamped & (1<<255)): x = Q-x
P = [x,y]
if not isoncurve(P): raise NotOnCurve("decoding point that is not on curve")
return P
# scalars are encoded as 32-bytes little-endian
def bytes_to_scalar(s):
assert len(s) == 32, len(s)
return int(binascii.hexlify(s[::-1]), 16)
def bytes_to_clamped_scalar(s):
# Ed25519 private keys clamp the scalar to ensure two things:
# 1: integer value is in L/2 .. L, to avoid small-logarithm
# non-wraparaound
# 2: low-order 3 bits are zero, so a small-subgroup attack won't learn
# any information
# set the top two bits to 01, and the bottom three to 000
a_unclamped = bytes_to_scalar(s)
AND_CLAMP = (1<<254) - 1 - 7
OR_CLAMP = (1<<254)
a_clamped = (a_unclamped & AND_CLAMP) | OR_CLAMP
return a_clamped
def random_scalar(entropy_f): # 0..L-1 inclusive
# reduce the bias to a safe level by generating 256 extra bits
oversized = int(binascii.hexlify(entropy_f(32+32)), 16)
return oversized % L
# unused, in favor of common HKDF approach in groups.py
#def password_to_scalar(pw):
# oversized = hashlib.sha512(pw).digest()
# return int(binascii.hexlify(oversized), 16) % L
def scalar_to_bytes(y):
y = y % L
assert 0 <= y < 2**256
return binascii.unhexlify(("%064x" % y).encode("ascii"))[::-1]
# Elements, of various orders
def is_extended_zero(XYTZ):
# catch Zero
(X, Y, Z, T) = XYTZ
Y = Y % Q
Z = Z % Q
if X==0 and Y==Z and Y!=0:
return True
return False
class ElementOfUnknownGroup:
# This is used for points of order 2,4,8,2*L,4*L,8*L
def __init__(self, XYTZ):
assert isinstance(XYTZ, tuple)
assert len(XYTZ) == 4
self.XYTZ = XYTZ
def add(self, other):
if not isinstance(other, ElementOfUnknownGroup):
raise TypeError("elements can only be added to other elements")
sum_XYTZ = add_elements(self.XYTZ, other.XYTZ)
if is_extended_zero(sum_XYTZ):
return Zero
return ElementOfUnknownGroup(sum_XYTZ)
def scalarmult(self, s):
if isinstance(s, ElementOfUnknownGroup):
raise TypeError("elements cannot be multiplied together")
assert s >= 0
product = scalarmult_element_safe_slow(self.XYTZ, s)
return ElementOfUnknownGroup(product)
def to_bytes(self):
return encodepoint(xform_extended_to_affine(self.XYTZ))
def __eq__(self, other):
return self.to_bytes() == other.to_bytes()
def __ne__(self, other):
return not self == other
class Element(ElementOfUnknownGroup):
# this only holds elements in the main 1*L subgroup. It never holds Zero,
# or elements of order 1/2/4/8, or 2*L/4*L/8*L.
def add(self, other):
if not isinstance(other, ElementOfUnknownGroup):
raise TypeError("elements can only be added to other elements")
sum_element = ElementOfUnknownGroup.add(self, other)
if sum_element is Zero:
return sum_element
if isinstance(other, Element):
# adding two subgroup elements results in another subgroup
# element, or Zero, and we've already excluded Zero
return Element(sum_element.XYTZ)
# not necessarily a subgroup member, so assume not
return sum_element
def scalarmult(self, s):
if isinstance(s, ElementOfUnknownGroup):
raise TypeError("elements cannot be multiplied together")
# scalarmult of subgroup members can be done modulo the subgroup
# order, and using the faster non-unified function.
s = s % L
# scalarmult(s=0) gets you Zero
if s == 0:
return Zero
# scalarmult(s=1) gets you self, which is a subgroup member
# scalarmult(s<grouporder) gets you a different subgroup member
return Element(scalarmult_element(self.XYTZ, s))
# negation and subtraction only make sense for the main subgroup
def negate(self):
# slow. Prefer e.scalarmult(-pw) to e.scalarmult(pw).negate()
return Element(scalarmult_element(self.XYTZ, L-2))
def subtract(self, other):
return self.add(other.negate())
class _ZeroElement(ElementOfUnknownGroup):
def add(self, other):
return other # zero+anything = anything
def scalarmult(self, s):
return self # zero*anything = zero
def negate(self):
return self # -zero = zero
def subtract(self, other):
return self.add(other.negate())
Base = Element(xform_affine_to_extended(B))
Zero = _ZeroElement(xform_affine_to_extended((0,1))) # the neutral (identity) element
_zero_bytes = Zero.to_bytes()
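# Illustrative sketch (not part of this module): a Diffie-Hellman style
# exchange using the Element API above; assumes os.urandom as the entropy
# source.
# import os
# a = random_scalar(os.urandom); A = Base.scalarmult(a)
# b = random_scalar(os.urandom); B2 = Base.scalarmult(b)
# A.scalarmult(b).to_bytes() == B2.scalarmult(a).to_bytes()  # -> True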
def arbitrary_element(seed): # unknown DL
# We don't strictly need the uniformity provided by hashing to an
# oversized string (128 bits more than the field size), then reducing
# down to Q. But it's comforting, and it's the same technique we use for
# converting passwords/seeds to scalars (which *does* need uniformity).
hseed = expand_arbitrary_element_seed(seed, (256/8)+16)
y = int(binascii.hexlify(hseed), 16) % Q
# we try successive Y values until we find a valid point
for plus in itertools.count(0):
y_plus = (y + plus) % Q
x = xrecover(y_plus)
Pa = [x,y_plus] # no attempt to use both "positive" and "negative" X
# only about 50% of Y coordinates map to valid curve points (I think
# the other half give you points on the "twist").
if not isoncurve(Pa):
continue
P = ElementOfUnknownGroup(xform_affine_to_extended(Pa))
# even if the point is on our curve, it may not be in our particular
# (order=L) subgroup. The curve has order 8*L, so an arbitrary point
# could have order 1,2,4,8,1*L,2*L,4*L,8*L (everything which divides
# the group order).
# [I MAY BE COMPLETELY WRONG ABOUT THIS, but my brief statistical
# tests suggest it's not too far off] There are phi(x) points with
# order x, so:
# 1 element of order 1: [(x=0,y=1)=Zero]
# 1 element of order 2 [(x=0,y=-1)]
# 2 elements of order 4
# 4 elements of order 8
# L-1 elements of order L (including Base)
# L-1 elements of order 2*L
# 2*(L-1) elements of order 4*L
# 4*(L-1) elements of order 8*L
# So 50% of random points will have order 8*L, 25% will have order
# 4*L, 13% order 2*L, and 13% will have our desired order 1*L (and a
# vanishingly small fraction will have 1/2/4/8). If we multiply any
# of the 8*L points by 2, we're sure to get an 4*L point (and
# multiplying a 4*L point by 2 gives us a 2*L point, and so on).
# Multiplying a 1*L point by 2 gives us a different 1*L point. So
# multiplying by 8 gets us from almost any point into a uniform point
# on the correct 1*L subgroup.
P8 = P.scalarmult(8)
# if we got really unlucky and picked one of the 8 low-order points,
# multiplying by 8 will get us to the identity (Zero), which we check
# for explicitly.
if is_extended_zero(P8.XYTZ):
continue
# Test that we're finally in the right group. We want to scalarmult
# by L, and we want to *not* use the trick in Group.scalarmult()
# which does x%L, because that would bypass the check we care about.
# P is still an _ElementOfUnknownGroup, which doesn't use x%L because
# that's not correct for points outside the main group.
assert is_extended_zero(P8.scalarmult(L).XYTZ)
return Element(P8.XYTZ)
# never reached
def bytes_to_unknown_group_element(bytes):
# this accepts all elements, including Zero and wrong-subgroup ones
if bytes == _zero_bytes:
return Zero
XYTZ = xform_affine_to_extended(decodepoint(bytes))
return ElementOfUnknownGroup(XYTZ)
def bytes_to_element(bytes):
# this strictly only accepts elements in the right subgroup
P = bytes_to_unknown_group_element(bytes)
if P is Zero:
raise ValueError("element was Zero")
if not is_extended_zero(P.scalarmult(L).XYTZ):
raise ValueError("element is not in the right group")
# the point is in the expected 1*L subgroup, not in the 2/4/8 groups,
# or in the 2*L/4*L/8*L groups. Promote it to a correct-group Element.
return Element(P.XYTZ)
| mit |
M0ses/ansible | v2/ansible/plugins/action/synchronize.py | 3 | 6877 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os.path
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
class ActionModule(ActionBase):
def _get_absolute_path(self, path):
if self._task._role is not None:
original_path = path
path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
if original_path and original_path[-1] == '/' and path[-1] != '/':
# make sure the dwim'd path ends in a trailing "/"
# if the original path did
path += '/'
return path
def _process_origin(self, host, path, user):
if not host in ['127.0.0.1', 'localhost']:
if user:
return '%s@%s:%s' % (user, host, path)
else:
return '%s:%s' % (host, path)
else:
if not ':' in path:
if not path.startswith('/'):
path = self._get_absolute_path(path=path)
return path
def _process_remote(self, host, task, path, user):
transport = self._connection_info.connection
return_data = None
if not host in ['127.0.0.1', 'localhost'] or transport != "local":
if user:
return_data = '%s@%s:%s' % (user, host, path)
else:
return_data = '%s:%s' % (host, path)
else:
return_data = path
if not ':' in return_data:
if not return_data.startswith('/'):
return_data = self._get_absolute_path(path=return_data)
return return_data
def run(self, tmp=None, task_vars=dict()):
''' generates params and passes them on to the rsync module '''
original_transport = task_vars.get('ansible_connection', self._connection_info.connection)
transport_overridden = False
if task_vars.get('delegate_to') is None:
task_vars['delegate_to'] = '127.0.0.1'
# IF original transport is not local, override transport and disable sudo.
if original_transport != 'local':
task_vars['ansible_connection'] = 'local'
transport_overridden = True
                # FIXME: self.runner is a v1 holdover and is not defined on
                # ActionBase; this assignment will raise AttributeError
                self.runner.sudo = False
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
# FIXME: this doesn't appear to be used anywhere?
local_rsync_path = task_vars.get('ansible_rsync_path')
# from the perspective of the rsync call the delegate is the localhost
src_host = '127.0.0.1'
dest_host = task_vars.get('ansible_ssh_host', task_vars.get('inventory_hostname'))
# allow ansible_ssh_host to be templated
dest_is_local = dest_host in ['127.0.0.1', 'localhost']
# CHECK FOR NON-DEFAULT SSH PORT
dest_port = self._task.args.get('dest_port')
inv_port = task_vars.get('ansible_ssh_port', task_vars.get('inventory_hostname'))
if inv_port != dest_port and inv_port != task_vars.get('inventory_hostname'):
dest_port = inv_port
# edge case: explicit delegate and dest_host are the same
if dest_host == task_vars.get('delegate_to'):
dest_host = '127.0.0.1'
# SWITCH SRC AND DEST PER MODE
if self._task.args.get('mode', 'push') == 'pull':
(dest_host, src_host) = (src_host, dest_host)
# CHECK DELEGATE HOST INFO
use_delegate = False
# FIXME: not sure if this is in connection info yet or not...
#if conn.delegate != conn.host:
# if 'hostvars' in task_vars:
# if conn.delegate in task_vars['hostvars'] and original_transport != 'local':
# # use a delegate host instead of localhost
# use_delegate = True
# COMPARE DELEGATE, HOST AND TRANSPORT
process_args = False
if not dest_host is src_host and original_transport != 'local':
# interpret and task_vars remote host info into src or dest
process_args = True
# MUNGE SRC AND DEST PER REMOTE_HOST INFO
if process_args or use_delegate:
user = None
if boolean(task_vars.get('set_remote_user', 'yes')):
if use_delegate:
                    # FIXME: 'conn' is undefined here; this branch is
                    # unreachable while use_delegate is hard-coded to False
                    user = task_vars['hostvars'][conn.delegate].get('ansible_ssh_user')
if not use_delegate or not user:
user = task_vars.get('ansible_ssh_user', self.runner.remote_user)
if use_delegate:
# FIXME
private_key = task_vars.get('ansible_ssh_private_key_file', self.runner.private_key_file)
else:
private_key = task_vars.get('ansible_ssh_private_key_file', self.runner.private_key_file)
if private_key is not None:
private_key = os.path.expanduser(private_key)
# use the mode to define src and dest's url
if self._task.args.get('mode', 'push') == 'pull':
# src is a remote path: <user>@<host>, dest is a local path
src = self._process_remote(src_host, src, user)
dest = self._process_origin(dest_host, dest, user)
else:
# src is a local path, dest is a remote path: <user>@<host>
src = self._process_origin(src_host, src, user)
dest = self._process_remote(dest_host, dest, user)
# Allow custom rsync path argument.
rsync_path = self._task.args.get('rsync_path', None)
# If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
if not rsync_path and transport_overridden and self._connection_info.become and self._connection_info.become_method == 'sudo' and not dest_is_local:
rsync_path = 'sudo rsync'
# make sure rsync path is quoted.
if rsync_path:
self._task.args['rsync_path'] = '"%s"' % rsync_path
# run the module and store the result
result = self._execute_module('synchronize')
return result
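def _example_rsync_spec(user, host, path):
    # a standalone sketch of the '<user>@<host>:<path>' formatting performed
    # by _process_origin/_process_remote above; the hosts and paths asserted
    # below are made-up examples
    if user:
        return '%s@%s:%s' % (user, host, path)
    return '%s:%s' % (host, path)
if __name__ == '__main__':
    assert _example_rsync_spec('deploy', 'web1', '/srv/app') == 'deploy@web1:/srv/app'
    assert _example_rsync_spec(None, 'web1', '/srv/app') == 'web1:/srv/app'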
| gpl-3.0 |
mitar/django | tests/modeltests/select_for_update/tests.py | 7 | 10812 | from __future__ import absolute_import
import sys
import time
from django.conf import settings
from django.db import transaction, connection
from django.db.utils import ConnectionHandler, DEFAULT_DB_ALIAS, DatabaseError
from django.test import (TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature)
from django.utils import unittest
from .models import Person
# Some tests require threading, which might not be available. So create a
# skip-test decorator for those test functions.
try:
import threading
except ImportError:
threading = None
requires_threading = unittest.skipUnless(threading, 'requires threading')
class SelectForUpdateTests(TransactionTestCase):
def setUp(self):
transaction.enter_transaction_management(True)
transaction.managed(True)
self.person = Person.objects.create(name='Reinhardt')
# We have to commit here so that code in run_select_for_update can
# see this data.
transaction.commit()
# We need another database connection to test that one connection
# issuing a SELECT ... FOR UPDATE will block.
new_connections = ConnectionHandler(settings.DATABASES)
self.new_connection = new_connections[DEFAULT_DB_ALIAS]
# We need to set settings.DEBUG to True so we can capture
# the output SQL to examine.
self._old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
try:
# We don't really care if this fails - some of the tests will set
# this in the course of their run.
transaction.managed(False)
transaction.leave_transaction_management()
except transaction.TransactionManagementError:
pass
self.new_connection.close()
settings.DEBUG = self._old_debug
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
def start_blocking_transaction(self):
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
result = self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.new_connection._rollback()
def has_for_update_sql(self, tested_connection, nowait=False):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = tested_connection.ops.for_update_sql(nowait)
sql = tested_connection.queries[-1]['sql']
return bool(sql.find(for_update_sql) > -1)
def check_exc(self, exc):
self.assertTrue(isinstance(exc, DatabaseError))
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
Test that the backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(connection))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
Test that the backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(connection, nowait=True))
# In Python 2.6 beta and some final releases, exceptions raised in __len__
# are swallowed (Python issue 1242657), so these cases return an empty
# list, rather than raising an exception. Not a lot we can do about that,
# unfortunately, due to the way Python handles list() calls internally.
# Python 2.6.1 is the "in the wild" version affected by this, so we skip
# the test for that version.
@requires_threading
@skipUnlessDBFeature('has_select_for_update_nowait')
@unittest.skipIf(sys.version_info[:3] == (2, 6, 1), "Python version is 2.6.1")
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.check_exc(status[-1])
# In Python 2.6 beta and some final releases, exceptions raised in __len__
# are swallowed (Python issue 1242657), so these cases return an empty
# list, rather than raising an exception. Not a lot we can do about that,
# unfortunately, due to the way Python handles list() calls internally.
# Python 2.6.1 is the "in the wild" version affected by this, so we skip
# the test for that version.
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
@unittest.skipIf(sys.version_info[:3] == (2, 6, 1), "Python version is 2.6.1")
def test_unsupported_nowait_raises_error(self):
"""
If a SELECT...FOR UPDATE NOWAIT is run on a database backend
that supports FOR UPDATE but not NOWAIT, then we should find
that a DatabaseError is raised.
"""
self.assertRaises(
DatabaseError,
list,
Person.objects.all().select_for_update(nowait=True)
)
def run_select_for_update(self, status, nowait=False):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
transaction.enter_transaction_management(True)
transaction.managed(True)
people = list(
Person.objects.all().select_for_update(nowait=nowait)
)
people[0].name = 'Fred'
people[0].save()
transaction.commit()
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@requires_threading
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
Check that a thread running a select_for_update that
accesses rows being touched by a similar operation
on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
            raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.isAlive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@requires_threading
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Check that running a raw query which can't obtain a FOR UPDATE lock
raises the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.check_exc(status[-1])
@skipUnlessDBFeature('has_select_for_update')
def test_transaction_dirty_managed(self):
""" Check that a select_for_update sets the transaction to be
dirty when executed under txn management. Setting the txn dirty
means that it will be either committed or rolled back by Django,
which will release any locks held by the SELECT FOR UPDATE.
"""
people = list(Person.objects.select_for_update())
self.assertTrue(transaction.is_dirty())
@skipUnlessDBFeature('has_select_for_update')
def test_transaction_not_dirty_unmanaged(self):
""" If we're not under txn management, the txn will never be
marked as dirty.
"""
transaction.managed(False)
transaction.leave_transaction_management()
people = list(Person.objects.select_for_update())
self.assertFalse(transaction.is_dirty())
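def _example_locked_update(pk):
    """A minimal sketch of the pattern exercised above, using the same
    Django 1.4-era transaction API; the pk argument is illustrative."""
    with transaction.commit_on_success():
        # lock the row, mutate it, and let the context manager commit,
        # which releases the SELECT ... FOR UPDATE lock
        person = Person.objects.select_for_update().get(pk=pk)
        person.name = 'Fred'
        person.save()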
| bsd-3-clause |
dakerfp/AutobahnPython | examples/twisted/wamp/basic/server.py | 10 | 4373 | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
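## A minimal application component (hedged sketch): a class of this shape can
## be loaded below via "--component <module>.<Class>". It is kept in comments
## so nothing Twisted-related is imported before install_reactor() runs; the
## module/class/procedure names are illustrative.
##
##   from autobahn.twisted.wamp import ApplicationSession
##   class ExampleTimeService(ApplicationSession):
##      def onJoin(self, details):
##         def utcnow():
##            from datetime import datetime
##            return datetime.utcnow().isoformat()
##         return self.register(utcnow, u'com.example.utcnow')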
if __name__ == '__main__':
import sys, argparse
from twisted.python import log
from twisted.internet.endpoints import serverFromString
## parse command line arguments
##
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action = "store_true",
help = "Enable debug output.")
parser.add_argument("-c", "--component", type = str, default = None,
help = "Start WAMP server with this application component, e.g. 'timeservice.TimeServiceBackend', or None.")
parser.add_argument("-r", "--realm", type = str, default = "realm1",
help = "The WAMP realm to start the component in (if any).")
parser.add_argument("--endpoint", type = str, default = "tcp:8080",
help = 'Twisted server endpoint descriptor, e.g. "tcp:8080" or "unix:/tmp/mywebsocket".')
parser.add_argument("--transport", choices = ['websocket', 'rawsocket-json', 'rawsocket-msgpack'], default = "websocket",
help = 'WAMP transport type')
args = parser.parse_args()
## start Twisted logging to stdout
##
if args.debug:
log.startLogging(sys.stdout)
## we use an Autobahn utility to install the "best" available Twisted reactor
##
from autobahn.twisted.choosereactor import install_reactor
reactor = install_reactor()
if args.debug:
print("Running on reactor {}".format(reactor))
## create a WAMP router factory
##
from autobahn.wamp.router import RouterFactory
router_factory = RouterFactory()
## create a WAMP router session factory
##
from autobahn.twisted.wamp import RouterSessionFactory
session_factory = RouterSessionFactory(router_factory)
## if asked to start an embedded application component ..
##
if args.component:
## dynamically load the application component ..
##
import importlib
c = args.component.split('.')
mod, klass = '.'.join(c[:-1]), c[-1]
app = importlib.import_module(mod)
SessionKlass = getattr(app, klass)
## .. and create and add an WAMP application session to
## run next to the router
##
from autobahn.wamp import types
session_factory.add(SessionKlass(types.ComponentConfig(realm = args.realm)))
if args.transport == "websocket":
## create a WAMP-over-WebSocket transport server factory
##
from autobahn.twisted.websocket import WampWebSocketServerFactory
transport_factory = WampWebSocketServerFactory(session_factory, debug_wamp = args.debug)
transport_factory.setProtocolOptions(failByDrop = False)
elif args.transport in ['rawsocket-json', 'rawsocket-msgpack']:
## create a WAMP-over-RawSocket transport server factory
##
if args.transport == 'rawsocket-msgpack':
from autobahn.wamp.serializer import MsgPackSerializer
serializer = MsgPackSerializer()
elif args.transport == 'rawsocket-json':
from autobahn.wamp.serializer import JsonSerializer
serializer = JsonSerializer()
else:
raise Exception("should not arrive here")
from autobahn.twisted.rawsocket import WampRawSocketServerFactory
transport_factory = WampRawSocketServerFactory(session_factory, serializer, debug = args.debug)
else:
raise Exception("should not arrive here")
## start the server from an endpoint
##
server = serverFromString(reactor, args.endpoint)
server.listen(transport_factory)
## now enter the Twisted reactor loop
##
reactor.run()
| apache-2.0 |
mitsei/dlkit | dlkit/abstract_osid/commenting/queries.py | 1 | 19767 | """Implementations of commenting abstract base class queries."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class CommentQuery:
"""This is the query for searching comments.
Each method specifies an ``AND`` term while multiple invocations of
the same method produce a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_reference_id(self, source_id, match):
"""Sets reference ``Id``.
:param source_id: a source ``Id``
:type source_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``source_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_reference_id_terms(self):
"""Clears the reference ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
reference_id_terms = property(fdel=clear_reference_id_terms)
@abc.abstractmethod
def match_commentor_id(self, resource_id, match):
"""Sets a resource ``Id`` to match a commentor.
:param resource_id: a resource ``Id``
:type resource_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_commentor_id_terms(self):
"""Clears the resource ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
commentor_id_terms = property(fdel=clear_commentor_id_terms)
@abc.abstractmethod
def supports_commentor_query(self):
"""Tests if a ``ResourceQuery`` is available.
:return: ``true`` if a resource query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_commentor_query(self):
"""Gets the query for a resource query.
Multiple retrievals produce a nested ``OR`` term.
:return: the resource query
:rtype: ``osid.resource.ResourceQuery``
:raise: ``Unimplemented`` -- ``supports_commentor_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_commentor_query()`` is ``true``.*
"""
return # osid.resource.ResourceQuery
commentor_query = property(fget=get_commentor_query)
@abc.abstractmethod
def clear_commentor_terms(self):
"""Clears the resource terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
commentor_terms = property(fdel=clear_commentor_terms)
@abc.abstractmethod
def match_commenting_agent_id(self, agent_id, match):
"""Sets an agent ``Id``.
:param agent_id: an agent ``Id``
:type agent_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_commenting_agent_id_terms(self):
"""Clears the agent ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
commenting_agent_id_terms = property(fdel=clear_commenting_agent_id_terms)
@abc.abstractmethod
def supports_commenting_agent_query(self):
"""Tests if an ``AgentQuery`` is available.
:return: ``true`` if an agent query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_commenting_agent_query(self):
"""Gets the query for an agent query.
Multiple retrievals produce a nested ``OR`` term.
:return: the agent query
:rtype: ``osid.authentication.AgentQuery``
:raise: ``Unimplemented`` -- ``supports_commenting_agent_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_commenting_agent_query()`` is ``true``.*
"""
return # osid.authentication.AgentQuery
commenting_agent_query = property(fget=get_commenting_agent_query)
@abc.abstractmethod
def clear_commenting_agent_terms(self):
"""Clears the agent terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
commenting_agent_terms = property(fdel=clear_commenting_agent_terms)
@abc.abstractmethod
def match_text(self, text, string_match_type, match):
"""Matches text.
:param text: the text
:type text: ``string``
:param string_match_type: a string match type
:type string_match_type: ``osid.type.Type``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``text`` is not of ``string_match_type``
:raise: ``NullArgument`` -- ``text`` is ``null``
:raise: ``Unsupported`` -- ``supports_string_match_type(string_match_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_text(self, match):
"""Matches a comment that has any text assigned.
:param match: ``true`` to match comments with any text, ``false`` to match comments with no text
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_text_terms(self):
"""Clears the text terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
text_terms = property(fdel=clear_text_terms)
@abc.abstractmethod
def match_rating_id(self, grade_id, match):
"""Sets a grade ``Id``.
:param grade_id: a grade ``Id``
:type grade_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_rating_id_terms(self):
"""Clears the rating ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
rating_id_terms = property(fdel=clear_rating_id_terms)
@abc.abstractmethod
def supports_rating_query(self):
"""Tests if a ``GradeQuery`` is available.
:return: ``true`` if a rating query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_rating_query(self):
"""Gets the query for a rating query.
Multiple retrievals produce a nested ``OR`` term.
:return: the rating query
:rtype: ``osid.grading.GradeQuery``
:raise: ``Unimplemented`` -- ``supports_rating_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_rating_query()`` is ``true``.*
"""
return # osid.grading.GradeQuery
rating_query = property(fget=get_rating_query)
@abc.abstractmethod
def match_any_rating(self, match):
"""Matches books with any rating.
:param match: ``true`` to match comments with any rating, ``false`` to match comments with no ratings
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_rating_terms(self):
"""Clears the rating terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
rating_terms = property(fdel=clear_rating_terms)
@abc.abstractmethod
def match_book_id(self, book_id, match):
"""Sets the book ``Id`` for this query to match comments assigned to books.
:param book_id: a book ``Id``
:type book_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``book_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_book_id_terms(self):
"""Clears the book ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
book_id_terms = property(fdel=clear_book_id_terms)
@abc.abstractmethod
def supports_book_query(self):
"""Tests if a ``BookQuery`` is available.
:return: ``true`` if a book query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_book_query(self):
"""Gets the query for a book query.
Multiple retrievals produce a nested ``OR`` term.
:return: the book query
:rtype: ``osid.commenting.BookQuery``
:raise: ``Unimplemented`` -- ``supports_book_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_book_query()`` is ``true``.*
"""
return # osid.commenting.BookQuery
book_query = property(fget=get_book_query)
@abc.abstractmethod
def clear_book_terms(self):
"""Clears the book terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
book_terms = property(fdel=clear_book_terms)
@abc.abstractmethod
def get_comment_query_record(self, comment_record_type):
"""Gets the comment query record corresponding to the given ``Comment`` record ``Type``.
Multiple record retrievals produce a nested ``OR`` term.
:param comment_record_type: a comment record type
:type comment_record_type: ``osid.type.Type``
:return: the comment query record
:rtype: ``osid.commenting.records.CommentQueryRecord``
:raise: ``NullArgument`` -- ``comment_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(comment_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.records.CommentQueryRecord
class BookQuery:
"""This is the query for searching books.
Each method specifies an ``AND`` term while multiple invocations of
the same method produce a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_comment_id(self, comment_id, match):
"""Sets the comment ``Id`` for this query to match comments assigned to books.
:param comment_id: a comment ``Id``
:type comment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``comment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_comment_id_terms(self):
"""Clears the comment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
comment_id_terms = property(fdel=clear_comment_id_terms)
@abc.abstractmethod
def supports_comment_query(self):
"""Tests if a comment query is available.
:return: ``true`` if a comment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_comment_query(self):
"""Gets the query for a comment.
:return: the comment query
:rtype: ``osid.commenting.CommentQuery``
:raise: ``Unimplemented`` -- ``supports_comment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_comment_query()`` is ``true``.*
"""
return # osid.commenting.CommentQuery
comment_query = property(fget=get_comment_query)
@abc.abstractmethod
def match_any_comment(self, match):
"""Matches books with any comment.
:param match: ``true`` to match books with any comment, ``false`` to match books with no comments
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_comment_terms(self):
"""Clears the comment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
comment_terms = property(fdel=clear_comment_terms)
@abc.abstractmethod
def match_ancestor_book_id(self, book_id, match):
"""Sets the book ``Id`` for this query to match books that have the specified book as an ancestor.
:param book_id: a book ``Id``
:type book_id: ``osid.id.Id``
:param match: ``true`` for a positive match, a ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``book_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_book_id_terms(self):
"""Clears the ancestor book ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_book_id_terms = property(fdel=clear_ancestor_book_id_terms)
@abc.abstractmethod
def supports_ancestor_book_query(self):
"""Tests if a ``BookQuery`` is available.
:return: ``true`` if a book query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_ancestor_book_query(self):
"""Gets the query for a book.
Multiple retrievals produce a nested ``OR`` term.
:return: the book query
:rtype: ``osid.commenting.BookQuery``
:raise: ``Unimplemented`` -- ``supports_ancestor_book_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_ancestor_book_query()`` is ``true``.*
"""
return # osid.commenting.BookQuery
ancestor_book_query = property(fget=get_ancestor_book_query)
@abc.abstractmethod
def match_any_ancestor_book(self, match):
"""Matches books with any ancestor.
:param match: ``true`` to match books with any ancestor, ``false`` to match root books
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_book_terms(self):
"""Clears the ancestor book terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_book_terms = property(fdel=clear_ancestor_book_terms)
@abc.abstractmethod
def match_descendant_book_id(self, book_id, match):
"""Sets the book ``Id`` for this query to match books that have the specified book as a descendant.
:param book_id: a book ``Id``
:type book_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``book_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_book_id_terms(self):
"""Clears the descendant book ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_book_id_terms = property(fdel=clear_descendant_book_id_terms)
@abc.abstractmethod
def supports_descendant_book_query(self):
"""Tests if a ``BookQuery`` is available.
:return: ``true`` if a book query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_descendant_book_query(self):
"""Gets the query for a book.
Multiple retrievals produce a nested ``OR`` term.
:return: the book query
:rtype: ``osid.commenting.BookQuery``
:raise: ``Unimplemented`` -- ``supports_descendant_book_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_descendant_book_query()`` is ``true``.*
"""
return # osid.commenting.BookQuery
descendant_book_query = property(fget=get_descendant_book_query)
@abc.abstractmethod
def match_any_descendant_book(self, match):
"""Matches books with any descendant.
:param match: ``true`` to match books with any descendant, ``false`` to match leaf books
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_book_terms(self):
"""Clears the descendant book terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_book_terms = property(fdel=clear_descendant_book_terms)
@abc.abstractmethod
def get_book_query_record(self, book_record_type):
"""Gets the book query record corresponding to the given ``Book`` record ``Type``.
Multiple record retrievals produce a nested boolean ``OR`` term.
:param book_record_type: a book record type
:type book_record_type: ``osid.type.Type``
:return: the book query record
:rtype: ``osid.commenting.records.BookQueryRecord``
:raise: ``NullArgument`` -- ``book_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(book_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.records.BookQueryRecord
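def _example_query_term_semantics():
    """A small, self-contained sketch (not part of the OSID specification) of
    the term semantics documented above: repeated invocations of one matcher
    produce a nested ``OR``, while distinct matchers combine with ``AND``."""
    terms = {}
    def match(field, value):
        terms.setdefault(field, []).append(value)
    match('text', 'great')        # text == 'great' ...
    match('text', 'awesome')      # ... OR text == 'awesome'
    match('rating_id', 'grade5')  # AND rating_id == 'grade5'
    return ' AND '.join(
        '(%s)' % ' OR '.join('%s=%r' % (field, v) for v in values)
        for field, values in sorted(terms.items()))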
| mit |
RadioFreeAsia/RDacity | lib-src/lv2/sratom/waflib/Context.py | 177 | 8376 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,imp,sys
from waflib import Utils,Errors,Logs
import waflib.Node
HEXVERSION=0x1071000
WAFVERSION="1.7.16"
WAFREVISION="73c1705078f8c9c51a33e20f221a309d5a94b5e1"
ABI=98
DBFILE='.wafpickle-%s-%d-%d'%(sys.platform,sys.hexversion,ABI)
APPNAME='APPNAME'
VERSION='VERSION'
TOP='top'
OUT='out'
WSCRIPT_FILE='wscript'
launch_dir=''
run_dir=''
top_dir=''
out_dir=''
waf_dir=''
local_repo=''
remote_repo='http://waf.googlecode.com/git/'
remote_locs=['waflib/extras','waflib/Tools']
g_module=None
STDOUT=1
STDERR=-1
BOTH=0
classes=[]
def create_context(cmd_name,*k,**kw):
global classes
for x in classes:
if x.cmd==cmd_name:
return x(*k,**kw)
ctx=Context(*k,**kw)
ctx.fun=cmd_name
return ctx
class store_context(type):
def __init__(cls,name,bases,dict):
super(store_context,cls).__init__(name,bases,dict)
name=cls.__name__
if name=='ctx'or name=='Context':
return
try:
cls.cmd
except AttributeError:
raise Errors.WafError('Missing command for the context class %r (cmd)'%name)
if not getattr(cls,'fun',None):
cls.fun=cls.cmd
global classes
classes.insert(0,cls)
ctx=store_context('ctx',(object,),{})
class Context(ctx):
errors=Errors
tools={}
def __init__(self,**kw):
try:
rd=kw['run_dir']
except KeyError:
global run_dir
rd=run_dir
self.node_class=type("Nod3",(waflib.Node.Node,),{})
self.node_class.__module__="waflib.Node"
self.node_class.ctx=self
self.root=self.node_class('',None)
self.cur_script=None
self.path=self.root.find_dir(rd)
self.stack_path=[]
self.exec_dict={'ctx':self,'conf':self,'bld':self,'opt':self}
self.logger=None
def __hash__(self):
return id(self)
def load(self,tool_list,*k,**kw):
tools=Utils.to_list(tool_list)
path=Utils.to_list(kw.get('tooldir',''))
for t in tools:
module=load_tool(t,path)
fun=getattr(module,kw.get('name',self.fun),None)
if fun:
fun(self)
def execute(self):
global g_module
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self,node):
self.stack_path.append(self.cur_script)
self.cur_script=node
self.path=node.parent
def post_recurse(self,node):
self.cur_script=self.stack_path.pop()
if self.cur_script:
self.path=self.cur_script.parent
def recurse(self,dirs,name=None,mandatory=True,once=True):
try:
cache=self.recurse_cache
except AttributeError:
cache=self.recurse_cache={}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
d=os.path.join(self.path.abspath(),d)
WSCRIPT=os.path.join(d,WSCRIPT_FILE)
WSCRIPT_FUN=WSCRIPT+'_'+(name or self.fun)
node=self.root.find_node(WSCRIPT_FUN)
if node and(not once or node not in cache):
cache[node]=True
self.pre_recurse(node)
try:
function_code=node.read('rU')
exec(compile(function_code,node.abspath(),'exec'),self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node=self.root.find_node(WSCRIPT)
tup=(node,name or self.fun)
if node and(not once or tup not in cache):
cache[tup]=True
self.pre_recurse(node)
try:
wscript_module=load_module(node.abspath())
user_function=getattr(wscript_module,(name or self.fun),None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %s defined in %s'%(name or self.fun,node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
raise Errors.WafError('No wscript file in directory %s'%d)
def exec_command(self,cmd,**kw):
subprocess=Utils.subprocess
kw['shell']=isinstance(cmd,str)
Logs.debug('runner: %r'%cmd)
Logs.debug('runner_env: kw=%s'%kw)
if self.logger:
self.logger.info(cmd)
if'stdout'not in kw:
kw['stdout']=subprocess.PIPE
if'stderr'not in kw:
kw['stderr']=subprocess.PIPE
try:
if kw['stdout']or kw['stderr']:
p=subprocess.Popen(cmd,**kw)
(out,err)=p.communicate()
ret=p.returncode
else:
out,err=(None,None)
ret=subprocess.Popen(cmd,**kw).wait()
except Exception ,e:
raise Errors.WafError('Execution failure: %s'%str(e),ex=e)
if out:
if not isinstance(out,str):
out=out.decode(sys.stdout.encoding or'iso8859-1')
if self.logger:
self.logger.debug('out: %s'%out)
else:
sys.stdout.write(out)
if err:
if not isinstance(err,str):
err=err.decode(sys.stdout.encoding or'iso8859-1')
if self.logger:
self.logger.error('err: %s'%err)
else:
sys.stderr.write(err)
return ret
def cmd_and_log(self,cmd,**kw):
subprocess=Utils.subprocess
kw['shell']=isinstance(cmd,str)
Logs.debug('runner: %r'%cmd)
if'quiet'in kw:
quiet=kw['quiet']
del kw['quiet']
else:
quiet=None
if'output'in kw:
to_ret=kw['output']
del kw['output']
else:
to_ret=STDOUT
kw['stdout']=kw['stderr']=subprocess.PIPE
if quiet is None:
self.to_log(cmd)
try:
p=subprocess.Popen(cmd,**kw)
(out,err)=p.communicate()
except Exception ,e:
raise Errors.WafError('Execution failure: %s'%str(e),ex=e)
if not isinstance(out,str):
out=out.decode(sys.stdout.encoding or'iso8859-1')
if not isinstance(err,str):
err=err.decode(sys.stdout.encoding or'iso8859-1')
if out and quiet!=STDOUT and quiet!=BOTH:
self.to_log('out: %s'%out)
if err and quiet!=STDERR and quiet!=BOTH:
self.to_log('err: %s'%err)
if p.returncode:
e=Errors.WafError('Command %r returned %r'%(cmd,p.returncode))
e.returncode=p.returncode
e.stderr=err
e.stdout=out
raise e
if to_ret==BOTH:
return(out,err)
elif to_ret==STDERR:
return err
return out
def fatal(self,msg,ex=None):
if self.logger:
self.logger.info('from %s: %s'%(self.path.abspath(),msg))
try:
msg='%s\n(complete log in %s)'%(msg,self.logger.handlers[0].baseFilename)
except Exception:
pass
raise self.errors.ConfigurationError(msg,ex=ex)
def to_log(self,msg):
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self,msg,result,color=None):
self.start_msg(msg)
if not isinstance(color,str):
color=result and'GREEN'or'YELLOW'
self.end_msg(result,color)
def start_msg(self,msg):
try:
if self.in_msg:
self.in_msg+=1
return
except AttributeError:
self.in_msg=0
self.in_msg+=1
try:
self.line_just=max(self.line_just,len(msg))
except AttributeError:
self.line_just=max(40,len(msg))
for x in(self.line_just*'-',msg):
self.to_log(x)
Logs.pprint('NORMAL',"%s :"%msg.ljust(self.line_just),sep='')
def end_msg(self,result,color=None):
self.in_msg-=1
if self.in_msg:
return
defcolor='GREEN'
if result==True:
msg='ok'
elif result==False:
msg='not found'
defcolor='YELLOW'
else:
msg=str(result)
self.to_log(msg)
Logs.pprint(color or defcolor,msg)
def load_special_tools(self,var,ban=[]):
global waf_dir
lst=self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py',''))
cache_modules={}
def load_module(path):
try:
return cache_modules[path]
except KeyError:
pass
module=imp.new_module(WSCRIPT_FILE)
try:
code=Utils.readf(path,m='rU')
except(IOError,OSError):
raise Errors.WafError('Could not read the file %r'%path)
module_dir=os.path.dirname(path)
sys.path.insert(0,module_dir)
exec(compile(code,path,'exec'),module.__dict__)
sys.path.remove(module_dir)
cache_modules[path]=module
return module
def load_tool(tool,tooldir=None):
if tool=='java':
tool='javaw'
elif tool=='compiler_cc':
tool='compiler_c'
else:
tool=tool.replace('++','xx')
if tooldir:
assert isinstance(tooldir,list)
sys.path=tooldir+sys.path
try:
__import__(tool)
ret=sys.modules[tool]
Context.tools[tool]=ret
return ret
finally:
for d in tooldir:
sys.path.remove(d)
else:
global waf_dir
try:
os.stat(os.path.join(waf_dir,'waflib','extras',tool+'.py'))
except OSError:
try:
os.stat(os.path.join(waf_dir,'waflib','Tools',tool+'.py'))
except OSError:
d=tool
else:
d='waflib.Tools.%s'%tool
else:
d='waflib.extras.%s'%tool
__import__(d)
ret=sys.modules[d]
Context.tools[tool]=ret
return ret
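def _example_wscript_source():
	# a minimal sketch (not part of waf) of the wscript content that
	# Context.recurse() compiles and executes: top-level functions named
	# after the command being run; the tool name below is illustrative
	return ("def configure(ctx):\n"
	"\tctx.load('compiler_c')\n"
	"def build(ctx):\n"
	"\tctx.to_log('building\\n')\n")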
| gpl-2.0 |
softak/webfaction_demo | apps/profiles/resources.py | 1 | 1364 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from tastypie import fields, http
from tastypie.authorization import Authorization
from friends.models import Friendship
from utils.tastypie_ import ModelResource
class UserResource(ModelResource):
name = fields.CharField()
avatar_small = fields.FileField(attribute='profile__avatar_small')
avatar_xsmall = fields.FileField(attribute='profile__avatar_xsmall')
is_friend = fields.BooleanField()
friendship_request_url = fields.CharField()
site_url = fields.CharField()
def dehydrate_friendship_request_url(self, bundle):
return reverse('friends.friendship_request', args=[bundle.obj.id])
def dehydrate_site_url(self, bundle):
return bundle.obj.profile.get_absolute_url()
def dehydrate_is_friend(self, bundle):
if bundle.request.user.is_authenticated():
return Friendship.objects.are_friends(bundle.obj, bundle.request.user)
else:
return False
def dehydrate_name(self, bundle):
return bundle.obj.get_full_name()
class Meta:
resource_name = 'user'
queryset = User.objects.all()
list_allowed_methods = []
detail_allowed_methods = ['get']
authorization = Authorization()
fields = ('name', 'avatar',)
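# A minimal wiring sketch, assuming the conventional tastypie Api container;
# the api_name is illustrative and the registration normally lives in urls.py.
from tastypie.api import Api
example_api = Api(api_name='v1')
example_api.register(UserResource())
# then include example_api.urls in the project's urlconf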
| bsd-3-clause |
402231466/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/_weakrefset.py | 766 | 5570 | # Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet:
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
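if __name__ == '__main__':
    # A minimal behavioral sketch: under CPython's reference counting the
    # weakref callback fires as soon as the last strong reference is dropped.
    class _Obj(object):
        pass
    ws = WeakSet()
    obj = _Obj()
    ws.add(obj)
    assert obj in ws and len(ws) == 1
    del obj                  # last strong reference gone
    assert len(ws) == 0      # entry discarded via the _remove callback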
| gpl-3.0 |
UniversalMasterEgg8679/ansible | lib/ansible/utils/helpers.py | 34 | 1270 | # (c) 2016, Ansible by Red Hat <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import string_types
def pct_to_int(value, num_items, min_value=1):
'''
Converts a given value to a percentage if specified as "x%",
otherwise converts the given value to an integer.
'''
if isinstance(value, string_types) and value.endswith('%'):
value_pct = int(value.replace("%",""))
return int((value_pct/100.0) * num_items) or min_value
else:
return int(value)
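if __name__ == '__main__':
    # a few concrete checks of pct_to_int's contract (values illustrative)
    assert pct_to_int('30%', 10) == 3
    assert pct_to_int('1%', 10) == 1   # 0.1 floors to 0, clamped to min_value
    assert pct_to_int(4, 10) == 4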
| gpl-3.0 |
garbled1/ansible | contrib/inventory/nagios_ndo.py | 74 | 3843 | #!/usr/bin/env python
# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Nagios NDO external inventory script.
========================================
Returns hosts and hostgroups from Nagios NDO.
Configuration is read from `nagios_ndo.ini`.
"""
import os
import argparse
try:
import configparser
except ImportError:
import ConfigParser
configparser = ConfigParser
import json
try:
from sqlalchemy import text
from sqlalchemy.engine import create_engine
except ImportError:
print("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
exit(1)
class NagiosNDOInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
if config.has_option('ndo', 'database_uri'):
self.ndo_database_uri = config.get('ndo', 'database_uri')
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def get_hosts(self):
engine = create_engine(self.ndo_database_uri)
connection = engine.connect()
select_hosts = text("SELECT display_name \
FROM nagios_hosts")
select_hostgroups = text("SELECT alias \
FROM nagios_hostgroups")
select_hostgroup_hosts = text("SELECT h.display_name \
FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
WHERE hgm.hostgroup_id = hg.hostgroup_id \
AND hgm.host_object_id = h.host_object_id \
AND hg.alias =:hostgroup_alias")
hosts = connection.execute(select_hosts)
self.result['all']['hosts'] = [host['display_name'] for host in hosts]
for hostgroup in connection.execute(select_hostgroups):
hostgroup_alias = hostgroup['alias']
self.result[hostgroup_alias] = {}
hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
def __init__(self):
self.defaultgroup = 'group_all'
self.ndo_database_uri = None
self.options = None
self.read_settings()
self.read_cli()
self.result = {}
self.result['all'] = {}
self.result['all']['hosts'] = []
self.result['_meta'] = {}
self.result['_meta']['hostvars'] = {}
if self.ndo_database_uri:
self.get_hosts()
if self.options.host:
print(json.dumps({}))
elif self.options.list:
print(json.dumps(self.result))
else:
print("usage: --list or --host HOSTNAME")
exit(1)
else:
print("Error: Database configuration is missing. See nagios_ndo.ini.")
exit(1)
NagiosNDOInventory()
| gpl-3.0 |
hkawasaki/kawasaki-aio8-2 | common/djangoapps/util/tests/test_memcache.py | 101 | 3655 | """
Tests for memcache in util app
"""
from django.test import TestCase
from django.core.cache import get_cache
from util.memcache import safe_key
class MemcacheTest(TestCase):
"""
Test memcache key cleanup
"""
# Test whitespace, control characters, and some non-ASCII UTF-16
UNICODE_CHAR_CODES = ([c for c in range(0, 30)] + [127] +
[129, 500, 2 ** 8 - 1, 2 ** 8 + 1, 2 ** 16 - 1])
def setUp(self):
self.cache = get_cache('default')
def test_safe_key(self):
key = safe_key('test', 'prefix', 'version')
self.assertEqual(key, 'prefix:version:test')
def test_numeric_inputs(self):
# Numeric key
self.assertEqual(safe_key(1, 'prefix', 'version'), 'prefix:version:1')
# Numeric prefix
self.assertEqual(safe_key('test', 5, 'version'), '5:version:test')
# Numeric version
self.assertEqual(safe_key('test', 'prefix', 5), 'prefix:5:test')
def test_safe_key_long(self):
# Choose lengths close to memcached's cutoff (250)
for length in [248, 249, 250, 251, 252]:
# Generate a key of that length
key = 'a' * length
# Make the key safe
key = safe_key(key, '', '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for key length {0}".format(length))
def test_long_key_prefix_version(self):
# Long key
key = safe_key('a' * 300, 'prefix', 'version')
self.assertTrue(self._is_valid_key(key))
# Long prefix
key = safe_key('key', 'a' * 300, 'version')
self.assertTrue(self._is_valid_key(key))
# Long version
key = safe_key('key', 'prefix', 'a' * 300)
self.assertTrue(self._is_valid_key(key))
def test_safe_key_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a key with that character
key = unichr(unicode_char)
# Make the key safe
key = safe_key(key, '', '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def test_safe_key_prefix_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a prefix with that character
prefix = unichr(unicode_char)
# Make the key safe
key = safe_key('test', prefix, '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def test_safe_key_version_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a version with that character
version = unichr(unicode_char)
# Make the key safe
key = safe_key('test', '', version)
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def _is_valid_key(self, key):
"""
Test that a key is memcache-compatible.
Based on Django's validator in core.cache.backends.base
"""
# Check the length
if len(key) > 250:
return False
# Check that there are no spaces or control characters
for char in key:
if ord(char) < 33 or ord(char) == 127:
return False
return True
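# A minimal usage sketch of safe_key outside the test suite (the inputs are
# illustrative): per the assertions above, the result is memcache-compatible
# even for long or non-ASCII keys.
#
# from util.memcache import safe_key
#
# cache_key = safe_key(u'user profile ' + unichr(500), 'myapp', 'v1')
# # no spaces or control characters, and len(cache_key) <= 250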
| agpl-3.0 |
dpiers/coderang-meteor | public/jsrepl/extern/python/reloop-closured/lib/python2.7/logging/config.py | 76 | 34326 | # Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, os, traceback, re
import types, cStringIO
try:
import thread
import threading
except ImportError:
thread = None
from SocketServer import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import ConfigParser
cp = ConfigParser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.readfp(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
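# Example ConfigParser-format file accepted by fileConfig() (a minimal sketch;
# the section and key names follow the _create_formatters/_install_handlers/
# _install_loggers parsers below):
#
# [loggers]
# keys=root
#
# [handlers]
# keys=console
#
# [formatters]
# keys=simple
#
# [logger_root]
# level=DEBUG
# handlers=console
#
# [handler_console]
# class=StreamHandler
# level=DEBUG
# formatter=simple
# args=(sys.stderr,)
#
# [formatter_simple]
# format=%(asctime)s %(levelname)s %(message)s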
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _encoded(s):
return s if isinstance(s, str) else s.encode('utf-8')
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp.get("formatters", "keys")
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
opts = cp.options(sectname)
if "format" in opts:
fs = cp.get(sectname, "format", 1)
else:
fs = None
if "datefmt" in opts:
dfs = cp.get(sectname, "datefmt", 1)
else:
dfs = None
c = logging.Formatter
if "class" in opts:
class_name = cp.get(sectname, "class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp.get("handlers", "keys")
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
sectname = "handler_%s" % hand
klass = cp.get(sectname, "class")
opts = cp.options(sectname)
if "formatter" in opts:
fmt = cp.get(sectname, "formatter")
else:
fmt = ""
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = cp.get(sectname, "args")
args = eval(args, vars(logging))
h = klass(*args)
if "level" in opts:
level = cp.get(sectname, "level")
h.setLevel(logging._levelNames[level])
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
if "target" in opts:
target = cp.get(sectname,"target")
else:
target = ""
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _install_loggers(cp, handlers, disable_existing_loggers):
"""Create and install loggers"""
# configure the root first
llist = cp.get("loggers", "keys")
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
sectname = "logger_root"
root = logging.root
log = root
opts = cp.options(sectname)
if "level" in opts:
level = cp.get(sectname, "level")
log.setLevel(logging._levelNames[level])
for h in root.handlers[:]:
root.removeHandler(h)
hlist = cp.get(sectname, "handlers")
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort(key=_encoded)
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
sectname = "logger_%s" % log
qn = cp.get(sectname, "qualname")
opts = cp.options(sectname)
if "propagate" in opts:
propagate = cp.getint(sectname, "propagate")
else:
propagate = 1
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in opts:
level = cp.get(sectname, "level")
logger.setLevel(logging._levelNames[level])
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = cp.get(sectname, "handlers")
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = 1
elif disable_existing_loggers:
logger.disabled = 1
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = __import__
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
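    # For illustration, with self.config == {'handlers': {'h1': {'level': 'DEBUG'}},
    # 'alist': [1, 2, 3]} (a hypothetical configuration), the cfg:// converter
    # resolves dotted and indexed paths:
    #
    #   cfg_convert('handlers.h1.level')  -> 'DEBUG'
    #   cfg_convert('alist[1]')           -> 2   (digit indexes are tried as ints first)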
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, basestring): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except StandardError, e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError, e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError, e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError, e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError, e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError, e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = root.manager.loggerDict.keys()
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort(key=_encoded)
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError, e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError, e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError, te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError, e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except StandardError, e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
factory = c
else:
klass = self.resolve(config.pop('class'))
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
config['target'] = self.config['handlers'][config['target']]
except StandardError, e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError, te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError, e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
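# Example dictionary accepted by dictConfig() (a minimal sketch of the
# version-1 schema that DictConfigurator handles above):
#
# dictConfig({
#     'version': 1,
#     'formatters': {'simple': {'format': '%(levelname)s %(message)s'}},
#     'handlers': {'console': {'class': 'logging.StreamHandler',
#                              'formatter': 'simple',
#                              'level': 'DEBUG'}},
#     'root': {'level': 'DEBUG', 'handlers': ['console']},
# })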
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread:
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
import tempfile
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
try:
import json
                        d = json.loads(chunk)
assert isinstance(d, dict)
dictConfig(d)
except:
#Apply new configuration.
file = cStringIO.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error, e:
if not isinstance(e.args, tuple):
raise
else:
errcode = e.args[0]
if errcode != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
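# Usage sketch for the listener (the port and file name are illustrative):
# start the server thread, then push a new configuration over the socket as a
# 4-byte big-endian length followed by the config bytes, matching
# ConfigStreamHandler.handle() above.
#
# t = listen(DEFAULT_LOGGING_CONFIG_PORT)
# t.start()
# ...
# import socket, struct
# conf = open('logging.ini').read()
# sock = socket.create_connection(('localhost', DEFAULT_LOGGING_CONFIG_PORT))
# sock.sendall(struct.pack('>L', len(conf)) + conf)
# sock.close()
# ...
# stopListening()
# t.join()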
| mit |
RobertoMalatesta/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/updatebase.py | 143 | 1902 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.api import users
from google.appengine.ext import webapp, db
class UpdateBase(webapp.RequestHandler):
def _int_from_request(self, name):
string_value = self.request.get(name)
try:
int_value = int(string_value)
return int_value
        except (ValueError, TypeError):
pass
return None
| bsd-3-clause |
nachandr/cfme_tests | cfme/tests/services/test_pxe_service_catalogs.py | 2 | 5962 | import fauxfactory
import pytest
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.pxe import get_pxe_server_from_config
from cfme.infrastructure.pxe import get_template_from_config
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils import testgen
from cfme.utils.blockers import BZ
from cfme.utils.conf import cfme_data
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.usefixtures('uses_infra_providers'),
test_requirements.service,
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider], required_fields=[
['provisioning', 'pxe_server'],
['provisioning', 'pxe_image'],
['provisioning', 'pxe_image_type'],
['provisioning', 'pxe_kickstart'],
['provisioning', 'pxe_template'],
['provisioning', 'datastore'],
['provisioning', 'host'],
['provisioning', 'pxe_root_password'],
['provisioning', 'vlan']
])
pargnames, pargvalues, pidlist = testgen.pxe_servers(metafunc)
pxe_server_names = [pval[0] for pval in pargvalues]
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(list(zip(argnames, argvalue_tuple)))
if args['provider'].type == "scvmm":
continue
pxe_server_name = args['provider'].data['provisioning']['pxe_server']
if pxe_server_name not in pxe_server_names:
continue
pxe_cust_template = args['provider'].data['provisioning']['pxe_kickstart']
if pxe_cust_template not in list(cfme_data.get('customization_templates', {}).keys()):
continue
new_idlist.append(idlist[i])
new_argvalues.append(argvalues[i])
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope='module')
def pxe_server(appliance, provider):
provisioning_data = provider.data['provisioning']
pxe_server_name = provisioning_data['pxe_server']
return get_pxe_server_from_config(pxe_server_name, appliance=appliance)
@pytest.fixture(scope='module')
def pxe_cust_template(appliance, provider):
provisioning_data = provider.data['provisioning']
pxe_cust_template = provisioning_data['pxe_kickstart']
return get_template_from_config(pxe_cust_template, create=True, appliance=appliance)
@pytest.fixture(scope="function")
def setup_pxe_servers_vm_prov(pxe_server, pxe_cust_template, provisioning):
if not pxe_server.exists():
pxe_server.create()
pxe_server.set_pxe_image_type(provisioning['pxe_image'], provisioning['pxe_image_type'])
@pytest.fixture(scope="function")
def catalog_item(appliance, provider, dialog, catalog, provisioning,
setup_pxe_servers_vm_prov):
# generate_tests makes sure these have values
pxe_template, host, datastore, pxe_server, pxe_image, pxe_kickstart, pxe_root_password,\
pxe_image_type, pxe_vlan = list(map(
provisioning.get, (
'pxe_template', 'host', 'datastore', 'pxe_server', 'pxe_image', 'pxe_kickstart',
'pxe_root_password', 'pxe_image_type', 'vlan'
)
))
provisioning_data = {
'catalog': {'catalog_name': {'name': pxe_template, 'provider': provider.name},
'provision_type': 'PXE',
'pxe_server': pxe_server,
'pxe_image': {'name': pxe_image},
'vm_name': random_vm_name('pxe_service')},
'environment': {'datastore_name': {'name': datastore},
'host_name': {'name': host}},
'customize': {'root_password': pxe_root_password,
'custom_template': {'name': pxe_kickstart}},
'network': {'vlan': partial_match(pxe_vlan)},
}
item_name = fauxfactory.gen_alphanumeric(15, start="cat_item_")
return appliance.collections.catalog_items.create(
provider.catalog_item_type,
name=item_name,
description="my catalog", display_in=True, catalog=catalog,
dialog=dialog, prov_data=provisioning_data)
@pytest.mark.meta(blockers=[BZ(1633516, forced_streams=['5.10'],
unblock=lambda provider: not provider.one_of(RHEVMProvider))])
@pytest.mark.usefixtures('setup_pxe_servers_vm_prov')
def test_pxe_servicecatalog(appliance, setup_provider, provider, catalog_item, request):
"""Tests RHEV PXE service catalog
Metadata:
test_flag: pxe, provision
Polarion:
assignee: nansari
casecomponent: Services
initialEstimate: 1/4h
"""
vm_name = catalog_item.prov_data['catalog']["vm_name"]
request.addfinalizer(
lambda: appliance.collections.infra_vms.instantiate(
f"{vm_name}0001", provider).cleanup_on_provider()
)
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
service_catalogs.order()
# nav to requests page happens on successful provision
logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
request_description = catalog_item.name
provision_request = appliance.collections.requests.instantiate(request_description,
partial_check=True)
provision_request.wait_for_request(num_sec=3600)
msg = f"Provisioning failed with the message {provision_request.rest.message}"
assert provision_request.is_succeeded(), msg
| gpl-2.0 |
moijes12/oh-mainline | vendor/packages/requests/requests/packages/chardet/constants.py | 3008 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
| agpl-3.0 |
MarkWh1te/xueqiu_predict | p3_env/lib/python3.5/site-packages/sqlalchemy/engine/util.py | 55 | 2338 | # engine/util.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import util
def connection_memoize(key):
"""Decorator, memoize a function in a connection.info stash.
Only applicable to functions which take no arguments other than a
connection. The memo will be stored in ``connection.info[key]``.
"""
@util.decorator
def decorated(fn, self, connection):
connection = connection.connect()
try:
return connection.info[key]
except KeyError:
connection.info[key] = val = fn(self, connection)
return val
return decorated
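# A minimal usage sketch (the memo key and dialect call are illustrative): the
# wrapped method is computed once per connection and the result is cached in
# connection.info['_table_names'].
#
# @connection_memoize('_table_names')
# def table_names(self, connection):
#     return connection.engine.dialect.get_table_names(connection)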
def py_fallback():
def _distill_params(multiparams, params):
"""Given arguments from the calling form *multiparams, **params,
return a list of bind parameter structures, usually a list of
dictionaries.
In the case of 'raw' execution which accepts positional parameters,
it may be a list of tuples or lists.
"""
if not multiparams:
if params:
return [params]
else:
return []
elif len(multiparams) == 1:
zero = multiparams[0]
if isinstance(zero, (list, tuple)):
if not zero or hasattr(zero[0], '__iter__') and \
not hasattr(zero[0], 'strip'):
# execute(stmt, [{}, {}, {}, ...])
# execute(stmt, [(), (), (), ...])
return zero
else:
# execute(stmt, ("value", "value"))
return [zero]
elif hasattr(zero, 'keys'):
# execute(stmt, {"key":"value"})
return [zero]
else:
# execute(stmt, "value")
return [[zero]]
else:
if hasattr(multiparams[0], '__iter__') and \
not hasattr(multiparams[0], 'strip'):
return multiparams
else:
return [multiparams]
return locals()
try:
from sqlalchemy.cutils import _distill_params
except ImportError:
globals().update(py_fallback())
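# For illustration, _distill_params normalizes the execute() calling forms
# documented in the comments above:
#
#   _distill_params((), {'x': 1})                  -> [{'x': 1}]
#   _distill_params(([{'x': 1}, {'x': 2}],), {})   -> [{'x': 1}, {'x': 2}]
#   _distill_params(('a', 'b'), {})                -> [('a', 'b')]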
| mit |
FCP-INDI/nipype | nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py | 12 | 1767 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ......testing import assert_equal
from ..fiberprocess import fiberprocess
def test_fiberprocess_inputs():
input_map = dict(args=dict(argstr='%s',
),
displacement_field=dict(argstr='--displacement_field %s',
),
environ=dict(nohash=True,
usedefault=True,
),
fiber_file=dict(argstr='--fiber_file %s',
),
fiber_output=dict(argstr='--fiber_output %s',
hash_files=False,
),
fiber_radius=dict(argstr='--fiber_radius %f',
),
h_field=dict(argstr='--h_field %s',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
index_space=dict(argstr='--index_space ',
),
noDataChange=dict(argstr='--noDataChange ',
),
no_warp=dict(argstr='--no_warp ',
),
saveProperties=dict(argstr='--saveProperties ',
),
tensor_volume=dict(argstr='--tensor_volume %s',
),
terminal_output=dict(nohash=True,
),
verbose=dict(argstr='--verbose ',
),
voxel_label=dict(argstr='--voxel_label %d',
),
voxelize=dict(argstr='--voxelize %s',
hash_files=False,
),
voxelize_count_fibers=dict(argstr='--voxelize_count_fibers ',
),
)
inputs = fiberprocess.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_fiberprocess_outputs():
output_map = dict(fiber_output=dict(),
voxelize=dict(),
)
outputs = fiberprocess.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
Yajo/website | website_logo/models/website.py | 16 | 1221 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class Website(models.Model):
_inherit = 'website'
logo = fields.Binary(
string="Website logo",
help="This field holds the logo for this website, showed in header. "
"Recommended size is 180x50")
| agpl-3.0 |
zx8/youtube-dl | youtube_dl/extractor/lifenews.py | 6 | 6729 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
determine_ext,
int_or_none,
unified_strdate,
ExtractorError,
)
class LifeNewsIE(InfoExtractor):
IE_NAME = 'lifenews'
IE_DESC = 'LIFE | NEWS'
_VALID_URL = r'http://lifenews\.ru/(?:mobile/)?(?P<section>news|video)/(?P<id>\d+)'
_TESTS = [{
'url': 'http://lifenews.ru/news/126342',
'md5': 'e1b50a5c5fb98a6a544250f2e0db570a',
'info_dict': {
'id': '126342',
'ext': 'mp4',
'title': 'МВД разыскивает мужчин, оставивших в IKEA сумку с автоматом',
'description': 'Камеры наблюдения гипермаркета зафиксировали троих мужчин, спрятавших оружейный арсенал в камере хранения.',
'thumbnail': 're:http://.*\.jpg',
'upload_date': '20140130',
}
}, {
# video in <iframe>
'url': 'http://lifenews.ru/news/152125',
'md5': '77d19a6f0886cd76bdbf44b4d971a273',
'info_dict': {
'id': '152125',
'ext': 'mp4',
'title': 'В Сети появилось видео захвата «Правым сектором» колхозных полей ',
'description': 'Жители двух поселков Днепропетровской области не простили радикалам угрозу лишения плодородных земель и пошли в лобовую. ',
'upload_date': '20150402',
'uploader': 'embed.life.ru',
}
}, {
'url': 'http://lifenews.ru/news/153461',
'md5': '9b6ef8bc0ffa25aebc8bdb40d89ab795',
'info_dict': {
'id': '153461',
'ext': 'mp4',
'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве',
'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.',
'upload_date': '20150505',
'uploader': 'embed.life.ru',
}
}, {
'url': 'http://lifenews.ru/video/13035',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
section = mobj.group('section')
webpage = self._download_webpage(
'http://lifenews.ru/%s/%s' % (section, video_id),
video_id, 'Downloading page')
videos = re.findall(r'<video.*?poster="(?P<poster>[^"]+)".*?src="(?P<video>[^"]+)".*?></video>', webpage)
iframe_link = self._html_search_regex(
'<iframe[^>]+src=["\']([^"\']+)["\']', webpage, 'iframe link', default=None)
if not videos and not iframe_link:
raise ExtractorError('No media links available for %s' % video_id)
title = self._og_search_title(webpage)
TITLE_SUFFIX = ' - Первый по срочным новостям — LIFE | NEWS'
if title.endswith(TITLE_SUFFIX):
title = title[:-len(TITLE_SUFFIX)]
description = self._og_search_description(webpage)
view_count = self._html_search_regex(
r'<div class=\'views\'>\s*(\d+)\s*</div>', webpage, 'view count', fatal=False)
comment_count = self._html_search_regex(
r'<div class=\'comments\'>\s*<span class=\'counter\'>\s*(\d+)\s*</span>', webpage, 'comment count', fatal=False)
upload_date = self._html_search_regex(
r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
if upload_date is not None:
upload_date = unified_strdate(upload_date)
common_info = {
'description': description,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
'upload_date': upload_date,
}
def make_entry(video_id, media, video_number=None):
cur_info = dict(common_info)
cur_info.update({
'id': video_id,
'url': media[1],
'thumbnail': media[0],
'title': title if video_number is None else '%s-video%s' % (title, video_number),
})
return cur_info
if iframe_link:
iframe_link = self._proto_relative_url(iframe_link, 'http:')
cur_info = dict(common_info)
cur_info.update({
'_type': 'url_transparent',
'id': video_id,
'title': title,
'url': iframe_link,
})
return cur_info
if len(videos) == 1:
return make_entry(video_id, videos[0])
else:
return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]
class LifeEmbedIE(InfoExtractor):
IE_NAME = 'life:embed'
_VALID_URL = r'http://embed\.life\.ru/embed/(?P<id>[\da-f]{32})'
_TEST = {
'url': 'http://embed.life.ru/embed/e50c2dec2867350528e2574c899b8291',
'md5': 'b889715c9e49cb1981281d0e5458fbbe',
'info_dict': {
'id': 'e50c2dec2867350528e2574c899b8291',
'ext': 'mp4',
'title': 'e50c2dec2867350528e2574c899b8291',
'thumbnail': 're:http://.*\.jpg',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = []
for video_url in re.findall(r'"file"\s*:\s*"([^"]+)', webpage):
video_url = compat_urlparse.urljoin(url, video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='m3u8'))
else:
formats.append({
'url': video_url,
'format_id': ext,
'preference': 1,
})
self._sort_formats(formats)
thumbnail = self._search_regex(
r'"image"\s*:\s*"([^"]+)', webpage, 'thumbnail', default=None)
return {
'id': video_id,
'title': video_id,
'thumbnail': thumbnail,
'formats': formats,
}
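# A minimal usage sketch through youtube-dl's public API (the URL is taken
# from the LifeEmbedIE test case above):
#
# import youtube_dl
# ydl = youtube_dl.YoutubeDL({'skip_download': True})
# info = ydl.extract_info(
#     'http://embed.life.ru/embed/e50c2dec2867350528e2574c899b8291',
#     download=False)
# print(info['id'], info['ext'])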
| unlicense |
jessefeinman/FintechHackathon | venv/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py | 505 | 1421 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from . import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
def getNodeDetails(self, node):
if node.nodeType == Node.DOCUMENT_TYPE_NODE:
return _base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
return _base.TEXT, node.nodeValue
elif node.nodeType == Node.ELEMENT_NODE:
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
if attr.namespaceURI:
attrs[(attr.namespaceURI, attr.localName)] = attr.value
else:
attrs[(None, attr.name)] = attr.value
return (_base.ELEMENT, node.namespaceURI, node.nodeName,
attrs, node.hasChildNodes())
elif node.nodeType == Node.COMMENT_NODE:
return _base.COMMENT, node.nodeValue
elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
return (_base.DOCUMENT,)
else:
return _base.UNKNOWN, node.nodeType
def getFirstChild(self, node):
return node.firstChild
def getNextSibling(self, node):
return node.nextSibling
def getParentNode(self, node):
return node.parentNode
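# For illustration, the walker can be driven over a minidom tree; iterating it
# yields token dicts produced by the NonRecursiveTreeWalker base class from the
# tuples returned by getNodeDetails() above:
#
# from xml.dom.minidom import parseString
# doc = parseString('<p class="x">hi</p>')
# for token in TreeWalker(doc):
#     print(token['type'])   # StartTag, Characters, EndTag, ...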
| bsd-2-clause |
fusionpig/ansible | lib/ansible/plugins/connections/libvirt_lxc.py | 140 | 5234 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import subprocess
from ansible import errors
from ansible.callbacks import vvv
import ansible.constants as C
class Connection(object):
''' Local lxc based connections '''
def _search_executable(self, executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
            raise errors.AnsibleError("%s command not found in PATH" % executable)
return cmd
def _check_domain(self, domain):
p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode:
raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)
def __init__(self, runner, host, port, *args, **kwargs):
self.lxc = host
self.cmd = self._search_executable('virsh')
self._check_domain(host)
self.runner = runner
self.host = host
# port is unused, since this is local
self.port = port
self.become_methods_supported=C.BECOME_METHODS
def connect(self, port=None):
''' connect to the lxc; nothing to do here '''
vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
return self
def _generate_cmd(self, executable, cmd):
if executable:
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd]
else:
local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
return local_cmd
def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the chroot '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
# We ignore privilege escalation!
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.lxc)
p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr)
def _normalize_path(self, path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
out_path = self._normalize_path(out_path, '/')
vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/tee', out_path]
vvv("EXEC %s" % (local_cmd), host=self.lxc)
p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(open(in_path,'rb').read())
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
in_path = self._normalize_path(in_path, '/')
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
vvv("EXEC %s" % (local_cmd), host=self.lxc)
p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
open(out_path,'wb').write(stdout)
def close(self):
''' terminate the connection; nothing to do here '''
pass
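# For illustration, exec_command('uptime', ..., executable='/bin/sh') on a
# container named "c1" ends up running roughly:
#
#   virsh -q -c lxc:/// lxc-enter-namespace c1 -- /bin/sh -c uptime
#
# as assembled by _generate_cmd() above.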
| gpl-3.0 |
llautert/psychoPYTHON | models.py | 1 | 9417 | from time import time
from sklearn import *
import matplotlib as mpl
mpl.use('Agg')
from utils import plot_learning_curve
from keras.losses import *
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import SGD
from keras.utils.np_utils import to_categorical
from multiprocessing import Pool
def deep_1net(X, y):
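    # A minimal one-hidden-layer Keras baseline: split the data, one-hot encode
    # the labels, train briefly, and report loss/accuracy/MAE on the held-out set.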
# split data
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.33, random_state=42)
    # one-hot encode the labels
y_test = to_categorical(y_test)
y_train = to_categorical(y_train)
model = Sequential()
model.add(Dense(units=3, input_dim=2, kernel_initializer='normal', activation='relu'))
model.compile(loss=mean_absolute_error,
optimizer=SGD(lr=0.01),
metrics=['accuracy', 'mae'])
model.fit(X_train, y_train, epochs=10, batch_size=30, verbose=1, validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, verbose=1)
model.save("deep1net.h5")
print('\nTest loss:', score[0])
print('Test accuracy:', score[1])
print('Test mae:', score[2])
def select_models(X_train, type_pred, is_labeled_data, is_text_data, is_number_categories_known,
is_few_important_features, is_just_looking):
names = []
models = []
if type_pred == 'category':
if is_labeled_data:
# classification
names.append("SVM")
models.append(svm.SVC())
if is_text_data:
names += ["GaussianNB",
"MultinomialNB",
"BernoulliNB"]
models += [naive_bayes.GaussianNB(),
naive_bayes.MultinomialNB(),
naive_bayes.BernoulliNB()]
else:
names += ["KNeighborsClassifier",
"LinearSVMClassifier",
"AdaboostClassifier",
"BaggingClassifier",
"ExtraTreesClassifier",
"GradientBoostingClassifier",
"RandomForestClassifier",
"SGDClassifier",
"AdditiveChi2Sampler",
"Nystroem",
"RBFSampler",
"SkewedChi2Sampler",
"LogisticRegression"]
models += [neighbors.KNeighborsClassifier(),
svm.LinearSVC(max_iter=10),
ensemble.AdaBoostClassifier(),
ensemble.BaggingClassifier(),
ensemble.ExtraTreesClassifier(),
ensemble.GradientBoostingClassifier(),
ensemble.RandomForestClassifier(),
linear_model.SGDClassifier(),
kernel_approximation.AdditiveChi2Sampler(),
kernel_approximation.Nystroem(),
kernel_approximation.RBFSampler(),
kernel_approximation.SkewedChi2Sampler(),
linear_model.LogisticRegression()]
elif is_number_categories_known:
# clustering
names += ["KMeans",
"MiniBatchKMeans",
"GMM"]
            models += [cluster.KMeans(),
                       cluster.MiniBatchKMeans(),
                       mixture.GMM()]
else:
names += ["MeanShift",
"VBGMM"]
models += [cluster.MeanShift(),
mixture.VBGMM()]
elif type_pred == "quantity":
# regression
# names.append("SGDRegressor")
# models.append(linear_model.SGDRegressor())
if is_few_important_features:
names += ["Lasso",
"ElasticNet"]
models += [linear_model.Lasso(),
linear_model.ElasticNet()]
else:
names += ["Ridge",
"LinearSVMRegressor",
"RBFSVMRegressor",
"AdaboostRegressor",
"BaggingRegressor",
"ExtraTreesRegressor",
"GradientBoostingRegressor",
"RandomForestRegressor"]
models += [linear_model.Ridge(),
svm.LinearSVR(max_iter=10),
svm.SVR(kernel='rbf', max_iter=10),
ensemble.AdaBoostRegressor(n_estimators=10),
ensemble.BaggingRegressor(n_jobs=-1),
ensemble.ExtraTreesRegressor(n_jobs=-1),
ensemble.GradientBoostingRegressor(n_estimators=10),
ensemble.RandomForestRegressor(n_jobs=-1)]
elif is_just_looking:
# dimensional reduction
names.append("RandomizedPCA")
models.append(decomposition.RandomizedPCA())
names += ["Isomap",
"SpectalEmbedding",
"LocallyLinearEmbedding",
"AdditiveChi2Sampler",
"Nystroem",
"RBFSampler",
"SkewedChi2Sampler"]
models += [manifold.Isomap(),
                   manifold.SpectralEmbedding(),
manifold.LocallyLinearEmbedding(),
kernel_approximation.AdditiveChi2Sampler(),
kernel_approximation.Nystroem(),
kernel_approximation.RBFSampler(),
kernel_approximation.SkewedChi2Sampler()]
else:
print("tough luck")
return names, models
def train_test_model(args):
name, m1, X, y, X_train, y_train, X_test, y_test, type_pred, is_number_categories_known, is_labeled_data = args
print(m1)
if not is_number_categories_known:
plot_learning_curve(m1, X, y, name, ylim=(0.7, 1.01))
# with parallel_backend('distributed', scheduler_host='localhost:8786', scatter=[X_train, Y_train]):
m1.fit(X_train, y_train)
externals.joblib.dump(m1, "%s.pkl" % name, compress=9)
y_pred = m1.predict(X_test)
results = open('out_%s' % (name), 'w')
if type_pred == 'category' and is_labeled_data:
# classification metrics
results.write("Accuracy Score: %.2f\n" % (metrics.accuracy_score(y_test, y_pred)))
results.write("F1 Score: %.2f\n" % (metrics.f1_score(y_test, y_pred, average="weighted")))
results.write("Precision Score: %.2f\n" % (metrics.precision_score(y_test, y_pred, average="weighted")))
results.write("Recall Score: %.2f\n" % (metrics.recall_score(y_test, y_pred, average="weighted")))
elif type_pred == 'category' and is_number_categories_known:
# clusterization
results.write("Completeness Score: %.2f\n" % (metrics.completeness_score(y_test, y_pred)))
results.write("Homogeneity Score: %.2f\n" % (metrics.homogeneity_score(y_test, y_pred)))
results.write("V-Measure Score: %.2f\n" % (metrics.v_measure_score(y_test, y_pred)))
results.write(
"Adjusted Mutual Information Score: %.2f\n" % (metrics.adjusted_mutual_info_score(y_test, y_pred)))
results.write("Fowlkes-Mallows index (FMI): %.2f\n" % (metrics.fowlkes_mallows_score(y_test, y_pred)))
elif type_pred == 'quantity':
# regression
results.write("R2 Score: %.2f\n" % (metrics.r2_score(y_test, y_pred)))
results.write("Explained Variance Score: %.2f\n" % (metrics.explained_variance_score(y_test, y_pred)))
results.write("Mean Absolute Error: %.2f\n" % (metrics.mean_absolute_error(y_test, y_pred)))
results.write("Mean Squared Error: %.2f\n" % (metrics.mean_squared_error(y_test, y_pred)))
results.write("Median Absolute Error: %.2f\n" % (metrics.median_absolute_error(y_test, y_pred)))
results.close()
def autoscikit(X, y, type_pred='category', is_labeled_data=False, is_text_data=False, is_number_categories_known=False,
is_few_important_features=False, is_just_looking=False):
start_time = time()
models = []
names = []
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.33, random_state=42)
if X_train.shape[0] <= 50:
print("Error few data")
return
names, models = select_models(X_train, type_pred, is_labeled_data, is_text_data, is_number_categories_known,
is_few_important_features, is_just_looking)
pool = Pool(processes=4)
    # parallelizing the models
# register_parallel_backend('distributed', DistributedBackend)
sequence = []
# cross validation
externals.joblib.dump((X, y, X_train, y_train, X_test, y_test), "dataset.pkl", compress=9)
for name, m1 in zip(names, models):
sequence.append([name, m1, X, y, X_train, y_train, X_test, y_test, type_pred, is_number_categories_known,
is_labeled_data])
pool.map(train_test_model, sequence)
pool.close()
pool.join()
end_time = time() - start_time
print("Total elapsed time: %.2f seconds" % end_time)
| mit |
zstackio/zstack-woodpecker | integrationtest/vm/simulator/iam2/test_iam2_no_delete_admin_query.py | 1 | 15753 | '''
cover ZSTAC-22633
1.create nodelete vid
2.create,query,delete test
@author: zhaohao.chen
'''
import os
import zstackwoodpecker.test_util as test_util
import apibinding.inventory as inventory
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.iam2_operations as iam2_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import zstackwoodpecker.operations.vxlan_operations as vxlan_ops
import zstackwoodpecker.operations.zone_operations as zone_ops
import zstackwoodpecker.test_lib as test_lib
import time
import hashlib
virtual_id_uuid = None
zone_uuid = None
test_stub = test_lib.lib_get_test_stub()
statements = [{"name":"test","effect":"Deny","actions":["org.zstack.v2v.APIDeleteV2VConversionHostMsg","org.zstack.scheduler.APIRemoveSchedulerJobFromSchedulerTriggerMsg","org.zstack.header.hybrid.account.APIDeleteHybridKeySecretMsg","org.zstack.iam2.api.APIRemoveIAM2VirtualIDsFromOrganizationMsg","org.zstack.header.longjob.APIDeleteLongJobMsg","org.zstack.header.cloudformation.APIDeleteResourceStackMsg","org.zstack.monitoring.APIDetachMonitorTriggerActionFromTriggerMsg","org.zstack.iam2.api.APIDeleteIAM2ProjectTemplateMsg","org.zstack.header.cloudformation.APIDeleteStackTemplateMsg","org.zstack.header.aliyun.storage.disk.APIDeleteAliyunDiskFromLocalMsg","org.zstack.header.identity.APILogOutMsg","org.zstack.autoscaling.group.APIDeleteAutoScalingGroupMsg","org.zstack.header.aliyun.network.vpc.APIDeleteEcsVSwitchRemoteMsg","org.zstack.network.securitygroup.APIDeleteSecurityGroupMsg","org.zstack.iam2.api.APIRemoveAttributesFromIAM2VirtualIDGroupMsg","org.zstack.header.hybrid.network.vpn.APIDeleteVpcVpnGatewayLocalMsg","org.zstack.header.storage.primary.APIDetachPrimaryStorageFromClusterMsg","org.zstack.header.hybrid.backup.APIDeleteBackupFileInPublicMsg","org.zstack.header.storage.database.backup.APIDeleteExportedDatabaseBackupFromBackupStorageMsg","org.zstack.iam2.api.APIRemoveRolesFromIAM2VirtualIDGroupMsg","org.zstack.header.aliyun.network.group.APIDeleteEcsSecurityGroupRuleRemoteMsg","org.zstack.header.vm.APIDeleteVmNicMsg","org.zstack.header.affinitygroup.APIDeleteAffinityGroupMsg","org.zstack.header.zone.APIDeleteZoneMsg","org.zstack.sns.APIDeleteSNSApplicationPlatformMsg","org.zstack.header.aliyun.storage.snapshot.APIDeleteAliyunSnapshotFromRemoteMsg","org.zstack.vrouterRoute.APIDeleteVRouterRouteTableMsg","org.zstack.pciDevice.APIDeletePciDeviceMsg","org.zstack.header.network.l3.APIRemoveDnsFromL3NetworkMsg","org.zstack.storage.ceph.backup.APIRemoveMonFromCephBackupStorageMsg","org.zstack.header.image.APIExpungeImageMsg","org.zstack.header.network.l3.APIDeleteL3NetworkMsg","org.zstack.header.aliyun.network.connection.APIDeleteAliyunRouterInterfaceLocalMsg","org.zstack.network.service.lb.APIRemoveCertificateFromLoadBalancerListenerMsg","org.zstack.nas.APIDeleteNasMountTargetMsg","org.zstack.nas.APIDeleteNasFileSystemMsg","org.zstack.header.storage.primary.APIDeletePrimaryStorageMsg","org.zstack.zwatch.alarm.APIRemoveLabelFromEventSubscriptionMsg","org.zstack.ticket.iam2.api.APIDeleteIAM2TicketFlowMsg","org.zstack.header.identity.APIDeletePolicyMsg","org.zstack.network.service.portforwarding.APIDetachPortForwardingRuleMsg","org.zstack.header.host.APIDeleteHostMsg","org.zstack.header.affinitygroup.APIRemoveVmFromAffinityGroupMsg","org.zstack.header.baremetal.preconfiguration.APIDeletePreconfigurationTemplateMsg","org.zstack.iam2.api.APIRemoveAttributesFromIAM2VirtualIDMsg","org.zstack.sns.APIDeleteSNSApplicationEndpointMsg","org.zstack.header.aliyun.network.connection.APIDeleteVirtualBorderRouterLocalMsg","org.zstack.monitoring.media.APIDeleteMediaMsg","org.zstack.aliyun.pangu.APIDeleteAliyunPanguPartitionMsg","org.zstack.header.aliyun.ecs.APIDeleteEcsInstanceMsg","org.zstack.scheduler.APIDeleteSchedulerTriggerMsg","org.zstack.scheduler.APIRemoveSchedulerJobsFromSchedulerJobGroupMsg","org.zstack.zwatch.alarm.APIUnsubscribeEventMsg","org.zstack.header.identity.role.api.APIDetachRoleFromAccountMsg","org.zstack.zwatch.alarm.APIRemoveActionFromAlarmMsg","org.zstack.ldap.APIDeleteLdapBindingMsg","org.zstack.header.daho.process.APIDeleteDahoDataCenterConnectionMsg","org.zstack.monitoring.APIDel
eteAlertMsg","org.zstack.header.configuration.APIDeleteInstanceOfferingMsg","org.zstack.storage.ceph.primary.APIRemoveMonFromCephPrimaryStorageMsg","org.zstack.ipsec.APIRemoveRemoteCidrsFromIPsecConnectionMsg","org.zstack.header.network.l3.APIRemoveHostRouteFromL3NetworkMsg","org.zstack.header.image.APIDeleteImageMsg","org.zstack.header.hybrid.network.eip.APIDeleteHybridEipRemoteMsg","org.zstack.header.vm.APIDeleteVmSshKeyMsg","org.zstack.header.identity.APIDetachPolicyFromUserMsg","org.zstack.iam2.api.APIRemoveAttributesFromIAM2ProjectMsg","org.zstack.iam2.api.APIRemoveAttributesFromIAM2OrganizationMsg","org.zstack.header.network.l2.APIDetachL2NetworkFromClusterMsg","org.zstack.header.aliyun.oss.APIDeleteOssBucketFileRemoteMsg","org.zstack.iam2.api.APIRemoveIAM2VirtualIDsFromGroupMsg","org.zstack.header.aliyun.storage.snapshot.APIDeleteAliyunSnapshotFromLocalMsg","org.zstack.header.aliyun.ecs.APIDeleteAllEcsInstancesFromDataCenterMsg","org.zstack.header.aliyun.network.connection.APIDeleteAliyunRouterInterfaceRemoteMsg","org.zstack.sns.platform.dingtalk.APIRemoveSNSDingTalkAtPersonMsg","org.zstack.autoscaling.template.APIDetachAutoScalingTemplateFromGroupMsg","org.zstack.aliyun.nas.message.APIDeleteAliyunNasAccessGroupMsg","org.zstack.header.storage.snapshot.APIDeleteVolumeSnapshotMsg","org.zstack.header.volume.APIDetachDataVolumeFromVmMsg","org.zstack.ipsec.APIDeleteIPsecConnectionMsg","org.zstack.header.aliyun.oss.APIDeleteOssBucketRemoteMsg","org.zstack.header.network.l2.APIDeleteL2NetworkMsg","org.zstack.iam2.api.APIDeleteIAM2VirtualIDMsg","org.zstack.header.baremetal.instance.APIDestroyBaremetalInstanceMsg","org.zstack.header.vm.cdrom.APIDeleteVmCdRomMsg","org.zstack.header.aliyun.network.vrouter.APIDeleteVirtualRouterLocalMsg","org.zstack.header.hybrid.network.vpn.APIDeleteVpcIkeConfigLocalMsg","org.zstack.header.aliyun.oss.APIDeleteOssBucketNameLocalMsg","org.zstack.header.vm.APIDeleteVmConsolePasswordMsg","org.zstack.header.storage.backup.APIDeleteBackupStorageMsg","org.zstack.iam2.api.APIExpungeIAM2ProjectMsg","org.zstack.vrouterRoute.APIDetachVRouterRouteTableFromVRouterMsg","org.zstack.ticket.api.APIDeleteTicketMsg","org.zstack.iam2.api.APIDeleteIAM2ProjectMsg","org.zstack.header.aliyun.network.vpc.APIDeleteEcsVSwitchInLocalMsg","org.zstack.zwatch.alarm.sns.APIDeleteSNSTextTemplateMsg","org.zstack.iam2.api.APIDeleteIAM2OrganizationMsg","org.zstack.header.tag.APIDeleteTagMsg","org.zstack.header.aliyun.network.group.APIDeleteEcsSecurityGroupInLocalMsg","org.zstack.network.securitygroup.APIDeleteSecurityGroupRuleMsg","org.zstack.storage.fusionstor.primary.APIRemoveMonFromFusionstorPrimaryStorageMsg","org.zstack.header.daho.process.APIDeleteDahoCloudConnectionMsg","org.zstack.header.identity.APIDeleteUserMsg","org.zstack.zwatch.alarm.APIRemoveActionFromEventSubscriptionMsg","org.zstack.ticket.api.APIDeleteTicketFlowCollectionMsg","org.zstack.network.service.lb.APIDeleteLoadBalancerListenerMsg","org.zstack.storage.fusionstor.backup.APIRemoveMonFromFusionstorBackupStorageMsg","org.zstack.header.identity.APIDetachPoliciesFromUserMsg","org.zstack.tag2.APIDetachTagFromResourcesMsg","org.zstack.header.identity.role.api.APIDetachPolicyFromRoleMsg","org.zstack.storage.ceph.primary.APIDeleteCephPrimaryStoragePoolMsg","org.zstack.header.aliyun.image.APIDeleteEcsImageLocalMsg","org.zstack.header.network.service.APIDetachNetworkServiceFromL3NetworkMsg","org.zstack.zwatch.alarm.APIRemoveLabelFromAlarmMsg","org.zstack.header.vm.APIDeleteVmBootModeMsg","org.zstack.billing.APIDeleteResourcePriceMsg"
,"org.zstack.header.hybrid.network.vpn.APIDeleteVpcVpnConnectionLocalMsg","org.zstack.header.aliyun.storage.disk.APIDeleteAliyunDiskFromRemoteMsg","org.zstack.header.identity.APIDetachPolicyFromUserGroupMsg","org.zstack.header.identityzone.APIDeleteIdentityZoneInLocalMsg","org.zstack.header.vm.APIDeleteVmHostnameMsg","org.zstack.core.config.resourceconfig.APIDeleteResourceConfigMsg","org.zstack.header.aliyun.storage.snapshot.APIGCAliyunSnapshotRemoteMsg","org.zstack.zwatch.api.APIDeleteMetricDataMsg","org.zstack.header.baremetal.pxeserver.APIDetachBaremetalPxeServerFromClusterMsg","org.zstack.header.hybrid.network.vpn.APIDeleteVpcUserVpnGatewayRemoteMsg","org.zstack.header.identity.APIDeleteUserGroupMsg","org.zstack.header.vm.APIDetachIsoFromVmInstanceMsg","org.zstack.header.vm.APIDestroyVmInstanceMsg","org.zstack.network.securitygroup.APIDetachSecurityGroupFromL3NetworkMsg","org.zstack.autoscaling.group.rule.trigger.APIDeleteAutoScalingRuleTriggerMsg","org.zstack.scheduler.APIDeleteSchedulerJobGroupMsg","org.zstack.header.cluster.APIDeleteClusterMsg","org.zstack.zwatch.alarm.APIDeleteAlarmMsg","org.zstack.header.network.l3.APIDeleteIpRangeMsg","org.zstack.header.core.webhooks.APIDeleteWebhookMsg","org.zstack.network.service.lb.APIDeleteLoadBalancerMsg","org.zstack.autoscaling.group.rule.APIDeleteAutoScalingRuleMsg","org.zstack.header.storage.snapshot.APIBatchDeleteVolumeSnapshotMsg","org.zstack.header.vm.APIDeleteNicQosMsg","org.zstack.header.hybrid.network.vpn.APIDeleteVpcVpnConnectionRemoteMsg","org.zstack.header.vipQos.APIDeleteVipQosMsg","org.zstack.monitoring.actions.APIDeleteMonitorTriggerActionMsg","org.zstack.header.baremetal.chassis.APIDeleteBaremetalChassisMsg","org.zstack.header.volume.APIExpungeDataVolumeMsg","org.zstack.header.identity.role.api.APIRemovePolicyStatementsFromRoleMsg","org.zstack.header.aliyun.network.connection.APIDeleteConnectionBetweenL3NetWorkAndAliyunVSwitchMsg","org.zstack.header.vm.APIExpungeVmInstanceMsg","org.zstack.vpc.APIRemoveDnsFromVpcRouterMsg","org.zstack.header.configuration.APIDeleteDiskOfferingMsg","org.zstack.header.protocol.APIRemoveVRouterNetworksFromOspfAreaMsg","org.zstack.scheduler.APIDeleteSchedulerJobMsg","org.zstack.network.service.lb.APIDeleteCertificateMsg","org.zstack.header.hybrid.network.eip.APIDeleteHybridEipFromLocalMsg","org.zstack.header.hybrid.network.vpn.APIDeleteVpcIpSecConfigLocalMsg","org.zstack.header.identity.role.api.APIDeleteRoleMsg","org.zstack.scheduler.APIRemoveSchedulerJobGroupFromSchedulerTriggerMsg","org.zstack.accessKey.APIDeleteAccessKeyMsg","org.zstack.header.protocol.APIDeleteVRouterOspfAreaMsg","org.zstack.header.volume.APIDeleteVolumeQosMsg","org.zstack.pciDevice.APIDeletePciDeviceOfferingMsg","org.zstack.header.storage.volume.backup.APIDeleteVmBackupMsg","org.zstack.iam2.api.APIDeleteIAM2VirtualIDGroupMsg","org.zstack.ldap.APIDeleteLdapServerMsg","org.zstack.header.storage.database.backup.APIDeleteDatabaseBackupMsg","org.zstack.network.service.portforwarding.APIDeletePortForwardingRuleMsg","org.zstack.header.aliyun.ecs.APIDeleteEcsInstanceLocalMsg","org.zstack.sns.APIUnsubscribeSNSTopicMsg","org.zstack.vrouterRoute.APIDeleteVRouterRouteEntryMsg","org.zstack.network.service.vip.APIDeleteVipMsg","org.zstack.header.storage.backup.APIDeleteExportedImageFromBackupStorageMsg","org.zstack.core.gc.APIDeleteGCJobMsg","org.zstack.network.service.eip.APIDeleteEipMsg","org.zstack.header.daho.process.APIDeleteDahoVllMsg","org.zstack.header.aliyun.network.vrouter.APIDeleteAliyunRouteEntryRemoteMsg","org.zstack.header.al
iyun.network.connection.APIDeleteConnectionAccessPointLocalMsg","org.zstack.network.service.lb.APIRemoveVmNicFromLoadBalancerMsg","org.zstack.sns.APIDeleteSNSTopicMsg","org.zstack.ipsec.APIDetachL3NetworksFromIPsecConnectionMsg","org.zstack.iam2.api.APIRemoveRolesFromIAM2VirtualIDMsg","org.zstack.iam2.api.APIRemoveIAM2VirtualIDsFromProjectMsg","org.zstack.header.vm.APIDeleteVmStaticIpMsg","org.zstack.header.storage.backup.APIDetachBackupStorageFromZoneMsg","org.zstack.header.aliyun.account.APIDeleteAliyunKeySecretMsg","org.zstack.storage.device.iscsi.APIDeleteIscsiServerMsg","org.zstack.autoscaling.template.APIDeleteAutoScalingTemplateMsg","org.zstack.header.storage.volume.backup.APIDeleteVolumeBackupMsg","org.zstack.header.baremetal.pxeserver.APIDeleteBaremetalPxeServerMsg","org.zstack.aliyun.nas.message.APIDeleteAliyunNasAccessGroupRuleMsg","org.zstack.header.volume.APIDeleteDataVolumeMsg","org.zstack.header.aliyun.network.group.APIDeleteEcsSecurityGroupRemoteMsg","org.zstack.network.securitygroup.APIDeleteVmNicFromSecurityGroupMsg","org.zstack.network.l2.vxlan.vxlanNetworkPool.APIDeleteVniRangeMsg","org.zstack.header.aliyun.image.APIDeleteEcsImageRemoteMsg","org.zstack.storage.device.iscsi.APIDetachIscsiServerFromClusterMsg","org.zstack.autoscaling.group.instance.APIDeleteAutoScalingGroupInstanceMsg","org.zstack.header.identity.APIDeleteAccountMsg","org.zstack.header.storageDevice.APIDetachScsiLunFromVmInstanceMsg","org.zstack.vmware.APIDeleteVCenterMsg","org.zstack.license.APIDeleteLicenseMsg","org.zstack.header.hybrid.network.vpn.APIDeleteVpcUserVpnGatewayLocalMsg","org.zstack.header.vm.APIDetachL3NetworkFromVmMsg","org.zstack.header.identity.APIRemoveUserFromGroupMsg","org.zstack.header.aliyun.network.vpc.APIDeleteEcsVpcRemoteMsg","org.zstack.header.datacenter.APIDeleteDataCenterInLocalMsg","org.zstack.header.aliyun.network.vpc.APIDeleteEcsVpcInLocalMsg","org.zstack.network.service.eip.APIDetachEipMsg","org.zstack.monitoring.APIDeleteMonitorTriggerMsg"]}]
def test():
global virtual_id_uuid
global zone_uuid
iam2_ops.clean_iam2_enviroment()
    # 1. create a no-delete virtual ID
username = "noDeleteAdmin"
password = hashlib.sha512('password').hexdigest()
role_name = "noDeleteRole"
policy_name = "noDeltePolicy"
virtual_id_uuid = iam2_ops.create_iam2_virtual_id(username, password).uuid
attributes = [{"name":"__PlatformAdmin__"}, {"name":"__PlatformAdminRelatedZone__", "value": "ALL_ZONES"}]
iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, attributes)
nodelete_role_uuid = iam2_ops.create_role(role_name).uuid
nodelete_policy_uuid = iam2_ops.create_policy(policy_name, statements).uuid
iam2_ops.attach_policy_to_role(nodelete_policy_uuid, nodelete_role_uuid)
iam2_ops.add_roles_to_iam2_virtual_id([nodelete_role_uuid], virtual_id_uuid)
nodelete_session_uuid = iam2_ops.login_iam2_virtual_id(username, password)
    # 2. create, query, and delete tests
zone_name = 'zone_test'
zone_create_option = test_util.ZoneOption()
zone_create_option.set_name(zone_name)
zone_uuid = zone_ops.create_zone(zone_create_option, session_uuid=nodelete_session_uuid).uuid
test_util.test_logger("@@Create test pas")
res_ops.query_resource(res_ops.ZONE, session_uuid=nodelete_session_uuid)
test_util.test_logger("@@Query test pass")
try:
zone_ops.delete_zone(zone_uuid, session_uuid=nodelete_session_uuid)
except Exception:
test_util.test_logger("@@Delete test pass")
test_util.test_pass('success test iam2 project admin basic operations!')
def error_cleanup():
    global virtual_id_uuid
    global zone_uuid
    if virtual_id_uuid:
        iam2_ops.delete_iam2_virtual_id(virtual_id_uuid)
    if zone_uuid:
        zone_ops.delete_zone(zone_uuid)
def env_recover():
    global virtual_id_uuid
    global zone_uuid
    if virtual_id_uuid:
        iam2_ops.delete_iam2_virtual_id(virtual_id_uuid)
    if zone_uuid:
        zone_ops.delete_zone(zone_uuid)
| apache-2.0 |
fedosov/django-generic-ratings | ratings/forms/__init__.py | 3 | 13110 | from __future__ import absolute_import
import time
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.utils.crypto import salted_hmac, constant_time_compare
from ratings import cookies, exceptions
from .widgets import SliderWidget, StarWidget, BootstrapWidget
class VoteForm(forms.Form):
"""
Form class to handle voting of content objects.
You can customize the app giving a custom form class, following
some rules:
- the form must define the *content_type* and *object_pk* fields
- the form's *__init__* method must take as first and second positional
arguments the target object getting voted and the ratings key
- the form must define the *get_vote* method, getting the request and
a boolean *allow_anonymous* and returning an unsaved instance of
the vote model
- the form must define the *delete* method, getting the request and
returning True if the form requests the deletion of the vote
"""
# rating data
content_type = forms.CharField(widget=forms.HiddenInput)
object_pk = forms.CharField(widget=forms.HiddenInput)
key = forms.RegexField(regex=r'^[\w.+-]+$', widget=forms.HiddenInput,
required=False)
# security data
timestamp = forms.IntegerField(widget=forms.HiddenInput)
security_hash = forms.CharField(min_length=40, max_length=40,
widget=forms.HiddenInput)
honeypot = forms.CharField(required=False, widget=forms.HiddenInput)
def __init__(self, target_object, key, score_range=None, score_step=None,
can_delete_vote=None, data=None, initial=None, size="sm"):
self.target_object = target_object
self.key = key
self.score_range = score_range
self.score_step = score_step
self.can_delete_vote = can_delete_vote
self.size = size
if initial is None:
initial = {}
initial.update(self.generate_security_data())
super(VoteForm, self).__init__(data=data, initial=initial)
self.fields['score'] = self.get_score_field(score_range, score_step,
can_delete_vote)
# FACTORY METHODS
def get_score_field(self, score_range, score_step, can_delete_vote):
"""
Return the score field.
        Subclasses may override this method in order to change
the field used to store score value.
"""
try:
_, decimals = str(score_step).split('.')
except ValueError:
field = forms.IntegerField
else:
field = forms.FloatField if int(decimals) else forms.IntegerField
widget = self.get_score_widget(score_range, score_step, can_delete_vote)
return field(widget=widget, label=u'')
def get_score_widget(self, score_range, score_step, can_delete_vote):
"""
Return the score widget.
        Subclasses may override this method in order to change
the widget used to display score input.
"""
return forms.TextInput
# SECURITY
def clean_security_hash(self):
"""
Check the security hash.
"""
security_hash_dict = {
'content_type': self.data.get('content_type', ''),
'object_pk': self.data.get('object_pk', ''),
'key': self.data.get('key', ''),
'timestamp': self.data.get('timestamp', ''),
}
expected_hash = self.generate_security_hash(**security_hash_dict)
actual_hash = self.cleaned_data['security_hash']
if not constant_time_compare(expected_hash, actual_hash):
raise forms.ValidationError('Security hash check failed.')
return actual_hash
def clean_timestamp(self):
"""
Make sure the timestamp isn't too far (> 2 hours) in the past.
"""
timestamp = self.cleaned_data['timestamp']
if time.time() - timestamp > (2 * 60 * 60):
raise forms.ValidationError('Timestamp check failed')
return timestamp
def clean_honeypot(self):
"""
Check that nothing's been entered into the honeypot.
"""
value = self.cleaned_data['honeypot']
if value:
raise forms.ValidationError('Your vote is spam. Shame on you!')
return value
def generate_security_data(self):
"""
Generate a dict of security data for *initial* data.
"""
timestamp = int(time.time())
security_dict = {
'content_type': str(self.target_object._meta),
'object_pk': str(self.target_object._get_pk_val()),
'key': str(self.key),
'timestamp': str(timestamp),
'security_hash': self.initial_security_hash(timestamp),
}
return security_dict
def initial_security_hash(self, timestamp):
"""
Generate the initial security hash from *self.target_object*
and a (unix) timestamp.
"""
initial_security_dict = {
'content_type': str(self.target_object._meta),
'object_pk': str(self.target_object._get_pk_val()),
'key': str(self.key),
'timestamp': str(timestamp),
}
return self.generate_security_hash(**initial_security_dict)
def generate_security_hash(self, content_type, object_pk, key, timestamp):
"""
Generate a HMAC security hash from the provided info.
"""
key_salt = 'ratings.forms.VoteForm'
value = '-'.join((content_type, object_pk, key, timestamp))
return salted_hmac(key_salt, value).hexdigest()
# VOTE
def clean_score(self):
"""
If *score_range* was given to the form, then check if the
score is in range.
Again, if *score_step* was given, then check if the score is valid
for that step.
"""
score = self.cleaned_data['score']
self._delete_vote = False
        # a 0 score means the user wants to delete their vote
if score == 0:
if not self.can_delete_vote:
raise forms.ValidationError('Vote deletion is not allowed')
self._delete_vote = True
return score
# score range, if given we have to check score is in that range
if self.score_range:
if not (self.score_range[0] <= score <= self.score_range[1]):
raise forms.ValidationError('Score is not in range')
# check score steps
if self.score_step:
try:
_, decimals = str(self.score_step).split('.')
except ValueError:
decimal_places = 0
else:
decimal_places = len(decimals) if int(decimals) else 0
if not decimal_places and int(score) != score:
raise forms.ValidationError('Score is not in steps')
factor = 10 ** decimal_places
if int(score * factor) % int(self.score_step * factor):
raise forms.ValidationError('Score is not in steps')
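            # e.g. with score_step=0.5: decimal_places=1, factor=10, so
            # score=2.5 gives 25 % 5 == 0 (valid) while score=2.3 gives
            # 23 % 5 == 3 and is rejected above.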
return score
def get_vote_model(self):
"""
Return the vote model used to rate an object.
"""
from ratings import models
return models.Vote
def get_vote_data(self, request, allow_anonymous):
"""
Return two dicts of data to be used to look for a vote and to create
a vote.
Subclasses in custom ratings apps that override *get_vote_model* can
override this method too to add extra fields into a custom vote model.
If the first dict is None, then the lookup is not performed.
"""
content_type = ContentType.objects.get_for_model(self.target_object)
ip_address = request.META.get('REMOTE_ADDR')
lookups = {
'content_type': content_type,
'object_id': self.target_object.pk,
'key': self.cleaned_data['key'],
}
data = lookups.copy()
data.update({
'score': self.cleaned_data['score'],
'ip_address': ip_address,
})
if allow_anonymous:
# votes are handled by cookies
if not ip_address:
raise exceptions.DataError('Invalid ip address')
cookie_name = cookies.get_name(self.target_object, self.key)
cookie_value = request.COOKIES.get(cookie_name)
if cookie_value:
# the user maybe voted this object (it has a cookie)
lookups.update({'cookie': cookie_value, 'user__isnull': True})
data['cookie'] = cookie_value
else:
lookups = None
data['cookie'] = cookies.get_value(ip_address)
elif request.user.is_authenticated():
# votes are handled by database (django users)
lookups.update({'user': request.user, 'cookie__isnull': True})
data['user'] = request.user
else:
# something went very wrong: if anonymous votes are not allowed
# and the user is not authenticated the view should have blocked
# the voting process
raise exceptions.DataError('Anonymous user cannot vote.')
return lookups, data
def get_vote(self, request, allow_anonymous):
"""
Return an unsaved vote object based on the information in this form.
Assumes that the form is already validated and will throw a
ValueError if not.
The vote can be a brand new vote or a changed vote. If the vote is
just created then the instance's id will be None.
"""
if not self.is_valid():
raise ValueError('get_vote may only be called on valid forms')
# get vote model and data
model = self.get_vote_model()
lookups, data = self.get_vote_data(request, allow_anonymous)
if lookups is None:
return model(**data)
try:
# trying to get an existing vote
vote = model.objects.get(**lookups)
except model.DoesNotExist:
# create a brand new vote
vote = model(**data)
else:
            # change data for the existing vote
vote.score = data['score']
vote.ip_address = data['ip_address']
return vote
# DELETE
def delete(self, request):
"""
Return True if the form requests to delete the vote.
"""
return self._delete_vote
class SliderVoteForm(VoteForm):
"""
Handle voting using a slider widget.
In order to use this form you must load the jQuery.ui slider
javascript.
This form triggers the following javascript events:
- *slider_change* with the vote value as argument
(fired when the user changes his vote)
- *slider_delete* without arguments
(fired when the user deletes his vote)
It's easy to bind these events using jQuery, e.g.::
$(document).bind('slider_change', function(event, value) {
alert('New vote: ' + value);
});
"""
def get_score_widget(self, score_range, score_step, can_delete_vote):
return SliderWidget(score_range[0], score_range[1], score_step,
instance=self.target_object, can_delete_vote=can_delete_vote, key=self.key)
class StarVoteForm(VoteForm):
"""
Handle voting using a star widget.
In order to use this form you must download the
jQuery Star Rating Plugin available at
http://www.fyneworks.com/jquery/star-rating/#tab-Download
and then load the required javascripts and css, e.g.::
<link href="/path/to/jquery.rating.css" rel="stylesheet" type="text/css" />
<script type="text/javascript" src="/path/to/jquery.MetaData.js"></script>
<script type="text/javascript" src="/path/to/jquery.rating.js"></script>
This form triggers the following javascript events:
- *star_change* with the vote value as argument
(fired when the user changes his vote)
- *star_delete* without arguments
(fired when the user deletes his vote)
It's easy to bind these events using jQuery, e.g.::
$(document).bind('star_change', function(event, value) {
alert('New vote: ' + value);
});
"""
def get_score_widget(self, score_range, score_step, can_delete_vote):
return StarWidget(score_range[0], score_range[1], score_step,
instance=self.target_object, can_delete_vote=can_delete_vote, key=self.key)
class BootstrapVoteForm(VoteForm):
"""
    Handle voting using a Bootstrap star rating widget.
    In order to use this form you must download the
    bootstrap-star-rating plugin available at
    https://github.com/kartik-v/bootstrap-star-rating
    and then load the required javascript and css files it ships with.
"""
def get_score_widget(self, score_range, score_step, can_delete_vote):
return BootstrapWidget(score_range[0], score_range[1], score_step,
instance=self.target_object,
can_delete_vote=can_delete_vote,
key=self.key,
size=self.size)
| mit |
cancan101/tensorflow | tensorflow/tensorboard/tensorboard.py | 6 | 6737 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Serve TensorFlow summary data to a web frontend.
This is a simple web server to proxy data from the event_loader to the web, and
serve static web files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
from werkzeug import serving
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_file_inspector as efi
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.plugins.debugger import plugin as debugger_plugin
from tensorflow.tensorboard.plugins.projector import plugin as projector_plugin
flags.DEFINE_string('logdir', '', """logdir specifies the directory where
TensorBoard will look to find TensorFlow event files that it can display.
TensorBoard will recursively walk the directory structure rooted at logdir,
looking for .*tfevents.* files.
You may also pass a comma separated list of log directories, and TensorBoard
will watch each directory. You can also assign names to individual log
directories by putting a colon between the name and the path, as in
tensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2
""")
flags.DEFINE_boolean(
'insecure_debug_mode', False, 'Whether to run the app in debug mode. '
'This increases log verbosity, and enables debugging on server exceptions.')
flags.DEFINE_string('host', '0.0.0.0', 'What host to listen to. Defaults to '
                    'serving on 0.0.0.0, set to 127.0.0.1 (localhost) to '
'disable remote access (also quiets security warnings).')
flags.DEFINE_boolean('inspect', False, """Use this flag to print out a digest
of your event files to the command line, when no data is shown on TensorBoard or
the data shown looks weird.
Example usages:
tensorboard --inspect --event_file=myevents.out
tensorboard --inspect --event_file=myevents.out --tag=loss
tensorboard --inspect --logdir=mylogdir
tensorboard --inspect --logdir=mylogdir --tag=loss
See tensorflow/python/summary/event_file_inspector.py for more info and
detailed usage.
""")
flags.DEFINE_string(
'tag', '',
'The particular tag to query for. Only used if --inspect is present')
flags.DEFINE_string(
'event_file', '',
'The particular event file to query for. Only used if --inspect is present '
'and --logdir is not specified.')
flags.DEFINE_integer('port', 6006, 'What port to serve TensorBoard on.')
flags.DEFINE_boolean('purge_orphaned_data', True, 'Whether to purge data that '
'may have been orphaned due to TensorBoard restarts. '
'Disabling purge_orphaned_data can be used to debug data '
'disappearance.')
flags.DEFINE_integer('reload_interval', 60, 'How often the backend should load '
'more data.')
FLAGS = flags.FLAGS
class Server(object):
"""A simple WSGI-compliant http server that can serve TensorBoard."""
def get_tag(self):
"""Read the TensorBoard TAG number, and return it or an empty string."""
try:
tag = resource_loader.load_resource('tensorboard/TAG').strip()
logging.info('TensorBoard is tag: %s', tag)
return tag
except IOError:
logging.info('Unable to read TensorBoard tag')
return ''
def create_app(self):
"""Creates a WSGI-compliant app than can handle TensorBoard requests.
Returns:
(function) A complete WSGI application that handles TensorBoard requests.
"""
logdir = os.path.expanduser(FLAGS.logdir)
if not logdir:
msg = ('A logdir must be specified. Run `tensorboard --help` for '
'details and examples.')
logging.error(msg)
print(msg)
return -1
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=FLAGS.purge_orphaned_data)
plugins = {
debugger_plugin.PLUGIN_PREFIX_ROUTE:
debugger_plugin.DebuggerPlugin(multiplexer),
projector_plugin.PLUGIN_PREFIX_ROUTE:
projector_plugin.ProjectorPlugin(),
}
return application.TensorBoardWSGIApp(
logdir,
plugins,
multiplexer,
reload_interval=FLAGS.reload_interval)
def serve(self):
"""Starts a WSGI server that serves the TensorBoard app."""
tb_app = self.create_app()
logging.info('Starting TensorBoard in directory %s', os.getcwd())
debug = FLAGS.insecure_debug_mode
if debug:
logging.set_verbosity(logging.DEBUG)
logging.warning('TensorBoard is in debug mode. This is NOT SECURE.')
print('Starting TensorBoard %s on port %d' % (self.get_tag(), FLAGS.port))
if FLAGS.host == '0.0.0.0':
try:
host = socket.gethostbyname(socket.gethostname())
print('(You can navigate to http://%s:%d)' % (host, FLAGS.port))
except socket.gaierror:
pass
else:
print('(You can navigate to http://%s:%d)' % (FLAGS.host, FLAGS.port))
try:
serving.run_simple(
FLAGS.host,
FLAGS.port,
tb_app,
threaded=True,
use_reloader=debug,
use_evalex=debug,
use_debugger=debug)
except socket.error:
if FLAGS.port == 0:
msg = 'Unable to find any open ports.'
logging.error(msg)
print(msg)
return -2
else:
msg = 'Tried to connect to port %d, but address is in use.' % FLAGS.port
logging.error(msg)
print(msg)
return -3
def main(unused_argv=None):
if FLAGS.inspect:
logging.info('Not bringing up TensorBoard, but inspecting event files.')
event_file = os.path.expanduser(FLAGS.event_file)
efi.inspect(FLAGS.logdir, event_file, FLAGS.tag)
return 0
Server().serve()
if __name__ == '__main__':
app.run()
| apache-2.0 |
takeflight/django | tests/migrations/models.py | 35 | 1316 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class CustomModelBase(models.base.ModelBase):
pass
class ModelWithCustomBase(six.with_metaclass(CustomModelBase, models.Model)):
pass
@python_2_unicode_compatible
class UnicodeModel(models.Model):
title = models.CharField('ÚÑÍ¢ÓÐÉ', max_length=20, default='“Ðjáñgó”')
class Meta:
# Disable auto loading of this model as we load it on our own
apps = Apps()
verbose_name = 'úñí©óðé µóðéø'
verbose_name_plural = 'úñí©óðé µóðéøß'
def __str__(self):
return self.title
class Unserializable(object):
"""
An object that migration doesn't know how to serialize.
"""
pass
class UnserializableModel(models.Model):
title = models.CharField(max_length=20, default=Unserializable())
class Meta:
# Disable auto loading of this model as we load it on our own
apps = Apps()
class UnmigratedModel(models.Model):
"""
A model that is in a migration-less app (which this app is
if its migrations directory has not been repointed)
"""
pass
| bsd-3-clause |
morrisonwudi/zipline | tests/test_versioning.py | 30 | 3539 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas
import pickle
from nose_parameterized import parameterized
from unittest import TestCase
from zipline.finance.blotter import Order
from .serialization_cases import (
object_serialization_cases,
assert_dict_equal
)
base_state_dir = 'tests/resources/saved_state_archive'
BASE_STATE_DIR = os.path.join(
os.path.dirname(__file__),
'resources',
'saved_state_archive')
class VersioningTestCase(TestCase):
def load_state_from_disk(self, cls):
state_dir = cls.__module__ + '.' + cls.__name__
full_dir = BASE_STATE_DIR + '/' + state_dir
state_files = \
[f for f in os.listdir(full_dir) if 'State_Version_' in f]
for f_name in state_files:
            with open(full_dir + '/' + f_name, 'r') as f:
                yield pickle.load(f)
# Only test versioning in minutely mode right now
@parameterized.expand(object_serialization_cases(skip_daily=True))
def test_object_serialization(self,
_,
cls,
initargs,
di_vars,
comparison_method='dict'):
# The state generated under one version of pandas may not be
# compatible with another. To ensure that tests pass under the travis
# pandas version matrix, we only run versioning tests under the
# current version of pandas. This will need to be updated once we
# change the pandas version on prod.
if pandas.__version__ != '0.12.0':
return
# Make reference object
obj = cls(*initargs)
for k, v in di_vars.items():
setattr(obj, k, v)
# Fetch state
state_versions = self.load_state_from_disk(cls)
for version in state_versions:
# For each version inflate a new object and ensure that it
# matches the original.
newargs = version['newargs']
initargs = version['initargs']
state = version['obj_state']
if newargs is not None:
obj2 = cls.__new__(cls, *newargs)
else:
obj2 = cls.__new__(cls)
if initargs is not None:
obj2.__init__(*initargs)
obj2.__setstate__(state)
for k, v in di_vars.items():
setattr(obj2, k, v)
# The ObjectId generated on instantiation of Order will
# not be the same as the one loaded from saved state.
if cls == Order:
obj.__dict__['id'] = obj2.__dict__['id']
if comparison_method == 'repr':
self.assertEqual(obj.__repr__(), obj2.__repr__())
elif comparison_method == 'to_dict':
assert_dict_equal(obj.to_dict(), obj2.to_dict())
else:
assert_dict_equal(obj.__dict__, obj2.__dict__)
| apache-2.0 |
cristiana214/cristianachavez214-cristianachavez | python-build/python-libs/gdata/src/gdata/apps/migration/__init__.py | 168 | 8177 | #!/usr/bin/python
#
# Copyright (C) 2008 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains objects used with Google Apps."""
__author__ = 'google-apps-apis@googlegroups.com'
import atom
import gdata
# XML namespaces which are often used in Google Apps entity.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
class Rfc822Msg(atom.AtomBase):
"""The Migration rfc822Msg element."""
_tag = 'rfc822Msg'
_namespace = APPS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['encoding'] = 'encoding'
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
self.text = text
self.encoding = 'base64'
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def Rfc822MsgFromString(xml_string):
"""Parse in the Rrc822 message from the XML definition."""
return atom.CreateClassFromXMLString(Rfc822Msg, xml_string)
class MailItemProperty(atom.AtomBase):
"""The Migration mailItemProperty element."""
_tag = 'mailItemProperty'
_namespace = APPS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, value=None, extension_elements=None,
extension_attributes=None, text=None):
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def MailItemPropertyFromString(xml_string):
"""Parse in the MailItemProperiy from the XML definition."""
return atom.CreateClassFromXMLString(MailItemProperty, xml_string)
class Label(atom.AtomBase):
"""The Migration label element."""
_tag = 'label'
_namespace = APPS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['labelName'] = 'label_name'
def __init__(self, label_name=None,
extension_elements=None, extension_attributes=None,
text=None):
self.label_name = label_name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def LabelFromString(xml_string):
"""Parse in the mailItemProperty from the XML definition."""
return atom.CreateClassFromXMLString(Label, xml_string)
class MailEntry(gdata.GDataEntry):
"""A Google Migration flavor of an Atom Entry."""
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
_children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
[MailItemProperty])
_children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
rfc822_msg=None, mail_item_property=None, label=None,
extended_property=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.GDataEntry.__init__(self, author=author, category=category,
content=content,
atom_id=atom_id, link=link, published=published,
title=title, updated=updated)
self.rfc822_msg = rfc822_msg
self.mail_item_property = mail_item_property
self.label = label
self.extended_property = extended_property or []
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def MailEntryFromString(xml_string):
"""Parse in the MailEntry from the XML definition."""
return atom.CreateClassFromXMLString(MailEntry, xml_string)
class BatchMailEntry(gdata.BatchEntry):
"""A Google Migration flavor of an Atom Entry."""
_tag = gdata.BatchEntry._tag
_namespace = gdata.BatchEntry._namespace
_children = gdata.BatchEntry._children.copy()
_attributes = gdata.BatchEntry._attributes.copy()
_children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
_children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
[MailItemProperty])
_children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
rfc822_msg=None, mail_item_property=None, label=None,
batch_operation=None, batch_id=None, batch_status=None,
extended_property=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.BatchEntry.__init__(self, author=author, category=category,
content=content,
atom_id=atom_id, link=link, published=published,
batch_operation=batch_operation,
batch_id=batch_id, batch_status=batch_status,
title=title, updated=updated)
self.rfc822_msg = rfc822_msg or None
self.mail_item_property = mail_item_property or []
self.label = label or []
self.extended_property = extended_property or []
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def BatchMailEntryFromString(xml_string):
"""Parse in the BatchMailEntry from the XML definition."""
return atom.CreateClassFromXMLString(BatchMailEntry, xml_string)
class BatchMailEventFeed(gdata.BatchFeed):
"""A Migration event feed flavor of an Atom Feed."""
_tag = gdata.BatchFeed._tag
_namespace = gdata.BatchFeed._namespace
_children = gdata.BatchFeed._children.copy()
_attributes = gdata.BatchFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchMailEntry])
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, interrupted=None, extension_elements=None,
extension_attributes=None, text=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
interrupted=interrupted,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def BatchMailEventFeedFromString(xml_string):
"""Parse in the BatchMailEventFeed from the XML definition."""
return atom.CreateClassFromXMLString(BatchMailEventFeed, xml_string)
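# Illustrative construction sketch (all values are placeholders, not part of
# the original module):
#   msg = Rfc822Msg(text=base64_encoded_rfc822)  # encoding defaults to 'base64'
#   entry = MailEntry(rfc822_msg=msg,
#                     mail_item_property=[MailItemProperty(value='IS_INBOX')],
#                     label=[Label(label_name='Migrated')])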
| apache-2.0 |
bm5w/pychal | 6.py | 1 | 1025 | """Python challenge number 6:
http://www.pythonchallenge.com/pc/def/channel.html"""
import os
# from os.path import normpath, join
curr_dir = os.path.abspath('channel')
# equivalent to above
# curr_dir = normpath(join(os.getcwd(), 'channel'))
import zipfile
query = u'Next nothing is '
def main(num=90052):
output = ''
while True:
with open(os.path.join(curr_dir, '{}.txt'.format(num))) as file_handle:
file_content = file_handle.read()
try:
numid = file_content.find(query)
except Exception as e:
print "exception", e
if numid != -1:
num = int(file_content[numid+len(query):])
else:
break
output += main2(num)
print output
def main2(num):
"""Get comment from zipfile."""
with zipfile.ZipFile('{}/channel.zip'.format(curr_dir), 'r') as myzip:
temp = myzip.getinfo("{}.txt".format(num))
return temp.comment
if __name__ == "__main__":
main()
| mit |
patricklaw/pip | pip/_vendor/colorama/ansi.py | 527 | 1039 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
def code_to_chars(code):
return CSI + str(code) + 'm'
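# For example, code_to_chars(31) returns '\033[31m', which switches the
# terminal foreground color to red until a reset code is printed.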
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
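# Illustrative usage (not part of the original module):
#   print(Fore.RED + 'error' + Fore.RESET)
# emits '\033[31merror\033[39m', i.e. the word rendered in red.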
| mit |
linzhonghong/dnspod_desktop | dnspod_api.py | 1 | 6268 | #-*- coding:utf-8 -*-
__author__ = 'linzhonghong'
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import json
import urllib2,cookielib,urllib
import re
import traceback
import time
class dnspod_api(object):
def __init__(self,user="",passwd="",domain="yourdomain.com"):
self.cookies = cookielib.LWPCookieJar()
cookie_support = urllib2.HTTPCookieProcessor(self.cookies)
        opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler, urllib2.HTTPSHandler)
        urllib2.install_opener(opener)
self.user = user
self.passwd = passwd
self.headers = {}
self.headers["User-Agent"]='Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; WOW64; Trident/6.0)'
self.common_parm = self.__common_parm()
self.domian_id = self.get_domain_info(domian=domain)
    # Common request parameters
def __common_parm(self):
common_parm = {}
common_parm['login_email'] = self.user
common_parm['login_password'] = self.passwd
common_parm['format'] = 'json'
common_parm['lang'] = 'cn'
common_parm['error_on_empty'] = 'no'
return common_parm
    # Build the request parameters
def __create_parm(self,parm=None):
parm.update(self.common_parm)
return urllib.urlencode(parm)
    # Send the request and return the result
def __post_data(self,parm,api_url=""):
request = urllib2.Request(api_url,parm,self.headers)
response = urllib2.urlopen(request).read()
ret = json.JSONDecoder().decode(response)
if ret.has_key('status'):
if ret['status']['code'] =="1":
return ("ok",ret)
else:
return (ret['status']['message'],ret)
else:
            return (u'Unknown error', {})
    # Get the ID of the given domain
def get_domain_info(self,domian=""):
message,result = self.__post_data(self.__create_parm(parm={'domain':domian}),api_url='https://dnsapi.cn/Domain.Info')
return result['domain']['id']
    # Get the record list
def record_list(self,sub_domain="",type="record_id",offset=""):
parm = {}
parm['domain_id'] = self.domian_id
parm['sub_domain'] = sub_domain
parm['offset'] = offset
message,result = self.__post_data(self.__create_parm(parm=parm),api_url='https://dnsapi.cn/Record.List')
if type == "total":
return message,result['records']
if type == "record_id":
record_id_list = []
for record in result['records']:
record_id_list.append(record['id'])
return message,record_id_list
if type == "record":
record_list = []
for record in result['records']:
record_list.append(dict(id=record['id'],enabled=record['enabled'],name=record['name'],type=record['type'],
value=record['value'],ttl=record['ttl'],line=record['line'],))
return message,record_list
    # Get record info
def record_info(self,record_id):
parm = {}
parm['domain_id'] = self.domian_id
parm['record_id'] = record_id
message,result = self.__post_data(self.__create_parm(parm=parm),api_url='https://dnsapi.cn/Record.Info')
return message,result
    # Create a record
def record_create(self,sub_domain="",record_type="",record_line="",value=""):
        result_message = 'Invalid input parameters'
api_url='https://dnsapi.cn/Record.Create'
parm = {}
parm['domain_id'] = self.domian_id
parm['sub_domain'] = sub_domain
parm['record_type'] = record_type
parm['record_line'] = record_line
parm['value'] = value
if record_type == "CNAME":
if not value.endswith('.'):
parm['value'] = "%s."%value
message,result = self.__post_data(self.__create_parm(parm=parm),api_url=api_url)
if message == 'ok' and result['status']['code'] == '1':
return (1, {'rec_id': result['record']['id'], 'status':result['record']['status']})
else:
return (0, {})
    # Modify a record
def record_modify(self, record_id, sub_domain="",record_type="",record_line="",value=""):
api_url='https://dnsapi.cn/Record.Modify'
parm = {}
parm['domain_id'] = self.domian_id
parm['record_id'] = record_id
parm['sub_domain'] = sub_domain
parm['record_type'] = record_type
parm['record_line'] = record_line
parm['value'] = value
if not record_id:
            return (0, 'Invalid parameters')
message,result = self.__post_data(self.__create_parm(parm=parm),api_url=api_url)
if message == 'ok' and result['status']['code'] == '1':
return (1, 'ok')
else:
            return (0, 'API request failed: %s' % result['status']['message'])
    # Set record status
def record_status(self,record_id,status="",rec_name=''):
parm = {}
parm['domain_id'] = self.domian_id
parm['record_id'] = record_id
parm['status'] = status
if status != "enable" and status != "disable":
            return (0, 'Invalid parameters')
message,record_info = self.record_info(record_id)
if message == 'ok':
message,result = self.__post_data(self.__create_parm(parm=parm),api_url='https://dnsapi.cn/Record.Status')
if message == 'ok' and result['status']['code'] == '1':
return (1, 'ok')
else:
return (0, '接口请求失败')
else:
            return (0, '%s does not exist' % record_id)
    # Delete a record
def record_delete(self,record_id='',rec_name=''):
if not all([record_id]):
return 0
api_url='https://dnsapi.cn/Record.Remove'
parm = {}
parm['domain_id'] = self.domian_id
parm['record_id'] = record_id
message,result = self.__post_data(self.__create_parm(parm=parm),api_url=api_url)
if message == 'ok' and result['status']['code'] == '1':
return 1
else:
return 0
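# Illustrative usage sketch (credentials, domain and record values are
# placeholders):
#   api = dnspod_api(user='me@example.com', passwd='secret', domain='example.com')
#   ok, rec = api.record_create(sub_domain='www', record_type='A',
#                               record_line=u'默认', value='1.2.3.4')
# where u'默认' is DNSPod's default line name under lang='cn'.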
if __name__ == '__main__':
pass | gpl-2.0 |
facelessuser/ThemeScheduler | tests/validate_json_format.py | 1 | 6115 | """
Validate JSON format.
Licensed under MIT
Copyright (c) 2012-2015 Isaac Muse <isaacmuse@gmail.com>
"""
import re
import codecs
import json
RE_LINE_PRESERVE = re.compile(r"\r?\n", re.MULTILINE)
RE_COMMENT = re.compile(
r'''(?x)
(?P<comments>
/\*[^*]*\*+(?:[^/*][^*]*\*+)*/ # multi-line comments
| [ \t]*//(?:[^\r\n])* # single line comments
)
| (?P<code>
"(?:\\.|[^"\\])*" # double quotes
| '(?:\\.|[^'\\])*' # single quotes
| .[^/"']* # everything else
)
''',
re.DOTALL
)
RE_TRAILING_COMMA = re.compile(
r'''(?x)
(
(?P<square_comma>
, # trailing comma
(?P<square_ws>[\s\r\n]*) # white space
(?P<square_bracket>\]) # bracket
)
| (?P<curly_comma>
, # trailing comma
(?P<curly_ws>[\s\r\n]*) # white space
(?P<curly_bracket>\}) # bracket
)
)
| (?P<code>
"(?:\\.|[^"\\])*" # double quoted string
| '(?:\\.|[^'\\])*' # single quoted string
| .[^,"']* # everything else
)
''',
re.DOTALL
)
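# Example spans RE_TRAILING_COMMA is meant to catch (illustrative):
#   [1, 2, 3,]   -> dangling comma before ']'
#   {"a": 1,}    -> dangling comma before '}'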
RE_LINE_INDENT_TAB = re.compile(r'^((\t+)?[^ \t\r\n][^\r\n]*)?\r?\n$')
RE_LINE_INDENT_SPACE = re.compile(r'^(((?: {4})+)?[^ \t\r\n][^\r\n]*)?\r?\n$')
RE_TRAILING_SPACES = re.compile(r'^.*?[ \t]+\r?\n?$')
E_MALFORMED = "E0"
E_COMMENTS = "E1"
E_COMMA = "E2"
W_NL_START = "W1"
W_NL_END = "W2"
W_INDENT = "W3"
W_TRAILING_SPACE = "W4"
VIOLATION_MSG = {
E_MALFORMED: 'JSON content is malformed.',
E_COMMENTS: 'Comments are not part of the JSON spec.',
E_COMMA: 'Dangling comma found.',
W_NL_START: 'Unnecessary newlines at the start of file.',
W_NL_END: 'Missing a new line at the end of the file.',
W_INDENT: 'Indentation Error.',
W_TRAILING_SPACE: 'Trailing whitespace.'
}
class CheckJsonFormat(object):
"""
Test JSON for format irregularities.
- Trailing spaces.
- Inconsistent indentation.
- New lines at end of file.
- Unnecessary newlines at start of file.
- Trailing commas.
- Malformed JSON.
"""
def __init__(self, use_tabs=False, allow_comments=False):
"""Setup the settings."""
self.use_tabs = use_tabs
self.allow_comments = allow_comments
self.fail = False
def index_lines(self, text):
"""Index the char range of each line."""
self.line_range = []
count = 1
last = 0
for m in re.finditer('\n', text):
self.line_range.append((last, m.end(0) - 1, count))
last = m.end(0)
count += 1
def get_line(self, pt):
"""Get the line from char index."""
line = None
for r in self.line_range:
if pt >= r[0] and pt <= r[1]:
line = r[2]
break
return line
def check_comments(self, text):
"""
Check for JavaScript comments.
Log them and strip them out so we can continue.
"""
def remove_comments(group):
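            # Keep only the newline characters of the stripped comment so the
            # original line count (and the line numbers reported later) is
            # preserved.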
return ''.join([x[0] for x in RE_LINE_PRESERVE.findall(group)])
def evaluate(m):
text = ''
g = m.groupdict()
if g["code"] is None:
if not self.allow_comments:
self.log_failure(E_COMMENTS, self.get_line(m.start(0)))
text = remove_comments(g["comments"])
else:
text = g["code"]
return text
content = ''.join(map(lambda m: evaluate(m), RE_COMMENT.finditer(text)))
return content
def check_dangling_commas(self, text):
"""
Check for dangling commas.
Log them and strip them out so we can continue.
"""
def check_comma(g, m, line):
# ,] -> ] or ,} -> }
self.log_failure(E_COMMA, line)
if g["square_comma"] is not None:
return g["square_ws"] + g["square_bracket"]
else:
return g["curly_ws"] + g["curly_bracket"]
def evaluate(m):
g = m.groupdict()
return check_comma(g, m, self.get_line(m.start(0))) if g["code"] is None else g["code"]
return ''.join(map(lambda m: evaluate(m), RE_TRAILING_COMMA.finditer(text)))
def log_failure(self, code, line=None):
"""
Log failure.
Log failure code, line number (if available) and message.
"""
if line:
print("%s: Line %d - %s" % (code, line, VIOLATION_MSG[code]))
else:
print("%s: %s" % (code, VIOLATION_MSG[code]))
self.fail = True
def check_format(self, file_name):
"""Initiate teh check."""
self.fail = False
with codecs.open(file_name, encoding='utf-8') as f:
count = 1
for line in f:
if count == 1 and line.strip() == '':
self.log_failure(W_NL_START, count)
if not line.endswith('\n'):
self.log_failure(W_NL_END, count)
if RE_TRAILING_SPACES.match(line):
self.log_failure(W_TRAILING_SPACE, count)
                if (RE_LINE_INDENT_TAB if self.use_tabs else RE_LINE_INDENT_SPACE).match(line) is None:
                    self.log_failure(W_INDENT, count)
count += 1
f.seek(0)
text = f.read()
self.index_lines(text)
text = self.check_comments(text)
self.index_lines(text)
text = self.check_dangling_commas(text)
try:
json.loads(text)
except Exception as e:
self.log_failure(E_MALFORMED)
print(e)
return self.fail
if __name__ == "__main__":
import sys
cjf = CheckJsonFormat(False, True)
cjf.check_format(sys.argv[1])
| mit |
SpootDev/py-mysql2pgsql | mysql2pgsql/lib/mysql_reader.py | 9 | 9445 | from __future__ import with_statement, absolute_import
import re
from contextlib import closing
import MySQLdb
import MySQLdb.cursors
re_column_length = re.compile(r'\((\d+)\)')
re_column_precision = re.compile(r'\((\d+),(\d+)\)')
re_key_1 = re.compile(r'CONSTRAINT `(\w+)` FOREIGN KEY \(`(\w+)`\) REFERENCES `(\w+)` \(`(\w+)`\)')
re_key_2 = re.compile(r'KEY `(\w+)` \((.*)\)')
re_key_3 = re.compile(r'PRIMARY KEY +\((.*)\)')
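# Example lines from `SHOW CREATE TABLE` output that the key regexes above
# are meant to match (illustrative):
#   re_key_1: CONSTRAINT `fk_user` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`)
#   re_key_2: KEY `idx_name` (`name`)
#   re_key_3: PRIMARY KEY (`id`)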
class DB:
"""
Class that wraps MySQLdb functions that auto reconnects
thus (hopefully) preventing the frustrating
"server has gone away" error. Also adds helpful
helper functions.
"""
conn = None
def __init__(self, options):
args = {
'user': str(options.get('username', 'root')),
'db': options['database'],
'use_unicode': True,
'charset': 'utf8',
}
if options.get('password', None):
args['passwd'] = str(options.get('password', None))
if options.get('socket', None):
args['unix_socket'] = str(options['socket'])
else:
args['host'] = str(options.get('hostname', 'localhost'))
args['port'] = options.get('port', 3306)
args['compress'] = options.get('compress', True)
self.options = args
def connect(self):
self.conn = MySQLdb.connect(**self.options)
def close(self):
self.conn.close()
def cursor(self, cursorclass=MySQLdb.cursors.Cursor):
try:
return self.conn.cursor(cursorclass)
except (AttributeError, MySQLdb.OperationalError):
self.connect()
return self.conn.cursor(cursorclass)
def list_tables(self):
return self.query('SHOW TABLES;')
def query(self, sql, args=(), one=False, large=False):
return self.query_one(sql, args) if one\
else self.query_many(sql, args, large)
def query_one(self, sql, args):
with closing(self.cursor()) as cur:
cur.execute(sql, args)
return cur.fetchone()
def query_many(self, sql, args, large):
with closing(self.cursor(MySQLdb.cursors.SSCursor if large else MySQLdb.cursors.Cursor)) as cur:
cur.execute(sql, args)
for row in cur:
yield row
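# Illustrative usage (connection options are placeholders):
#   db = DB({'database': 'mydb', 'username': 'root', 'password': 'secret'})
#   for (name,) in db.list_tables():
#       print name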
class MysqlReader(object):
class Table(object):
def __init__(self, reader, name):
self.reader = reader
self._name = name
self._indexes = []
self._foreign_keys = []
self._triggers = []
self._columns = self._load_columns()
self._comment = self._load_table_comment()
self._load_indexes()
self._load_triggers()
def _convert_type(self, data_type):
"""Normalize MySQL `data_type`"""
if data_type.startswith('varchar'):
return 'varchar'
elif data_type.startswith('char'):
return 'char'
elif data_type in ('bit(1)', 'tinyint(1)', 'tinyint(1) unsigned'):
return 'boolean'
elif re.search(r'^smallint.* unsigned', data_type) or data_type.startswith('mediumint'):
return 'integer'
elif data_type.startswith('smallint'):
return 'tinyint'
elif data_type.startswith('tinyint') or data_type.startswith('year('):
return 'tinyint'
elif data_type.startswith('bigint') and 'unsigned' in data_type:
return 'numeric'
elif re.search(r'^int.* unsigned', data_type) or \
(data_type.startswith('bigint') and 'unsigned' not in data_type):
return 'bigint'
elif data_type.startswith('int'):
return 'integer'
elif data_type.startswith('float'):
return 'float'
elif data_type.startswith('decimal'):
return 'decimal'
elif data_type.startswith('double'):
return 'double precision'
else:
return data_type
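        # A few mappings implied by the branches above (for reference):
        #   'varchar(255)'        -> 'varchar'
        #   'tinyint(1)'          -> 'boolean'
        #   'int(10) unsigned'    -> 'bigint'
        #   'bigint(20) unsigned' -> 'numeric'
        #   'double'              -> 'double precision'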
def _load_columns(self):
fields = []
for row in self.reader.db.query('SHOW FULL COLUMNS FROM `%s`' % self.name):
res = ()
for field in row:
if type(field) == unicode:
res += field.encode('utf8'),
else:
res += field,
length_match = re_column_length.search(res[1])
precision_match = re_column_precision.search(res[1])
length = length_match.group(1) if length_match else \
precision_match.group(1) if precision_match else None
name = res[0]
comment = res[8]
field_type = self._convert_type(res[1])
desc = {
'name': name,
'table_name': self.name,
'type': field_type,
'length': int(length) if length else None,
'decimals': precision_match.group(2) if precision_match else None,
'null': res[3] == 'YES' or field_type.startswith('enum') or field_type in ('date', 'datetime', 'timestamp'),
'primary_key': res[4] == 'PRI',
'auto_increment': res[6] == 'auto_increment',
'default': res[5] if not res[5] == 'NULL' else None,
'comment': comment,
'select': '`%s`' % name if not field_type.startswith('enum') else
'CASE `%(name)s` WHEN "" THEN NULL ELSE `%(name)s` END' % {'name': name},
}
fields.append(desc)
for field in (f for f in fields if f['auto_increment']):
res = self.reader.db.query('SELECT MAX(`%s`) FROM `%s`;' % (field['name'], self.name), one=True)
field['maxval'] = int(res[0]) if res[0] else 0
return fields
def _load_table_comment(self):
table_status = self.reader.db.query('SHOW TABLE STATUS WHERE Name="%s"' % self.name, one=True)
comment = table_status[17]
return comment
def _load_indexes(self):
explain = self.reader.db.query('SHOW CREATE TABLE `%s`' % self.name, one=True)
explain = explain[1]
for line in explain.split('\n'):
if ' KEY ' not in line:
continue
index = {}
match_data = re_key_1.search(line)
if match_data:
index['name'] = match_data.group(1)
index['column'] = match_data.group(2)
index['ref_table'] = match_data.group(3)
index['ref_column'] = match_data.group(4)
self._foreign_keys.append(index)
continue
match_data = re_key_2.search(line)
if match_data:
index['name'] = match_data.group(1)
index['columns'] = [re.search(r'`(\w+)`', col).group(1) for col in match_data.group(2).split(',')]
index['unique'] = 'UNIQUE' in line
self._indexes.append(index)
continue
match_data = re_key_3.search(line)
if match_data:
index['primary'] = True
index['columns'] = [re.sub(r'\(\d+\)', '', col.replace('`', '')) for col in match_data.group(1).split(',')]
self._indexes.append(index)
continue
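        # Illustrative `SHOW CREATE TABLE` lines matched by the regexes above:
        #   re_key_1: CONSTRAINT `fk_user` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`)
        #   re_key_2: KEY `idx_name` (`first_name`,`last_name`)
        #   re_key_3: PRIMARY KEY (`id`)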
def _load_triggers(self):
explain = self.reader.db.query('SHOW TRIGGERS WHERE `table` = \'%s\'' % self.name)
for row in explain:
if type(row) is tuple:
trigger = {}
trigger['name'] = row[0]
trigger['event'] = row[1]
trigger['statement'] = row[3]
trigger['timing'] = row[4]
trigger['statement'] = re.sub('^BEGIN', '', trigger['statement'])
trigger['statement'] = re.sub('^END', '', trigger['statement'], flags=re.MULTILINE)
trigger['statement'] = re.sub('`', '', trigger['statement'])
self._triggers.append(trigger)
@property
def name(self):
return self._name
@property
def columns(self):
return self._columns
@property
def comment(self):
return self._comment
@property
def indexes(self):
return self._indexes
@property
def foreign_keys(self):
return self._foreign_keys
@property
def triggers(self):
return self._triggers
@property
def query_for(self):
return 'SELECT %(column_names)s FROM `%(table_name)s`' % {
'table_name': self.name,
                'column_names': ', '.join(c['select'] for c in self.columns)}
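        # Illustrative result: for a table `users` with an integer column `id`
        # and an enum column `status`, query_for evaluates to:
        #   SELECT `id`, CASE `status` WHEN "" THEN NULL ELSE `status` END FROM `users`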
def __init__(self, options):
self.db = DB(options)
@property
def tables(self):
return (self.Table(self, t[0]) for t in self.db.list_tables())
def read(self, table):
return self.db.query(table.query_for, large=True)
def close(self):
self.db.close()
| mit |
rpdillon/wikid | wikid/docutils/parsers/__init__.py | 70 | 1457 | # $Id: __init__.py 5618 2008-07-28 08:37:32Z strank $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils parser modules.
"""
__docformat__ = 'reStructuredText'
from docutils import Component
class Parser(Component):
component_type = 'parser'
config_section = 'parsers'
def parse(self, inputstring, document):
"""Override to parse `inputstring` into document tree `document`."""
raise NotImplementedError('subclass must override this method')
def setup_parse(self, inputstring, document):
"""Initial parse setup. Call at start of `self.parse()`."""
self.inputstring = inputstring
self.document = document
document.reporter.attach_observer(document.note_parse_message)
def finish_parse(self):
"""Finalize parse details. Call at end of `self.parse()`."""
self.document.reporter.detach_observer(
self.document.note_parse_message)
_parser_aliases = {
'restructuredtext': 'rst',
'rest': 'rst',
'restx': 'rst',
'rtxt': 'rst',}
def get_parser_class(parser_name):
"""Return the Parser class from the `parser_name` module."""
parser_name = parser_name.lower()
if parser_name in _parser_aliases:
parser_name = _parser_aliases[parser_name]
module = __import__(parser_name, globals(), locals())
return module.Parser
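# Example (hedged): resolving an alias; assumes the `rst` parser package is
# importable relative to this one (Python 2 implicit relative import):
#   parser_class = get_parser_class('restructuredtext')   # -> rst.Parser
#   parser_class().parse(source_string, document)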
| gpl-3.0 |
Zelgadis87/Sick-Beard | lib/hachoir_parser/video/flv.py | 90 | 4787 | """
FLV video parser.
Documentation:
- FLV File format: http://osflash.org/flv
- libavformat from ffmpeg project
- flashticle: Python project to read Flash (SWF and FLV with AMF metadata)
http://undefined.org/python/#flashticle
Author: Victor Stinner
Creation date: 4 november 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt8, UInt24, UInt32, NullBits, NullBytes,
Bit, Bits, String, RawBytes, Enum)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_parser.audio.mpeg_audio import Frame
from lib.hachoir_parser.video.amf import AMFObject
from lib.hachoir_core.tools import createDict
SAMPLING_RATE = {
0: ( 5512, "5.5 kHz"),
1: (11025, "11 kHz"),
2: (22050, "22.1 kHz"),
3: (44100, "44.1 kHz"),
}
SAMPLING_RATE_VALUE = createDict(SAMPLING_RATE, 0)
SAMPLING_RATE_TEXT = createDict(SAMPLING_RATE, 1)
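# createDict (from hachoir_core.tools) is assumed to project one tuple element
# per key, giving:
#   SAMPLING_RATE_VALUE == {0: 5512, 1: 11025, 2: 22050, 3: 44100}
#   SAMPLING_RATE_TEXT  == {0: "5.5 kHz", 1: "11 kHz", 2: "22.1 kHz", 3: "44.1 kHz"}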
AUDIO_CODEC_MP3 = 2
AUDIO_CODEC_NAME = {
0: u"Uncompressed",
1: u"ADPCM",
2: u"MP3",
5: u"Nellymoser 8kHz mono",
6: u"Nellymoser",
}
VIDEO_CODEC_NAME = {
2: u"Sorensen H.263",
3: u"Screen video",
4: u"On2 VP6",
}
FRAME_TYPE = {
1: u"keyframe",
2: u"inter frame",
3: u"disposable inter frame",
}
class Header(FieldSet):
def createFields(self):
yield String(self, "signature", 3, "FLV format signature", charset="ASCII")
yield UInt8(self, "version")
yield NullBits(self, "reserved[]", 5)
yield Bit(self, "type_flags_audio")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "type_flags_video")
yield UInt32(self, "data_offset")
def parseAudio(parent, size):
yield Enum(Bits(parent, "codec", 4, "Audio codec"), AUDIO_CODEC_NAME)
yield Enum(Bits(parent, "sampling_rate", 2, "Sampling rate"), SAMPLING_RATE_TEXT)
yield Bit(parent, "is_16bit", "16-bit or 8-bit per sample")
yield Bit(parent, "is_stereo", "Stereo or mono channel")
size -= 1
if 0 < size:
if parent["codec"].value == AUDIO_CODEC_MP3 :
yield Frame(parent, "music_data", size=size*8)
else:
yield RawBytes(parent, "music_data", size)
def parseVideo(parent, size):
yield Enum(Bits(parent, "frame_type", 4, "Frame type"), FRAME_TYPE)
yield Enum(Bits(parent, "codec", 4, "Video codec"), VIDEO_CODEC_NAME)
if 1 < size:
yield RawBytes(parent, "data", size-1)
def parseAMF(parent, size):
while parent.current_size < parent.size:
yield AMFObject(parent, "entry[]")
class Chunk(FieldSet):
tag_info = {
8: ("audio[]", parseAudio, ""),
9: ("video[]", parseVideo, ""),
18: ("metadata", parseAMF, ""),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (11 + self["size"].value) * 8
tag = self["tag"].value
if tag in self.tag_info:
self._name, self.parser, self._description = self.tag_info[tag]
else:
self.parser = None
def createFields(self):
yield UInt8(self, "tag")
yield UInt24(self, "size", "Content size")
yield UInt24(self, "timestamp", "Timestamp in millisecond")
yield NullBytes(self, "reserved", 4)
size = self["size"].value
if size:
if self.parser:
for field in self.parser(self, size):
yield field
else:
yield RawBytes(self, "content", size)
def getSampleRate(self):
try:
return SAMPLING_RATE_VALUE[self["sampling_rate"].value]
except LookupError:
return None
class FlvFile(Parser):
PARSER_TAGS = {
"id": "flv",
"category": "video",
"file_ext": ("flv",),
"mime": (u"video/x-flv",),
"min_size": 9*4,
"magic": (
# Signature, version=1, flags=5 (video+audio), header size=9
("FLV\1\x05\0\0\0\x09", 0),
# Signature, version=1, flags=5 (video), header size=9
("FLV\1\x01\0\0\0\x09", 0),
),
"description": u"Macromedia Flash video"
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 3) != "FLV":
return "Wrong file signature"
if self["header/data_offset"].value != 9:
return "Unknown data offset in main header"
return True
def createFields(self):
yield Header(self, "header")
yield UInt32(self, "prev_size[]", "Size of previous chunk")
while not self.eof:
yield Chunk(self, "chunk[]")
yield UInt32(self, "prev_size[]", "Size of previous chunk")
def createDescription(self):
return u"Macromedia Flash video version %s" % self["header/version"].value
| gpl-3.0 |
puttarajubr/commcare-hq | custom/bihar/__init__.py | 1 | 1119 | from custom.bihar.reports import supervisor, due_list, mch_reports
from custom.bihar.reports.indicators import reports as indicators
BIHAR_DOMAINS = ('care-bihar', 'bihar')
CUSTOM_REPORTS = (
('Custom Reports', (
supervisor.MainNavReport,
due_list.DueListSelectionReport,
due_list.DueListNav,
due_list.VaccinationSummary,
due_list.VaccinationSummaryToday,
due_list.VaccinationSummaryTomorrow,
due_list.VaccinationSummary2Days,
due_list.VaccinationSummary3Days,
due_list.VaccinationClientList,
supervisor.ToolsNavReport,
supervisor.ReferralListReport,
supervisor.EDDCalcReport,
supervisor.BMICalcReport,
supervisor.SubCenterSelectionReport,
indicators.IndicatorNav,
indicators.IndicatorSummaryReport,
indicators.IndicatorClientSelectNav,
indicators.IndicatorClientList,
indicators.IndicatorCharts,
indicators.MyPerformanceReport,
indicators.MyPerformanceList,
mch_reports.MotherMCHRegister,
mch_reports.ChildMCHRegister
)),
)
| bsd-3-clause |
msebire/intellij-community | python/lib/Lib/distutils/bcppcompiler.py | 85 | 15086 | """distutils.bcppcompiler
Contains BorlandCCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bcppcompiler.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError, UnknownFileError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
# XXX why do the normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError, msg:
raise CompileError, msg
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
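    # Illustrative mapping (strip_dir=0, output_dir='build'):
    #   'foo.c'   -> 'build/foo.obj'
    #   'app.rc'  -> 'build/app.res'   (.rc is compiled to a .res file)
    #   'app.res' -> 'build/app.res'   (already binary, kept unchanged)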
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
print msg
raise CompileError, msg
# preprocess()
| apache-2.0 |
jianajavier/pnc-cli | test/integration/test_productversions_api.py | 1 | 2388 | import pytest
import conftest
from pnc_cli import utils
from pnc_cli import productversions
from pnc_cli.swagger_client.apis import ProductversionsApi
from test import testutils
@pytest.fixture(scope='function', autouse=True)
def get_versions_api():
global versions_api
versions_api = ProductversionsApi(utils.get_api_client())
def test_get_all_invalid_param():
testutils.assert_raises_typeerror(versions_api, 'get_all')
def test_get_all():
product_versions = versions_api.get_all(page_index=0, page_size=1000000, sort='', q='').content
assert product_versions is not None
def test_create_new_product_version_invalid_param():
testutils.assert_raises_typeerror(versions_api, 'create_new_product_version')
def test_create_new_product_version(new_version):
product_versions = [v.id for v in versions_api.get_all(page_size=1000000).content]
assert new_version.id in product_versions
def test_get_specific_no_id():
testutils.assert_raises_valueerror(versions_api, 'get_specific', id=None)
def test_get_specific_invalid_param():
testutils.assert_raises_typeerror(versions_api, 'get_specific', id=1)
def test_get_specific(new_version):
retrieved_version = versions_api.get_specific(id=new_version.id).content
assert new_version.to_dict() == retrieved_version.to_dict()
def test_update_no_id():
testutils.assert_raises_valueerror(versions_api, 'update', id=None)
def test_update_invalid_param():
testutils.assert_raises_typeerror(versions_api, 'update', id=1)
# currently unable to update build_configuration_ids
def test_update(new_version):
new_version.version = conftest.get_unique_version(new_version.product_id)
versions_api.update(id=new_version.id, body=new_version)
updated = versions_api.get_specific(id=new_version.id).content
assert updated.version == new_version.version
def test_version_exists(new_version):
assert productversions.version_exists(new_version.id)
def test_get_build_configuration_sets_no_id():
testutils.assert_raises_valueerror(versions_api, 'get_build_configuration_sets', id=None)
def test_get_build_configuration_sets_invalid_param():
testutils.assert_raises_typeerror(versions_api, 'get_build_configuration_sets', id=1)
def test_get_build_configuration_sets():
sets = versions_api.get_build_configuration_sets(id=1)
assert sets is not None
| apache-2.0 |
MatthieuBizien/scikit-learn | benchmarks/bench_lasso.py | 111 | 3364 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import matplotlib.pyplot as plt
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
plt.figure('scikit-learn LASSO benchmark results')
plt.subplot(211)
plt.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
plt.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
plt.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features,
alpha))
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
plt.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
plt.subplot(212)
plt.plot(list_n_features, lasso_results, 'b-', label='Lasso')
plt.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
plt.title('%d samples, alpha=%s' % (n_samples, alpha))
plt.legend(loc='upper left')
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
krattai/noo-ebs | docs/zeroMQ-guide2/examples/Python/bstarsrv.py | 1 | 4193 | # Binary Star Server
#
# Author: Dan Colish <dcolish@gmail.com>
from argparse import ArgumentParser
import time
from zhelpers import zmq
STATE_PRIMARY = 1
STATE_BACKUP = 2
STATE_ACTIVE = 3
STATE_PASSIVE = 4
PEER_PRIMARY = 1
PEER_BACKUP = 2
PEER_ACTIVE = 3
PEER_PASSIVE = 4
CLIENT_REQUEST = 5
HEARTBEAT = 1000
class BStarState(object):
def __init__(self, state, event, peer_expiry):
self.state = state
self.event = event
self.peer_expiry = peer_expiry
class BStarException(Exception):
pass
fsm_states = {
STATE_PRIMARY: {
PEER_BACKUP: ("I: connected to backup (slave), ready as master",
STATE_ACTIVE),
PEER_ACTIVE: ("I: connected to backup (master), ready as slave",
STATE_PASSIVE)
},
STATE_BACKUP: {
PEER_ACTIVE: ("I: connected to primary (master), ready as slave",
STATE_PASSIVE),
CLIENT_REQUEST: ("", False)
},
STATE_ACTIVE: {
PEER_ACTIVE: ("E: fatal error - dual masters, aborting", False)
},
STATE_PASSIVE: {
PEER_PRIMARY: ("I: primary (slave) is restarting, ready as master",
STATE_ACTIVE),
PEER_BACKUP: ("I: backup (slave) is restarting, ready as master",
STATE_ACTIVE),
PEER_PASSIVE: ("E: fatal error - dual slaves, aborting", False),
CLIENT_REQUEST: (CLIENT_REQUEST, True) # Say true, check peer later
}
}
def run_fsm(fsm):
# There are some transitional states we do not want to handle
state_dict = fsm_states.get(fsm.state, {})
res = state_dict.get(fsm.event)
if res:
msg, state = res
else:
return
    if state is False:
raise BStarException(msg)
elif msg == CLIENT_REQUEST:
assert fsm.peer_expiry > 0
if int(time.time() * 1000) > fsm.peer_expiry:
fsm.state = STATE_ACTIVE
else:
raise BStarException()
else:
print msg
fsm.state = state
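# Example transition, derived from fsm_states above: a PRIMARY server that
# hears a PEER_BACKUP heartbeat becomes ACTIVE.
#   fsm = BStarState(STATE_PRIMARY, PEER_BACKUP, 0)
#   run_fsm(fsm)        # prints "I: connected to backup (slave), ready as master"
#   assert fsm.state == STATE_ACTIVE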
def main():
parser = ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-p", "--primary", action="store_true", default=False)
group.add_argument("-b", "--backup", action="store_true", default=False)
args = parser.parse_args()
ctx = zmq.Context()
statepub = ctx.socket(zmq.PUB)
statesub = ctx.socket(zmq.SUB)
statesub.setsockopt(zmq.SUBSCRIBE, "")
frontend = ctx.socket(zmq.ROUTER)
fsm = BStarState(0, 0, 0)
if args.primary:
print "I: Primary master, waiting for backup (slave)"
frontend.bind("tcp://*:5001")
statepub.bind("tcp://*:5003")
statesub.connect("tcp://localhost:5004")
fsm.state = STATE_PRIMARY
elif args.backup:
print "I: Backup slave, waiting for primary (master)"
frontend.bind("tcp://*:5002")
statepub.bind("tcp://*:5004")
statesub.connect("tcp://localhost:5003")
statesub.setsockopt(zmq.SUBSCRIBE, "")
fsm.state = STATE_BACKUP
send_state_at = int(time.time() * 1000 + HEARTBEAT)
poller = zmq.Poller()
poller.register(frontend, zmq.POLLIN)
poller.register(statesub, zmq.POLLIN)
while True:
time_left = send_state_at - int(time.time() * 1000)
if time_left < 0:
time_left = 0
socks = dict(poller.poll(time_left))
if socks.get(frontend) == zmq.POLLIN:
msg = frontend.recv_multipart()
fsm.event = CLIENT_REQUEST
try:
run_fsm(fsm)
frontend.send_multipart(msg)
except BStarException:
del msg
if socks.get(statesub) == zmq.POLLIN:
msg = statesub.recv()
fsm.event = int(msg)
del msg
try:
run_fsm(fsm)
fsm.peer_expiry = int(time.time() * 1000) + (2 * HEARTBEAT)
except BStarException:
break
if int(time.time() * 1000) >= send_state_at:
statepub.send("%d" % fsm.state)
send_state_at = int(time.time() * 1000) + HEARTBEAT
if __name__ == '__main__':
main()
| bsd-2-clause |
virgree/odoo | addons/base_gengo/__init__.py | 377 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Openerp sa (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import ir_translation
import wizard
import controller
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
grnet/synnefo | snf-admin-app/synnefo_admin/admin/resources/networks/filters.py | 2 | 2559 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from synnefo.db.models import Network
import django_filters
from synnefo_admin.admin.queries_common import (process_queries, model_filter,
get_model_field)
@model_filter
def filter_network(queryset, queries):
q = process_queries("network", queries)
return queryset.filter(q)
@model_filter
def filter_user(queryset, queries):
q = process_queries("user", queries)
ids = get_model_field("user", q, 'uuid')
return queryset.filter(userid__in=ids)
@model_filter
def filter_vm(queryset, queries):
q = process_queries("vm", queries)
ids = get_model_field("vm", q, 'id')
return queryset.filter(machines__id__in=ids)
@model_filter
def filter_ip(queryset, queries):
q = process_queries("ip", queries)
ids = get_model_field("ip", q, 'nic__network__id')
return queryset.filter(id__in=ids)
@model_filter
def filter_project(queryset, queries):
q = process_queries("project", queries)
ids = get_model_field("project", q, 'uuid')
return queryset.filter(project__in=ids)
class NetworkFilterSet(django_filters.FilterSet):
"""A collection of filters for networks.
This filter collection is based on django-filter's FilterSet.
"""
net = django_filters.CharFilter(label='Network', action=filter_network)
user = django_filters.CharFilter(label='OF User', action=filter_user)
vm = django_filters.CharFilter(label='HAS VM', action=filter_vm)
ip = django_filters.CharFilter(label='HAS IP', action=filter_ip)
proj = django_filters.CharFilter(label='OF Project', action=filter_project)
state = django_filters.MultipleChoiceFilter(
label='Status', name='state', choices=Network.OPER_STATES)
class Meta:
model = Network
fields = ('net', 'state', 'public', 'drained', 'user', 'vm', 'ip',
'proj')
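# Usage sketch (hedged): a django-filter FilterSet is bound to GET-style data;
# the field names match the filters declared above:
#   fs = NetworkFilterSet({'user': 'alice'}, queryset=Network.objects.all())
#   filtered_networks = fs.qs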
| gpl-3.0 |
cesarmarinhorj/ansible | test/units/template/test_templar.py | 23 | 4193 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible import constants as C
from ansible.errors import *
from ansible.plugins import filter_loader, lookup_loader, module_loader
from ansible.plugins.strategies import SharedPluginLoaderObj
from ansible.template import Templar
from units.mock.loader import DictDataLoader
class TestTemplar(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_templar_simple(self):
fake_loader = DictDataLoader({
"/path/to/my_file.txt": "foo\n",
})
shared_loader = SharedPluginLoaderObj()
variables = dict(
foo="bar",
bam="{{foo}}",
num=1,
var_true=True,
var_false=False,
var_dict=dict(a="b"),
bad_dict="{a='b'",
var_list=[1],
recursive="{{recursive}}",
)
templar = Templar(loader=fake_loader, variables=variables)
# test some basic templating
self.assertEqual(templar.template("{{foo}}"), "bar")
self.assertEqual(templar.template("{{foo}}\n"), "bar")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
self.assertEqual(templar.template("foo", convert_bare=True), "bar")
self.assertEqual(templar.template("{{bam}}"), "bar")
self.assertEqual(templar.template("{{num}}"), 1)
self.assertEqual(templar.template("{{var_true}}"), True)
self.assertEqual(templar.template("{{var_false}}"), False)
self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
self.assertEqual(templar.template("{{var_list}}"), [1])
self.assertEqual(templar.template(1, convert_bare=True), 1)
#FIXME: lookup ignores fake file and returns error
#self.assertEqual(templar.template("{{lookup('file', '/path/to/my_file.txt')}}"), "foo")
# force errors
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
# test with fail_on_undefined=False
self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
# test set_available_variables()
templar.set_available_variables(variables=dict(foo="bam"))
self.assertEqual(templar.template("{{foo}}"), "bam")
# variables must be a dict() for set_available_variables()
self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")
def test_template_jinja2_extensions(self):
fake_loader = DictDataLoader({})
templar = Templar(loader=fake_loader)
old_exts = C.DEFAULT_JINJA2_EXTENSIONS
try:
C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
finally:
C.DEFAULT_JINJA2_EXTENSIONS = old_exts
| gpl-3.0 |
chromium2014/src | chrome/common/extensions/docs/server2/timer.py | 122 | 1702 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
class Timer(object):
'''A simple timer which starts when constructed and stops when Stop is called.
'''
def __init__(self):
self._start = time.time()
self._elapsed = None
def Stop(self):
'''Stops the timer. Must only be called once. Returns |self|.
'''
assert self._elapsed is None
self._elapsed = time.time() - self._start
return self
def With(self, other):
'''Returns a new stopped Timer with this Timer's elapsed time + |other|'s.
Both Timers must already be stopped.
'''
assert self._elapsed is not None
assert other._elapsed is not None
self_and_other = Timer()
self_and_other._start = min(self._start, other._start)
self_and_other._elapsed = self._elapsed + other._elapsed
return self_and_other
def FormatElapsed(self):
'''Returns the elapsed time as a string in a pretty format; as a whole
number in either seconds or milliseconds depending on which is more
appropriate. Must already be Stopped.
'''
assert self._elapsed is not None
elapsed = self._elapsed
if elapsed < 1:
elapsed = int(elapsed * 1000)
unit = 'ms'
else:
elapsed = int(elapsed)
unit = 'second' if elapsed == 1 else 'seconds'
return '%s %s' % (elapsed, unit)
def TimerClosure(closure, *args, **optargs):
'''A shorthand for timing a single function call. Returns a tuple of
(closure return value, timer).
'''
timer = Timer()
try:
return closure(*args, **optargs), timer
finally:
timer.Stop()
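# Usage sketch:
#   result, timer = TimerClosure(sum, [1, 2, 3])
#   print '%s computed in %s' % (result, timer.FormatElapsed())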
| bsd-3-clause |
yunqing/AR | preprocess.py | 1 | 9393 | import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import re
from nltk.stem.lancaster import LancasterStemmer
from gensim import corpora, models, similarities
import logging
# data
articles = [line.strip() for line in file('raw-data.txt')]
articles_title = [article.split('"')[1] for article in articles]
articles_abstract = [article.split('"')[3] for article in articles]
user_info_train = [line.strip().split(',') for line in file('user-info-train.txt')]
user_info_test = [line.strip().split(',') for line in file('user-info-test.txt')]
# user info train
train = {}
for i in range(1, 5552):
train[str(i)] = []
# a gap blank line must be above this line
for array in user_info_train:
train[array[0]].append(int(array[1]))
# a gap blank line must be above this line
# user info test
test = {}
for i in range(1, 5552):
test[str(i)] = []
# a gap blank line must be above this line
for array in user_info_test:
test[array[0]].append(int(array[1]))
# preprocess to get stemmed abstracts
rule_brace = re.compile(r'[\{\}]')
rule = re.compile(r'[^\sa-zA-Z]')
# remove '{' and '}'
temp_articles_abstract = [rule_brace.sub('', abstract) for abstract in articles_abstract]
# replace non-english characters(such as digit, punctuation, and other symbols) with space
temp_articles_abstract = [rule.sub(' ', abstract) for abstract in temp_articles_abstract]
# tokenize and convert to lower case
articles_abstract_tokenized = [[word.lower() for word in word_tokenize(abstract)] for abstract in temp_articles_abstract]
english_stopwords = stopwords.words('english')
# remove stopwords
articles_abstract_tokenized = [[word for word in abstract if not word in english_stopwords] for abstract in articles_abstract_tokenized]
# stem
st = LancasterStemmer()
abstracts_stemmed = [[st.stem(word) for word in abstract] for abstract in articles_abstract_tokenized]
abstracts = {}
for i in range(0, len(abstracts_stemmed)):
abstracts[str(i+1)] = abstracts_stemmed[i]
# tfidf
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
dictionary = corpora.Dictionary(abstracts_stemmed)
corpus = [dictionary.doc2bow(text) for text in abstracts_stemmed]
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
# lsi
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=100)
# lsi index all
corpus_lsi = lsi[corpus]
index_lsi = similarities.MatrixSimilarity(lsi[corpus], num_features=100)
# lda
lda = models.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=200)
def recommend(userid, train, test, abstracts, dictionary, lsi):
# for each user
print userid
abstracts_user = []
for docid in train[str(userid)]:
# for each doc the user likes
abstracts_user.append(abstracts[str(docid)])
temp_corpus = [dictionary.doc2bow(text) for text in abstracts_user]
temp_corpus_lsi = lsi[temp_corpus]
# index
temp_index_lsi = similarities.MatrixSimilarity(temp_corpus_lsi)
similarity = []
for docid in test[str(userid)]:
query_abstract = abstracts[str(docid)]
query_bow = dictionary.doc2bow(query_abstract)
# lsi query
query_lsi = lsi[query_bow]
sims_lsi = temp_index_lsi[query_lsi]
sort_sims_lsi = sorted(enumerate(sims_lsi), key=lambda item: -item[1])
# print sort_sims_lsi[0][1]
similarity.append((docid, sort_sims_lsi[0][1], train[str(userid)][sort_sims_lsi[0][0]]))
# print temp_score
similarity.sort(key=lambda x:x[1], reverse=True)
# print similarity[0:5]
recom_list[str(userid)] = similarity[0:5]
recom_list = {}
for userid in range(1, 2):
recommend(userid, train, test, abstracts, dictionary, lsi)
recom_result = {}
for userid in range(1, 2):
recom_result[str(userid)] = [tuple[0] for tuple in recom_list[str(userid)]]
return recom_result
recom_list = {}
similarity = []
def recommend(train, test, abstracts, dictionary, lsi):
for userid in range(1, 2):
# for each user
print userid
abstracts_user = []
for docid in train[str(userid)]:
# for each doc the user likes
abstracts_user.append(abstracts[str(docid)])
temp_corpus = [dictionary.doc2bow(text) for text in abstracts_user]
temp_corpus_lsi = lsi[temp_corpus]
# index
temp_index_lsi = similarities.MatrixSimilarity(temp_corpus_lsi)
similarity = []
for docid in test[str(userid)]:
query_abstract = abstracts[str(docid)]
query_bow = dictionary.doc2bow(query_abstract)
# lsi query
query_lsi = lsi[query_bow]
sims_lsi = temp_index_lsi[query_lsi]
sort_sims_lsi = sorted(enumerate(sims_lsi), key=lambda item: -item[1])
# print sort_sims_lsi[0][1]
similarity.append((docid, sort_sims_lsi[0][1], train[str(userid)][sort_sims_lsi[0][0]]))
# print temp_score
similarity.sort(key=lambda x:x[1], reverse=True)
# print similarity[0:5]
recom_list[str(userid)] = similarity[0:5]
recom_result = {}
for userid in range(1, 2):
recom_result[str(userid)] = [tuple[0] for tuple in recom_list[str(userid)]]
return recom_result
# recommend lsi
recom_list = {}
for userid in range(1, 2):
# for each user
print userid
abstracts_user = []
for docid in train[str(userid)]:
# for each doc the user likes
abstracts_user.append(abstracts[str(docid)])
temp_corpus = [dictionary.doc2bow(text) for text in abstracts_user]
temp_corpus_lsi = lsi[temp_corpus]
# index
temp_index_lsi = similarities.MatrixSimilarity(temp_corpus_lsi)
similarity = []
for docid in test[str(userid)]:
query_abstract = abstracts[str(docid)]
query_bow = dictionary.doc2bow(query_abstract)
# lsi query
query_lsi = lsi[query_bow]
sims_lsi = temp_index_lsi[query_lsi]
sort_sims_lsi = sorted(enumerate(sims_lsi), key=lambda item: -item[1])
# print sort_sims_lsi[0][1]
similarity.append((docid, sort_sims_lsi[0][1], train[str(userid)][sort_sims_lsi[0][0]]))
# print temp_score
similarity.sort(key=lambda x:x[1], reverse=True)
# print similarity[0:5]
recom_list[str(userid)] = similarity[0:5]
# recommend lda
recom_list = {}
for userid in range(1, 2):
# for each user
print userid
abstracts_user = []
for docid in train[str(userid)]:
# for each doc the user likes
abstracts_user.append(abstracts[str(docid)])
temp_corpus = [dictionary.doc2bow(text) for text in abstracts_user]
temp_corpus_lsi = lda[temp_corpus]
# index
temp_index_lsi = similarities.MatrixSimilarity(temp_corpus_lsi, num_features=100)
similarity = []
for docid in test[str(userid)]:
query_abstract = abstracts[str(docid)]
query_bow = dictionary.doc2bow(query_abstract)
# lsi query
query_lsi = lda[query_bow]
print docid, ": ", query_lsi
sims_lsi = temp_index_lsi[query_lsi]
sort_sims_lsi = sorted(enumerate(sims_lsi), key=lambda item: -item[1])
# print sort_sims_lsi[0][1]
similarity.append((docid, sort_sims_lsi[0][1], train[str(userid)][sort_sims_lsi[0][0]]))
# print temp_score
similarity.sort(key=lambda x:x[1], reverse=True)
# print similarity[0:5]
recom_list[str(userid)] = similarity[0:5]
recom_result = {}
for userid in range(1, 2):
recom_result[str(userid)] = [tuple[0] for tuple in recom_list[str(userid)]]
# segment train data to 5 blocks
import random
import copy
train_blocks = {}
for userid in range(1,5552):
train_blocks[str(userid)] = {}
for blockid in range(0,5):
train_blocks[str(userid)][str(blockid)] = []
for userid in range(1,5552):
# userid = 1
num = len(train[str(userid)])
block_size = int(round(float(num)/5))
temp_train_list = copy.deepcopy(train[str(userid)])
random.shuffle(temp_train_list)
for i in range(0, num):
# for each doc the user likes
        if i / block_size == 4:
for j in range(i, num):
train_blocks[str(userid)]['4'].append(temp_train_list[j])
break
train_blocks[str(userid)][str(i / block_size)].append(temp_train_list[i])
def recommend_single_user(train, test, abstracts, dictionary, lsi):
# for each user
abstracts_user = []
for docid in train:
# for each doc the user likes
abstracts_user.append(abstracts[str(docid)])
temp_corpus = [dictionary.doc2bow(text) for text in abstracts_user]
temp_corpus_lsi = lsi[temp_corpus]
# index
temp_index_lsi = similarities.MatrixSimilarity(temp_corpus_lsi)
temp_similarity = []
for docid in test:
temp_query_abstract = abstracts[str(docid)]
temp_query_bow = dictionary.doc2bow(temp_query_abstract)
# lsi query
temp_query_lsi = lsi[temp_query_bow]
temp_sims_lsi = temp_index_lsi[temp_query_lsi]
temp_sort_sims_lsi = sorted(enumerate(temp_sims_lsi), key=lambda item: -item[1])
# print sort_sims_lsi[0][1]
temp_similarity.append((docid, temp_sort_sims_lsi[0][1], train[temp_sort_sims_lsi[0][0]]))
# print temp_score
temp_similarity.sort(key=lambda x:x[1], reverse=True)
# print similarity[0:5]
return temp_similarity
import math
def mean_squared_error(temp_similarity):
temp = 0
for tuple in temp_similarity:
temp += (1 - tuple[1]) * (1 - tuple[1])
return math.sqrt(float(temp)/len(temp_similarity))
def average_error(temp_similarity):
temp = 0
for tuple in temp_similarity:
temp += (1 - tuple[1])
return (float(temp) / len(temp_similarity))
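# Illustrative values, for similarity tuples of (docid, score, matched_train_doc):
#   mean_squared_error([(1, 0.9, 5), (2, 0.7, 8)])  # sqrt((0.1**2 + 0.3**2) / 2) ~= 0.224
#   average_error([(1, 0.9, 5), (2, 0.7, 8)])       # (0.1 + 0.3) / 2 == 0.2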
train_cv = []
test_cv = []
block_omit = 4
userid = 4
for i in range(0, 5):
# for each train block
if i is not block_omit:
for docid in train_blocks[str(userid)][str(i)]:
train_cv.append(docid)
else:
for docid in train_blocks[str(userid)][str(i)]:
test_cv.append(docid)
temp_similarity = recommend_single_user(train_cv, test_cv, abstracts, dictionary, lsi)
| mit |
sridevikoushik31/openstack | nova/api/openstack/compute/contrib/flavor_disabled.py | 39 | 3161 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Disabled API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
authorize = extensions.soft_extension_authorizer('compute', 'flavor_disabled')
class FlavorDisabledController(wsgi.Controller):
def _extend_flavors(self, req, flavors):
for flavor in flavors:
db_flavor = req.get_db_flavor(flavor['id'])
key = "%s:disabled" % Flavor_disabled.alias
flavor[key] = db_flavor['disabled']
def _show(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
if 'flavor' in resp_obj.obj:
resp_obj.attach(xml=FlavorDisabledTemplate())
self._extend_flavors(req, [resp_obj.obj['flavor']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends(action='create')
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
resp_obj.attach(xml=FlavorsDisabledTemplate())
self._extend_flavors(req, list(resp_obj.obj['flavors']))
class Flavor_disabled(extensions.ExtensionDescriptor):
"""Support to show the disabled status of a flavor."""
name = "FlavorDisabled"
alias = "OS-FLV-DISABLED"
namespace = ("http://docs.openstack.org/compute/ext/"
"flavor_disabled/api/v1.1")
updated = "2012-08-29T00:00:00+00:00"
def get_controller_extensions(self):
controller = FlavorDisabledController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
def make_flavor(elem):
elem.set('{%s}disabled' % Flavor_disabled.namespace,
'%s:disabled' % Flavor_disabled.alias)
class FlavorDisabledTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavor', selector='flavor')
make_flavor(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Flavor_disabled.alias: Flavor_disabled.namespace})
class FlavorsDisabledTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavors')
elem = xmlutil.SubTemplateElement(root, 'flavor', selector='flavors')
make_flavor(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Flavor_disabled.alias: Flavor_disabled.namespace})
| apache-2.0 |
GheRivero/ansible | lib/ansible/modules/network/aci/aci_aep_to_domain.py | 26 | 8771 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_aep_to_domain
short_description: Bind AEPs to Physical or Virtual Domains (infra:RsDomP)
description:
- Bind AEPs to Physical or Virtual Domains on Cisco ACI fabrics.
notes:
- The C(aep) and C(domain) parameters should exist before using this module.
The M(aci_aep) and M(aci_domain) can be used for these.
- More information about the internal APIC class B(infra:RsDomP) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.5'
options:
aep:
description:
- The name of the Attachable Access Entity Profile.
aliases: [ aep_name ]
domain:
description:
- Name of the physical or virtual domain being associated with the AEP.
aliases: [ domain_name, domain_profile ]
domain_type:
description:
- Determines if the Domain is physical (phys) or virtual (vmm).
choices: [ fc, l2dom, l3dom, phys, vmm ]
aliases: [ type ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
vm_provider:
description:
- The VM platform for VMM Domains.
- Support for Kubernetes was added in ACI v3.0.
- Support for CloudFoundry, OpenShift and Red Hat was added in ACI v3.1.
choices: [ cloudfoundry, kubernetes, microsoft, openshift, openstack, redhat, vmware ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add AEP to domain binding
aci_aep_to_domain: &binding_present
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: present
- name: Remove AEP to domain binding
aci_aep_to_domain: &binding_absent
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: absent
- name: Query our AEP to domain binding
aci_aep_to_domain:
host: apic
username: admin
password: SomeSecretPassword
aep: test_aep
domain: phys_dom
domain_type: phys
state: query
- name: Query all AEP to domain bindings
aci_aep_to_domain: &binding_query
host: apic
username: admin
password: SomeSecretPassword
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
VM_PROVIDER_MAPPING = dict(
cloudfoundry='CloudFoundry',
kubernetes='Kubernetes',
microsoft='Microsoft',
openshift='OpenShift',
openstack='OpenStack',
redhat='Redhat',
vmware='VMware',
)
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
aep=dict(type='str', aliases=['aep_name']), # Not required for querying all objects
domain=dict(type='str', aliases=['domain_name', 'domain_profile']), # Not required for querying all objects
domain_type=dict(type='str', choices=['fc', 'l2dom', 'l3dom', 'phys', 'vmm'], aliases=['type']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
vm_provider=dict(type='str', choices=['cloudfoundry', 'kubernetes', 'microsoft', 'openshift', 'openstack', 'redhat', 'vmware']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['domain_type', 'vmm', ['vm_provider']],
['state', 'absent', ['aep', 'domain', 'domain_type']],
['state', 'present', ['aep', 'domain', 'domain_type']],
],
required_together=[
['domain', 'domain_type'],
],
)
aep = module.params['aep']
domain = module.params['domain']
domain_type = module.params['domain_type']
vm_provider = module.params['vm_provider']
state = module.params['state']
    # Fail when vm_provider is set but the domain type is not virtual (vmm)
if domain_type != 'vmm' and vm_provider is not None:
module.fail_json(msg="Domain type '{0}' cannot have a 'vm_provider'".format(domain_type))
# Compile the full domain for URL building
if domain_type == 'fc':
domain_mo = 'uni/fc-{0}'.format(domain)
elif domain_type == 'l2dom':
domain_mo = 'uni/l2dom-{0}'.format(domain)
elif domain_type == 'l3dom':
domain_mo = 'uni/l3dom-{0}'.format(domain)
elif domain_type == 'phys':
domain_mo = 'uni/phys-{0}'.format(domain)
elif domain_type == 'vmm':
domain_mo = 'uni/vmmp-{0}/dom-{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)
else:
domain_mo = None
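    # Illustrative examples of the distinguished names built above (values
    # are not from the module itself; they assume domain='phys_dom'):
    #   domain_type='phys'                      -> 'uni/phys-phys_dom'
    #   domain_type='l3dom'                     -> 'uni/l3dom-phys_dom'
    #   domain_type='vmm', vm_provider='vmware' -> 'uni/vmmp-VMware/dom-phys_dom'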
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraAttEntityP',
aci_rn='infra/attentp-{0}'.format(aep),
filter_target='eq(infraAttEntityP.name, "{0}")'.format(aep),
module_object=aep,
),
subclass_1=dict(
aci_class='infraRsDomP',
aci_rn='rsdomP-[{0}]'.format(domain_mo),
filter_target='eq(infraRsDomP.tDn, "{0}")'.format(domain_mo),
module_object=domain_mo,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraRsDomP',
class_config=dict(tDn=domain_mo),
)
aci.get_diff(aci_class='infraRsDomP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
ghdk/networkx | networkx/algorithms/cycles.py | 30 | 16789 | """
========================
Cycle finding algorithms
========================
"""
# Copyright (C) 2010-2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from collections import defaultdict
import networkx as nx
from networkx.utils import *
from networkx.algorithms.traversal.edgedfs import helper_funcs, edge_dfs
__all__ = [
'cycle_basis','simple_cycles','recursive_simple_cycles', 'find_cycle'
]
__author__ = "\n".join(['Jon Olav Vik <jonovik@gmail.com>',
'Dan Schult <dschult@colgate.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cycle_basis(G,root=None):
""" Returns a list of cycles which form a basis for cycles of G.
A basis for cycles of a network is a minimal collection of
cycles such that any cycle in the network can be written
as a sum of cycles in the basis. Here summation of cycles
is defined as "exclusive or" of the edges. Cycle bases are
useful, e.g. when deriving equations for electric circuits
using Kirchhoff's Laws.
Parameters
----------
G : NetworkX Graph
root : node, optional
Specify starting node for basis.
Returns
-------
A list of cycle lists. Each cycle list is a list of nodes
which forms a cycle (loop) in G.
Examples
--------
>>> G=nx.Graph()
>>> G.add_cycle([0,1,2,3])
>>> G.add_cycle([0,3,4,5])
>>> print(nx.cycle_basis(G,0))
[[3, 4, 5, 0], [1, 2, 3, 0]]
Notes
-----
This is adapted from algorithm CACM 491 [1]_.
References
----------
.. [1] Paton, K. An algorithm for finding a fundamental set of
cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.
See Also
--------
simple_cycles
"""
gnodes=set(G.nodes())
cycles=[]
while gnodes: # loop over connected components
if root is None:
root=gnodes.pop()
stack=[root]
pred={root:root}
used={root:set()}
while stack: # walk the spanning tree finding cycles
z=stack.pop() # use last-in so cycles easier to find
zused=used[z]
for nbr in G[z]:
if nbr not in used: # new node
pred[nbr]=z
stack.append(nbr)
used[nbr]=set([z])
elif nbr == z: # self loops
cycles.append([z])
elif nbr not in zused:# found a cycle
pn=used[nbr]
cycle=[nbr,z]
p=pred[z]
while p not in pn:
cycle.append(p)
p=pred[p]
cycle.append(p)
cycles.append(cycle)
used[nbr].add(z)
gnodes-=set(pred)
root=None
return cycles
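# A minimal sanity check of the "exclusive or" summation described in the
# docstring (illustrative sketch, not part of the library): the outer cycle
# of the docstring graph equals the edge-wise XOR of the two basis cycles.
#
#   G = nx.Graph()
#   G.add_cycle([0, 1, 2, 3])
#   G.add_cycle([0, 3, 4, 5])
#   b1, b2 = nx.cycle_basis(G, 0)
#   edges = lambda c: {frozenset(e) for e in zip(c, c[1:] + c[:1])}
#   assert edges(b1) ^ edges(b2) == edges([0, 1, 2, 3, 4, 5])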
@not_implemented_for('undirected')
def simple_cycles(G):
"""Find simple cycles (elementary circuits) of a directed graph.
    A simple cycle, or elementary circuit, is a closed path where no
node appears twice, except that the first and last node are the same.
Two elementary circuits are distinct if they are not cyclic permutations
of each other.
This is a nonrecursive, iterator/generator version of Johnson's
algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
cycle_generator: generator
A generator that produces elementary cycles of the graph. Each cycle is
a list of nodes with the first and last nodes being the same.
Examples
--------
>>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
>>> len(list(nx.simple_cycles(G)))
5
To filter the cycles so that they don't include certain nodes or edges,
    copy your graph and eliminate those nodes or edges before calling this function:
>>> copyG = G.copy()
>>> copyG.remove_nodes_from([1])
>>> copyG.remove_edges_from([(0, 1)])
>>> len(list(nx.simple_cycles(copyG)))
3
Notes
-----
The implementation follows pp. 79-80 in [1]_.
The time complexity is `O((n+e)(c+1))` for `n` nodes, `e` edges and `c`
elementary circuits.
References
----------
.. [1] Finding all the elementary circuits of a directed graph.
D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
http://dx.doi.org/10.1137/0204007
.. [2] Enumerating the cycles of a digraph: a new preprocessing strategy.
G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.
.. [3] A search strategy for the elementary cycles of a directed graph.
J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,
v. 16, no. 2, 192-204, 1976.
See Also
--------
cycle_basis
"""
def _unblock(thisnode,blocked,B):
stack=set([thisnode])
while stack:
node=stack.pop()
if node in blocked:
blocked.remove(node)
stack.update(B[node])
B[node].clear()
# Johnson's algorithm requires some ordering of the nodes.
    # We use the arbitrary ordering given by the strongly connected components.
    # There is no need to track the ordering, as each node is removed once processed.
subG = type(G)(G.edges_iter()) # save the actual graph so we can mutate it here
# We only take the edges because we do not want to
# copy edge and node attributes here.
sccs = list(nx.strongly_connected_components(subG))
while sccs:
scc=sccs.pop()
# order of scc determines ordering of nodes
startnode = scc.pop()
# Processing node runs "circuit" routine from recursive version
path=[startnode]
blocked = set() # vertex: blocked from search?
closed = set() # nodes involved in a cycle
blocked.add(startnode)
B=defaultdict(set) # graph portions that yield no elementary circuit
stack=[ (startnode,list(subG[startnode])) ] # subG gives component nbrs
while stack:
thisnode,nbrs = stack[-1]
if nbrs:
nextnode = nbrs.pop()
# print thisnode,nbrs,":",nextnode,blocked,B,path,stack,startnode
# f=raw_input("pause")
if nextnode == startnode:
yield path[:]
closed.update(path)
# print "Found a cycle",path,closed
elif nextnode not in blocked:
path.append(nextnode)
stack.append( (nextnode,list(subG[nextnode])) )
closed.discard(nextnode)
blocked.add(nextnode)
continue
# done with nextnode... look for more neighbors
if not nbrs: # no more nbrs
if thisnode in closed:
_unblock(thisnode,blocked,B)
else:
for nbr in subG[thisnode]:
if thisnode not in B[nbr]:
B[nbr].add(thisnode)
stack.pop()
# assert path[-1]==thisnode
path.pop()
# done processing this node
subG.remove_node(startnode)
H=subG.subgraph(scc) # make smaller to avoid work in SCC routine
sccs.extend(list(nx.strongly_connected_components(H)))
@not_implemented_for('undirected')
def recursive_simple_cycles(G):
"""Find simple cycles (elementary circuits) of a directed graph.
A simple cycle, or elementary circuit, is a closed path where no
node appears twice, except that the first and last node are the same.
Two elementary circuits are distinct if they are not cyclic permutations
of each other.
This version uses a recursive algorithm to build a list of cycles.
    You should probably use the iterator version called simple_cycles().
Warning: This recursive version uses lots of RAM!
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
A list of circuits, where each circuit is a list of nodes, with the first
and last node being the same.
Example:
>>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
>>> nx.recursive_simple_cycles(G)
[[0], [0, 1, 2], [0, 2], [1, 2], [2]]
See Also
--------
cycle_basis (for undirected graphs)
Notes
-----
The implementation follows pp. 79-80 in [1]_.
The time complexity is `O((n+e)(c+1))` for `n` nodes, `e` edges and `c`
elementary circuits.
References
----------
.. [1] Finding all the elementary circuits of a directed graph.
D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
http://dx.doi.org/10.1137/0204007
See Also
--------
simple_cycles, cycle_basis
"""
# Jon Olav Vik, 2010-08-09
def _unblock(thisnode):
"""Recursively unblock and remove nodes from B[thisnode]."""
if blocked[thisnode]:
blocked[thisnode] = False
while B[thisnode]:
_unblock(B[thisnode].pop())
def circuit(thisnode, startnode, component):
closed = False # set to True if elementary path is closed
path.append(thisnode)
blocked[thisnode] = True
for nextnode in component[thisnode]: # direct successors of thisnode
if nextnode == startnode:
result.append(path[:])
closed = True
elif not blocked[nextnode]:
if circuit(nextnode, startnode, component):
closed = True
if closed:
_unblock(thisnode)
else:
for nextnode in component[thisnode]:
if thisnode not in B[nextnode]: # TODO: use set for speedup?
B[nextnode].append(thisnode)
path.pop() # remove thisnode from path
return closed
path = [] # stack of nodes in current path
blocked = defaultdict(bool) # vertex: blocked from search?
B = defaultdict(list) # graph portions that yield no elementary circuit
result = [] # list to accumulate the circuits found
# Johnson's algorithm requires some ordering of the nodes.
# They might not be sortable so we assign an arbitrary ordering.
ordering=dict(zip(G,range(len(G))))
for s in ordering:
# Build the subgraph induced by s and following nodes in the ordering
subgraph = G.subgraph(node for node in G
if ordering[node] >= ordering[s])
# Find the strongly connected component in the subgraph
# that contains the least node according to the ordering
strongcomp = nx.strongly_connected_components(subgraph)
mincomp=min(strongcomp,
key=lambda nodes: min(ordering[n] for n in nodes))
component = G.subgraph(mincomp)
if component:
# smallest node in the component according to the ordering
startnode = min(component,key=ordering.__getitem__)
for node in component:
blocked[node] = False
B[node][:] = []
dummy=circuit(startnode, startnode, component)
return result
def find_cycle(G, source=None, orientation='original'):
"""
Returns the edges of a cycle found via a directed, depth-first traversal.
Parameters
----------
G : graph
A directed/undirected graph/multigraph.
source : node, list of nodes
The node from which the traversal begins. If ``None``, then a source
is chosen arbitrarily and repeatedly until all edges from each node in
the graph are searched.
orientation : 'original' | 'reverse' | 'ignore'
For directed graphs and directed multigraphs, edge traversals need not
respect the original orientation of the edges. When set to 'reverse',
then every edge will be traversed in the reverse direction. When set to
'ignore', then each directed edge is treated as a single undirected
edge that can be traversed in either direction. For undirected graphs
and undirected multigraphs, this parameter is meaningless and is not
consulted by the algorithm.
Returns
-------
edges : directed edges
A list of directed edges indicating the path taken for the loop. If
no cycle is found, then ``edges`` will be an empty list. For graphs, an
edge is of the form (u, v) where ``u`` and ``v`` are the tail and head
of the edge as determined by the traversal. For multigraphs, an edge is
of the form (u, v, key), where ``key`` is the key of the edge. When the
graph is directed, then ``u`` and ``v`` are always in the order of the
actual directed edge. If orientation is 'ignore', then an edge takes
the form (u, v, key, direction) where direction indicates if the edge
was followed in the forward (tail to head) or reverse (head to tail)
direction. When the direction is forward, the value of ``direction``
is 'forward'. When the direction is reverse, the value of ``direction``
is 'reverse'.
Examples
--------
In this example, we construct a DAG and find, in the first call, that there
are no directed cycles, and so an exception is raised. In the second call,
we ignore edge orientations and find that there is an undirected cycle.
    Note that the second call finds a directed cycle while effectively
    traversing an undirected graph, and so we find an "undirected cycle".
This means that this DAG structure does not form a directed tree (which
is also known as a polytree).
>>> import networkx as nx
>>> G = nx.DiGraph([(0,1), (0,2), (1,2)])
>>> try:
... find_cycle(G, orientation='original')
... except:
... pass
...
>>> list(find_cycle(G, orientation='ignore'))
[(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')]
"""
out_edge, key, tailhead = helper_funcs(G, orientation)
explored = set()
cycle = []
final_node = None
for start_node in G.nbunch_iter(source):
if start_node in explored:
# No loop is possible.
continue
edges = []
# All nodes seen in this iteration of edge_dfs
seen = {start_node}
# Nodes in active path.
active_nodes = {start_node}
previous_node = None
for edge in edge_dfs(G, start_node, orientation):
# Determine if this edge is a continuation of the active path.
tail, head = tailhead(edge)
if previous_node is not None and tail != previous_node:
# This edge results from backtracking.
# Pop until we get a node whose head equals the current tail.
# So for example, we might have:
# (0,1), (1,2), (2,3), (1,4)
# which must become:
# (0,1), (1,4)
while True:
try:
popped_edge = edges.pop()
except IndexError:
edges = []
active_nodes = {tail}
break
else:
popped_head = tailhead(popped_edge)[1]
active_nodes.remove(popped_head)
if edges:
last_head = tailhead(edges[-1])[1]
if tail == last_head:
break
edges.append(edge)
if head in active_nodes:
# We have a loop!
cycle.extend(edges)
final_node = head
break
elif head in explored:
# Then we've already explored it. No loop is possible.
break
else:
seen.add(head)
active_nodes.add(head)
previous_node = head
if cycle:
break
else:
explored.update(seen)
else:
        assert len(cycle) == 0
raise nx.exception.NetworkXNoCycle('No cycle found.')
# We now have a list of edges which ends on a cycle.
# So we need to remove from the beginning edges that are not relevant.
for i, edge in enumerate(cycle):
tail, head = tailhead(edge)
if tail == final_node:
break
return cycle[i:]
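# Minimal usage sketch (illustrative, not part of the module): handle the
# no-cycle case by catching the exception raised above.
#
#   try:
#       cycle = find_cycle(nx.DiGraph([(0, 1), (1, 2)]))
#   except nx.exception.NetworkXNoCycle:
#       cycle = []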
| bsd-3-clause |
Vixionar/django | django/core/management/commands/createcachetable.py | 342 | 4389 | from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import (
DEFAULT_DB_ALIAS, connections, models, router, transaction,
)
from django.db.utils import DatabaseError
from django.utils.encoding import force_text
class Command(BaseCommand):
help = "Creates the tables needed to use the SQL cache backend."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='table_name', nargs='*',
help='Optional table names. Otherwise, settings.CACHES is used to '
'find cache tables.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database onto which the cache tables will be '
'installed. Defaults to the "default" database.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run',
help='Does not create the table, just prints the SQL that would '
'be run.')
def handle(self, *tablenames, **options):
db = options.get('database')
self.verbosity = int(options.get('verbosity'))
dry_run = options.get('dry_run')
if len(tablenames):
# Legacy behavior, tablename specified as argument
for tablename in tablenames:
self.create_table(db, tablename, dry_run)
else:
for cache_alias in settings.CACHES:
cache = caches[cache_alias]
if isinstance(cache, BaseDatabaseCache):
self.create_table(db, cache._table, dry_run)
def create_table(self, database, tablename, dry_run):
cache = BaseDatabaseCache(tablename, {})
if not router.allow_migrate_model(database, cache.cache_model_class):
return
connection = connections[database]
if tablename in connection.introspection.table_names():
if self.verbosity > 0:
self.stdout.write("Cache table '%s' already exists." % tablename)
return
fields = (
# "key" is a reserved word in MySQL, so use "cache_key" instead.
models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
models.TextField(name='value'),
models.DateTimeField(name='expires', db_index=True),
)
table_output = []
index_output = []
qn = connection.ops.quote_name
for f in fields:
field_output = [qn(f.name), f.db_type(connection=connection)]
field_output.append("%sNULL" % ("NOT " if not f.null else ""))
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = "UNIQUE " if f.unique else ""
index_output.append("CREATE %sINDEX %s ON %s (%s);" %
(unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
qn(f.name)))
table_output.append(" ".join(field_output))
full_statement = ["CREATE TABLE %s (" % qn(tablename)]
for i, line in enumerate(table_output):
full_statement.append(' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
full_statement.append(');')
full_statement = "\n".join(full_statement)
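        # For a hypothetical table named 'my_cache_table' on SQLite, the
        # statement assembled above looks roughly like this (exact column
        # types vary per backend):
        #   CREATE TABLE "my_cache_table" (
        #       "cache_key" varchar(255) NOT NULL PRIMARY KEY,
        #       "value" text NOT NULL,
        #       "expires" datetime NOT NULL
        #   );
        # plus a separate CREATE INDEX statement for the "expires" column.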
if dry_run:
self.stdout.write(full_statement)
for statement in index_output:
self.stdout.write(statement)
return
with transaction.atomic(using=database,
savepoint=connection.features.can_rollback_ddl):
with connection.cursor() as curs:
try:
curs.execute(full_statement)
except DatabaseError as e:
raise CommandError(
"Cache table '%s' could not be created.\nThe error was: %s." %
(tablename, force_text(e)))
for statement in index_output:
curs.execute(statement)
if self.verbosity > 1:
self.stdout.write("Cache table '%s' created." % tablename)
| bsd-3-clause |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py2/libpasteurize/fixes/fix_raise.py | 71 | 1099 | u"""Fixer for 'raise E(V).with_traceback(T)' -> 'raise E, V, T'"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Comma, Node, Leaf, token, syms
class FixRaise(fixer_base.BaseFix):
PATTERN = u"""
raise_stmt< 'raise' (power< name=any [trailer< '(' val=any* ')' >]
[trailer< '.' 'with_traceback' > trailer< '(' trc=any ')' >] > | any) ['from' chain=any] >"""
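    # The rewrite performed by this fixer (illustrative):
    #   raise E(V).with_traceback(T)  ->  raise E, V, T
    #   raise E from C                ->  raise E   (chaining dropped, with a warning)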
def transform(self, node, results):
name, val, trc = (results.get(u"name"), results.get(u"val"), results.get(u"trc"))
chain = results.get(u"chain")
if chain is not None:
self.warning(node, u"explicit exception chaining is not supported in Python 2")
chain.prev_sibling.remove()
chain.remove()
if trc is not None:
val = val[0] if val else Leaf(token.NAME, u"None")
val.prefix = trc.prefix = u" "
kids = [Leaf(token.NAME, u"raise"), name.clone(), Comma(),
val.clone(), Comma(), trc.clone()]
raise_stmt = Node(syms.raise_stmt, kids)
node.replace(raise_stmt)
| isc |
Zac-HD/home-assistant | homeassistant/components/switch/acer_projector.py | 18 | 5066 | """
Use serial protocol of Acer projector to obtain state of the projector.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/switch.acer_projector/
"""
import logging
import re
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
STATE_ON, STATE_OFF, STATE_UNKNOWN, CONF_NAME, CONF_FILENAME)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyserial==3.1.1']
_LOGGER = logging.getLogger(__name__)
CONF_TIMEOUT = 'timeout'
CONF_WRITE_TIMEOUT = 'write_timeout'
DEFAULT_NAME = 'Acer Projector'
DEFAULT_TIMEOUT = 1
DEFAULT_WRITE_TIMEOUT = 1
ECO_MODE = 'ECO Mode'
ICON = 'mdi:projector'
INPUT_SOURCE = 'Input Source'
LAMP = 'Lamp'
LAMP_HOURS = 'Lamp Hours'
MODEL = 'Model'
# Commands known to the projector
CMD_DICT = {LAMP: '* 0 Lamp ?\r',
LAMP_HOURS: '* 0 Lamp\r',
INPUT_SOURCE: '* 0 Src ?\r',
ECO_MODE: '* 0 IR 052\r',
MODEL: '* 0 IR 035\r',
STATE_ON: '* 0 IR 001\r',
STATE_OFF: '* 0 IR 002\r'}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_FILENAME): cv.isdevice,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_WRITE_TIMEOUT, default=DEFAULT_WRITE_TIMEOUT):
cv.positive_int,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Connect with serial port and return Acer Projector."""
serial_port = config.get(CONF_FILENAME)
name = config.get(CONF_NAME)
timeout = config.get(CONF_TIMEOUT)
write_timeout = config.get(CONF_WRITE_TIMEOUT)
add_devices([AcerSwitch(serial_port, name, timeout, write_timeout)])
class AcerSwitch(SwitchDevice):
"""Represents an Acer Projector as an switch."""
def __init__(self, serial_port, name, timeout, write_timeout, **kwargs):
"""Init of the Acer projector."""
import serial
self.ser = serial.Serial(
port=serial_port, timeout=timeout, write_timeout=write_timeout,
**kwargs)
self._serial_port = serial_port
self._name = name
self._state = False
self._available = False
self._attributes = {
LAMP_HOURS: STATE_UNKNOWN,
INPUT_SOURCE: STATE_UNKNOWN,
ECO_MODE: STATE_UNKNOWN,
}
self.update()
def _write_read(self, msg):
"""Write to the projector and read the return."""
import serial
ret = ""
        # Sometimes the projector does not answer, or it was disconnected
        # during runtime. Reopening the port lets it reconnect and keep
        # working.
try:
if not self.ser.is_open:
self.ser.open()
msg = msg.encode('utf-8')
self.ser.write(msg)
            # Size is an empirical value; there is no real limit. AFAIK there
            # is no end character either, so we usually need to wait for the
            # timeout.
ret = self.ser.read_until(size=20).decode('utf-8')
except serial.SerialException:
            _LOGGER.error('Problem communicating with %s', self._serial_port)
self.ser.close()
return ret
def _write_read_format(self, msg):
"""Write msg, obtain awnser and format output."""
# awnsers are formated as ***\rawnser\r***
awns = self._write_read(msg)
match = re.search(r'\r(.+)\r', awns)
if match:
return match.group(1)
return STATE_UNKNOWN
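    # Illustrative example: a raw reply of '***\rLamp 1\r***' is reduced by
    # the regex above to 'Lamp 1'.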
@property
def available(self):
"""Return if projector is available."""
return self._available
@property
def name(self):
"""Return name of the projector."""
return self._name
@property
def is_on(self):
"""Return if the projector is turned on."""
return self._state
@property
def state_attributes(self):
"""Return state attributes."""
return self._attributes
def update(self):
"""Get the latest state from the projector."""
msg = CMD_DICT[LAMP]
awns = self._write_read_format(msg)
if awns == 'Lamp 1':
self._state = True
self._available = True
elif awns == 'Lamp 0':
self._state = False
self._available = True
else:
self._available = False
for key in self._attributes:
msg = CMD_DICT.get(key, None)
if msg:
awns = self._write_read_format(msg)
self._attributes[key] = awns
def turn_on(self):
"""Turn the projector on."""
msg = CMD_DICT[STATE_ON]
self._write_read(msg)
self._state = STATE_ON
def turn_off(self):
"""Turn the projector off."""
msg = CMD_DICT[STATE_OFF]
self._write_read(msg)
self._state = STATE_OFF
| apache-2.0 |
Pal3love/otRebuilder | Package/otRebuilder/Dep/fontTools/ttLib/tables/otConverters.py | 1 | 54321 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl, floatToFixed as fl2fi, ensureVersionIsLong as fi2ve,
versionToFixed as ve2fi)
from fontTools.misc.textTools import pad, safeEval
from fontTools.ttLib import getSearchRange
from .otBase import (CountReference, FormatSwitchingBaseTable,
OTTableReader, OTTableWriter, ValueRecordFactory)
from .otTables import (lookupTypes, AATStateTable, AATState, AATAction,
ContextualMorphAction, LigatureMorphAction,
MorxSubtable)
from functools import partial
import struct
import logging
log = logging.getLogger(__name__)
istuple = lambda t: isinstance(t, tuple)
def buildConverters(tableSpec, tableNamespace):
"""Given a table spec from otData.py, build a converter object for each
field of the table. This is called for each table in otData.py, and
the results are assigned to the corresponding class in otTables.py."""
converters = []
convertersByName = {}
for tp, name, repeat, aux, descr in tableSpec:
tableName = name
if name.startswith("ValueFormat"):
assert tp == "uint16"
converterClass = ValueFormat
elif name.endswith("Count") or name in ("StructLength", "MorphType"):
converterClass = {
"uint8": ComputedUInt8,
"uint16": ComputedUShort,
"uint32": ComputedULong,
}[tp]
elif name == "SubTable":
converterClass = SubTable
elif name == "ExtSubTable":
converterClass = ExtSubTable
elif name == "SubStruct":
converterClass = SubStruct
elif name == "FeatureParams":
converterClass = FeatureParams
elif name in ("CIDGlyphMapping", "GlyphCIDMapping"):
converterClass = StructWithLength
else:
            if tp not in converterMapping and '(' not in tp:
tableName = tp
converterClass = Struct
else:
converterClass = eval(tp, tableNamespace, converterMapping)
if tp in ('MortChain', 'MortSubtable', 'MorxChain'):
tableClass = tableNamespace.get(tp)
else:
tableClass = tableNamespace.get(tableName)
if tableClass is not None:
conv = converterClass(name, repeat, aux, tableClass=tableClass)
else:
conv = converterClass(name, repeat, aux)
if name in ["SubTable", "ExtSubTable", "SubStruct"]:
conv.lookupTypes = tableNamespace['lookupTypes']
# also create reverse mapping
for t in conv.lookupTypes.values():
for cls in t.values():
convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
if name == "FeatureParams":
conv.featureParamTypes = tableNamespace['featureParamTypes']
conv.defaultFeatureParams = tableNamespace['FeatureParams']
for cls in conv.featureParamTypes.values():
convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
converters.append(conv)
assert name not in convertersByName, name
convertersByName[name] = conv
return converters, convertersByName
class _MissingItem(tuple):
__slots__ = ()
try:
from collections import UserList
except ImportError:
from UserList import UserList
class _LazyList(UserList):
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def __getitem__(self, k):
if isinstance(k, slice):
indices = range(*k.indices(len(self)))
return [self[i] for i in indices]
item = self.data[k]
if isinstance(item, _MissingItem):
self.reader.seek(self.pos + item[0] * self.recordSize)
item = self.conv.read(self.reader, self.font, {})
self.data[k] = item
return item
def __add__(self, other):
if isinstance(other, _LazyList):
other = list(other)
elif isinstance(other, list):
pass
else:
return NotImplemented
return list(self) + other
def __radd__(self, other):
if not isinstance(other, list):
return NotImplemented
return other + list(self)
class BaseConverter(object):
"""Base class for converter objects. Apart from the constructor, this
is an abstract class."""
def __init__(self, name, repeat, aux, tableClass=None):
self.name = name
self.repeat = repeat
self.aux = aux
self.tableClass = tableClass
self.isCount = name.endswith("Count") or name in ['DesignAxisRecordSize', 'ValueRecordSize']
self.isLookupType = name.endswith("LookupType") or name == "MorphType"
self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "VarRegionCount", "MappingCount", "RegionAxisCount", 'DesignAxisCount', 'DesignAxisRecordSize', 'AxisValueCount', 'ValueRecordSize']
def readArray(self, reader, font, tableDict, count):
"""Read an array of values from the reader."""
lazy = font.lazy and count > 8
if lazy:
recordSize = self.getRecordSize(reader)
if recordSize is NotImplemented:
lazy = False
if not lazy:
l = []
for i in range(count):
l.append(self.read(reader, font, tableDict))
return l
else:
l = _LazyList()
l.reader = reader.copy()
l.pos = l.reader.pos
l.font = font
l.conv = self
l.recordSize = recordSize
l.extend(_MissingItem([i]) for i in range(count))
reader.advance(count * recordSize)
return l
def getRecordSize(self, reader):
if hasattr(self, 'staticSize'): return self.staticSize
return NotImplemented
def read(self, reader, font, tableDict):
"""Read a value from the reader."""
raise NotImplementedError(self)
def writeArray(self, writer, font, tableDict, values):
for i, value in enumerate(values):
self.write(writer, font, tableDict, value, i)
def write(self, writer, font, tableDict, value, repeatIndex=None):
"""Write a value to the writer."""
raise NotImplementedError(self)
def xmlRead(self, attrs, content, font):
"""Read a value from XML."""
raise NotImplementedError(self)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
"""Write a value to XML."""
raise NotImplementedError(self)
class SimpleValue(BaseConverter):
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.simpletag(name, attrs + [("value", value)])
xmlWriter.newline()
def xmlRead(self, attrs, content, font):
return attrs["value"]
class IntValue(SimpleValue):
def xmlRead(self, attrs, content, font):
return int(attrs["value"], 0)
class Long(IntValue):
staticSize = 4
def read(self, reader, font, tableDict):
return reader.readLong()
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeLong(value)
class ULong(IntValue):
staticSize = 4
def read(self, reader, font, tableDict):
return reader.readULong()
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeULong(value)
class Flags32(ULong):
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.simpletag(name, attrs + [("value", "0x%08X" % value)])
xmlWriter.newline()
class Short(IntValue):
staticSize = 2
def read(self, reader, font, tableDict):
return reader.readShort()
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeShort(value)
class UShort(IntValue):
staticSize = 2
def read(self, reader, font, tableDict):
return reader.readUShort()
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeUShort(value)
class Int8(IntValue):
staticSize = 1
def read(self, reader, font, tableDict):
return reader.readInt8()
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeInt8(value)
class UInt8(IntValue):
staticSize = 1
def read(self, reader, font, tableDict):
return reader.readUInt8()
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeUInt8(value)
class UInt24(IntValue):
staticSize = 3
def read(self, reader, font, tableDict):
return reader.readUInt24()
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeUInt24(value)
class ComputedInt(IntValue):
def xmlWrite(self, xmlWriter, font, value, name, attrs):
if value is not None:
xmlWriter.comment("%s=%s" % (name, value))
xmlWriter.newline()
class ComputedUInt8(ComputedInt, UInt8):
pass
class ComputedUShort(ComputedInt, UShort):
pass
class ComputedULong(ComputedInt, ULong):
pass
class Tag(SimpleValue):
staticSize = 4
def read(self, reader, font, tableDict):
return reader.readTag()
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeTag(value)
class GlyphID(SimpleValue):
staticSize = 2
def readArray(self, reader, font, tableDict, count):
glyphOrder = font.getGlyphOrder()
gids = reader.readUShortArray(count)
try:
l = [glyphOrder[gid] for gid in gids]
except IndexError:
# Slower, but will not throw an IndexError on an invalid glyph id.
l = [font.getGlyphName(gid) for gid in gids]
return l
def read(self, reader, font, tableDict):
return font.getGlyphName(reader.readUShort())
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeUShort(font.getGlyphID(value))
class NameID(UShort):
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.simpletag(name, attrs + [("value", value)])
nameTable = font.get("name") if font else None
if nameTable:
name = nameTable.getDebugName(value)
xmlWriter.write(" ")
if name:
xmlWriter.comment(name)
else:
xmlWriter.comment("missing from name table")
log.warning("name id %d missing from name table" % value)
xmlWriter.newline()
class FloatValue(SimpleValue):
def xmlRead(self, attrs, content, font):
return float(attrs["value"])
class DeciPoints(FloatValue):
staticSize = 2
def read(self, reader, font, tableDict):
return reader.readUShort() / 10
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeUShort(round(value * 10))
class Fixed(FloatValue):
staticSize = 4
def read(self, reader, font, tableDict):
return fi2fl(reader.readLong(), 16)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeLong(fl2fi(value, 16))
class F2Dot14(FloatValue):
staticSize = 2
def read(self, reader, font, tableDict):
return fi2fl(reader.readShort(), 14)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.writeShort(fl2fi(value, 14))
class Version(BaseConverter):
staticSize = 4
def read(self, reader, font, tableDict):
value = reader.readLong()
assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
return value
def write(self, writer, font, tableDict, value, repeatIndex=None):
value = fi2ve(value)
assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
writer.writeLong(value)
def xmlRead(self, attrs, content, font):
value = attrs["value"]
value = ve2fi(value)
return value
def xmlWrite(self, xmlWriter, font, value, name, attrs):
value = fi2ve(value)
value = "0x%08x" % value
xmlWriter.simpletag(name, attrs + [("value", value)])
xmlWriter.newline()
@staticmethod
def fromFloat(v):
return fl2fi(v, 16)
class Char64(SimpleValue):
"""An ASCII string with up to 64 characters.
Unused character positions are filled with 0x00 bytes.
Used in Apple AAT fonts in the `gcid` table.
"""
staticSize = 64
def read(self, reader, font, tableDict):
data = reader.readData(self.staticSize)
zeroPos = data.find(b"\0")
if zeroPos >= 0:
data = data[:zeroPos]
s = tounicode(data, encoding="ascii", errors="replace")
if s != tounicode(data, encoding="ascii", errors="ignore"):
log.warning('replaced non-ASCII characters in "%s"' %
s)
return s
def write(self, writer, font, tableDict, value, repeatIndex=None):
data = tobytes(value, encoding="ascii", errors="replace")
if data != tobytes(value, encoding="ascii", errors="ignore"):
log.warning('replacing non-ASCII characters in "%s"' %
value)
if len(data) > self.staticSize:
log.warning('truncating overlong "%s" to %d bytes' %
(value, self.staticSize))
data = (data + b"\0" * self.staticSize)[:self.staticSize]
writer.writeData(data)
class Struct(BaseConverter):
def getRecordSize(self, reader):
return self.tableClass and self.tableClass.getRecordSize(reader)
def read(self, reader, font, tableDict):
table = self.tableClass()
table.decompile(reader, font)
return table
def write(self, writer, font, tableDict, value, repeatIndex=None):
value.compile(writer, font)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
if value is None:
if attrs:
# If there are attributes (probably index), then
# don't drop this even if it's NULL. It will mess
# up the array indices of the containing element.
xmlWriter.simpletag(name, attrs + [("empty", 1)])
xmlWriter.newline()
else:
pass # NULL table, ignore
else:
value.toXML(xmlWriter, font, attrs, name=name)
def xmlRead(self, attrs, content, font):
if "empty" in attrs and safeEval(attrs["empty"]):
return None
table = self.tableClass()
Format = attrs.get("Format")
if Format is not None:
table.Format = int(Format)
noPostRead = not hasattr(table, 'postRead')
if noPostRead:
# TODO Cache table.hasPropagated.
cleanPropagation = False
for conv in table.getConverters():
if conv.isPropagated:
cleanPropagation = True
if not hasattr(font, '_propagator'):
font._propagator = {}
propagator = font._propagator
assert conv.name not in propagator, (conv.name, propagator)
setattr(table, conv.name, None)
propagator[conv.name] = CountReference(table.__dict__, conv.name)
for element in content:
if isinstance(element, tuple):
name, attrs, content = element
table.fromXML(name, attrs, content, font)
else:
pass
table.populateDefaults(propagator=getattr(font, '_propagator', None))
if noPostRead:
if cleanPropagation:
for conv in table.getConverters():
if conv.isPropagated:
propagator = font._propagator
del propagator[conv.name]
if not propagator:
del font._propagator
return table
def __repr__(self):
return "Struct of " + repr(self.tableClass)
class StructWithLength(Struct):
def read(self, reader, font, tableDict):
pos = reader.pos
table = self.tableClass()
table.decompile(reader, font)
reader.seek(pos + table.StructLength)
return table
def write(self, writer, font, tableDict, value, repeatIndex=None):
for convIndex, conv in enumerate(value.getConverters()):
if conv.name == "StructLength":
break
lengthIndex = len(writer.items) + convIndex
if isinstance(value, FormatSwitchingBaseTable):
lengthIndex += 1 # implicit Format field
deadbeef = {1:0xDE, 2:0xDEAD, 4:0xDEADBEEF}[conv.staticSize]
before = writer.getDataLength()
value.StructLength = deadbeef
value.compile(writer, font)
length = writer.getDataLength() - before
lengthWriter = writer.getSubWriter()
conv.write(lengthWriter, font, tableDict, length)
assert(writer.items[lengthIndex] ==
b"\xde\xad\xbe\xef"[:conv.staticSize])
writer.items[lengthIndex] = lengthWriter.getAllData()
class Table(Struct):
longOffset = False
staticSize = 2
def readOffset(self, reader):
return reader.readUShort()
def writeNullOffset(self, writer):
if self.longOffset:
writer.writeULong(0)
else:
writer.writeUShort(0)
def read(self, reader, font, tableDict):
offset = self.readOffset(reader)
if offset == 0:
return None
table = self.tableClass()
reader = reader.getSubReader(offset)
if font.lazy:
table.reader = reader
table.font = font
else:
table.decompile(reader, font)
return table
def write(self, writer, font, tableDict, value, repeatIndex=None):
if value is None:
self.writeNullOffset(writer)
else:
subWriter = writer.getSubWriter()
subWriter.longOffset = self.longOffset
subWriter.name = self.name
if repeatIndex is not None:
subWriter.repeatIndex = repeatIndex
writer.writeSubTable(subWriter)
value.compile(subWriter, font)
class LTable(Table):
longOffset = True
staticSize = 4
def readOffset(self, reader):
return reader.readULong()
# TODO Clean / merge the SubTable and SubStruct
class SubStruct(Struct):
def getConverter(self, tableType, lookupType):
tableClass = self.lookupTypes[tableType][lookupType]
return self.__class__(self.name, self.repeat, self.aux, tableClass)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs)
class SubTable(Table):
def getConverter(self, tableType, lookupType):
tableClass = self.lookupTypes[tableType][lookupType]
return self.__class__(self.name, self.repeat, self.aux, tableClass)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs)
class ExtSubTable(LTable, SubTable):
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer.
Table.write(self, writer, font, tableDict, value, repeatIndex)
class FeatureParams(Table):
def getConverter(self, featureTag):
tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams)
return self.__class__(self.name, self.repeat, self.aux, tableClass)
class ValueFormat(IntValue):
staticSize = 2
def __init__(self, name, repeat, aux, tableClass=None):
BaseConverter.__init__(self, name, repeat, aux, tableClass)
self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1")
def read(self, reader, font, tableDict):
format = reader.readUShort()
reader[self.which] = ValueRecordFactory(format)
return format
def write(self, writer, font, tableDict, format, repeatIndex=None):
writer.writeUShort(format)
writer[self.which] = ValueRecordFactory(format)
class ValueRecord(ValueFormat):
def getRecordSize(self, reader):
return 2 * len(reader[self.which])
def read(self, reader, font, tableDict):
return reader[self.which].readValueRecord(reader, font)
def write(self, writer, font, tableDict, value, repeatIndex=None):
writer[self.which].writeValueRecord(writer, font, value)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
if value is None:
pass # NULL table, ignore
else:
value.toXML(xmlWriter, font, self.name, attrs)
def xmlRead(self, attrs, content, font):
from .otBase import ValueRecord
value = ValueRecord()
value.fromXML(None, attrs, content, font)
return value
class AATLookup(BaseConverter):
BIN_SEARCH_HEADER_SIZE = 10
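    # AAT lookup formats handled below (per Apple's TrueType Reference
    # Manual):
    #   0: simple array with one value per glyph
    #   2: segment single (ranges of glyphs sharing one value)
    #   4: segment array (ranges of glyphs with per-glyph values)
    #   6: single table of sparse (glyph, value) pairs
    #   8: trimmed array covering a dense range from a first glyph ID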
def __init__(self, name, repeat, aux, tableClass):
BaseConverter.__init__(self, name, repeat, aux, tableClass)
if issubclass(self.tableClass, SimpleValue):
self.converter = self.tableClass(name='Value', repeat=None, aux=None)
else:
self.converter = Table(name='Value', repeat=None, aux=None, tableClass=self.tableClass)
def read(self, reader, font, tableDict):
format = reader.readUShort()
if format == 0:
return self.readFormat0(reader, font)
elif format == 2:
return self.readFormat2(reader, font)
elif format == 4:
return self.readFormat4(reader, font)
elif format == 6:
return self.readFormat6(reader, font)
elif format == 8:
return self.readFormat8(reader, font)
else:
assert False, "unsupported lookup format: %d" % format
def write(self, writer, font, tableDict, value, repeatIndex=None):
values = list(sorted([(font.getGlyphID(glyph), val)
for glyph, val in value.items()]))
# TODO: Also implement format 4.
formats = list(sorted(filter(None, [
self.buildFormat0(writer, font, values),
self.buildFormat2(writer, font, values),
self.buildFormat6(writer, font, values),
self.buildFormat8(writer, font, values),
])))
# We use the format ID as secondary sort key to make the output
# deterministic when multiple formats have same encoded size.
dataSize, lookupFormat, writeMethod = formats[0]
pos = writer.getDataLength()
writeMethod()
actualSize = writer.getDataLength() - pos
assert actualSize == dataSize, (
"AATLookup format %d claimed to write %d bytes, but wrote %d" %
(lookupFormat, dataSize, actualSize))
@staticmethod
def writeBinSearchHeader(writer, numUnits, unitSize):
writer.writeUShort(unitSize)
writer.writeUShort(numUnits)
searchRange, entrySelector, rangeShift = \
getSearchRange(n=numUnits, itemSize=unitSize)
writer.writeUShort(searchRange)
writer.writeUShort(entrySelector)
writer.writeUShort(rangeShift)
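    # Worked example (illustrative): for numUnits=10 and unitSize=6,
    # getSearchRange yields searchRange = 6 * 2**3 = 48, entrySelector = 3,
    # and rangeShift = 10 * 6 - 48 = 12.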
def buildFormat0(self, writer, font, values):
numGlyphs = len(font.getGlyphOrder())
if len(values) != numGlyphs:
return None
valueSize = self.converter.staticSize
return (2 + numGlyphs * valueSize, 0,
lambda: self.writeFormat0(writer, font, values))
def writeFormat0(self, writer, font, values):
writer.writeUShort(0)
for glyphID_, value in values:
self.converter.write(
writer, font, tableDict=None,
value=value, repeatIndex=None)
def buildFormat2(self, writer, font, values):
segStart, segValue = values[0]
segEnd = segStart
segments = []
for glyphID, curValue in values[1:]:
if glyphID != segEnd + 1 or curValue != segValue:
segments.append((segStart, segEnd, segValue))
segStart = segEnd = glyphID
segValue = curValue
else:
segEnd = glyphID
segments.append((segStart, segEnd, segValue))
valueSize = self.converter.staticSize
numUnits, unitSize = len(segments) + 1, valueSize + 4
return (2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize, 2,
lambda: self.writeFormat2(writer, font, segments))
def writeFormat2(self, writer, font, segments):
writer.writeUShort(2)
valueSize = self.converter.staticSize
numUnits, unitSize = len(segments), valueSize + 4
self.writeBinSearchHeader(writer, numUnits, unitSize)
for firstGlyph, lastGlyph, value in segments:
writer.writeUShort(lastGlyph)
writer.writeUShort(firstGlyph)
self.converter.write(
writer, font, tableDict=None,
value=value, repeatIndex=None)
writer.writeUShort(0xFFFF)
writer.writeUShort(0xFFFF)
writer.writeData(b'\x00' * valueSize)
def buildFormat6(self, writer, font, values):
valueSize = self.converter.staticSize
numUnits, unitSize = len(values), valueSize + 2
return (2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize, 6,
lambda: self.writeFormat6(writer, font, values))
def writeFormat6(self, writer, font, values):
writer.writeUShort(6)
valueSize = self.converter.staticSize
numUnits, unitSize = len(values), valueSize + 2
self.writeBinSearchHeader(writer, numUnits, unitSize)
for glyphID, value in values:
writer.writeUShort(glyphID)
self.converter.write(
writer, font, tableDict=None,
value=value, repeatIndex=None)
writer.writeUShort(0xFFFF)
writer.writeData(b'\x00' * valueSize)
def buildFormat8(self, writer, font, values):
minGlyphID, maxGlyphID = values[0][0], values[-1][0]
if len(values) != maxGlyphID - minGlyphID + 1:
return None
valueSize = self.converter.staticSize
return (6 + len(values) * valueSize, 8,
lambda: self.writeFormat8(writer, font, values))
def writeFormat8(self, writer, font, values):
firstGlyphID = values[0][0]
writer.writeUShort(8)
writer.writeUShort(firstGlyphID)
writer.writeUShort(len(values))
for _, value in values:
self.converter.write(
writer, font, tableDict=None,
value=value, repeatIndex=None)
def readFormat0(self, reader, font):
numGlyphs = len(font.getGlyphOrder())
data = self.converter.readArray(
reader, font, tableDict=None, count=numGlyphs)
return {font.getGlyphName(k): value
for k, value in enumerate(data)}
def readFormat2(self, reader, font):
mapping = {}
pos = reader.pos - 2 # start of table is at UShort for format
unitSize, numUnits = reader.readUShort(), reader.readUShort()
assert unitSize >= 4 + self.converter.staticSize, unitSize
for i in range(numUnits):
reader.seek(pos + i * unitSize + 12)
last = reader.readUShort()
first = reader.readUShort()
value = self.converter.read(reader, font, tableDict=None)
if last != 0xFFFF:
for k in range(first, last + 1):
mapping[font.getGlyphName(k)] = value
return mapping
def readFormat4(self, reader, font):
mapping = {}
pos = reader.pos - 2 # start of table is at UShort for format
unitSize = reader.readUShort()
assert unitSize >= 6, unitSize
for i in range(reader.readUShort()):
reader.seek(pos + i * unitSize + 12)
last = reader.readUShort()
first = reader.readUShort()
offset = reader.readUShort()
if last != 0xFFFF:
dataReader = reader.getSubReader(0) # relative to current position
dataReader.seek(pos + offset) # relative to start of table
data = self.converter.readArray(
dataReader, font, tableDict=None,
count=last - first + 1)
for k, v in enumerate(data):
mapping[font.getGlyphName(first + k)] = v
return mapping
def readFormat6(self, reader, font):
mapping = {}
pos = reader.pos - 2 # start of table is at UShort for format
unitSize = reader.readUShort()
assert unitSize >= 2 + self.converter.staticSize, unitSize
for i in range(reader.readUShort()):
reader.seek(pos + i * unitSize + 12)
glyphID = reader.readUShort()
value = self.converter.read(
reader, font, tableDict=None)
if glyphID != 0xFFFF:
mapping[font.getGlyphName(glyphID)] = value
return mapping
def readFormat8(self, reader, font):
first = reader.readUShort()
count = reader.readUShort()
data = self.converter.readArray(
reader, font, tableDict=None, count=count)
return {font.getGlyphName(first + k): value
for (k, value) in enumerate(data)}
def xmlRead(self, attrs, content, font):
value = {}
for element in content:
if isinstance(element, tuple):
name, a, eltContent = element
if name == "Lookup":
value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font)
return value
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.begintag(name, attrs)
xmlWriter.newline()
for glyph, value in sorted(value.items()):
self.converter.xmlWrite(
xmlWriter, font, value=value,
name="Lookup", attrs=[("glyph", glyph)])
xmlWriter.endtag(name)
xmlWriter.newline()
# The AAT 'ankr' table has an unusual structure: An offset to an AATLookup
# followed by an offset to a glyph data table. Unusually, the offsets in
# the AATLookup are not relative to the beginning of the 'ankr' table,
# but relative to the glyph data table.
# So, to find the anchor data for a glyph, one needs to add the offset
# to the data table to the offset found in the AATLookup, and then use
# the sum of these two offsets to find the actual data.
class AATLookupWithDataOffset(BaseConverter):
def read(self, reader, font, tableDict):
lookupOffset = reader.readULong()
dataOffset = reader.readULong()
lookupReader = reader.getSubReader(lookupOffset)
lookup = AATLookup('DataOffsets', None, None, UShort)
offsets = lookup.read(lookupReader, font, tableDict)
result = {}
for glyph, offset in offsets.items():
dataReader = reader.getSubReader(offset + dataOffset)
item = self.tableClass()
item.decompile(dataReader, font)
result[glyph] = item
return result
def write(self, writer, font, tableDict, value, repeatIndex=None):
# We do not work with OTTableWriter sub-writers because
# the offsets in our AATLookup are relative to our data
# table, for which we need to provide an offset value itself.
# It might have been possible to somehow make a kludge for
# performing this indirect offset computation directly inside
# OTTableWriter. But this would have made the internal logic
# of OTTableWriter even more complex than it already is,
# so we decided to roll our own offset computation for the
# contents of the AATLookup and associated data table.
offsetByGlyph, offsetByData, dataLen = {}, {}, 0
compiledData = []
for glyph in sorted(value, key=font.getGlyphID):
subWriter = OTTableWriter()
value[glyph].compile(subWriter, font)
data = subWriter.getAllData()
offset = offsetByData.get(data, None)
            if offset is None:
offset = dataLen
dataLen = dataLen + len(data)
offsetByData[data] = offset
compiledData.append(data)
offsetByGlyph[glyph] = offset
# For calculating the offsets to our AATLookup and data table,
# we can use the regular OTTableWriter infrastructure.
lookupWriter = writer.getSubWriter()
lookupWriter.longOffset = True
lookup = AATLookup('DataOffsets', None, None, UShort)
lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None)
dataWriter = writer.getSubWriter()
dataWriter.longOffset = True
writer.writeSubTable(lookupWriter)
writer.writeSubTable(dataWriter)
for d in compiledData:
dataWriter.writeData(d)
def xmlRead(self, attrs, content, font):
lookup = AATLookup('DataOffsets', None, None, self.tableClass)
return lookup.xmlRead(attrs, content, font)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
lookup = AATLookup('DataOffsets', None, None, self.tableClass)
lookup.xmlWrite(xmlWriter, font, value, name, attrs)
class MorxSubtableConverter(BaseConverter):
_PROCESSING_ORDERS = {
# bits 30 and 28 of morx.CoverageFlags; see morx spec
(False, False): "LayoutOrder",
(True, False): "ReversedLayoutOrder",
(False, True): "LogicalOrder",
(True, True): "ReversedLogicalOrder",
}
_PROCESSING_ORDERS_REVERSED = {
val: key for key, val in _PROCESSING_ORDERS.items()
}
def __init__(self, name, repeat, aux):
BaseConverter.__init__(self, name, repeat, aux)
def _setTextDirectionFromCoverageFlags(self, flags, subtable):
if (flags & 0x20) != 0:
subtable.TextDirection = "Any"
elif (flags & 0x80) != 0:
subtable.TextDirection = "Vertical"
else:
subtable.TextDirection = "Horizontal"
def read(self, reader, font, tableDict):
pos = reader.pos
m = MorxSubtable()
m.StructLength = reader.readULong()
flags = reader.readUInt8()
orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0)
m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey]
self._setTextDirectionFromCoverageFlags(flags, m)
m.Reserved = reader.readUShort()
m.Reserved |= (flags & 0xF) << 16
m.MorphType = reader.readUInt8()
m.SubFeatureFlags = reader.readULong()
tableClass = lookupTypes["morx"].get(m.MorphType)
if tableClass is None:
assert False, ("unsupported 'morx' lookup type %s" %
m.MorphType)
# To decode AAT ligatures, we need to know the subtable size.
# The easiest way to pass this along is to create a new reader
# that works on just the subtable as its data.
headerLength = reader.pos - pos
data = reader.data[
reader.pos
: reader.pos + m.StructLength - headerLength]
assert len(data) == m.StructLength - headerLength
subReader = OTTableReader(data=data, tableTag=reader.tableTag)
m.SubStruct = tableClass()
m.SubStruct.decompile(subReader, font)
reader.seek(pos + m.StructLength)
return m
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.begintag(name, attrs)
xmlWriter.newline()
xmlWriter.comment("StructLength=%d" % value.StructLength)
xmlWriter.newline()
xmlWriter.simpletag("TextDirection", value=value.TextDirection)
xmlWriter.newline()
xmlWriter.simpletag("ProcessingOrder",
value=value.ProcessingOrder)
xmlWriter.newline()
if value.Reserved != 0:
xmlWriter.simpletag("Reserved",
value="0x%04x" % value.Reserved)
xmlWriter.newline()
xmlWriter.comment("MorphType=%d" % value.MorphType)
xmlWriter.newline()
xmlWriter.simpletag("SubFeatureFlags",
value="0x%08x" % value.SubFeatureFlags)
xmlWriter.newline()
value.SubStruct.toXML(xmlWriter, font)
xmlWriter.endtag(name)
xmlWriter.newline()
def xmlRead(self, attrs, content, font):
m = MorxSubtable()
covFlags = 0
m.Reserved = 0
for eltName, eltAttrs, eltContent in filter(istuple, content):
if eltName == "CoverageFlags":
# Only in XML from old versions of fonttools.
covFlags = safeEval(eltAttrs["value"])
orderKey = ((covFlags & 0x40) != 0,
(covFlags & 0x10) != 0)
m.ProcessingOrder = self._PROCESSING_ORDERS[
orderKey]
self._setTextDirectionFromCoverageFlags(
covFlags, m)
elif eltName == "ProcessingOrder":
m.ProcessingOrder = eltAttrs["value"]
assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, "unknown ProcessingOrder: %s" % m.ProcessingOrder
elif eltName == "TextDirection":
m.TextDirection = eltAttrs["value"]
assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, "unknown TextDirection %s" % m.TextDirection
elif eltName == "Reserved":
m.Reserved = safeEval(eltAttrs["value"])
elif eltName == "SubFeatureFlags":
m.SubFeatureFlags = safeEval(eltAttrs["value"])
elif eltName.endswith("Morph"):
m.fromXML(eltName, eltAttrs, eltContent, font)
else:
assert False, eltName
m.Reserved = (covFlags & 0xF) << 16 | m.Reserved
return m
def write(self, writer, font, tableDict, value, repeatIndex=None):
covFlags = (value.Reserved & 0x000F0000) >> 16
reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[
value.ProcessingOrder]
covFlags |= 0x80 if value.TextDirection == "Vertical" else 0
covFlags |= 0x40 if reverseOrder else 0
covFlags |= 0x20 if value.TextDirection == "Any" else 0
covFlags |= 0x10 if logicalOrder else 0
value.CoverageFlags = covFlags
lengthIndex = len(writer.items)
before = writer.getDataLength()
value.StructLength = 0xdeadbeef
        # The high nibble of value.Reserved is actually encoded
# into coverageFlags, so we need to clear it here.
origReserved = value.Reserved # including high nibble
value.Reserved = value.Reserved & 0xFFFF # without high nibble
value.compile(writer, font)
value.Reserved = origReserved # restore original value
assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"
length = writer.getDataLength() - before
writer.items[lengthIndex] = struct.pack(">L", length)
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6Tables.html#ExtendedStateHeader
# TODO: Untangle the implementation of the various lookup-specific formats.
class STXHeader(BaseConverter):
def __init__(self, name, repeat, aux, tableClass):
BaseConverter.__init__(self, name, repeat, aux, tableClass)
assert issubclass(self.tableClass, AATAction)
self.classLookup = AATLookup("GlyphClasses", None, None, UShort)
if issubclass(self.tableClass, ContextualMorphAction):
self.perGlyphLookup = AATLookup("PerGlyphLookup",
None, None, GlyphID)
else:
self.perGlyphLookup = None
def read(self, reader, font, tableDict):
table = AATStateTable()
pos = reader.pos
classTableReader = reader.getSubReader(0)
stateArrayReader = reader.getSubReader(0)
entryTableReader = reader.getSubReader(0)
actionReader = None
ligaturesReader = None
table.GlyphClassCount = reader.readULong()
classTableReader.seek(pos + reader.readULong())
stateArrayReader.seek(pos + reader.readULong())
entryTableReader.seek(pos + reader.readULong())
if self.perGlyphLookup is not None:
perGlyphTableReader = reader.getSubReader(0)
perGlyphTableReader.seek(pos + reader.readULong())
if issubclass(self.tableClass, LigatureMorphAction):
actionReader = reader.getSubReader(0)
actionReader.seek(pos + reader.readULong())
ligComponentReader = reader.getSubReader(0)
ligComponentReader.seek(pos + reader.readULong())
ligaturesReader = reader.getSubReader(0)
ligaturesReader.seek(pos + reader.readULong())
numLigComponents = (ligaturesReader.pos
- ligComponentReader.pos) // 2
assert numLigComponents >= 0
table.LigComponents = \
ligComponentReader.readUShortArray(numLigComponents)
table.Ligatures = self._readLigatures(ligaturesReader, font)
table.GlyphClasses = self.classLookup.read(classTableReader,
font, tableDict)
numStates = int((entryTableReader.pos - stateArrayReader.pos)
/ (table.GlyphClassCount * 2))
for stateIndex in range(numStates):
state = AATState()
table.States.append(state)
for glyphClass in range(table.GlyphClassCount):
entryIndex = stateArrayReader.readUShort()
state.Transitions[glyphClass] = \
self._readTransition(entryTableReader,
entryIndex, font,
actionReader)
if self.perGlyphLookup is not None:
table.PerGlyphLookups = self._readPerGlyphLookups(
table, perGlyphTableReader, font)
return table
def _readTransition(self, reader, entryIndex, font, actionReader):
transition = self.tableClass()
entryReader = reader.getSubReader(
reader.pos + entryIndex * transition.staticSize)
transition.decompile(entryReader, font, actionReader)
return transition
def _readLigatures(self, reader, font):
limit = len(reader.data)
numLigatureGlyphs = (limit - reader.pos) // 2
return [font.getGlyphName(g)
for g in reader.readUShortArray(numLigatureGlyphs)]
def _countPerGlyphLookups(self, table):
# Somewhat annoyingly, the morx table does not encode
# the size of the per-glyph table. So we need to find
# the maximum value that MorphActions use as index
# into this table.
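		# A MarkIndex/CurrentIndex of 0xFFFF is a sentinel meaning
		# "no per-glyph lookup", so such entries are skipped below.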
numLookups = 0
for state in table.States:
for t in state.Transitions.values():
if isinstance(t, ContextualMorphAction):
if t.MarkIndex != 0xFFFF:
numLookups = max(
numLookups,
t.MarkIndex + 1)
if t.CurrentIndex != 0xFFFF:
numLookups = max(
numLookups,
t.CurrentIndex + 1)
return numLookups
def _readPerGlyphLookups(self, table, reader, font):
pos = reader.pos
lookups = []
for _ in range(self._countPerGlyphLookups(table)):
lookupReader = reader.getSubReader(0)
lookupReader.seek(pos + reader.readULong())
lookups.append(
self.perGlyphLookup.read(lookupReader, font, {}))
return lookups
def write(self, writer, font, tableDict, value, repeatIndex=None):
glyphClassWriter = OTTableWriter()
self.classLookup.write(glyphClassWriter, font, tableDict,
value.GlyphClasses, repeatIndex=None)
glyphClassData = pad(glyphClassWriter.getAllData(), 4)
glyphClassCount = max(value.GlyphClasses.values()) + 1
glyphClassTableOffset = 16 # size of STXHeader
if self.perGlyphLookup is not None:
glyphClassTableOffset += 4
actionData, actionIndex = None, None
if issubclass(self.tableClass, LigatureMorphAction):
glyphClassTableOffset += 12
actionData, actionIndex = \
self._compileLigActions(value, font)
actionData = pad(actionData, 4)
stateArrayData, entryTableData = self._compileStates(
font, value.States, glyphClassCount, actionIndex)
stateArrayOffset = glyphClassTableOffset + len(glyphClassData)
entryTableOffset = stateArrayOffset + len(stateArrayData)
perGlyphOffset = entryTableOffset + len(entryTableData)
perGlyphData = \
pad(self._compilePerGlyphLookups(value, font), 4)
ligComponentsData = self._compileLigComponents(value, font)
ligaturesData = self._compileLigatures(value, font)
if actionData is None:
actionOffset = None
ligComponentsOffset = None
ligaturesOffset = None
else:
assert len(perGlyphData) == 0
actionOffset = entryTableOffset + len(entryTableData)
ligComponentsOffset = actionOffset + len(actionData)
ligaturesOffset = ligComponentsOffset + len(ligComponentsData)
writer.writeULong(glyphClassCount)
writer.writeULong(glyphClassTableOffset)
writer.writeULong(stateArrayOffset)
writer.writeULong(entryTableOffset)
if self.perGlyphLookup is not None:
writer.writeULong(perGlyphOffset)
if actionOffset is not None:
writer.writeULong(actionOffset)
writer.writeULong(ligComponentsOffset)
writer.writeULong(ligaturesOffset)
writer.writeData(glyphClassData)
writer.writeData(stateArrayData)
writer.writeData(entryTableData)
writer.writeData(perGlyphData)
if actionData is not None:
writer.writeData(actionData)
if ligComponentsData is not None:
writer.writeData(ligComponentsData)
if ligaturesData is not None:
writer.writeData(ligaturesData)
def _compileStates(self, font, states, glyphClassCount, actionIndex):
stateArrayWriter = OTTableWriter()
entries, entryIDs = [], {}
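		# entryIDs de-duplicates identical transition records so that
		# the state array can point several states at one shared entry.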
for state in states:
for glyphClass in range(glyphClassCount):
transition = state.Transitions[glyphClass]
entryWriter = OTTableWriter()
transition.compile(entryWriter, font,
actionIndex)
entryData = entryWriter.getAllData()
				assert len(entryData) == transition.staticSize, (
"%s has staticSize %d, "
"but actually wrote %d bytes" % (
repr(transition),
transition.staticSize,
len(entryData)))
entryIndex = entryIDs.get(entryData)
if entryIndex is None:
entryIndex = len(entries)
entryIDs[entryData] = entryIndex
entries.append(entryData)
stateArrayWriter.writeUShort(entryIndex)
stateArrayData = pad(stateArrayWriter.getAllData(), 4)
entryTableData = pad(bytesjoin(entries), 4)
return stateArrayData, entryTableData
def _compilePerGlyphLookups(self, table, font):
if self.perGlyphLookup is None:
return b""
numLookups = self._countPerGlyphLookups(table)
assert len(table.PerGlyphLookups) == numLookups, (
"len(AATStateTable.PerGlyphLookups) is %d, "
"but the actions inside the table refer to %d" %
(len(table.PerGlyphLookups), numLookups))
writer = OTTableWriter()
for lookup in table.PerGlyphLookups:
lookupWriter = writer.getSubWriter()
lookupWriter.longOffset = True
self.perGlyphLookup.write(lookupWriter, font,
{}, lookup, None)
writer.writeSubTable(lookupWriter)
return writer.getAllData()
def _compileLigActions(self, table, font):
assert issubclass(self.tableClass, LigatureMorphAction)
actions = set()
for state in table.States:
for _glyphClass, trans in state.Transitions.items():
actions.add(trans.compileLigActions())
result, actionIndex = b"", {}
		# Sort the compiled actions in decreasing order of
		# length, so that the longer sequences come before the
		# shorter ones. For each compiled action ABCD, its
		# suffixes BCD, CD, and D need not be encoded separately
		# (in case they occur); instead, we can just store an
		# index that points into the middle of the longer
		# sequence. Every compiled AAT ligature sequence is
		# terminated with an end-of-sequence flag, which can
		# only be set on the last element of the sequence.
		# Therefore, it is sufficient to consider just the
		# suffixes.
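		# For example (hypothetical data): if the compiled actions
		# are ABCD and CD, ABCD is emitted first; CD is then found
		# as a suffix of ABCD and resolved to an index into the
		# middle of ABCD, so no new bytes are appended for it.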
for a in sorted(actions, key=lambda x:(-len(x), x)):
if a not in actionIndex:
for i in range(0, len(a), 4):
suffix = a[i:]
suffixIndex = (len(result) + i) // 4
actionIndex.setdefault(
suffix, suffixIndex)
result += a
assert len(result) % self.tableClass.staticSize == 0
return (result, actionIndex)
def _compileLigComponents(self, table, font):
if not hasattr(table, "LigComponents"):
return None
writer = OTTableWriter()
for component in table.LigComponents:
writer.writeUShort(component)
return writer.getAllData()
def _compileLigatures(self, table, font):
if not hasattr(table, "Ligatures"):
return None
writer = OTTableWriter()
for glyphName in table.Ligatures:
writer.writeUShort(font.getGlyphID(glyphName))
return writer.getAllData()
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.begintag(name, attrs)
xmlWriter.newline()
xmlWriter.comment("GlyphClassCount=%s" %value.GlyphClassCount)
xmlWriter.newline()
for g, klass in sorted(value.GlyphClasses.items()):
xmlWriter.simpletag("GlyphClass", glyph=g, value=klass)
xmlWriter.newline()
for stateIndex, state in enumerate(value.States):
xmlWriter.begintag("State", index=stateIndex)
xmlWriter.newline()
for glyphClass, trans in sorted(state.Transitions.items()):
trans.toXML(xmlWriter, font=font,
attrs={"onGlyphClass": glyphClass},
name="Transition")
xmlWriter.endtag("State")
xmlWriter.newline()
for i, lookup in enumerate(value.PerGlyphLookups):
xmlWriter.begintag("PerGlyphLookup", index=i)
xmlWriter.newline()
for glyph, val in sorted(lookup.items()):
xmlWriter.simpletag("Lookup", glyph=glyph,
value=val)
xmlWriter.newline()
xmlWriter.endtag("PerGlyphLookup")
xmlWriter.newline()
if hasattr(value, "LigComponents"):
xmlWriter.begintag("LigComponents")
xmlWriter.newline()
for i, val in enumerate(getattr(value, "LigComponents")):
xmlWriter.simpletag("LigComponent", index=i,
value=val)
xmlWriter.newline()
xmlWriter.endtag("LigComponents")
xmlWriter.newline()
self._xmlWriteLigatures(xmlWriter, font, value, name, attrs)
xmlWriter.endtag(name)
xmlWriter.newline()
def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs):
if not hasattr(value, "Ligatures"):
return
xmlWriter.begintag("Ligatures")
xmlWriter.newline()
for i, g in enumerate(getattr(value, "Ligatures")):
xmlWriter.simpletag("Ligature", index=i, glyph=g)
xmlWriter.newline()
xmlWriter.endtag("Ligatures")
xmlWriter.newline()
def xmlRead(self, attrs, content, font):
table = AATStateTable()
for eltName, eltAttrs, eltContent in filter(istuple, content):
if eltName == "GlyphClass":
glyph = eltAttrs["glyph"]
value = eltAttrs["value"]
table.GlyphClasses[glyph] = safeEval(value)
elif eltName == "State":
state = self._xmlReadState(eltAttrs, eltContent, font)
table.States.append(state)
elif eltName == "PerGlyphLookup":
lookup = self.perGlyphLookup.xmlRead(
eltAttrs, eltContent, font)
table.PerGlyphLookups.append(lookup)
elif eltName == "LigComponents":
table.LigComponents = \
self._xmlReadLigComponents(
eltAttrs, eltContent, font)
elif eltName == "Ligatures":
table.Ligatures = \
self._xmlReadLigatures(
eltAttrs, eltContent, font)
table.GlyphClassCount = max(table.GlyphClasses.values()) + 1
return table
def _xmlReadState(self, attrs, content, font):
state = AATState()
for eltName, eltAttrs, eltContent in filter(istuple, content):
if eltName == "Transition":
glyphClass = safeEval(eltAttrs["onGlyphClass"])
transition = self.tableClass()
transition.fromXML(eltName, eltAttrs,
eltContent, font)
state.Transitions[glyphClass] = transition
return state
def _xmlReadLigComponents(self, attrs, content, font):
ligComponents = []
for eltName, eltAttrs, _eltContent in filter(istuple, content):
if eltName == "LigComponent":
ligComponents.append(
safeEval(eltAttrs["value"]))
return ligComponents
def _xmlReadLigatures(self, attrs, content, font):
ligs = []
for eltName, eltAttrs, _eltContent in filter(istuple, content):
if eltName == "Ligature":
ligs.append(eltAttrs["glyph"])
return ligs
class CIDGlyphMap(BaseConverter):
def read(self, reader, font, tableDict):
numCIDs = reader.readUShort()
result = {}
for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)):
if glyphID != 0xFFFF:
result[cid] = font.getGlyphName(glyphID)
return result
def write(self, writer, font, tableDict, value, repeatIndex=None):
items = {cid: font.getGlyphID(glyph)
for cid, glyph in value.items()}
count = max(items) + 1 if items else 0
writer.writeUShort(count)
for cid in range(count):
writer.writeUShort(items.get(cid, 0xFFFF))
def xmlRead(self, attrs, content, font):
result = {}
for eName, eAttrs, _eContent in filter(istuple, content):
if eName == "CID":
result[safeEval(eAttrs["cid"])] = \
eAttrs["glyph"].strip()
return result
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.begintag(name, attrs)
xmlWriter.newline()
for cid, glyph in sorted(value.items()):
if glyph is not None and glyph != 0xFFFF:
xmlWriter.simpletag(
"CID", cid=cid, glyph=glyph)
xmlWriter.newline()
xmlWriter.endtag(name)
xmlWriter.newline()
class GlyphCIDMap(BaseConverter):
def read(self, reader, font, tableDict):
glyphOrder = font.getGlyphOrder()
count = reader.readUShort()
cids = reader.readUShortArray(count)
if count > len(glyphOrder):
log.warning("GlyphCIDMap has %d elements, "
"but the font has only %d glyphs; "
"ignoring the rest" %
(count, len(glyphOrder)))
result = {}
for glyphID in range(min(len(cids), len(glyphOrder))):
cid = cids[glyphID]
if cid != 0xFFFF:
result[glyphOrder[glyphID]] = cid
return result
def write(self, writer, font, tableDict, value, repeatIndex=None):
items = {font.getGlyphID(g): cid
for g, cid in value.items()
if cid is not None and cid != 0xFFFF}
count = max(items) + 1 if items else 0
writer.writeUShort(count)
for glyphID in range(count):
writer.writeUShort(items.get(glyphID, 0xFFFF))
def xmlRead(self, attrs, content, font):
result = {}
for eName, eAttrs, _eContent in filter(istuple, content):
if eName == "CID":
result[eAttrs["glyph"]] = \
safeEval(eAttrs["value"])
return result
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.begintag(name, attrs)
xmlWriter.newline()
for glyph, cid in sorted(value.items()):
if cid is not None and cid != 0xFFFF:
xmlWriter.simpletag(
"CID", glyph=glyph, value=cid)
xmlWriter.newline()
xmlWriter.endtag(name)
xmlWriter.newline()
class DeltaValue(BaseConverter):
def read(self, reader, font, tableDict):
StartSize = tableDict["StartSize"]
EndSize = tableDict["EndSize"]
DeltaFormat = tableDict["DeltaFormat"]
assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
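		# DeltaFormat 1, 2 and 3 pack signed 2-, 4- and 8-bit deltas,
		# so one uint16 word holds 8, 4 or 2 values respectively.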
nItems = EndSize - StartSize + 1
nBits = 1 << DeltaFormat
minusOffset = 1 << nBits
mask = (1 << nBits) - 1
signMask = 1 << (nBits - 1)
DeltaValue = []
tmp, shift = 0, 0
for i in range(nItems):
if shift == 0:
tmp, shift = reader.readUShort(), 16
shift = shift - nBits
value = (tmp >> shift) & mask
if value & signMask:
value = value - minusOffset
DeltaValue.append(value)
return DeltaValue
def write(self, writer, font, tableDict, value, repeatIndex=None):
StartSize = tableDict["StartSize"]
EndSize = tableDict["EndSize"]
DeltaFormat = tableDict["DeltaFormat"]
DeltaValue = value
assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
nItems = EndSize - StartSize + 1
nBits = 1 << DeltaFormat
assert len(DeltaValue) == nItems
mask = (1 << nBits) - 1
tmp, shift = 0, 16
for value in DeltaValue:
shift = shift - nBits
tmp = tmp | ((value & mask) << shift)
if shift == 0:
writer.writeUShort(tmp)
tmp, shift = 0, 16
if shift != 16:
writer.writeUShort(tmp)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.simpletag(name, attrs + [("value", value)])
xmlWriter.newline()
def xmlRead(self, attrs, content, font):
return safeEval(attrs["value"])
class VarIdxMapValue(BaseConverter):
def read(self, reader, font, tableDict):
fmt = tableDict['EntryFormat']
nItems = tableDict['MappingCount']
innerBits = 1 + (fmt & 0x000F)
innerMask = (1<<innerBits) - 1
outerMask = 0xFFFFFFFF - innerMask
outerShift = 16 - innerBits
entrySize = 1 + ((fmt & 0x0030) >> 4)
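		# Each stored entry packs the outer (high) part of a delta-set
		# index in its upper bits and the inner part in its low
		# innerBits bits; decoding below reassembles (outer << 16) | inner.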
read = {
1: reader.readUInt8,
2: reader.readUShort,
3: reader.readUInt24,
4: reader.readULong,
}[entrySize]
mapping = []
for i in range(nItems):
raw = read()
idx = ((raw & outerMask) << outerShift) | (raw & innerMask)
mapping.append(idx)
return mapping
def write(self, writer, font, tableDict, value, repeatIndex=None):
fmt = tableDict['EntryFormat']
mapping = value
writer['MappingCount'].setValue(len(mapping))
innerBits = 1 + (fmt & 0x000F)
innerMask = (1<<innerBits) - 1
outerShift = 16 - innerBits
entrySize = 1 + ((fmt & 0x0030) >> 4)
write = {
1: writer.writeUInt8,
2: writer.writeUShort,
3: writer.writeUInt24,
4: writer.writeULong,
}[entrySize]
for idx in mapping:
raw = ((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask)
write(raw)
class VarDataValue(BaseConverter):
def read(self, reader, font, tableDict):
values = []
regionCount = tableDict["VarRegionCount"]
shortCount = tableDict["NumShorts"]
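		# Layout: the first min(regionCount, shortCount) deltas are
		# int16, the remaining region deltas are int8, and trailing
		# bytes beyond regionCount are padding, read and discarded.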
for i in range(min(regionCount, shortCount)):
values.append(reader.readShort())
for i in range(min(regionCount, shortCount), regionCount):
values.append(reader.readInt8())
for i in range(regionCount, shortCount):
reader.readInt8()
return values
def write(self, writer, font, tableDict, value, repeatIndex=None):
regionCount = tableDict["VarRegionCount"]
shortCount = tableDict["NumShorts"]
for i in range(min(regionCount, shortCount)):
writer.writeShort(value[i])
for i in range(min(regionCount, shortCount), regionCount):
writer.writeInt8(value[i])
for i in range(regionCount, shortCount):
writer.writeInt8(0)
def xmlWrite(self, xmlWriter, font, value, name, attrs):
xmlWriter.simpletag(name, attrs + [("value", value)])
xmlWriter.newline()
def xmlRead(self, attrs, content, font):
return safeEval(attrs["value"])
converterMapping = {
# type class
"int8": Int8,
"int16": Short,
"uint8": UInt8,
"uint8": UInt8,
"uint16": UShort,
"uint24": UInt24,
"uint32": ULong,
"char64": Char64,
"Flags32": Flags32,
"Version": Version,
"Tag": Tag,
"GlyphID": GlyphID,
"NameID": NameID,
"DeciPoints": DeciPoints,
"Fixed": Fixed,
"F2Dot14": F2Dot14,
"struct": Struct,
"Offset": Table,
"LOffset": LTable,
"ValueRecord": ValueRecord,
"DeltaValue": DeltaValue,
"VarIdxMapValue": VarIdxMapValue,
"VarDataValue": VarDataValue,
# AAT
"CIDGlyphMap": CIDGlyphMap,
"GlyphCIDMap": GlyphCIDMap,
"MortChain": StructWithLength,
"MortSubtable": StructWithLength,
"MorxChain": StructWithLength,
"MorxSubtable": MorxSubtableConverter,
# "Template" types
"AATLookup": lambda C: partial(AATLookup, tableClass=C),
"AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C),
"STXHeader": lambda C: partial(STXHeader, tableClass=C),
"OffsetTo": lambda C: partial(Table, tableClass=C),
"LOffsetTo": lambda C: partial(LTable, tableClass=C),
}
| mit |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/keyword_plan_ad_group_keyword_service/transports/base.py | 1 | 4439 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import keyword_plan_ad_group_keyword
from google.ads.googleads.v8.services.types import keyword_plan_ad_group_keyword_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class KeywordPlanAdGroupKeywordServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for KeywordPlanAdGroupKeywordService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_keyword_plan_ad_group_keyword: gapic_v1.method.wrap_method(
self.get_keyword_plan_ad_group_keyword,
default_timeout=None,
client_info=client_info,
),
self.mutate_keyword_plan_ad_group_keywords: gapic_v1.method.wrap_method(
self.mutate_keyword_plan_ad_group_keywords,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_keyword_plan_ad_group_keyword(self) -> typing.Callable[
[keyword_plan_ad_group_keyword_service.GetKeywordPlanAdGroupKeywordRequest],
keyword_plan_ad_group_keyword.KeywordPlanAdGroupKeyword]:
raise NotImplementedError
@property
def mutate_keyword_plan_ad_group_keywords(self) -> typing.Callable[
[keyword_plan_ad_group_keyword_service.MutateKeywordPlanAdGroupKeywordsRequest],
keyword_plan_ad_group_keyword_service.MutateKeywordPlanAdGroupKeywordsResponse]:
raise NotImplementedError
__all__ = (
'KeywordPlanAdGroupKeywordServiceTransport',
)
| apache-2.0 |
kawamon/hue | desktop/core/ext-py/pyu2f-0.1.4/pyu2f/tests/model_test.py | 7 | 1918 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pyu2f.model."""
import json
import sys
from pyu2f import errors
from pyu2f import model
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest # pylint: disable=g-import-not-at-top
else:
import unittest # pylint: disable=g-import-not-at-top
class ModelTest(unittest.TestCase):
def testClientDataRegistration(self):
cd = model.ClientData(model.ClientData.TYP_REGISTRATION, b'ABCD',
'somemachine')
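    # Note: b'ABCD' is 'QUJDRA' in unpadded websafe base64, which is
    # the challenge value asserted below.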
obj = json.loads(cd.GetJson())
self.assertEquals(len(list(obj.keys())), 3)
self.assertEquals(obj['typ'], model.ClientData.TYP_REGISTRATION)
self.assertEquals(obj['challenge'], 'QUJDRA')
self.assertEquals(obj['origin'], 'somemachine')
def testClientDataAuth(self):
cd = model.ClientData(model.ClientData.TYP_AUTHENTICATION, b'ABCD',
'somemachine')
obj = json.loads(cd.GetJson())
self.assertEquals(len(list(obj.keys())), 3)
self.assertEquals(obj['typ'], model.ClientData.TYP_AUTHENTICATION)
self.assertEquals(obj['challenge'], 'QUJDRA')
self.assertEquals(obj['origin'], 'somemachine')
def testClientDataInvalid(self):
self.assertRaises(errors.InvalidModelError, model.ClientData, 'foobar',
b'ABCD', 'somemachine')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
thomasvs/morituri | morituri/result/logger.py | 2 | 5017 | # -*- Mode: Python; test-case-name: morituri.test.test_result_logger -*-
# vi:si:et:sw=4:sts=4:ts=4
# Morituri - for those about to RIP
# Copyright (C) 2009 Thomas Vander Stichele
# This file is part of morituri.
#
# morituri is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# morituri is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with morituri. If not, see <http://www.gnu.org/licenses/>.
import time
from morituri.common import common
from morituri.configure import configure
from morituri.result import result
class MorituriLogger(result.Logger):
def log(self, ripResult, epoch=time.time()):
"""
@type ripResult: L{morituri.result.result.RipResult}
"""
lines = self.logRip(ripResult, epoch=epoch)
return '\n'.join(lines)
def logRip(self, ripResult, epoch):
lines = []
### global
lines.append("Logfile created by: morituri %s" % configure.version)
# FIXME: when we localize this, see #49 to handle unicode properly.
import locale
old = locale.getlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, 'C')
date = time.strftime("%b %d %H:%M:%S", time.localtime(epoch))
locale.setlocale(locale.LC_TIME, old)
lines.append("Logfile created on: %s" % date)
lines.append("")
# album
lines.append("Album: %s - %s" % (ripResult.artist, ripResult.title))
lines.append("")
lines.append("CDDB disc id: %s" % ripResult. table.getCDDBDiscId())
lines.append("MusicBrainz disc id: %s" % ripResult. table.getMusicBrainzDiscId())
lines.append("MusicBrainz lookup URL: %s" % ripResult. table.getMusicBrainzSubmitURL())
lines.append("")
# drive
lines.append(
"Drive: vendor %s, model %s" % (
ripResult.vendor, ripResult.model))
lines.append("")
lines.append("Read offset correction: %d" %
ripResult.offset)
lines.append("")
# toc
lines.append("Table of Contents:")
lines.append("")
lines.append(
" Track | Start | Length")
lines.append(
" ------------------------------------------------")
table = ripResult.table
for t in table.tracks:
start = t.getIndex(1).absolute
length = table.getTrackLength(t.number)
lines.append(
" %2d | %6d - %s | %6d - %s" % (
t.number,
start, common.framesToMSF(start),
length, common.framesToMSF(length)))
lines.append("")
lines.append("")
### per-track
for t in ripResult.tracks:
lines.extend(self.trackLog(t))
lines.append('')
return lines
def trackLog(self, trackResult):
lines = []
lines.append('Track %2d' % trackResult.number)
lines.append('')
lines.append(' Filename %s' % trackResult.filename)
lines.append('')
if trackResult.pregap:
lines.append(' Pre-gap: %s' % common.framesToMSF(
trackResult.pregap))
lines.append('')
lines.append(' Peak level %.1f %%' % (trackResult.peak * 100.0))
if trackResult.copyspeed:
lines.append(' Extraction Speed (Copy) %.4f X' % (
trackResult.copyspeed))
if trackResult.testspeed:
lines.append(' Extraction Speed (Test) %.4f X' % (
trackResult.testspeed))
if trackResult.copycrc is not None:
lines.append(' Copy CRC %08X' % trackResult.copycrc)
if trackResult.testcrc is not None:
lines.append(' Test CRC %08X' % trackResult.testcrc)
if trackResult.testcrc == trackResult.copycrc:
lines.append(' Copy OK')
else:
lines.append(" WARNING: CRCs don't match!")
else:
lines.append(" WARNING: no CRC check done")
if trackResult.accurip:
lines.append(' Accurately ripped (confidence %d) [%08X]' % (
trackResult.ARDBConfidence, trackResult.ARCRC))
else:
if trackResult.ARDBCRC:
lines.append(' Cannot be verified as accurate '
'[%08X], AccurateRip returned [%08X]' % (
trackResult.ARCRC, trackResult.ARDBCRC))
else:
lines.append(' Track not present in AccurateRip database')
return lines
| gpl-3.0 |
mattuuh7/incubator-airflow | airflow/contrib/hooks/vertica_hook.py | 60 | 1381 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from vertica_python import connect
from airflow.hooks.dbapi_hook import DbApiHook
class VerticaHook(DbApiHook):
'''
Interact with Vertica.
'''
conn_name_attr = 'vertica_conn_id'
default_conn_name = 'vertica_default'
supports_autocommit = True
def get_conn(self):
"""
        Returns a Vertica connection object
"""
conn = self.get_connection(self.vertica_conn_id)
conn_config = {
"user": conn.login,
"password": conn.password or '',
"database": conn.schema,
}
conn_config["host"] = conn.host or 'localhost'
if not conn.port:
conn_config["port"] = 5433
else:
conn_config["port"] = int(conn.port)
conn = connect(**conn_config)
return conn
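    # Example usage (hypothetical connection id and query):
    #
    #   hook = VerticaHook(vertica_conn_id='vertica_default')
    #   rows = hook.get_records('SELECT version()')
    #
    # get_records() is inherited from DbApiHook and uses the connection
    # returned by get_conn() above.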
| apache-2.0 |
iniqua/plecost | plecost_lib/api.py | 1 | 3488 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Plecost: Wordpress vulnerabilities finder
#
# @url: http://iniqua.com/labs/
# @url: https://github.com/iniqua/plecost
#
# @author:Francisco J. Gomez aka ffranz (http://iniqua.com/)
# @author:Daniel Garcia aka cr0hn (http://www.cr0hn.com/me/)
#
# Copyright (c) 2015, Iniqua Team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This file contains API calls for all Plecost functions
"""
import warnings
from sys import version_info
from .libs.reporters import * # noqa
from .libs.data import PlecostOptions
from .libs.versions import find_versions # noqa
warnings.simplefilter("ignore")
# --------------------------------------------------------------------------
#
# Command line options
#
# --------------------------------------------------------------------------
def run(config):
"""
    Main function of the library:
- Find WordPress versions
- Find outdated plugins
:param config: PlecostOptions option instance
:type config: `PlecostOptions`
:raises: PlecostTargetNotAvailable, PlecostNotWordPressFound, PlecostWordListNotFound
"""
# --------------------------------------------------------------------------
# Checks Python version
# --------------------------------------------------------------------------
if version_info < (3, 3):
raise RuntimeError("You need Python 3.3.x or higher to run Plecost")
# Check reporter
if config.report_filename is not None:
# Select appropriate report.
reporter_function = get_reporter(config.report_filename)
# Find wordpress and plugins versions
data = find_versions(config)
# Generate reports
if config.report_filename is not None:
report = reporter_function(config.report_filename)
# Get report content
content = report.generate(data)
# Save report content
report.save(content)
__version__ = "1.1.2"
__all__ = ["run", "find_versions", "PlecostOptions",
"PlecostInvalidReportFormat"] | bsd-3-clause |
avalonfr/avalon_core_v1 | dep/libmpq/bindings/python/mpq.py | 17 | 10752 | """wrapper for libmpq"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import ctypes
import ctypes.util
import os
libmpq = ctypes.CDLL(ctypes.util.find_library("mpq"))
class Error(Exception):
pass
errors = {
-1: (IOError, "open"),
-2: (IOError, "close"),
-3: (IOError, "seek"),
-4: (IOError, "read"),
-5: (IOError, "write"),
-6: (MemoryError,),
-7: (Error, "file is not an mpq or is corrupted"),
-8: (AssertionError, "not initialized"),
-9: (AssertionError, "buffer size too small"),
-10: (IndexError, "file not in archive"),
-11: (AssertionError, "decrypt"),
-12: (AssertionError, "unpack"),
}
def check_error(result, func, arguments, errors=errors):
try:
error = errors[result]
except KeyError:
return result
else:
raise error[0](*error[1:])
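# ctypes invokes this errcheck hook with (result, func, arguments) after
# every call on the functions below; non-error results pass through.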
libmpq.libmpq__version.restype = ctypes.c_char_p
libmpq.libmpq__archive_open.errcheck = check_error
libmpq.libmpq__archive_close.errcheck = check_error
libmpq.libmpq__archive_packed_size.errcheck = check_error
libmpq.libmpq__archive_unpacked_size.errcheck = check_error
libmpq.libmpq__archive_offset.errcheck = check_error
libmpq.libmpq__archive_version.errcheck = check_error
libmpq.libmpq__archive_files.errcheck = check_error
libmpq.libmpq__file_packed_size.errcheck = check_error
libmpq.libmpq__file_unpacked_size.errcheck = check_error
libmpq.libmpq__file_offset.errcheck = check_error
libmpq.libmpq__file_blocks.errcheck = check_error
libmpq.libmpq__file_encrypted.errcheck = check_error
libmpq.libmpq__file_compressed.errcheck = check_error
libmpq.libmpq__file_imploded.errcheck = check_error
libmpq.libmpq__file_number.errcheck = check_error
libmpq.libmpq__file_read.errcheck = check_error
libmpq.libmpq__block_open_offset.errcheck = check_error
libmpq.libmpq__block_close_offset.errcheck = check_error
libmpq.libmpq__block_unpacked_size.errcheck = check_error
libmpq.libmpq__block_read.errcheck = check_error
__version__ = libmpq.libmpq__version()
class Reader(object):
def __init__(self, file, libmpq=libmpq):
self._file = file
self._pos = 0
self._buf = []
self._cur_block = 0
libmpq.libmpq__block_open_offset(self._file._archive._mpq,
self._file.number)
def __iter__(self):
return self
def __repr__(self):
return "iter(%r)" % self._file
def seek(self, offset, whence=os.SEEK_SET, os=os):
if whence == os.SEEK_SET:
pass
elif whence == os.SEEK_CUR:
offset += self._pos
elif whence == os.SEEK_END:
offset += self._file.unpacked_size
else:
raise ValueError, "invalid whence"
if offset >= self._pos:
self.read(offset - self._pos)
else:
self._pos = 0
self._buf = []
self._cur_block = 0
self.read(offset)
def tell(self):
return self._pos
def _read_block(self, ctypes=ctypes, libmpq=libmpq):
block_size = ctypes.c_uint64()
libmpq.libmpq__block_unpacked_size(self._file._archive._mpq,
self._file.number, self._cur_block, ctypes.byref(block_size))
block_data = ctypes.create_string_buffer(block_size.value)
libmpq.libmpq__block_read(self._file._archive._mpq,
self._file.number, self._cur_block,
block_data, ctypes.c_uint64(len(block_data)), None)
self._buf.append(block_data.raw)
self._cur_block += 1
def read(self, size=-1):
while size < 0 or sum(map(len, self._buf)) < size:
if self._cur_block == self._file.blocks:
break
self._read_block()
buf = "".join(self._buf)
if size < 0:
ret = buf
self._buf = []
else:
ret = buf[:size]
self._buf = [buf[size:]]
self._pos += len(ret)
return ret
def readline(self, os=os):
line = []
while True:
char = self.read(1)
if char == "":
break
if char not in '\r\n' and line and line[-1] in '\r\n':
self.seek(-1, os.SEEK_CUR)
break
line.append(char)
return ''.join(line)
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, sizehint=-1):
res = []
while sizehint < 0 or sum(map(len, res)) < sizehint:
line = self.readline()
if not line:
break
res.append(line)
return res
xreadlines = __iter__
def __del__(self, libmpq=libmpq):
libmpq.libmpq__block_close_offset(self._file._archive._mpq,
self._file.number)
class File(object):
def __init__(self, archive, number, ctypes=ctypes, libmpq=libmpq):
self._archive = archive
self.number = number
for name, atype in [
("packed_size", ctypes.c_uint64),
("unpacked_size", ctypes.c_uint64),
("offset", ctypes.c_uint64),
("blocks", ctypes.c_uint32),
("encrypted", ctypes.c_uint32),
("compressed", ctypes.c_uint32),
("imploded", ctypes.c_uint32),
]:
data = atype()
func = getattr(libmpq, "libmpq__file_"+name)
func(self._archive._mpq, self.number, ctypes.byref(data))
setattr(self, name, data.value)
def __str__(self, ctypes=ctypes, libmpq=libmpq):
data = ctypes.create_string_buffer(self.unpacked_size)
libmpq.libmpq__file_read(self._archive._mpq, self.number,
data, ctypes.c_uint64(len(data)), None)
return data.raw
def __repr__(self):
return "%r[%i]" % (self._archive, self.number)
def __iter__(self, Reader=Reader):
return Reader(self)
class Archive(object):
def __init__(self, source, ctypes=ctypes, File=File, libmpq=libmpq):
self._source = source
if isinstance(source, File):
assert not source.encrypted
assert not source.compressed
assert not source.imploded
self.filename = source._archive.filename
offset = source._archive.offset + source.offset
else:
self.filename = source
offset = -1
self._mpq = ctypes.c_void_p()
libmpq.libmpq__archive_open(ctypes.byref(self._mpq), self.filename,
ctypes.c_uint64(offset))
self._opened = True
for field_name, field_type in [
("packed_size", ctypes.c_uint64),
("unpacked_size", ctypes.c_uint64),
("offset", ctypes.c_uint64),
("version", ctypes.c_uint32),
("files", ctypes.c_uint32),
]:
func = getattr(libmpq, "libmpq__archive_" + field_name)
data = field_type()
func(self._mpq, ctypes.byref(data))
setattr(self, field_name, data.value)
def __del__(self, libmpq=libmpq):
if getattr(self, "_opened", False):
libmpq.libmpq__archive_close(self._mpq)
def __len__(self):
return self.files
def __contains__(self, item, ctypes=ctypes, libmpq=libmpq):
if isinstance(item, str):
data = ctypes.c_uint32()
try:
libmpq.libmpq__file_number(self._mpq, ctypes.c_char_p(item),
ctypes.byref(data))
except IndexError:
return False
return True
return 0 <= item < self.files
def __getitem__(self, item, ctypes=ctypes, File=File, libmpq=libmpq):
if isinstance(item, str):
data = ctypes.c_int()
libmpq.libmpq__file_number(self._mpq, ctypes.c_char_p(item),
ctypes.byref(data))
item = data.value
else:
if not 0 <= item < self.files:
raise IndexError, "file not in archive"
return File(self, item)
def __repr__(self):
return "mpq.Archive(%r)" % self._source
# Remove clutter - everything except Error and Archive.
del os, check_error, ctypes, errors, File, libmpq, Reader
if __name__ == "__main__":
import sys, random
archive = Archive(sys.argv[1])
print repr(archive)
for k, v in archive.__dict__.iteritems():
#if k[0] == '_': continue
print " " * (4 - 1), k, v
assert '(listfile)' in archive
assert 0 in archive
assert len(archive) == archive.files
files = [x.strip() for x in archive['(listfile)']]
files.extend(xrange(archive.files))
for key in files: #sys.argv[2:] if sys.argv[2:] else xrange(archive.files):
file = archive[key]
print
print " " * (4 - 1), repr(file)
for k, v in file.__dict__.iteritems():
#if k[0] == '_': continue
print " " * (8 - 1), k, v
a = str(file)
b = iter(file).read()
reader = iter(file)
c = []
while True:
l = random.randrange(1, 10)
d = reader.read(l)
if not d: break
assert len(d) <= l
c.append(d)
c = "".join(c)
d = []
reader.seek(0)
for line in reader:
d.append(line)
d = "".join(d)
assert a == b == c == d, map(hash, [a,b,c,d])
assert len(a) == file.unpacked_size
repr(iter(file))
reader.seek(0)
a = reader.readlines()
reader.seek(0)
b = list(reader)
assert a == b
| gpl-2.0 |
ovnicraft/odoo | addons/lunch/__init__.py | 440 | 1031 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import lunch
import report
import wizard
| agpl-3.0 |
devs4v/devs4v-information-retrieval15 | project/venv/lib/python2.7/site-packages/django/contrib/sessions/backends/db.py | 37 | 2943 | import logging
from django.contrib.sessions.backends.base import CreateError, SessionBase
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_text
class SessionStore(SessionBase):
"""
Implements database session store.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
def load(self):
try:
s = Session.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
return self.decode(s.session_data)
except (Session.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self.create()
return {}
def exists(self, session_key):
return Session.objects.filter(session_key=session_key).exists()
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
self._session_cache = {}
return
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
"""
obj = Session(
session_key=self._get_or_create_session_key(),
session_data=self.encode(self._get_session(no_load=must_create)),
expire_date=self.get_expiry_date()
)
using = router.db_for_write(Session, instance=obj)
try:
with transaction.atomic(using=using):
obj.save(force_insert=must_create, using=using)
except IntegrityError:
if must_create:
raise CreateError
raise
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
Session.objects.get(session_key=session_key).delete()
except Session.DoesNotExist:
pass
@classmethod
def clear_expired(cls):
Session.objects.filter(expire_date__lt=timezone.now()).delete()
# At bottom to avoid circular import
from django.contrib.sessions.models import Session # isort:skip
| mit |
DevTable/gantryd | gantryd/client.py | 2 | 7460 | from runtime.manager import RuntimeManager
from config.GantryConfig import Configuration
from config.object import ConfigParseException
from gantryd.componentwatcher import ComponentWatcher
from gantryd.machinestate import MachineState
from gantryd.componentstate import ComponentState, STOPPED_STATUS, KILLED_STATUS
from gantryd.etcdpaths import getProjectConfigPath
from util import report, fail, ReportLevels
import etcd
import uuid
import atexit
import threading
import time
import socket
import json
import logging
REPORT_TTL = 60 # Report that this machine is running, every 60 seconds
class GantryDClient(object):
""" A client in gantryd. """
def __init__(self, etcdHost, projectName, etcdPort):
self.project_name = projectName
self.runtime_manager = None
self.components = []
self.is_running = False
# Generate a unique ID for this machine/client.
self.machine_id = str(uuid.uuid1())
# Logging.
self.logger = logging.getLogger(__name__)
# Initialize the etcd client that we'll use.
self.etcd_client = etcd.Client(host=etcdHost, port=etcdPort)
# Initialize the thread used for reporting the status of this machine to etcd.
self.reporting_thread = threading.Thread(target=self.reportMachineStatus, args=[])
self.reporting_thread.daemon = True
def getConfigJSON(self):
""" Returns the project's config JSON or raises an exception if none. """
# Lookup the project on etcd. If none, report an error.
config_json = None
try:
self.logger.debug('Looking up configuration for project %s in etcd', self.project_name)
config_json = self.etcd_client.get(getProjectConfigPath(self.project_name)).value
except KeyError as k:
self.logger.exception(k)
fail('Unknown project ' + self.project_name, project=self.project_name)
return config_json
def getConfig(self):
""" Returns the project's config or raises an exception if none. """
config_json = self.getConfigJSON()
# Parse the project's configuration and save it.
try:
self.config = Configuration.parse(config_json)
except ConfigParseException as cpe:
fail('Error parsing gantry config', project=self.project_name, exception=cpe)
except Exception as e:
self.logger.exception(e)
return self.config
def setConfig(self, config):
""" Sets the project's config in etcd. """
config_json = json.dumps(config)
self.logger.debug('Updating configuration for project %s', self.project_name)
self.etcd_client.set(getProjectConfigPath(self.project_name), config_json)
def stopComponents(self, component_names):
""" Tells all the given components on all systems to stop. """
self.initialize(component_names)
report('Marking components as stopped', project=self.project_name)
for component in self.components:
report('Marking component as stopped', project=self.project_name, component=component,
level = ReportLevels.EXTRA)
state = ComponentState(self.project_name, component, self.etcd_client)
state.setStatus(STOPPED_STATUS)
def killComponents(self, component_names):
""" Tells all the given components on all systems to die. """
self.initialize(component_names)
report('Marking components as killed', project=self.project_name)
for component in self.components:
report('Marking component as killed', project=self.project_name, component=component,
level = ReportLevels.EXTRA)
state = ComponentState(self.project_name, component, self.etcd_client)
state.setStatus(KILLED_STATUS)
def markUpdated(self, component_names):
""" Tells all the given components to update themselves. """
self.initialize(component_names)
report('Updating the image IDs on components', project=self.project_name)
for component in self.components:
image_id = component.getImageId()
state = ComponentState(self.project_name, component, self.etcd_client)
report('Component %s->%s' % (component.getName(), image_id[0:12]), project=self.project_name,
component = component)
state.setReadyStatus(image_id)
def listStatus(self):
""" Lists the status of all components in this project. """
self.getConfig()
self.initialize([c.name for c in self.config.components])
print "%-20s %-20s %-20s" % ('COMPONENT', 'STATUS', 'IMAGE ID')
for component in self.components:
state = ComponentState(self.project_name, component, self.etcd_client).getState()
status = ComponentState.getStatusOf(state)
imageid = ComponentState.getImageIdOf(state)
print "%-20s %-20s %-20s" % (component.getName(), status, imageid)
def run(self, component_names):
""" Runs the given components on this machine. """
self.initialize(component_names)
# Register a handler to remove this machine from the list when the daemon is
    # shut down. The controller will also occasionally ping a machine to verify it
# is present.
self.logger.debug('Registering exit listener')
atexit.register(self.handleExit)
# Start the thread to register this machine as being part of the project.
self.startReporter()
# Start watcher thread(s), one for each component, to see when to update them.
report('Gantryd running', project=self.project_name)
for component in self.components:
self.logger.debug('Starting component watcher for component: %s', component.getName())
watcher = ComponentWatcher(component, self.project_name, self.machine_id, self.etcd_client)
watcher.start()
# And sleep until new stuff comes in.
while True:
time.sleep(1)
########################################################################
def initialize(self, component_names):
""" Initializes this client for working with the components given. """
# Load the project configuration.
self.getConfig()
# Initialize the runtime manager.
self.runtime_manager = RuntimeManager(self.config)
# Find all the components for this machine.
for component_name in component_names:
component = self.runtime_manager.getComponent(component_name)
if not component:
fail('Unknown component named ' + component_name, project=self.project_name)
self.components.append(component)
def handleExit(self):
""" Function executed when the Python system exits. This unregisters the machine in etcd. """
self.is_running = False
try:
machine_state = MachineState(self.project_name, self.machine_id, self.etcd_client)
machine_state.removeMachine()
# Shut down the runtime manager if we have one
if self.runtime_manager is not None:
self.runtime_manager.join()
except Exception as e:
self.logger.exception(e)
pass
def startReporter(self):
""" Starts reporting that this machine is running. """
self.is_running = True
self.reporting_thread.start()
def reportMachineStatus(self):
""" Reports that this machine has running components. """
while self.is_running:
# Perform the update.
self.logger.debug('Reporting status for machine %s to etcd', self.machine_id)
machine_state = MachineState(self.project_name, self.machine_id, self.etcd_client)
machine_state.registerMachine([c.getName() for c in self.components], ttl=REPORT_TTL)
# Sleep for the TTL minus a few seconds.
time.sleep(REPORT_TTL - 5)
| apache-2.0 |
akshatharaj/django | tests/generic_views/test_edit.py | 199 | 19217 | from __future__ import unicode_literals
import warnings
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.test import (
SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.client import RequestFactory
from django.utils.deprecation import RemovedInDjango110Warning
from django.views.generic.base import View
from django.views.generic.edit import CreateView, FormMixin, ModelFormMixin
from . import views
from .models import Artist, Author
from .test_forms import AuthorForm
class FormMixinTests(SimpleTestCase):
def test_initial_data(self):
""" Test instance independence of initial data dict (see #16138) """
initial_1 = FormMixin().get_initial()
initial_1['foo'] = 'bar'
initial_2 = FormMixin().get_initial()
self.assertNotEqual(initial_1, initial_2)
def test_get_prefix(self):
""" Test prefix can be set (see #18872) """
test_string = 'test'
rf = RequestFactory()
get_request = rf.get('/')
class TestFormMixin(FormMixin):
request = get_request
default_kwargs = TestFormMixin().get_form_kwargs()
self.assertIsNone(default_kwargs.get('prefix'))
set_mixin = TestFormMixin()
set_mixin.prefix = test_string
set_kwargs = set_mixin.get_form_kwargs()
self.assertEqual(test_string, set_kwargs.get('prefix'))
def test_get_form(self):
class TestFormMixin(FormMixin):
request = RequestFactory().get('/')
self.assertIsInstance(
TestFormMixin().get_form(forms.Form), forms.Form,
'get_form() should use provided form class.'
)
class FormClassTestFormMixin(TestFormMixin):
form_class = forms.Form
self.assertIsInstance(
FormClassTestFormMixin().get_form(), forms.Form,
'get_form() should fallback to get_form_class() if none is provided.'
)
def test_get_form_missing_form_class_default_value(self):
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always')
class MissingDefaultValue(FormMixin):
request = RequestFactory().get('/')
form_class = forms.Form
def get_form(self, form_class):
return form_class(**self.get_form_kwargs())
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, RemovedInDjango110Warning)
self.assertEqual(
str(w[0].message),
'`generic_views.test_edit.MissingDefaultValue.get_form` method '
'must define a default value for its `form_class` argument.'
)
self.assertIsInstance(
MissingDefaultValue().get_form(), forms.Form,
)
def test_get_context_data(self):
class FormContext(FormMixin):
request = RequestFactory().get('/')
form_class = forms.Form
self.assertIsInstance(FormContext().get_context_data()['form'], forms.Form)
@override_settings(ROOT_URLCONF='generic_views.urls')
class BasicFormTests(TestCase):
def test_post_data(self):
res = self.client.post('/contact/', {'name': "Me", 'message': "Hello"})
self.assertRedirects(res, '/list/authors/')
class ModelFormMixinTests(SimpleTestCase):
def test_get_form(self):
form_class = views.AuthorGetQuerySetFormView().get_form_class()
self.assertEqual(form_class._meta.model, Author)
def test_get_form_checks_for_object(self):
mixin = ModelFormMixin()
mixin.request = RequestFactory().get('/')
self.assertEqual({'initial': {}, 'prefix': None},
mixin.get_form_kwargs())
@override_settings(ROOT_URLCONF='generic_views.urls')
class CreateViewTests(TestCase):
def test_create(self):
res = self.client.get('/edit/authors/create/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertNotIn('object', res.context)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.post('/edit/authors/create/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_invalid(self):
res = self.client.post('/edit/authors/create/',
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertEqual(Author.objects.count(), 0)
def test_create_with_object_url(self):
res = self.client.post('/edit/artists/create/',
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
artist = Artist.objects.get(name='Rene Magritte')
self.assertRedirects(res, '/detail/artist/%d/' % artist.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_create_with_redirect(self):
res = self.client.post('/edit/authors/create/redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
@ignore_warnings(category=RemovedInDjango110Warning)
def test_create_with_interpolated_redirect(self):
res = self.client.post(
'/edit/authors/create/interpolate_redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'}
)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.first().pk
self.assertRedirects(res, '/edit/author/%d/update/' % pk)
# Also test with escaped chars in URL
res = self.client.post(
'/edit/authors/create/interpolate_redirect_nonascii/',
{'name': 'John Doe', 'slug': 'john-doe'}
)
self.assertEqual(res.status_code, 302)
pk = Author.objects.get(name='John Doe').pk
self.assertRedirects(res, '/%C3%A9dit/author/{}/update/'.format(pk))
def test_create_with_special_properties(self):
res = self.client.get('/edit/authors/create/special/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertNotIn('object', res.context)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/authors/create/special/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
obj = Author.objects.get(slug='randall-munroe')
self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk}))
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_without_redirect(self):
        # Should raise exception -- No redirect URL provided, and no
        # get_absolute_url provided
        with self.assertRaises(ImproperlyConfigured):
            self.client.post('/edit/authors/create/naive/',
                             {'name': 'Randall Munroe', 'slug': 'randall-munroe'})
def test_create_restricted(self):
res = self.client.post('/edit/authors/create/restricted/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/accounts/login/?next=/edit/authors/create/restricted/')
def test_create_view_with_restricted_fields(self):
class MyCreateView(CreateView):
model = Author
fields = ['name']
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name'])
def test_create_view_all_fields(self):
class MyCreateView(CreateView):
model = Author
fields = '__all__'
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name', 'slug'])
def test_create_view_without_explicit_fields(self):
class MyCreateView(CreateView):
model = Author
message = (
"Using ModelFormMixin (base class of MyCreateView) without the "
"'fields' attribute is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
def test_define_both_fields_and_form_class(self):
class MyCreateView(CreateView):
model = Author
form_class = AuthorForm
fields = ['name']
message = "Specifying both 'fields' and 'form_class' is not permitted."
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
@override_settings(ROOT_URLCONF='generic_views.urls')
class UpdateViewTests(TestCase):
def test_update_post(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
def test_update_invalid(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_update_with_object_url(self):
a = Artist.objects.create(name='Rene Magritte')
res = self.client.post('/edit/artists/%d/update/' % a.pk,
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/detail/artist/%d/' % a.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_update_with_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
@ignore_warnings(category=RemovedInDjango110Warning)
def test_update_with_interpolated_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post(
'/edit/author/%d/update/interpolate_redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}
)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.first().pk
self.assertRedirects(res, '/edit/author/%d/update/' % pk)
# Also test with escaped chars in URL
res = self.client.post(
'/edit/author/%d/update/interpolate_redirect_nonascii/' % a.pk,
{'name': 'John Doe', 'slug': 'john-doe'}
)
self.assertEqual(res.status_code, 302)
pk = Author.objects.get(name='John Doe').pk
self.assertRedirects(res, '/%C3%A9dit/author/{}/update/'.format(pk))
def test_update_with_special_properties(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/author/%d/update/special/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/detail/author/%d/' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/update/naive/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
def test_update_get_object(self):
a = Author.objects.create(
pk=1,
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/update/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/update/',
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@override_settings(ROOT_URLCONF='generic_views.urls')
class DeleteViewTests(TestCase):
def test_delete_by_post(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html')
# Deletion with POST
res = self.client.post('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_by_delete(self):
# Deletion with browser compatible DELETE method
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.delete('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), [])
@ignore_warnings(category=RemovedInDjango110Warning)
def test_delete_with_interpolated_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/interpolate_redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/?deleted=%d' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), [])
# Also test with escaped chars in URL
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/{}/delete/interpolate_redirect_nonascii/'.format(a.pk))
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/%C3%A9dit/authors/create/?deleted={}'.format(a.pk))
def test_delete_with_special_properties(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/confirm_delete.html')
res = self.client.post('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/delete/naive/' % a.pk)
| bsd-3-clause |
mrichards42/xword | scripts/xworddebug/wxFB.py | 1 | 1821 | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 10 2016)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class DebugDialog
###########################################################################
class DebugDialog ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Lua Debug", pos = wx.DefaultPosition, size = wx.Size( 400,600 ), style = wx.DEFAULT_FRAME_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.FRAME_TOOL_WINDOW|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
self.panel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer2 = wx.BoxSizer( wx.VERTICAL )
self.text = wx.TextCtrl( self.panel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL|wx.TE_MULTILINE )
self.text.SetFont( wx.Font( 8, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Consolas" ) )
bSizer2.Add( self.text, 1, wx.ALL|wx.EXPAND, 5 )
self.panel.SetSizer( bSizer2 )
self.panel.Layout()
bSizer2.Fit( self.panel )
bSizer1.Add( self.panel, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer1 )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_CLOSE, self.Hide )
def __del__( self ):
pass
    # Virtual event handlers, override them in your derived class
def Hide( self, event ):
event.Skip()
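# A minimal subclassing sketch (illustrative only, not wxFormBuilder output;
# the name XWordDebugDialog is hypothetical). EVT_CLOSE is bound to Hide
# above, so a derived class typically overrides it, e.g.:
#
#   class XWordDebugDialog(DebugDialog):
#       def Hide(self, event):
#           self.Show(False)  # hide the frame instead of destroying it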
| gpl-3.0 |
KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/library_visual_scenes/visual_scene/asset/modified/modified/modified.py | 2 | 5469 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
import sys, string, os
from xml.dom import minidom, Node
from datetime import datetime, timedelta
from Core.Common.FUtils import FindXmlChild, GetXmlContent, ParseDate
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_visual_scenes', 'visual_scene', 'asset', 'modified']
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def CheckDate(self, context):
# Get the <modified> time for the input file
root = minidom.parse(context.GetInputFilename()).documentElement
inputDate = ParseDate(GetXmlContent(FindXmlChild(root, "library_visual_scenes", "visual_scene", "asset", "modified")))
        if inputDate is None:
context.Log("FAILED: Couldn't read <modified> value from test input file.")
return None
# Get the output file
outputFilenames = context.GetStepOutputFilenames("Export")
if len(outputFilenames) == 0:
context.Log("FAILED: There are no export steps.")
return None
# Get the <modified> time for the output file
root = minidom.parse(outputFilenames[0]).documentElement
outputDate = ParseDate(GetXmlContent(FindXmlChild(root, "library_visual_scenes", "visual_scene", "asset", "modified")))
        if outputDate is None:
context.Log("FAILED: Couldn't read <modified> value from the exported file.")
return None
        # Modified date must be greater than or equal to the original date to pass
if (outputDate - inputDate) < timedelta(0):
context.Log("FAILED: <modified> is not preserved.")
context.Log("The original <modified> time is " + str(inputDate))
context.Log("The exported <modified> time is " + str(outputDate))
return False
context.Log("PASSED: <modified> element is preserved or updated correctly.")
return True
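    # Worked example with illustrative timestamps: if the input file holds
    # <modified>2012-04-01T10:00:00Z</modified> and the export holds
    # <modified>2012-04-01T09:00:00Z</modified>, then outputDate - inputDate
    # is timedelta(hours=-1) < timedelta(0), so CheckDate fails; an equal or
    # later export timestamp passes.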
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
    # To pass intermediate you need to pass basic; this object could also
    # include additional tests specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
    # To pass advanced you need to pass intermediate; this object could also
    # include additional tests specific to the advanced badge.
def JudgeExemplary(self, context):
# if superior fails, no point in further checking
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
self.status_exemplary = self.CheckDate(context)
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck)
| mit |
egaxegax/django-dbcartajs | django/core/paginator.py | 103 | 5245 | from math import ceil
from django.utils import six
class InvalidPage(Exception):
pass
class PageNotAnInteger(InvalidPage):
pass
class EmptyPage(InvalidPage):
pass
class Paginator(object):
def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True):
self.object_list = object_list
self.per_page = int(per_page)
self.orphans = int(orphans)
self.allow_empty_first_page = allow_empty_first_page
self._num_pages = self._count = None
def validate_number(self, number):
"Validates the given 1-based page number."
try:
number = int(number)
except (TypeError, ValueError):
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
if number > self.num_pages:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise EmptyPage('That page contains no results')
return number
def page(self, number):
"Returns a Page object for the given 1-based page number."
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
if top + self.orphans >= self.count:
top = self.count
return Page(self.object_list[bottom:top], number, self)
def _get_count(self):
"Returns the total number of objects, across all pages."
if self._count is None:
try:
self._count = self.object_list.count()
except (AttributeError, TypeError):
# AttributeError if object_list has no count() method.
# TypeError if object_list.count() requires arguments
# (i.e. is of type list).
self._count = len(self.object_list)
return self._count
count = property(_get_count)
def _get_num_pages(self):
"Returns the total number of pages."
if self._num_pages is None:
if self.count == 0 and not self.allow_empty_first_page:
self._num_pages = 0
else:
hits = max(1, self.count - self.orphans)
self._num_pages = int(ceil(hits / float(self.per_page)))
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
"""
Returns a 1-based range of pages for iterating through within
a template for loop.
"""
return range(1, self.num_pages + 1)
page_range = property(_get_page_range)
QuerySetPaginator = Paginator # For backwards-compatibility.
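# A minimal usage sketch (illustrative, not part of this module). Orphans
# are folded into the last page: with count=23, per_page=10, orphans=3 the
# paginator computes hits = max(1, 23 - 3) = 20 and num_pages = ceil(20/10) = 2.
#
#   >>> p = Paginator(list(range(23)), 10, orphans=3)
#   >>> p.count, p.num_pages
#   (23, 2)
#   >>> p.page(2).object_list[-1]   # the 3 orphans land on page 2
#   22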
class Page(object):
def __init__(self, object_list, number, paginator):
self.object_list = object_list
self.number = number
self.paginator = paginator
def __repr__(self):
return '<Page %s of %s>' % (self.number, self.paginator.num_pages)
def __len__(self):
return len(self.object_list)
def __getitem__(self, index):
if not isinstance(index, (slice,) + six.integer_types):
raise TypeError
# The object_list is converted to a list so that if it was a QuerySet
# it won't be a database hit per __getitem__.
return list(self.object_list)[index]
# The following four methods are only necessary for Python <2.6
# compatibility (this class could just extend 2.6's collections.Sequence).
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum([1 for v in self if v == value])
# End of compatibility methods.
def has_next(self):
return self.number < self.paginator.num_pages
def has_previous(self):
return self.number > 1
def has_other_pages(self):
return self.has_previous() or self.has_next()
def next_page_number(self):
return self.paginator.validate_number(self.number + 1)
def previous_page_number(self):
return self.paginator.validate_number(self.number - 1)
def start_index(self):
"""
Returns the 1-based index of the first object on this page,
relative to total objects in the paginator.
"""
# Special case, return zero if no items.
if self.paginator.count == 0:
return 0
return (self.paginator.per_page * (self.number - 1)) + 1
def end_index(self):
"""
Returns the 1-based index of the last object on this page,
relative to total objects found (hits).
"""
# Special case for the last page because there can be orphans.
if self.number == self.paginator.num_pages:
return self.paginator.count
return self.number * self.paginator.per_page
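    # Worked example (illustrative): with per_page=10, orphans=3 and
    # count=23, page 2 is the last page, so start_index() returns 11 and
    # end_index() returns 23 -- the orphans are included in the final page.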
| gpl-2.0 |
cdjones32/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py | 1093 | 8936 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
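    # For example (illustrative), after od['a'] = 1 and od['b'] = 2:
    #   root   = [link_b, link_a, None]   # sentinel: [PREV, NEXT, KEY]
    #   link_a = [root,   link_b, 'a']
    #   link_b = [link_a, root,   'b']
    # and self.__map == {'a': link_a, 'b': link_b}.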
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
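    # Illustrative: order matters only against another OrderedDict --
    # OrderedDict([('a', 1), ('b', 2)]) == OrderedDict([('b', 2), ('a', 1)])
    # is False, yet both compare equal to the plain dict {'a': 1, 'b': 2}.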
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| apache-2.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/tests/test_dviread.py | 7 | 1814 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from nose.tools import assert_equal
import matplotlib.dviread as dr
import os.path
original_find_tex_file = dr.find_tex_file
def setup():
dr.find_tex_file = lambda x: x
def teardown():
dr.find_tex_file = original_find_tex_file
def test_PsfontsMap():
filename = os.path.join(
os.path.dirname(__file__),
'baseline_images', 'dviread', 'test.map')
fontmap = dr.PsfontsMap(filename)
# Check all properties of a few fonts
for n in [1, 2, 3, 4, 5]:
key = 'TeXfont%d' % n
entry = fontmap[key]
assert_equal(entry.texname, key)
assert_equal(entry.psname, 'PSfont%d' % n)
if n not in [3, 5]:
assert_equal(entry.encoding, 'font%d.enc' % n)
elif n == 3:
assert_equal(entry.encoding, 'enc3.foo')
# We don't care about the encoding of TeXfont5, which specifies
# multiple encodings.
if n not in [1, 5]:
assert_equal(entry.filename, 'font%d.pfa' % n)
else:
assert_equal(entry.filename, 'font%d.pfb' % n)
if n == 4:
assert_equal(entry.effects, {'slant': -0.1, 'extend': 2.2})
else:
assert_equal(entry.effects, {})
# Some special cases
entry = fontmap['TeXfont6']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont7']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, 'font7.enc')
entry = fontmap['TeXfont8']
assert_equal(entry.filename, 'font8.pfb')
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont9']
assert_equal(entry.filename, '/absolute/font9.pfb')
| mit |
vvuk/servo | tests/wpt/css-tests/tools/html5lib/html5lib/html5parser.py | 423 | 117297 | from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
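# A minimal usage sketch (illustrative, assuming the default etree
# treebuilder):
#
#   >>> document = parse("<p>Hello</p>")        # implies html/head/body
#   >>> fragment = parseFragment("<b>Hi</b>")   # parsed as content of a <div>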
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
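# Illustrative note: method_decorator_metaclass(f) builds a metaclass that
# replaces every plain function in a class body with f(function) at class
# creation time; getPhases() below uses it to wrap Phase methods with a
# logger when debug=True.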
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
        returned. Built-in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
        This may be replaced with, e.g., a sanitizer that converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0]
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
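    # Illustrative: <annotation-xml encoding="text/HTML"> (MathML) is an HTML
    # integration point -- the encoding comparison above is case-insensitive --
    # as are the SVG foreignObject, desc and title elements.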
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
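    # Illustrative: reversing the attribute list before dict() makes the
    # first occurrence of a duplicate attribute win, e.g.
    # [("id", "a"), ("id", "b")] becomes {"id": "a"}.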
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = adjustForeignAttributesMap
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type": type_names[token['type']]}
except:
raise
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self. endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and as a ASCII-superset works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
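# "Noah's Ark" clause (informal): at most three identical formatting
# elements (same name, namespace and attributes) may sit between the last
# scope marker and the end of the list, so a fourth consecutive <b>, say,
# evicts the oldest matching one before this element is appended.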
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
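# List items auto-close their predecessors: e.g. "<li>a<li>b" implies
# </li> before the second <li>, and dd/dt close each other the same way.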
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
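# An <a> cannot nest inside an open <a>: e.g. "<a>1<a>2" behaves like
# "<a>1</a><a>2", so an implied </a> is run through the adoption agency
# algorithm first.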
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
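# Legacy <isindex> is expanded below into (roughly)
# <form><hr><label>PROMPT<input name="isindex"></label><hr></form>,
# with any attributes other than action/prompt copied onto the <input>.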
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
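# Informal example: for the misnested input "<b><p>X</b>Y", running this
# algorithm on </b> produces a tree equivalent to
# "<b></b><p><b>X</b>Y</p>": the formatting element is cloned rather
# than left overlapping the block.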
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
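# ("Table magic" = foster parenting: with insertFromTable set, content
# that is illegal inside a table, e.g. the "x" in "<table>x", is
# relocated to just before the <table> element in the tree.)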
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
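# Character tokens seen inside a table are buffered until the next
# non-character token; an all-whitespace run stays in the table, while
# any other run is foster-parented via inTable.insertText above.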
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
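# A new cell or row start tag implies closing the open cell first, so
# e.g. "<tr><td>a<td>b" behaves like "<tr><td>a</td><td>b".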
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes the <optgroup> element itself
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
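# HTML tags that "break out" of foreign (SVG/MathML) content: seeing one
# of these, e.g. the <div> in "<svg><div>", abandons the foreign context
# and reprocesses the tag as ordinary HTML.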
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
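# The tokenizer lowercases every tag name, but SVG element names are
# camelCase, so names such as "foreignobject" are mapped back to their
# canonical spellings (e.g. "foreignObject") before insertion.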
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, name):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
# If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| mpl-2.0 |
skbly7/serc | website/wiki/plugins/attachments/wiki_plugin.py | 14 | 1547 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.conf.urls import url, include
from django.utils.translation import ugettext as _
from wiki.core.plugins import registry
from wiki.core.plugins.base import BasePlugin
from wiki.plugins.attachments import views
from wiki.plugins.attachments import models
from wiki.plugins.attachments import settings
from wiki.plugins.attachments.markdown_extensions import AttachmentExtension
from wiki.plugins.notifications.settings import ARTICLE_EDIT
from wiki.plugins.notifications.util import truncate_title
class AttachmentPlugin(BasePlugin):
slug = settings.SLUG
urlpatterns = {
'article': [url('', include('wiki.plugins.attachments.urls'))]
}
article_tab = (_('Attachments'), "fa fa-file")
article_view = views.AttachmentView().dispatch
# List of notifications to construct signal handlers for. This
# is handled inside the notifications plugin.
notifications = [{
'model': models.AttachmentRevision,
'message': lambda obj: (
_("A file was changed: %s")
if not obj.deleted
else
_("A file was deleted: %s")
) % truncate_title(obj.get_filename()),
'key': ARTICLE_EDIT,
'created': True,
'get_article': lambda obj: obj.attachment.article}
]
markdown_extensions = [AttachmentExtension()]
def __init__(self):
# print "I WAS LOADED!"
pass
registry.register(AttachmentPlugin)
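# Hedged sketch (not part of the original plugin): the 'message' lambda in the
# notifications list above picks a translation based on obj.deleted and then
# interpolates the truncated filename. For a hypothetical revision object rev
# with rev.deleted == False and rev.get_filename() == "report.pdf", the
# notifications plugin would effectively compute:
#
#     message = _("A file was changed: %s") % truncate_title("report.pdf")
#     # -> "A file was changed: report.pdf"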
| mit |
NooNameR/bravo_kernel_3.0 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by comm/pid.
# If a [comm|pid] arg is specified, only syscalls issued by that comm or pid
# are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
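# Note (illustrative, not in the original script): syscalls is an autodict, so
# the first event for a given (comm, pid, id) triple finds an empty dict at
# the innermost level and "+= 1" raises TypeError; the handler above then
# seeds the counter with 1, and later events for the same triple take the
# fast "+= 1" path.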
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Imlunar/DJStew | cogs/downloader.py | 17 | 26747 | from discord.ext import commands
from cogs.utils.dataIO import dataIO
from cogs.utils import checks
from cogs.utils.chat_formatting import pagify, box
from __main__ import send_cmd_help, set_cog
import os
from subprocess import run as sp_run, PIPE
import shutil
from asyncio import as_completed
from setuptools import distutils
import discord
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from time import time
from importlib.util import find_spec
from copy import deepcopy
NUM_THREADS = 4
REPO_NONEX = 0x1
REPO_CLONE = 0x2
REPO_SAME = 0x4
REPOS_LIST = "https://twentysix26.github.io/Red-Docs/red_cog_approved_repos/"
DISCLAIMER = ("You're about to add a 3rd party repository. The creator of Red"
" and its community have no responsibility for any potential "
"damage that the content of 3rd party repositories might cause."
"\nBy typing 'I agree' you declare to have read and understand "
"the above message. This message won't be shown again until the"
" next reboot.")
class UpdateError(Exception):
pass
class CloningError(UpdateError):
pass
class RequirementFail(UpdateError):
pass
class Downloader:
"""Cog downloader/installer."""
def __init__(self, bot):
self.bot = bot
self.disclaimer_accepted = False
self.path = os.path.join("data", "downloader")
self.file_path = os.path.join(self.path, "repos.json")
# {repo_name: {'url': ..., cog1: {'INSTALLED': ...}, cog2: {'INSTALLED': ...}}}
self.repos = dataIO.load_json(self.file_path)
self.executor = ThreadPoolExecutor(NUM_THREADS)
self._do_first_run()
def save_repos(self):
dataIO.save_json(self.file_path, self.repos)
@commands.group(pass_context=True)
@checks.is_owner()
async def cog(self, ctx):
"""Additional cogs management"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@cog.group(pass_context=True)
async def repo(self, ctx):
"""Repo management commands"""
if ctx.invoked_subcommand is None or \
isinstance(ctx.invoked_subcommand, commands.Group):
await send_cmd_help(ctx)
return
@repo.command(name="add", pass_context=True)
async def _repo_add(self, ctx, repo_name: str, repo_url: str):
"""Adds repo to available repo lists
Warning: Adding 3RD Party Repositories is at your own
Risk."""
if not self.disclaimer_accepted:
await self.bot.say(DISCLAIMER)
answer = await self.bot.wait_for_message(timeout=30,
author=ctx.message.author)
if answer is None:
await self.bot.say('Not adding repo.')
return
elif "i agree" not in answer.content.lower():
await self.bot.say('Not adding repo.')
return
else:
self.disclaimer_accepted = True
self.repos[repo_name] = {}
self.repos[repo_name]['url'] = repo_url
try:
self.update_repo(repo_name)
except CloningError:
await self.bot.say("That repository link doesn't seem to be "
"valid.")
del self.repos[repo_name]
return
self.populate_list(repo_name)
self.save_repos()
data = self.get_info_data(repo_name)
if data:
msg = data.get("INSTALL_MSG")
if msg:
await self.bot.say(msg[:2000])
await self.bot.say("Repo '{}' added.".format(repo_name))
@repo.command(name="remove")
async def _repo_del(self, repo_name: str):
"""Removes repo from repo list. COGS ARE NOT REMOVED."""
def remove_readonly(func, path, excinfo):
os.chmod(path, 0o755)
func(path)
if repo_name not in self.repos:
await self.bot.say("That repo doesn't exist.")
return
del self.repos[repo_name]
try:
shutil.rmtree(os.path.join(self.path, repo_name), onerror=remove_readonly)
except FileNotFoundError:
pass
self.save_repos()
await self.bot.say("Repo '{}' removed.".format(repo_name))
@cog.command(name="list")
async def _send_list(self, repo_name=None):
"""Lists installable cogs
Repositories list:
https://twentysix26.github.io/Red-Docs/red_cog_approved_repos/"""
retlist = []
if repo_name and repo_name in self.repos:
msg = "Available cogs:\n"
for cog in sorted(self.repos[repo_name].keys()):
if 'url' == cog:
continue
data = self.get_info_data(repo_name, cog)
if data and data.get("HIDDEN") is True:
continue
if data:
retlist.append([cog, data.get("SHORT", "")])
else:
retlist.append([cog, ''])
else:
if self.repos:
msg = "Available repos:\n"
for repo_name in sorted(self.repos.keys()):
data = self.get_info_data(repo_name)
if data:
retlist.append([repo_name, data.get("SHORT", "")])
else:
retlist.append([repo_name, ""])
else:
await self.bot.say("You haven't added a repository yet.\n"
"Start now! {}".format(REPOS_LIST))
return
col_width = max((len(row[0]) for row in retlist), default=0) + 2
for row in retlist:
msg += "\t" + "".join(word.ljust(col_width) for word in row) + "\n"
msg += "\nRepositories list: {}".format(REPOS_LIST)
for page in pagify(msg, delims=['\n'], shorten_by=8):
await self.bot.say(box(page))
@cog.command()
async def info(self, repo_name: str, cog: str=None):
"""Shows info about the specified cog"""
if cog is not None:
cogs = self.list_cogs(repo_name)
if cog in cogs:
data = self.get_info_data(repo_name, cog)
if data:
msg = "{} by {}\n\n".format(cog, data["AUTHOR"])
msg += data["NAME"] + "\n\n" + data["DESCRIPTION"]
await self.bot.say(box(msg))
else:
await self.bot.say("The specified cog has no info file.")
else:
await self.bot.say("That cog doesn't exist."
" Use cog list to see the full list.")
else:
data = self.get_info_data(repo_name)
if data is None:
await self.bot.say("That repo does not exist or the"
" information file is missing for that repo"
".")
return
name = data.get("NAME", None)
name = repo_name if name is None else name
author = data.get("AUTHOR", "Unknown")
desc = data.get("DESCRIPTION", "")
msg = ("```{} by {}```\n\n{}".format(name, author, desc))
await self.bot.say(msg)
@cog.command(hidden=True)
async def search(self, *terms: str):
"""Search installable cogs"""
pass  # TODO: implement cog search
@cog.command(pass_context=True)
async def update(self, ctx):
"""Updates cogs"""
tasknum = 0
num_repos = len(self.repos)
min_dt = 0.5
burst_inc = 0.1/(NUM_THREADS)
touch_n = tasknum
touch_t = time()
def regulate(touch_t, touch_n):
dt = time() - touch_t
if dt + burst_inc*(touch_n) > min_dt:
touch_n = 0
touch_t = time()
return True, touch_t, touch_n
return False, touch_t, touch_n + 1
tasks = []
for r in self.repos:
task = partial(self.update_repo, r)
task = self.bot.loop.run_in_executor(self.executor, task)
tasks.append(task)
base_msg = "Downloading updated cogs, please wait... "
status = ' %d/%d repos updated' % (tasknum, num_repos)
msg = await self.bot.say(base_msg + status)
updated_cogs = []
new_cogs = []
deleted_cogs = []
failed_cogs = []
error_repos = {}
installed_updated_cogs = []
for f in as_completed(tasks):
tasknum += 1
try:
name, updates, oldhash = await f
if updates:
if type(updates) is dict:
for k, l in updates.items():
tl = [(name, c, oldhash) for c in l]
if k == 'A':
new_cogs.extend(tl)
elif k == 'D':
deleted_cogs.extend(tl)
elif k == 'M':
updated_cogs.extend(tl)
except UpdateError as e:
name, what = e.args
error_repos[name] = what
edit, touch_t, touch_n = regulate(touch_t, touch_n)
if edit:
status = ' %d/%d repos updated' % (tasknum, num_repos)
msg = await self._robust_edit(msg, base_msg + status)
status = 'done. '
for t in updated_cogs:
repo, cog, _ = t
if self.repos[repo][cog]['INSTALLED']:
try:
await self.install(repo, cog,
no_install_on_reqs_fail=False)
except RequirementFail:
failed_cogs.append(t)
else:
installed_updated_cogs.append(t)
for t in updated_cogs.copy():
if t in failed_cogs:
updated_cogs.remove(t)
if not any(self.repos[repo][cog]['INSTALLED'] for
repo, cog, _ in updated_cogs):
status += ' No updates to apply. '
if new_cogs:
status += '\nNew cogs: ' \
+ ', '.join('%s/%s' % c[:2] for c in new_cogs) + '.'
if deleted_cogs:
status += '\nDeleted cogs: ' \
+ ', '.join('%s/%s' % c[:2] for c in deleted_cogs) + '.'
if updated_cogs:
status += '\nUpdated cogs: ' \
+ ', '.join('%s/%s' % c[:2] for c in updated_cogs) + '.'
if failed_cogs:
status += '\nCogs that got new requirements which have ' + \
'failed to install: ' + \
', '.join('%s/%s' % c[:2] for c in failed_cogs) + '.'
if error_repos:
status += '\nThe following repos failed to update: '
for n, what in error_repos.items():
status += '\n%s: %s' % (n, what)
msg = await self._robust_edit(msg, base_msg + status)
if not installed_updated_cogs:
return
patchnote_lang = 'Prolog'
shorten_by = 8 + len(patchnote_lang)
for note in self.patch_notes_handler(installed_updated_cogs):
if note is None:
continue
for page in pagify(note, delims=['\n'], shorten_by=shorten_by):
await self.bot.say(box(page, patchnote_lang))
await self.bot.say("Cogs updated. Reload updated cogs? (yes/no)")
answer = await self.bot.wait_for_message(timeout=15,
author=ctx.message.author)
if answer is None:
await self.bot.say("Ok then, you can reload cogs with"
" `{}reload <cog_name>`".format(ctx.prefix))
elif answer.content.lower().strip() == "yes":
registry = dataIO.load_json(os.path.join("data", "red", "cogs.json"))
update_list = []
fail_list = []
for repo, cog, _ in installed_updated_cogs:
if not registry.get('cogs.' + cog, False):
continue
try:
self.bot.unload_extension("cogs." + cog)
self.bot.load_extension("cogs." + cog)
update_list.append(cog)
except Exception:
fail_list.append(cog)
msg = 'Done.'
if update_list:
msg += " The following cogs were reloaded: "\
+ ', '.join(update_list) + "\n"
if fail_list:
msg += " The following cogs failed to reload: "\
+ ', '.join(fail_list)
await self.bot.say(msg)
else:
await self.bot.say("Ok then, you can reload cogs with"
" `{}reload <cog_name>`".format(ctx.prefix))
def patch_notes_handler(self, repo_cog_hash_pairs):
for repo, cog, oldhash in repo_cog_hash_pairs:
repo_path = os.path.join('data', 'downloader', repo)
cogfile = os.path.join(cog, cog + ".py")
cmd = ["git", "-C", repo_path, "log", "--relative-date",
"--reverse", oldhash + '..', cogfile
]
try:
log = sp_run(cmd, stdout=PIPE).stdout.decode().strip()
yield self.format_patch(repo, cog, log)
except Exception:
pass
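# Illustrative note (not part of the original cog): for a cog "mycog" in a
# repo "myrepo" last installed at commit abc123, patch_notes_handler shells
# out to roughly
#
#     git -C data/downloader/myrepo log --relative-date --reverse \
#         abc123.. mycog/mycog.py
#
# and format_patch wraps whatever git prints under a "Patch Notes" header.
# "myrepo", "mycog" and "abc123" are hypothetical placeholders.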
@cog.command(pass_context=True)
async def uninstall(self, ctx, repo_name, cog):
"""Uninstalls a cog"""
if repo_name not in self.repos:
await self.bot.say("That repo doesn't exist.")
return
if cog not in self.repos[repo_name]:
await self.bot.say("That cog isn't available from that repo.")
return
set_cog("cogs." + cog, False)
self.repos[repo_name][cog]['INSTALLED'] = False
self.save_repos()
os.remove(os.path.join("cogs", cog + ".py"))
owner = self.bot.get_cog('Owner')
await owner.unload.callback(owner, cog_name=cog)
await self.bot.say("Cog successfully uninstalled.")
@cog.command(name="install", pass_context=True)
async def _install(self, ctx, repo_name: str, cog: str):
"""Installs specified cog"""
if repo_name not in self.repos:
await self.bot.say("That repo doesn't exist.")
return
if cog not in self.repos[repo_name]:
await self.bot.say("That cog isn't available from that repo.")
return
data = self.get_info_data(repo_name, cog)
try:
install_cog = await self.install(repo_name, cog, notify_reqs=True)
except RequirementFail:
await self.bot.say("That cog has requirements that I could not "
"install. Check the console for more "
"informations.")
return
if data is not None:
install_msg = data.get("INSTALL_MSG", None)
if install_msg:
await self.bot.say(install_msg[:2000])
if install_cog:
await self.bot.say("Installation completed. Load it now? (yes/no)")
answer = await self.bot.wait_for_message(timeout=15,
author=ctx.message.author)
if answer is None:
await self.bot.say("Ok then, you can load it with"
" `{}load {}`".format(ctx.prefix, cog))
elif answer.content.lower().strip() == "yes":
set_cog("cogs." + cog, True)
owner = self.bot.get_cog('Owner')
await owner.load.callback(owner, cog_name=cog)
else:
await self.bot.say("Ok then, you can load it with"
" `{}load {}`".format(ctx.prefix, cog))
elif install_cog is False:
await self.bot.say("Invalid cog. Installation aborted.")
else:
await self.bot.say("That cog doesn't exist. Use cog list to see"
" the full list.")
async def install(self, repo_name, cog, *, notify_reqs=False,
no_install_on_reqs_fail=True):
# 'no_install_on_reqs_fail' will make the cog get installed anyway
# on requirements installation fail. This is necessary because due to
# how 'cog update' works right now, the user would have no way to
# reupdate the cog if the update fails, since 'cog update' only
# updates the cogs that get a new commit.
# This is not a great way to deal with the problem and a cog update
# rework would probably be the best course of action.
reqs_failed = False
if cog.endswith('.py'):
cog = cog[:-3]
path = self.repos[repo_name][cog]['file']
cog_folder_path = self.repos[repo_name][cog]['folder']
cog_data_path = os.path.join(cog_folder_path, 'data')
data = self.get_info_data(repo_name, cog)
if data is not None:
requirements = data.get("REQUIREMENTS", [])
requirements = [r for r in requirements
if not self.is_lib_installed(r)]
if requirements and notify_reqs:
await self.bot.say("Installing cog's requirements...")
for requirement in requirements:
if not self.is_lib_installed(requirement):
success = await self.bot.pip_install(requirement)
if not success:
if no_install_on_reqs_fail:
raise RequirementFail()
else:
reqs_failed = True
to_path = os.path.join("cogs", cog + ".py")
print("Copying {}...".format(cog))
shutil.copy(path, to_path)
if os.path.exists(cog_data_path):
print("Copying {}'s data folder...".format(cog))
distutils.dir_util.copy_tree(cog_data_path,
os.path.join('data', cog))
self.repos[repo_name][cog]['INSTALLED'] = True
self.save_repos()
if not reqs_failed:
return True
else:
raise RequirementFail()
def get_info_data(self, repo_name, cog=None):
if cog is not None:
cogs = self.list_cogs(repo_name)
if cog in cogs:
info_file = os.path.join(cogs[cog].get('folder'), "info.json")
if os.path.isfile(info_file):
try:
data = dataIO.load_json(info_file)
except Exception:
return None
return data
else:
repo_info = os.path.join(self.path, repo_name, 'info.json')
if os.path.isfile(repo_info):
try:
data = dataIO.load_json(repo_info)
return data
except Exception:
return None
return None
def list_cogs(self, repo_name):
valid_cogs = {}
repo_path = os.path.join(self.path, repo_name)
folders = [f for f in os.listdir(repo_path)
if os.path.isdir(os.path.join(repo_path, f))]
legacy_path = os.path.join(repo_path, "cogs")
legacy_folders = []
if os.path.exists(legacy_path):
for f in os.listdir(legacy_path):
if os.path.isdir(os.path.join(legacy_path, f)):
legacy_folders.append(os.path.join("cogs", f))
folders = folders + legacy_folders
for f in folders:
cog_folder_path = os.path.join(self.path, repo_name, f)
cog_folder = os.path.basename(cog_folder_path)
for cog in os.listdir(cog_folder_path):
cog_path = os.path.join(cog_folder_path, cog)
if os.path.isfile(cog_path) and cog_folder == cog[:-3]:
valid_cogs[cog[:-3]] = {'folder': cog_folder_path,
'file': cog_path}
return valid_cogs
def get_dir_name(self, url):
splitted = url.split("/")
git_name = splitted[-1]
return git_name[:-4]
def is_lib_installed(self, name):
return bool(find_spec(name))
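# Illustrative examples (not in the original source), assuming a clone URL of
# the usual form:
#
#     self.get_dir_name("https://github.com/user/repo.git")  # -> "repo"
#     self.is_lib_installed("discord")  # True iff the module is importable
#
# get_dir_name just drops the trailing ".git" from the last URL segment;
# is_lib_installed relies on importlib.util.find_spec.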
def _do_first_run(self):
save = False
repos_copy = deepcopy(self.repos)
# Issue 725
for repo in repos_copy:
for cog in repos_copy[repo]:
cog_data = repos_copy[repo][cog]
if isinstance(cog_data, str): # ... url field
continue
for k, v in cog_data.items():
if k in ("file", "folder"):
repos_copy[repo][cog][k] = os.path.normpath(cog_data[k])
if self.repos != repos_copy:
self.repos = repos_copy
save = True
invalid = []
for repo in self.repos:
broken = 'url' in self.repos[repo] and len(self.repos[repo]) == 1
if broken:
save = True
try:
self.update_repo(repo)
self.populate_list(repo)
except CloningError:
invalid.append(repo)
continue
except Exception as e:
print(e) # TODO: Proper logging
continue
for repo in invalid:
del self.repos[repo]
if save:
self.save_repos()
def populate_list(self, name):
valid_cogs = self.list_cogs(name)
new = set(valid_cogs.keys())
old = set(self.repos[name].keys())
for cog in new - old:
self.repos[name][cog] = valid_cogs.get(cog, {})
self.repos[name][cog]['INSTALLED'] = False
for cog in new & old:
self.repos[name][cog].update(valid_cogs[cog])
for cog in old - new:
if cog != 'url':
del self.repos[name][cog]
def update_repo(self, name):
def run(*args, **kwargs):
env = os.environ.copy()
env['GIT_TERMINAL_PROMPT'] = '0'
kwargs['env'] = env
return sp_run(*args, **kwargs)
try:
dd = self.path
if name not in self.repos:
raise UpdateError("Repo does not exist in data, wtf")
folder = os.path.join(dd, name)
# Make sure we don't git reset the Red folder on accident
if not os.path.exists(os.path.join(folder, '.git')):
#if os.path.exists(folder):
#shutil.rmtree(folder)
url = self.repos[name].get('url')
if not url:
raise UpdateError("Need to clone but no URL set")
branch = None
if "@" in url: # Specific branch
url, branch = url.rsplit("@", maxsplit=1)
if branch is None:
p = run(["git", "clone", url, folder])
else:
p = run(["git", "clone", "-b", branch, url, folder])
if p.returncode != 0:
raise CloningError()
self.populate_list(name)
return name, REPO_CLONE, None
else:
rpbcmd = ["git", "-C", folder, "rev-parse", "--abbrev-ref", "HEAD"]
p = run(rpbcmd, stdout=PIPE)
branch = p.stdout.decode().strip()
rpcmd = ["git", "-C", folder, "rev-parse", branch]
p = run(["git", "-C", folder, "reset", "--hard",
"origin/%s" % branch, "-q"])
if p.returncode != 0:
raise UpdateError("Error resetting to origin/%s" % branch)
p = run(rpcmd, stdout=PIPE)
if p.returncode != 0:
raise UpdateError("Unable to determine old commit hash")
oldhash = p.stdout.decode().strip()
p = run(["git", "-C", folder, "pull", "-q", "--ff-only"])
if p.returncode != 0:
raise UpdateError("Error pulling updates")
p = run(rpcmd, stdout=PIPE)
if p.returncode != 0:
raise UpdateError("Unable to determine new commit hash")
newhash = p.stdout.decode().strip()
if oldhash == newhash:
return name, REPO_SAME, None
else:
self.populate_list(name)
self.save_repos()
ret = {}
cmd = ['git', '-C', folder, 'diff', '--no-commit-id',
'--name-status', oldhash, newhash]
p = run(cmd, stdout=PIPE)
if p.returncode != 0:
raise UpdateError("Error in git diff")
changed = p.stdout.strip().decode().split('\n')
for f in changed:
if not f.endswith('.py'):
continue
status, _, cogpath = f.partition('\t')
cogname = os.path.split(cogpath)[-1][:-3] # strip .py
if status not in ret:
ret[status] = []
ret[status].append(cogname)
return name, ret, oldhash
except CloningError as e:
raise CloningError(name, *e.args) from None
except UpdateError as e:
raise UpdateError(name, *e.args) from None
async def _robust_edit(self, msg, text):
try:
msg = await self.bot.edit_message(msg, text)
except discord.errors.NotFound:
msg = await self.bot.send_message(msg.channel, text)
return msg
@staticmethod
def format_patch(repo, cog, log):
header = "Patch Notes for %s/%s" % (repo, cog)
line = "=" * len(header)
if log:
return '\n'.join((header, line, log))
def check_folders():
if not os.path.exists(os.path.join("data", "downloader")):
print('Making repo downloads folder...')
os.mkdir(os.path.join("data", "downloader"))
def check_files():
f = os.path.join("data", "downloader", "repos.json")
if not dataIO.is_valid_json(f):
print("Creating default data/downloader/repos.json")
dataIO.save_json(f, {})
def setup(bot):
check_folders()
check_files()
n = Downloader(bot)
bot.add_cog(n)
| gpl-3.0 |
hydai/sligen | src/wordgen/wordgen.py | 6 | 1075 | import random
class Wordgen(object):
def __init__(self):
self.verb = []
self.final_noun = []
self.other = []
f = open('data/newbuzzword.txt', 'r', encoding='utf-8')
for line in f:
dat = line.split('\t')
word = dat[0]
part = dat[1][:-1]
if part == 'v':
self.verb.append(word)
elif part == 'fn':
self.final_noun.append(word)
else:
self.other.append(word)
def moistPhrase(self):
verb_prob = 6
final_noun_prob = 2
verb_len = len(self.verb)
final_noun_len = len(self.final_noun)
res = []
if random.randint(0, verb_prob) == 0:
res += [ self.verb[random.randint(0, verb_len - 1)] ]
res += [self.other[i] for i in random.sample(range(len(self.other)), 2)]
if random.randint(0, final_noun_prob) == 0:
res += [ self.final_noun[random.randint(0, final_noun_len - 1)] ]
return ''.join(res)
| mit |
krummler/zxing-ios | cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/Perforce.py | 34 | 3814 | """SCons.Tool.Perforce.py
Tool-specific initialization for Perforce Source Code Management system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/Perforce.py 5023 2010/06/14 22:05:46 scons"
import os
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util
# This function should maybe be moved to SCons.Util?
from SCons.Tool.PharLapCommon import addPathIfNotExists
# Variables that we want to import from the base OS environment.
_import_env = [ 'P4PORT', 'P4CLIENT', 'P4USER', 'USER', 'USERNAME', 'P4PASSWD',
'P4CHARSET', 'P4LANGUAGE', 'SystemRoot' ]
PerforceAction = SCons.Action.Action('$P4COM', '$P4COMSTR')
def generate(env):
"""Add a Builder factory function and construction variables for
Perforce to an Environment."""
def PerforceFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The Perforce() factory is deprecated and there is no replacement.""")
return SCons.Builder.Builder(action = PerforceAction, env = env)
#setattr(env, 'Perforce', PerforceFactory)
env.Perforce = PerforceFactory
env['P4'] = 'p4'
env['P4FLAGS'] = SCons.Util.CLVar('')
env['P4COM'] = '$P4 $P4FLAGS sync $TARGET'
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Perforce seems to use the PWD environment variable rather than
# calling getcwd() for itself, which is odd. If no PWD variable
# is present, p4 WILL call getcwd, but this seems to cause problems
# with good ol' Windows's tilde-mangling for long file names.
environ['PWD'] = env.Dir('#').get_abspath()
for var in _import_env:
v = os.environ.get(var)
if v:
environ[var] = v
if SCons.Util.can_read_reg:
# If we can read the registry, add the path to Perforce to our environment.
try:
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Perforce\\environment')
val, tok = SCons.Util.RegQueryValueEx(k, 'P4INSTROOT')
addPathIfNotExists(environ, 'PATH', val)
except SCons.Util.RegError:
# Can't detect where Perforce is, hope the user has it set in the
# PATH.
pass
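# Hedged usage sketch (not part of the original tool): in an SConstruct one
# would typically load the tool and wire it to the deprecated
# source-code-management hooks, e.g.
#
#     env = Environment(tools=['default', 'Perforce'])
#     env.SourceCode('.', env.Perforce())  # both APIs are deprecated
#
# The exact SourceCode() spelling depends on the SCons version in use.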
def exists(env):
return env.Detect('p4')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
gangadharkadam/v5_erp | erpnext/projects/report/daily_time_log_summary/daily_time_log_summary.py | 3 | 2654 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
def execute(filters=None):
if not filters:
filters = {}
elif filters.get("from_date") or filters.get("to_date"):
filters["from_time"] = "00:00:00"
filters["to_time"] = "24:00:00"
columns = [_("Time Log") + ":Link/Time Log:120", _("Employee") + "::150", _("From Datetime") + "::140",
_("To Datetime") + "::140", _("Hours") + "::70", _("Activity Type") + "::120", _("Task") + ":Link/Task:150",
_("Task Subject") + "::180", _("Project") + ":Link/Project:120", _("Status") + "::70"]
user_map = get_user_map()
task_map = get_task_map()
conditions = build_conditions(filters)
time_logs = frappe.db.sql("""select * from `tabTime Log`
where docstatus < 2 %s order by owner asc""" % (conditions, ), filters, as_dict=1)
if time_logs:
users = [time_logs[0].owner]
data = []
total_hours = total_employee_hours = count = 0
for tl in time_logs:
if tl.owner not in users:
users.append(tl.owner)
data.append(["", "", "", "Total", total_employee_hours, "", "", "", "", ""])
total_employee_hours = 0
data.append([tl.name, user_map[tl.owner], tl.from_time, tl.to_time, tl.hours,
tl.activity_type, tl.task, task_map.get(tl.task), tl.project, tl.status])
count += 1
total_hours += flt(tl.hours)
total_employee_hours += flt(tl.hours)
if count == len(time_logs):
data.append(["", "", "", "Total Hours", total_employee_hours, "", "", "", "", ""])
if total_hours:
data.append(["", "", "", "Grand Total", total_hours, "", "", "", "", ""])
return columns, data
def get_user_map():
users = frappe.db.sql("""select name,
concat(first_name, if(last_name, concat(' ', last_name), '')) as fullname
from tabUser""", as_dict=1)
user_map = {}
for p in users:
user_map.setdefault(p.name, []).append(p.fullname)
return user_map
def get_task_map():
tasks = frappe.db.sql("""select name, subject from tabTask""", as_dict=1)
task_map = {}
for t in tasks:
task_map.setdefault(t.name, []).append(t.subject)
return task_map
def build_conditions(filters):
conditions = ""
if filters.get("from_date"):
conditions += " and from_time >= timestamp(%(from_date)s, %(from_time)s)"
if filters.get("to_date"):
conditions += " and to_time <= timestamp(%(to_date)s, %(to_time)s)"
from frappe.desk.reportview import build_match_conditions
match_conditions = build_match_conditions("Time Log")
if match_conditions:
conditions += " and %s" % match_conditions
return conditions
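# Illustrative note (not in the original report): with filters such as
# {"from_date": "2013-01-01", "to_date": "2013-01-31"} (plus the from_time /
# to_time values injected in execute()), build_conditions returns roughly
#
#     " and from_time >= timestamp(%(from_date)s, %(from_time)s)"
#     " and to_time <= timestamp(%(to_date)s, %(to_time)s)"
#
# followed by any permission match conditions; the %(...)s placeholders are
# bound by frappe.db.sql in execute() from the same filters dict.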
| agpl-3.0 |
nharraud/b2share | invenio/legacy/websubmit/admincli.py | 13 | 21347 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebSubmitAdmin CLI tool.

Usage: websubmitadmin [options]
Options:
-v, --verbose Verbose level (0=min, 2=default, 3=max).
-h, --help Prints this help
-d, --dump=DOCTYPE Dump given DOCTYPE from database
-c, --clean={y|n} Create dump that includes lines to remove
submission from database before
insertion (`y', default) or not (`n'). Default 'y'
-n, --no-fail-insert Create dump that does not fail when inserting
duplicate rows
-f, --diff=DOCTYPE Diff given DOCTYPE from database with standard input
-i, --ignore={d|o|p} Ignore some differences (d=date, o=order, p=page). Use with --diff
-m, --method=METHOD Type of dumps: NAMES (default) or RELATIONS:
- NAMES: includes functions and elements (including
definitions) with a name starting with doctype,
even if not used by the submission. Might then miss
functions and elements (mostly ``generic'' ones) and
add some unwanted elements.
- RELATIONS: include all functions and elements used
by the submission. Might leave aside
elements that are defined, but not
used.
Dump submission:
Eg: websubmitadmin --dump=DEMOART > DEMOART_db_dump.sql
Dump submission including all used functions and elements definitions:
Eg: websubmitadmin --dump=DEMOART -m relations > DEMOART_db_dump.sql
Diff submission with given dump:
Eg: websubmitadmin --diff=DEMOART < DEMOART_db_dump.sql
Diff between latest version in 'master' branch of your Git repo, with
version in database:
Eg: git show master:websubmit/DEMOART_db_dump.sql | ../websubmitadmin --diff=DEMOART | less -S
Diff between CVS version and submission in database, ignoring dates
and ordering of submission fields on the page:
Eg: cvs update -p DEMOART_db_dump.sql | ./websubmitadmin -i d,o --diff=DEMOART | less -S
"""
__revision__ = "$Id$"
import os
import sys
import getopt
import difflib
import re
import time
import tempfile
from MySQLdb.converters import conversions
from MySQLdb import escape, escape_string
from invenio.config import CFG_PREFIX, CFG_TMPDIR
from invenio.legacy.dbquery import run_sql
from invenio.utils.shell import run_shell_command
CFG_WEBSUBMIT_DUMPER_DEFAULT_METHOD = "NAMES"
CFG_WEBSUBMIT_DUMPER_DB_SCHEMA_VERSION = 1
def dump_submission(doctype, method=None, include_cleaning=True,
ignore_duplicate_insert=False):
"""Returns a .sql dump of submission with given doctype"""
def build_table_dump(table_name, rows_with_desc, ignore_duplicate_insert):
"Build a dump-like output from the given table and rows"
table_dump = ''
for row in rows_with_desc[0]:
table_dump += 'INSERT%s INTO %s VALUES (%s);\n' % \
(ignore_duplicate_insert and ' IGNORE' or '',
table_name,
','.join([escape(column, conversions) for column in row]))
return table_dump
if not method:
method = CFG_WEBSUBMIT_DUMPER_DEFAULT_METHOD
dump_header = "-- %s dump %s v%i\n" % (doctype,
time.strftime("%Y-%m-%d %H:%M:%S"),
CFG_WEBSUBMIT_DUMPER_DB_SCHEMA_VERSION)
if method == "NAMES":
dump_header += "-- Extra:NAMES (the following dump contains rows in sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables which are not specific to this submission, but that include keyword %s)\n" % doctype
elif method == "RELATIONS":
dump_header += "-- Extra:RELATIONS (the following dump contains rows in sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables that are not specific to doctype %s\n" % doctype
else:
dump_header += "-- Extra:None (the following dump only has rows specific to submission %s i.e. does not contains rows from sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables\n" % doctype
if include_cleaning:
if method == 'NAMES':
dump_header += """
DELETE FROM sbmFUNDESC WHERE function LIKE '%(doctype)s%%';
DELETE FROM sbmFIELD WHERE subname LIKE '%%%(doctype)s';
DELETE FROM sbmFIELDDESC WHERE name LIKE '%(doctype)s%%';
DELETE FROM sbmALLFUNCDESCR WHERE function LIKE '%(doctype)s%%';
""" % {'doctype': escape_string(doctype)}
elif method == "RELATIONS":
dump_header += """
DELETE sbmALLFUNCDESCR.* FROM sbmALLFUNCDESCR, sbmFUNCTIONS WHERE sbmALLFUNCDESCR.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype='%(doctype)s';
DELETE sbmFUNDESC.* FROM sbmFUNDESC, sbmFUNCTIONS WHERE sbmFUNDESC.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype='%(doctype)s';
DELETE sbmFIELDDESC.* FROM sbmFIELDDESC, sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.fidesc=sbmFIELDDESC.name AND sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname='%(doctype)s';
DELETE sbmFIELD.* FROM sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname='%(doctype)s';
""" % {'doctype': escape_string(doctype)}
dump_header += """DELETE FROM sbmDOCTYPE WHERE sdocname='%(doctype)s';
DELETE FROM sbmCATEGORIES WHERE doctype ='%(doctype)s';
DELETE FROM sbmFUNCTIONS WHERE doctype='%(doctype)s';
DELETE FROM sbmIMPLEMENT WHERE docname='%(doctype)s';
DELETE FROM sbmPARAMETERS WHERE doctype='%(doctype)s';
""" % {'doctype': escape_string(doctype)}
dump_output = ''
res = run_sql('SELECT * FROM sbmDOCTYPE WHERE sdocname=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmDOCTYPE', res, ignore_duplicate_insert)
res = run_sql('SELECT * FROM sbmCATEGORIES WHERE doctype=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmCATEGORIES', res, ignore_duplicate_insert)
# res = run_sql("SELECT * FROM sbmFIELD WHERE subname like '%%%s'" % (escape_string(doctype),), with_desc=1)
# dump_output += build_table_dump('sbmFIELD', res)
# res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name like '%s%%'" % (escape_string(doctype),), with_desc=1)
# dump_output += build_table_dump('sbmFIELDDESC', res)
res = run_sql('SELECT * FROM sbmFUNCTIONS WHERE doctype=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmFUNCTIONS', res, ignore_duplicate_insert)
res = run_sql('SELECT * FROM sbmIMPLEMENT WHERE docname=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmIMPLEMENT', res, ignore_duplicate_insert)
res = run_sql('SELECT * FROM sbmPARAMETERS WHERE doctype=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmPARAMETERS', res, ignore_duplicate_insert)
if method == "NAMES":
res = run_sql("SELECT * FROM sbmALLFUNCDESCR WHERE function LIKE '%s%%'" % (escape_string(doctype),), with_desc=1)
dump_output += build_table_dump('sbmALLFUNCDESCR', res, ignore_duplicate_insert)
res = run_sql("SELECT * FROM sbmFUNDESC WHERE function LIKE '%s%%'" % (escape_string(doctype),), with_desc=1)
dump_output += build_table_dump('sbmFUNDESC', res, ignore_duplicate_insert)
res = run_sql("SELECT * FROM sbmFIELD WHERE subname LIKE '%%%s'" % (escape_string(doctype),), with_desc=1)
dump_output += build_table_dump('sbmFIELD', res, ignore_duplicate_insert)
res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name LIKE '%s%%'" % (escape_string(doctype),), with_desc=1)
dump_output += build_table_dump('sbmFIELDDESC', res, ignore_duplicate_insert)
elif method == "RELATIONS":
res = run_sql("SELECT DISTINCT sbmALLFUNCDESCR.* FROM sbmALLFUNCDESCR, sbmFUNCTIONS WHERE sbmALLFUNCDESCR.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype=%s", \
(doctype,), with_desc=1)
dump_output += build_table_dump('sbmALLFUNCDESCR', res, ignore_duplicate_insert)
res = run_sql("SELECT DISTINCT sbmFUNDESC.* FROM sbmFUNDESC, sbmFUNCTIONS WHERE sbmFUNDESC.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype=%s", \
(doctype,), with_desc=1)
dump_output += build_table_dump('sbmFUNDESC', res, ignore_duplicate_insert)
res = run_sql("SELECT DISTINCT sbmFIELD.* FROM sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", \
(doctype,), with_desc=1)
dump_output += build_table_dump('sbmFIELD', res, ignore_duplicate_insert)
# check:
res = run_sql("SELECT DISTINCT sbmFIELDDESC.* FROM sbmFIELDDESC, sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.fidesc=sbmFIELDDESC.name AND sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", \
(doctype,), with_desc=1)
#res = run_sql("SELECT DISTINCT sbmFIELDDESC.* FROM sbmFIELDDESC, sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.fidesc=sbmFIELDDESC.name AND sbmFIELDDESC.name=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", \
# (doctype,), with_desc=1)
dump_output += build_table_dump('sbmFIELDDESC', res, ignore_duplicate_insert)
# Sort
dump_output_lines = dump_output.splitlines()
dump_output_lines.sort()
return dump_header + '\n'.join(dump_output_lines)
def remove_submission(doctype, method=CFG_WEBSUBMIT_DUMPER_DEFAULT_METHOD):
"Remove submission from database"
# NOT TESTED
if method == "NAMES":
run_sql("DELETE FROM sbmFUNDESC WHERE function LIKE '%s%%'" % (doctype,))
run_sql("DELETE FROM sbmFIELD WHERE subname LIKE '%%%s'" % (doctype,))
run_sql("DELETE FROM sbmFIELDDESC WHERE name LIKE '%s%%'" % (doctype,))
run_sql("DELETE FROM sbmALLFUNCDESCR WHERE function LIKE '%s%%'" % (doctype,))
elif method == "RELATIONS":
run_sql("DELETE sbmALLFUNCDESCR.* FROM sbmALLFUNCDESCR, sbmFUNCTIONS WHERE sbmALLFUNCDESCR.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype=%s", (doctype,))
run_sql("DELETE sbmFUNDESC.* FROM sbmFUNDESC, sbmFUNCTIONS WHERE sbmFUNDESC.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype=%s", (doctype,))
run_sql("DELETE sbmFIELDDESC.* FROM sbmFIELDDESC, sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.fidesc=sbmFIELDDESC.name AND sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", (doctype,))
run_sql("DELETE sbmFIELD.* FROM sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", (doctype,))
run_sql("DELETE FROM sbmDOCTYPE WHERE sdocname=%s", (doctype,))
run_sql("DELETE FROM sbmCATEGORIES WHERE doctype=%s", (doctype,))
run_sql("DELETE FROM sbmFUNCTIONS WHERE doctype=%s", (doctype,))
run_sql("DELETE FROM sbmIMPLEMENT WHERE docname=%s", (doctype,))
run_sql("DELETE FROM sbmPARAMETERS WHERE doctype=%s", (doctype,))
re_method_pattern = re.compile(r"-- Extra:(?P<method>\S*)\s")
def load_submission(doctype, dump, method=None):
"Insert submission into database. Return tuple(error code, msg)"
# NOT TESTED
messages = []
def guess_dump_method(dump):
"""Guess which method was used to dump this file (i.e. if it contains all the submission rows or not)"""
match_obj = re_method_pattern.search(dump)
if match_obj:
return match_obj.group('method')
else:
return None
def guess_dump_has_delete_statements(dump):
"""Guess if given submission dump already contain delete statements"""
return "DELETE FROM sbmDOCTYPE WHERE sdocname".lower() in dump.lower()
if not method:
method = guess_dump_method(dump)
if method is None:
method = CFG_WEBSUBMIT_DUMPER_DEFAULT_METHOD
messages.append("WARNING: method could not be guessed. Using method %s" % method)
else:
messages.append("Used method %s to load data" % method)
(dump_code, dump_path) = tempfile.mkstemp(prefix=doctype, dir=CFG_TMPDIR)
dump_fd = open(dump_path, 'w')
dump_fd.write(dump)
dump_fd.close()
# We need to remove the submission. But let's create a backup first.
submission_backup = dump_submission(doctype, method)
submission_backup_path = "%s_db_dump%s.sql" % (doctype, time.strftime("%Y%m%d_%H%M%S"))
fd = open(os.path.join(CFG_TMPDIR, submission_backup_path), "w")
fd.write(submission_backup)
fd.close()
if not guess_dump_has_delete_statements(dump):
remove_submission(doctype, method)
# Load the dump
(exit_code, out_msg, err_msg) = run_shell_command("%s/bin/dbexec < %s", (CFG_PREFIX, os.path.abspath(dump_path)))
if exit_code:
messages.append("ERROR: failed to load submission:" + err_msg)
return (1, messages)
messages.append("Submission loaded. Previous submission saved to %s" % os.path.join(CFG_TMPDIR, submission_backup_path))
return (0, messages)
def diff_submission(submission1_dump, submission2_dump, verbose=2,
ignore_dates=False, ignore_positions=False, ignore_pages=False):
"Output diff between submissions"
def clean_line(line, ignore_dates, ignore_positions, ignore_pages):
"Clean one line of the submission"
updated_line = line
if ignore_dates:
if line.startswith('INSERT INTO sbmFIELD VALUES'):
args = updated_line.split(",")
args[-3] = ''
args[-4] = ''
updated_line = ','.join(args)
elif line.startswith('INSERT INTO sbmFIELDDESC VALUES'):
args = updated_line.split(",")
args[-4] = ''
args[-5] = ''
updated_line = ','.join(args)
elif line.startswith('INSERT INTO sbmIMPLEMENT VALUES '):
args = updated_line.split(",")
args[-6] = ''
args[-7] = ''
updated_line = ','.join(args)
if ignore_positions:
if line.startswith('INSERT INTO sbmFIELD VALUES'):
args = updated_line.split(",")
args[2] = ''
updated_line = ','.join(args)
if ignore_pages:
if line.startswith('INSERT INTO sbmFIELD VALUES'):
args = updated_line.split(",")
args[1] = ''
updated_line = ','.join(args)
if line.startswith('INSERT INTO sbmIMPLEMENT VALUES '):
args = updated_line.split(",")
args[4] = ''
updated_line = ','.join(args)
return updated_line
file1 = [line.strip() for line in submission1_dump.splitlines() if line]
file2 = [line.strip() for line in submission2_dump.splitlines() if line]
file1 = [clean_line(line, ignore_dates, ignore_positions, ignore_pages) for line in file1]
file2 = [clean_line(line, ignore_dates, ignore_positions, ignore_pages) for line in file2]
file1.sort()
file2.sort()
d = difflib.Differ()
result = d.compare(file2, file1)
result = [line for line in result if not line.startswith(' ')]
if verbose > 1:
result = [line.rstrip().replace('? ', ' ', 1) for line in result]
else:
result = [line for line in result if not line.startswith('? ')]
return '\n'.join(result)
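# Illustrative note (not in the original module): diff_submission keeps only
# the difflib.Differ lines that mark a change, so its output looks like
#
#     + INSERT INTO sbmIMPLEMENT VALUES (...);   <- only in the first dump
#     - INSERT INTO sbmIMPLEMENT VALUES (...);   <- only in the second dump
#
# Because the call above is d.compare(file2, file1), '+' lines belong to
# submission1_dump (the database side in main()) and '-' lines to the dump
# read from stdin.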
def usage(exitcode=1, msg=""):
"Print usage"
print(__doc__)
print(msg)
sys.exit(exitcode)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hv:i:d:l:f:r:m:c:n",
["help",
"verbose=",
"ignore=",
"dump=",
"load=",
"diff=",
"remove=",
"method=",
"clean=",
"no-fail-insert",
"yes-i-know"])
except getopt.GetoptError as err:
print(err)
usage(1)
_ignore_date = False
_ignore_position = False
_ignore_page = False
_doctype = None
_verbose = 2
_action = None
_method = None
_clean = True
_no_fail_insert = False
_yes_i_know = False
try:
for opt in opts:
if opt[0] in ["-h", "--help"]:
usage()
elif opt[0] in ["-v", "--verbose"]:
_verbose = opt[1]
elif opt[0] in ["-m", "--method"]:
_method = opt[1].upper()
if not _method in ["NAMES", "RELATIONS"]:
usage("Parameter --method must be 'NAMES' or 'RELATIONS'")
elif opt[0] in ["-c", "--clean"]:
_clean = opt[1].lower()
if not _clean in ["y", "n"]:
usage("Parameter --clean must be 'y' or 'n'")
_clean = _clean == 'y' and True or False
elif opt[0] in ["-n", "--no-fail-insert"]:
_no_fail_insert = True
elif opt[0] in ["--yes-i-know"]:
_yes_i_know = True
elif opt[0] in ["-i", "--ignore"]:
ignore = opt[1].split(',')
if 'd' in ignore:
_ignore_date = True
if 'p' in ignore:
_ignore_page = True
if 'o' in ignore:
_ignore_position = True
elif opt[0] in ["-d", "--dump"]:
if _action:
usage("Choose only one action among --dump, --load, --diff and --remove")
_action = 'dump'
_doctype = opt[1]
elif opt[0] in ["-l", "--load"]:
if _action:
usage("Choose only one action among --dump, --load, --diff and --remove")
_action = 'load'
_doctype = opt[1]
elif opt[0] in ["-f", "--diff"]:
if _action:
usage("Choose only one action among --dump, --load, --diff and --remove")
_action = 'diff'
_doctype = opt[1]
elif opt[0] in ["-r", "--remove"]:
if _action:
usage("Choose only one action among --dump, --load, --diff and --remove")
_action = 'remove'
_doctype = opt[1]
except StandardError as _exception:
print(_exception)
usage(1)
if not _action:
usage(1, 'You must specify an action among --dump, --load, --diff and --remove')
if not _doctype:
usage(1, 'You must specify a doctype')
if _action == 'dump':
print(dump_submission(doctype=_doctype,
method=_method,
include_cleaning=_clean,
ignore_duplicate_insert=_no_fail_insert))
elif _action == 'load':
if _yes_i_know:
input_stream = sys.stdin.read()
(code, messages) = load_submission(doctype=_doctype, dump=input_stream, method=_method)
print('\n'.join(messages))
sys.exit(code)
else:
print("Loading submission dumps using this tool is experimental. Please use 'dbexec' instead, or run with '--yes-i-know' if you really want to proceed.")
sys.exit(1)
elif _action == 'diff':
if not sys.stdin.isatty():
input_stream = sys.stdin.read()
dump1 = dump_submission(doctype=_doctype,
method=_method,
include_cleaning=_clean,
ignore_duplicate_insert=_no_fail_insert)
print(diff_submission(dump1, input_stream, _verbose, _ignore_date, _ignore_position, _ignore_page))
elif _action == 'remove':
if not _method:
usage(1, 'You must specify option --method')
if _yes_i_know:
remove_submission(doctype=_doctype, method=_method)
else:
print("Removing submissions using this tool is experimental. Run with '--yes-i-know' if you really want to proceed.")
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-2.0 |
lucasrangit/twitter-winner | twitter-winner/httplib2/test/miniserver.py | 303 | 3327 | import logging
import os
import select
import SimpleHTTPServer
import socket
import SocketServer
import threading
HERE = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class ThisDirHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
path = path.split('?', 1)[0].split('#', 1)[0]
return os.path.join(HERE, *filter(None, path.split('/')))
def log_message(self, s, *args):
# output via logging so nose can catch it
logger.info(s, *args)
class ShutdownServer(SocketServer.TCPServer):
"""Mixin that allows serve_forever to be shut down.
The methods in this mixin are backported from SocketServer.py in the Python
2.6.4 standard library. The mixin is unnecessary in 2.6 and later, when
BaseServer supports the shutdown method directly.
"""
def __init__(self, *args, **kwargs):
SocketServer.TCPServer.__init__(self, *args, **kwargs)
self.__is_shut_down = threading.Event()
self.__serving = False
def serve_forever(self, poll_interval=0.1):
"""Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
r, w, e = select.select([self.socket], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
"""Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will deadlock.
"""
self.__serving = False
self.__is_shut_down.wait()
def handle_request(self):
"""Handle one request, possibly blocking.
Respects self.timeout.
"""
# Support people who used socket.settimeout() to escape
# handle_request before self.timeout was available.
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
fd_sets = select.select([self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
self._handle_request_noblock()
def _handle_request_noblock(self):
"""Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def start_server(handler):
httpd = ShutdownServer(("", 0), handler)
threading.Thread(target=httpd.serve_forever).start()
_, port = httpd.socket.getsockname()
return httpd, port
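# Hedged usage sketch (not part of the original helper): a test would
# typically do
#
#     httpd, port = start_server(ThisDirHandler)
#     try:
#         pass  # e.g. fetch 'http://localhost:%d/some_file' % port
#     finally:
#         httpd.shutdown()  # unblocks the serve_forever thread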
| mit |
kennedyshead/home-assistant | homeassistant/components/syslog/notify.py | 14 | 2401 | """Syslog notification service."""
import syslog
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
CONF_FACILITY = "facility"
CONF_OPTION = "option"
CONF_PRIORITY = "priority"
SYSLOG_FACILITY = {
"kernel": "LOG_KERN",
"user": "LOG_USER",
"mail": "LOG_MAIL",
"daemon": "LOG_DAEMON",
"auth": "LOG_KERN",
"LPR": "LOG_LPR",
"news": "LOG_NEWS",
"uucp": "LOG_UUCP",
"cron": "LOG_CRON",
"syslog": "LOG_SYSLOG",
"local0": "LOG_LOCAL0",
"local1": "LOG_LOCAL1",
"local2": "LOG_LOCAL2",
"local3": "LOG_LOCAL3",
"local4": "LOG_LOCAL4",
"local5": "LOG_LOCAL5",
"local6": "LOG_LOCAL6",
"local7": "LOG_LOCAL7",
}
SYSLOG_OPTION = {
"pid": "LOG_PID",
"cons": "LOG_CONS",
"ndelay": "LOG_NDELAY",
"nowait": "LOG_NOWAIT",
"perror": "LOG_PERROR",
}
SYSLOG_PRIORITY = {
5: "LOG_EMERG",
4: "LOG_ALERT",
3: "LOG_CRIT",
2: "LOG_ERR",
1: "LOG_WARNING",
0: "LOG_NOTICE",
-1: "LOG_INFO",
-2: "LOG_DEBUG",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_FACILITY, default="syslog"): vol.In(SYSLOG_FACILITY.keys()),
vol.Optional(CONF_OPTION, default="pid"): vol.In(SYSLOG_OPTION.keys()),
vol.Optional(CONF_PRIORITY, default=-1): vol.In(SYSLOG_PRIORITY.keys()),
}
)
def get_service(hass, config, discovery_info=None):
"""Get the syslog notification service."""
facility = getattr(syslog, SYSLOG_FACILITY[config.get(CONF_FACILITY)])
option = getattr(syslog, SYSLOG_OPTION[config.get(CONF_OPTION)])
priority = getattr(syslog, SYSLOG_PRIORITY[config.get(CONF_PRIORITY)])
return SyslogNotificationService(facility, option, priority)
class SyslogNotificationService(BaseNotificationService):
"""Implement the syslog notification service."""
def __init__(self, facility, option, priority):
"""Initialize the service."""
self._facility = facility
self._option = option
self._priority = priority
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
syslog.openlog(title, self._option, self._facility)
syslog.syslog(self._priority, message)
syslog.closelog()
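# Illustrative configuration sketch (not part of this module), matching
# PLATFORM_SCHEMA above; keys and defaults come from CONF_FACILITY,
# CONF_OPTION and CONF_PRIORITY:
#
#     notify:
#       - platform: syslog
#         facility: local0   # any key of SYSLOG_FACILITY (default: syslog)
#         option: pid        # any key of SYSLOG_OPTION   (default: pid)
#         priority: 1        # any key of SYSLOG_PRIORITY (default: -1)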
| apache-2.0 |
matrix-org/synapse | synapse/types.py | 1 | 24839 | # Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import re
import string
from collections import namedtuple
from typing import (
TYPE_CHECKING,
Any,
Dict,
Mapping,
MutableMapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import attr
from signedjson.key import decode_verify_key_bytes
from unpaddedbase64 import decode_base64
from zope.interface import Interface
from twisted.internet.interfaces import (
IReactorCore,
IReactorPluggableNameResolver,
IReactorTCP,
IReactorTime,
)
from synapse.api.errors import Codes, SynapseError
from synapse.util.stringutils import parse_and_validate_server_name
if TYPE_CHECKING:
from synapse.appservice.api import ApplicationService
from synapse.storage.databases.main import DataStore
# Define a state map type from type/state_key to T (usually an event ID or
# event)
T = TypeVar("T")
StateKey = Tuple[str, str]
StateMap = Mapping[StateKey, T]
MutableStateMap = MutableMapping[StateKey, T]
# the type of a JSON-serialisable dict. This could be made stronger, but it will
# do for now.
JsonDict = Dict[str, Any]
# Note that this seems to require inheriting *directly* from Interface in order
# for mypy-zope to realize it is an interface.
class ISynapseReactor(
IReactorTCP, IReactorPluggableNameResolver, IReactorTime, IReactorCore, Interface
):
"""The interfaces necessary for Synapse to function."""
@attr.s(frozen=True, slots=True)
class Requester:
"""
Represents the user making a request
Attributes:
user: id of the user making the request
access_token_id: *ID* of the access token used for this
request, or None if it came via the appservice API or similar
is_guest: True if the user making this request is a guest user
shadow_banned: True if the user making this request has been shadow-banned.
device_id: device_id which was set at authentication time
app_service: the AS requesting on behalf of the user
authenticated_entity: The entity that authenticated when making the request.
This is different to the user_id when an admin user or the server is
"puppeting" the user.
"""
user = attr.ib(type="UserID")
access_token_id = attr.ib(type=Optional[int])
is_guest = attr.ib(type=bool)
shadow_banned = attr.ib(type=bool)
device_id = attr.ib(type=Optional[str])
app_service = attr.ib(type=Optional["ApplicationService"])
authenticated_entity = attr.ib(type=str)
def serialize(self):
"""Converts self to a type that can be serialized as JSON, and then
deserialized by `deserialize`
Returns:
dict
"""
return {
"user_id": self.user.to_string(),
"access_token_id": self.access_token_id,
"is_guest": self.is_guest,
"shadow_banned": self.shadow_banned,
"device_id": self.device_id,
"app_server_id": self.app_service.id if self.app_service else None,
"authenticated_entity": self.authenticated_entity,
}
@staticmethod
def deserialize(store, input):
"""Converts a dict that was produced by `serialize` back into a
Requester.
Args:
store (DataStore): Used to convert AS ID to AS object
input (dict): A dict produced by `serialize`
Returns:
Requester
"""
appservice = None
if input["app_server_id"]:
appservice = store.get_app_service_by_id(input["app_server_id"])
return Requester(
user=UserID.from_string(input["user_id"]),
access_token_id=input["access_token_id"],
is_guest=input["is_guest"],
shadow_banned=input["shadow_banned"],
device_id=input["device_id"],
app_service=appservice,
authenticated_entity=input["authenticated_entity"],
)
def create_requester(
user_id: Union[str, "UserID"],
access_token_id: Optional[int] = None,
is_guest: bool = False,
shadow_banned: bool = False,
device_id: Optional[str] = None,
app_service: Optional["ApplicationService"] = None,
authenticated_entity: Optional[str] = None,
) -> Requester:
"""
Create a new ``Requester`` object
Args:
user_id: id of the user making the request
access_token_id: *ID* of the access token used for this
request, or None if it came via the appservice API or similar
is_guest: True if the user making this request is a guest user
shadow_banned: True if the user making this request is shadow-banned.
device_id: device_id which was set at authentication time
app_service: the AS requesting on behalf of the user
authenticated_entity: The entity that authenticated when making the request.
This is different to the user_id when an admin user or the server is
"puppeting" the user.
Returns:
Requester
"""
if not isinstance(user_id, UserID):
user_id = UserID.from_string(user_id)
if authenticated_entity is None:
authenticated_entity = user_id.to_string()
return Requester(
user_id,
access_token_id,
is_guest,
shadow_banned,
device_id,
app_service,
authenticated_entity,
)
def get_domain_from_id(string):
idx = string.find(":")
if idx == -1:
raise SynapseError(400, "Invalid ID: %r" % (string,))
return string[idx + 1 :]
def get_localpart_from_id(string):
idx = string.find(":")
if idx == -1:
raise SynapseError(400, "Invalid ID: %r" % (string,))
return string[1:idx]
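# A hedged illustration (not part of the original module) of how the two
# helpers above split a Matrix ID around its first colon; the ID
# "@alice:example.com" is an assumed example value:
#
#   get_domain_from_id("@alice:example.com")     # -> "example.com"
#   get_localpart_from_id("@alice:example.com")  # -> "alice" (sigil dropped)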
DS = TypeVar("DS", bound="DomainSpecificString")
@attr.s(slots=True, frozen=True, repr=False)
class DomainSpecificString(metaclass=abc.ABCMeta):
"""Common base class among ID/name strings that have a local part and a
domain name, prefixed with a sigil.
Has the fields:
'localpart' : The local part of the name (without the leading sigil)
'domain' : The domain part of the name
"""
SIGIL = abc.abstractproperty() # type: str # type: ignore
localpart = attr.ib(type=str)
domain = attr.ib(type=str)
# Because this class is a namedtuple of strings and booleans, it is deeply
# immutable.
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
@classmethod
def from_string(cls: Type[DS], s: str) -> DS:
"""Parse the string given by 's' into a structure object."""
if len(s) < 1 or s[0:1] != cls.SIGIL:
raise SynapseError(
400,
"Expected %s string to start with '%s'" % (cls.__name__, cls.SIGIL),
Codes.INVALID_PARAM,
)
parts = s[1:].split(":", 1)
if len(parts) != 2:
raise SynapseError(
400,
"Expected %s of the form '%slocalname:domain'"
% (cls.__name__, cls.SIGIL),
Codes.INVALID_PARAM,
)
domain = parts[1]
# This code will need changing if we want to support multiple domain
# names on one HS
return cls(localpart=parts[0], domain=domain)
def to_string(self) -> str:
"""Return a string encoding the fields of the structure object."""
return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain)
@classmethod
def is_valid(cls: Type[DS], s: str) -> bool:
"""Parses the input string and attempts to ensure it is valid."""
try:
obj = cls.from_string(s)
# Apply additional validation to the domain. This is only done
# during is_valid (and not part of from_string) since it is
# possible for invalid data to exist in room-state, etc.
parse_and_validate_server_name(obj.domain)
return True
except Exception:
return False
__repr__ = to_string
@attr.s(slots=True, frozen=True, repr=False)
class UserID(DomainSpecificString):
"""Structure representing a user ID."""
SIGIL = "@"
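# A hedged sketch of the DomainSpecificString round-trip using the UserID
# subclass above; "@alice:example.com" is an assumed example ID:
#
#   user = UserID.from_string("@alice:example.com")
#   (user.localpart, user.domain)  # -> ("alice", "example.com")
#   user.to_string()               # -> "@alice:example.com"
#   UserID.is_valid("alice")       # -> False (missing the "@" sigil)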
@attr.s(slots=True, frozen=True, repr=False)
class RoomAlias(DomainSpecificString):
"""Structure representing a room name."""
SIGIL = "#"
@attr.s(slots=True, frozen=True, repr=False)
class RoomID(DomainSpecificString):
"""Structure representing a room id."""
SIGIL = "!"
@attr.s(slots=True, frozen=True, repr=False)
class EventID(DomainSpecificString):
"""Structure representing an event id."""
SIGIL = "$"
@attr.s(slots=True, frozen=True, repr=False)
class GroupID(DomainSpecificString):
"""Structure representing a group ID."""
SIGIL = "+"
@classmethod
def from_string(cls: Type[DS], s: str) -> DS:
group_id = super().from_string(s) # type: DS # type: ignore
if not group_id.localpart:
raise SynapseError(400, "Group ID cannot be empty", Codes.INVALID_PARAM)
if contains_invalid_mxid_characters(group_id.localpart):
raise SynapseError(
400,
"Group ID can only contain characters a-z, 0-9, or '=_-./'",
Codes.INVALID_PARAM,
)
return group_id
mxid_localpart_allowed_characters = set(
"_-./=" + string.ascii_lowercase + string.digits
)
def contains_invalid_mxid_characters(localpart: str) -> bool:
"""Check for characters not allowed in an mxid or groupid localpart
Args:
localpart: the localpart to be checked
Returns:
True if there are any naughty characters
"""
return any(c not in mxid_localpart_allowed_characters for c in localpart)
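# A hedged example of the localpart check above ("alice" and "Alice!" are
# assumed inputs, not values from the original module):
#
#   contains_invalid_mxid_characters("alice")   # -> False
#   contains_invalid_mxid_characters("Alice!")  # -> True (uppercase and '!')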
UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
# the following is a pattern which matches '=', and bytes which are not allowed in a mxid
# localpart.
#
# It works by:
# * building a string containing the allowed characters (excluding '=')
# * escaping every special character with a backslash (to stop '-' being interpreted as a
# range operator)
# * wrapping it in a '[^...]' regex
# * converting the whole lot to a 'bytes' sequence, so that we can use it to match
# bytes rather than strings
#
NON_MXID_CHARACTER_PATTERN = re.compile(
("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters - {"="})),)).encode(
"ascii"
)
)
def map_username_to_mxid_localpart(
username: Union[str, bytes], case_sensitive: bool = False
) -> str:
"""Map a username onto a string suitable for a MXID
This follows the algorithm laid out at
https://matrix.org/docs/spec/appendices.html#mapping-from-other-character-sets.
Args:
username: username to be mapped
case_sensitive: true if TEST and test should be mapped
onto different mxids
Returns:
unicode: string suitable for a mxid localpart
"""
if not isinstance(username, bytes):
username = username.encode("utf-8")
# first we sort out upper-case characters
if case_sensitive:
def f1(m):
return b"_" + m.group().lower()
username = UPPER_CASE_PATTERN.sub(f1, username)
else:
username = username.lower()
# then we sort out non-ascii characters
def f2(m):
g = m.group()[0]
if isinstance(g, str):
# on python 2, we need to do a ord(). On python 3, the
# byte itself will do.
g = ord(g)
return b"=%02x" % (g,)
username = NON_MXID_CHARACTER_PATTERN.sub(f2, username)
# we also do the =-escaping to mxids starting with an underscore.
username = re.sub(b"^_", b"=5f", username)
# we should now only have ascii bytes left, so can decode back to a
# unicode.
return username.decode("ascii")
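# A hedged illustration of the mapping algorithm above; the usernames are
# assumed examples, and the outputs follow from the code as written:
#
#   map_username_to_mxid_localpart("Test")                       # -> "test"
#   map_username_to_mxid_localpart("Test", case_sensitive=True)  # -> "=5ftest"
#       ("T" becomes "_t", and the leading "_" is then escaped as "=5f")
#   map_username_to_mxid_localpart(u"caf\xe9")                   # -> "caf=c3=a9"
#       (each non-ascii UTF-8 byte is escaped as "=%02x")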
@attr.s(frozen=True, slots=True, order=False)
class RoomStreamToken:
"""Tokens are positions between events. The token "s1" comes after event 1.
            s0    s1
            |     |
        [0] V [1] V [2]
Tokens can either be a point in the live event stream or a cursor going
through historic events.
When traversing the live event stream events are ordered by when they
arrived at the homeserver.
When traversing historic events the events are ordered by their depth in
the event graph "topological_ordering" and then by when they arrived at the
homeserver "stream_ordering".
Live tokens start with an "s" followed by the "stream_ordering" id of the
event it comes after. Historic tokens start with a "t" followed by the
"topological_ordering" id of the event it comes after, followed by "-",
followed by the "stream_ordering" id of the event it comes after.
There is also a third mode for live tokens where the token starts with "m",
which is sometimes used when using sharded event persisters. In this case
the events stream is considered to be a set of streams (one for each writer)
and the token encodes the vector clock of positions of each writer in their
respective streams.
The format of the token in such case is an initial integer min position,
followed by the mapping of instance ID to position separated by '.' and '~':
m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}. ...
The `min_pos` corresponds to the minimum position all writers have persisted
up to, and then only writers that are ahead of that position need to be
encoded. An example token is:
m56~2.58~3.59
    This corresponds to a set of three (or more) writers, where instances 2 and
3 (these are instance IDs that can be looked up in the DB to fetch the more
commonly used instance names) are at positions 58 and 59 respectively, and
all other instances are at position 56.
Note: The `RoomStreamToken` cannot have both a topological part and an
instance map.
"""
topological = attr.ib(
type=Optional[int],
validator=attr.validators.optional(attr.validators.instance_of(int)),
)
stream = attr.ib(type=int, validator=attr.validators.instance_of(int))
instance_map = attr.ib(
type=Dict[str, int],
factory=dict,
validator=attr.validators.deep_mapping(
key_validator=attr.validators.instance_of(str),
value_validator=attr.validators.instance_of(int),
mapping_validator=attr.validators.instance_of(dict),
),
)
def __attrs_post_init__(self):
"""Validates that both `topological` and `instance_map` aren't set."""
if self.instance_map and self.topological:
raise ValueError(
"Cannot set both 'topological' and 'instance_map' on 'RoomStreamToken'."
)
@classmethod
async def parse(cls, store: "DataStore", string: str) -> "RoomStreamToken":
try:
if string[0] == "s":
return cls(topological=None, stream=int(string[1:]))
if string[0] == "t":
parts = string[1:].split("-", 1)
return cls(topological=int(parts[0]), stream=int(parts[1]))
if string[0] == "m":
parts = string[1:].split("~")
stream = int(parts[0])
instance_map = {}
for part in parts[1:]:
key, value = part.split(".")
instance_id = int(key)
pos = int(value)
instance_name = await store.get_name_from_instance_id(instance_id)
instance_map[instance_name] = pos
return cls(
topological=None,
stream=stream,
instance_map=instance_map,
)
except Exception:
pass
raise SynapseError(400, "Invalid token %r" % (string,))
@classmethod
def parse_stream_token(cls, string: str) -> "RoomStreamToken":
try:
if string[0] == "s":
return cls(topological=None, stream=int(string[1:]))
except Exception:
pass
raise SynapseError(400, "Invalid token %r" % (string,))
def copy_and_advance(self, other: "RoomStreamToken") -> "RoomStreamToken":
"""Return a new token such that if an event is after both this token and
        the other token, then it's after the returned token too.
"""
if self.topological or other.topological:
raise Exception("Can't advance topological tokens")
max_stream = max(self.stream, other.stream)
instance_map = {
instance: max(
self.instance_map.get(instance, self.stream),
other.instance_map.get(instance, other.stream),
)
for instance in set(self.instance_map).union(other.instance_map)
}
return RoomStreamToken(None, max_stream, instance_map)
def as_historical_tuple(self) -> Tuple[int, int]:
"""Returns a tuple of `(topological, stream)` for historical tokens.
Raises if not an historical token (i.e. doesn't have a topological part).
"""
if self.topological is None:
raise Exception(
"Cannot call `RoomStreamToken.as_historical_tuple` on live token"
)
return (self.topological, self.stream)
def get_stream_pos_for_instance(self, instance_name: str) -> int:
"""Get the stream position that the given writer was at at this token.
This only makes sense for "live" tokens that may have a vector clock
component, and so asserts that this is a "live" token.
"""
assert self.topological is None
# If we don't have an entry for the instance we can assume that it was
# at `self.stream`.
return self.instance_map.get(instance_name, self.stream)
def get_max_stream_pos(self) -> int:
"""Get the maximum stream position referenced in this token.
        The corresponding "min" position is, by definition, just `self.stream`.
This is used to handle tokens that have non-empty `instance_map`, and so
reference stream positions after the `self.stream` position.
"""
return max(self.instance_map.values(), default=self.stream)
async def to_string(self, store: "DataStore") -> str:
if self.topological is not None:
return "t%d-%d" % (self.topological, self.stream)
elif self.instance_map:
entries = []
for name, pos in self.instance_map.items():
instance_id = await store.get_id_for_instance(name)
entries.append("{}.{}".format(instance_id, pos))
encoded_map = "~".join(entries)
return "m{}~{}".format(self.stream, encoded_map)
else:
return "s%d" % (self.stream,)
@attr.s(slots=True, frozen=True)
class StreamToken:
room_key = attr.ib(
type=RoomStreamToken, validator=attr.validators.instance_of(RoomStreamToken)
)
presence_key = attr.ib(type=int)
typing_key = attr.ib(type=int)
receipt_key = attr.ib(type=int)
account_data_key = attr.ib(type=int)
push_rules_key = attr.ib(type=int)
to_device_key = attr.ib(type=int)
device_list_key = attr.ib(type=int)
groups_key = attr.ib(type=int)
_SEPARATOR = "_"
START = None # type: StreamToken
@classmethod
async def from_string(cls, store: "DataStore", string: str) -> "StreamToken":
try:
keys = string.split(cls._SEPARATOR)
while len(keys) < len(attr.fields(cls)):
# i.e. old token from before receipt_key
keys.append("0")
return cls(
await RoomStreamToken.parse(store, keys[0]), *(int(k) for k in keys[1:])
)
except Exception:
raise SynapseError(400, "Invalid Token")
async def to_string(self, store: "DataStore") -> str:
return self._SEPARATOR.join(
[
await self.room_key.to_string(store),
str(self.presence_key),
str(self.typing_key),
str(self.receipt_key),
str(self.account_data_key),
str(self.push_rules_key),
str(self.to_device_key),
str(self.device_list_key),
str(self.groups_key),
]
)
@property
def room_stream_id(self):
return self.room_key.stream
def copy_and_advance(self, key, new_value) -> "StreamToken":
"""Advance the given key in the token to a new value if and only if the
new value is after the old value.
"""
if key == "room_key":
new_token = self.copy_and_replace(
"room_key", self.room_key.copy_and_advance(new_value)
)
return new_token
new_token = self.copy_and_replace(key, new_value)
new_id = int(getattr(new_token, key))
old_id = int(getattr(self, key))
if old_id < new_id:
return new_token
else:
return self
def copy_and_replace(self, key, new_value) -> "StreamToken":
return attr.evolve(self, **{key: new_value})
StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0)
@attr.s(slots=True, frozen=True)
class PersistedEventPosition:
"""Position of a newly persisted event with instance that persisted it.
This can be used to test whether the event is persisted before or after a
RoomStreamToken.
"""
instance_name = attr.ib(type=str)
stream = attr.ib(type=int)
def persisted_after(self, token: RoomStreamToken) -> bool:
return token.get_stream_pos_for_instance(self.instance_name) < self.stream
def to_room_stream_token(self) -> RoomStreamToken:
"""Converts the position to a room stream token such that events
persisted in the same room after this position will be after the
returned `RoomStreamToken`.
Note: no guarantees are made about ordering w.r.t. events in other
rooms.
"""
# Doing the naive thing satisfies the desired properties described in
# the docstring.
return RoomStreamToken(None, self.stream)
class ThirdPartyInstanceID(
namedtuple("ThirdPartyInstanceID", ("appservice_id", "network_id"))
):
# Deny iteration because it will bite you if you try to create a singleton
# set by:
# users = set(user)
def __iter__(self):
raise ValueError("Attempted to iterate a %s" % (type(self).__name__,))
# Because this class is a namedtuple of strings, it is deeply immutable.
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
@classmethod
def from_string(cls, s):
bits = s.split("|", 2)
if len(bits) != 2:
raise SynapseError(400, "Invalid ID %r" % (s,))
return cls(appservice_id=bits[0], network_id=bits[1])
def to_string(self):
return "%s|%s" % (self.appservice_id, self.network_id)
__str__ = to_string
@classmethod
def create(cls, appservice_id, network_id):
return cls(appservice_id=appservice_id, network_id=network_id)
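# A hedged example of the "appservice_id|network_id" encoding handled above
# ("irc" and "freenode" are assumed values):
#
#   tpid = ThirdPartyInstanceID.from_string("irc|freenode")
#   (tpid.appservice_id, tpid.network_id)  # -> ("irc", "freenode")
#   tpid.to_string()                       # -> "irc|freenode"
#   list(tpid)                             # raises ValueError (iteration denied)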
@attr.s(slots=True)
class ReadReceipt:
"""Information about a read-receipt"""
room_id = attr.ib()
receipt_type = attr.ib()
user_id = attr.ib()
event_ids = attr.ib()
data = attr.ib()
def get_verify_key_from_cross_signing_key(key_info):
"""Get the key ID and signedjson verify key from a cross-signing key dict
Args:
key_info (dict): a cross-signing key dict, which must have a "keys"
property that has exactly one item in it
Returns:
(str, VerifyKey): the key ID and verify key for the cross-signing key
"""
# make sure that exactly one key is provided
if "keys" not in key_info:
raise ValueError("Invalid key")
keys = key_info["keys"]
if len(keys) != 1:
raise ValueError("Invalid key")
# and return that one key
for key_id, key_data in keys.items():
return (key_id, decode_verify_key_bytes(key_id, decode_base64(key_data)))
| apache-2.0 |
alkemics/luigi | luigi/contrib/gcs.py | 5 | 14722 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""luigi bindings for Google Cloud Storage"""
import logging
import io
import mimetypes
import os
import tempfile
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
import luigi.target
from luigi import six
logger = logging.getLogger('luigi-interface')
try:
import httplib2
import oauth2client.client
from googleapiclient import errors
from googleapiclient import discovery
from googleapiclient import http
except ImportError:
logger.warning("Loading GCS module without the python packages googleapiclient & oauth2client. \
This will crash at runtime if GCS functionality is used.")
else:
# Retry transport and file IO errors.
RETRYABLE_ERRORS = (httplib2.HttpLib2Error, IOError)
# Number of times to retry failed downloads.
NUM_RETRIES = 5
# Number of bytes to send/receive in each request.
CHUNKSIZE = 2 * 1024 * 1024
# Mimetype to use if one can't be guessed from the file extension.
DEFAULT_MIMETYPE = 'application/octet-stream'
class InvalidDeleteException(luigi.target.FileSystemException):
pass
class GCSClient(luigi.target.FileSystem):
"""An implementation of a FileSystem over Google Cloud Storage.
There are several ways to use this class. By default it will use the app
default credentials, as described at https://developers.google.com/identity/protocols/application-default-credentials .
Alternatively, you may pass an oauth2client credentials object. e.g. to use a service account::
credentials = oauth2client.client.SignedJwtAssertionCredentials(
'012345678912-ThisIsARandomServiceAccountEmail@developer.gserviceaccount.com',
'These are the contents of the p12 file that came with the service account',
scope='https://www.googleapis.com/auth/devstorage.read_write')
        client = GCSClient(oauth_credentials=credentials)
.. warning::
By default this class will use "automated service discovery" which will require
a connection to the web. The google api client downloads a JSON file to "create" the
library interface on the fly. If you want a more hermetic build, you can pass the
contents of this file (currently found at https://www.googleapis.com/discovery/v1/apis/storage/v1/rest )
as the ``descriptor`` argument.
"""
def __init__(self, oauth_credentials=None, descriptor='', http_=None):
http_ = http_ or httplib2.Http()
if not oauth_credentials:
oauth_credentials = oauth2client.client.GoogleCredentials.get_application_default()
if descriptor:
self.client = discovery.build_from_document(descriptor, credentials=oauth_credentials, http=http_)
else:
self.client = discovery.build('storage', 'v1', credentials=oauth_credentials, http=http_)
def _path_to_bucket_and_key(self, path):
(scheme, netloc, path, _, _) = urlsplit(path)
assert scheme == 'gs'
path_without_initial_slash = path[1:]
return netloc, path_without_initial_slash
def _is_root(self, key):
return len(key) == 0 or key == '/'
def _add_path_delimiter(self, key):
return key if key[-1:] == '/' else key + '/'
def _obj_exists(self, bucket, obj):
try:
self.client.objects().get(bucket=bucket, object=obj).execute()
except errors.HttpError as ex:
if ex.resp['status'] == '404':
return False
raise
else:
return True
def _list_iter(self, bucket, prefix):
request = self.client.objects().list(bucket=bucket, prefix=prefix)
response = request.execute()
while response is not None:
for it in response.get('items', []):
yield it
request = self.client.objects().list_next(request, response)
if request is None:
break
response = request.execute()
def _do_put(self, media, dest_path):
bucket, obj = self._path_to_bucket_and_key(dest_path)
request = self.client.objects().insert(bucket=bucket, name=obj, media_body=media)
if not media.resumable():
return request.execute()
response = None
attempts = 0
while response is None:
error = None
try:
progress, response = request.next_chunk()
if progress is not None:
logger.debug('Upload progress: %.2f%%', 100 * progress)
except errors.HttpError as err:
error = err
if err.resp.status < 500:
raise
logger.warning('Caught error while uploading', exc_info=True)
except RETRYABLE_ERRORS as err:
logger.warning('Caught error while uploading', exc_info=True)
error = err
if error:
attempts += 1
if attempts >= NUM_RETRIES:
raise error
else:
attempts = 0
return response
def exists(self, path):
bucket, obj = self._path_to_bucket_and_key(path)
if self._obj_exists(bucket, obj):
return True
return self.isdir(path)
def isdir(self, path):
bucket, obj = self._path_to_bucket_and_key(path)
if self._is_root(obj):
try:
self.client.buckets().get(bucket=bucket).execute()
except errors.HttpError as ex:
if ex.resp['status'] == '404':
return False
raise
obj = self._add_path_delimiter(obj)
if self._obj_exists(bucket, obj):
return True
# Any objects with this prefix
resp = self.client.objects().list(bucket=bucket, prefix=obj, maxResults=20).execute()
lst = next(iter(resp.get('items', [])), None)
return bool(lst)
def remove(self, path, recursive=True):
(bucket, obj) = self._path_to_bucket_and_key(path)
if self._is_root(obj):
raise InvalidDeleteException(
'Cannot delete root of bucket at path {}'.format(path))
if self._obj_exists(bucket, obj):
self.client.objects().delete(bucket=bucket, object=obj).execute()
return True
if self.isdir(path):
if not recursive:
raise InvalidDeleteException(
'Path {} is a directory. Must use recursive delete'.format(path))
req = http.BatchHttpRequest()
for it in self._list_iter(bucket, self._add_path_delimiter(obj)):
req.add(self.client.objects().delete(bucket=bucket, object=it['name']))
req.execute()
return True
return False
def put(self, filename, dest_path, mimetype=None):
resumable = os.path.getsize(filename) > 0
mimetype = mimetype or mimetypes.guess_type(dest_path)[0] or DEFAULT_MIMETYPE
media = http.MediaFileUpload(filename, mimetype, chunksize=CHUNKSIZE, resumable=resumable)
self._do_put(media, dest_path)
def put_string(self, contents, dest_path, mimetype=None):
mimetype = mimetype or mimetypes.guess_type(dest_path)[0] or DEFAULT_MIMETYPE
assert isinstance(mimetype, six.string_types)
if not isinstance(contents, six.binary_type):
contents = contents.encode("utf-8")
media = http.MediaIoBaseUpload(six.BytesIO(contents), mimetype, resumable=bool(contents))
self._do_put(media, dest_path)
def mkdir(self, path, parents=True, raise_if_exists=False):
if self.exists(path):
if raise_if_exists:
raise luigi.target.FileAlreadyExists()
elif not self.isdir(path):
raise luigi.target.NotADirectory()
else:
return
self.put_string(b"", self._add_path_delimiter(path), mimetype='text/plain')
def copy(self, source_path, destination_path):
src_bucket, src_obj = self._path_to_bucket_and_key(source_path)
dest_bucket, dest_obj = self._path_to_bucket_and_key(destination_path)
if self.isdir(source_path):
src_prefix = self._add_path_delimiter(src_obj)
dest_prefix = self._add_path_delimiter(dest_obj)
source_path = self._add_path_delimiter(source_path)
for obj in self.listdir(source_path):
suffix = obj[len(source_path):]
self.client.objects().copy(
sourceBucket=src_bucket,
sourceObject=src_prefix + suffix,
destinationBucket=dest_bucket,
destinationObject=dest_prefix + suffix,
body={}).execute()
else:
self.client.objects().copy(
sourceBucket=src_bucket,
sourceObject=src_obj,
destinationBucket=dest_bucket,
destinationObject=dest_obj,
body={}).execute()
def rename(self, source_path, destination_path):
"""
        Rename/move an object from one GCS location to another.
"""
self.copy(source_path, destination_path)
self.remove(source_path)
def listdir(self, path):
"""
        Get an iterable with GCS folder contents.
Iterable contains paths relative to queried path.
"""
bucket, obj = self._path_to_bucket_and_key(path)
obj_prefix = self._add_path_delimiter(obj)
if self._is_root(obj_prefix):
obj_prefix = ''
obj_prefix_len = len(obj_prefix)
for it in self._list_iter(bucket, obj_prefix):
yield self._add_path_delimiter(path) + it['name'][obj_prefix_len:]
def download(self, path):
bucket, obj = self._path_to_bucket_and_key(path)
with tempfile.NamedTemporaryFile(delete=False) as fp:
# We can't return the tempfile reference because of a bug in python: http://bugs.python.org/issue18879
return_fp = _DeleteOnCloseFile(fp.name, 'r')
# Special case empty files because chunk-based downloading doesn't work.
result = self.client.objects().get(bucket=bucket, object=obj).execute()
if int(result['size']) == 0:
return return_fp
request = self.client.objects().get_media(bucket=bucket, object=obj)
downloader = http.MediaIoBaseDownload(fp, request, chunksize=1024 * 1024)
attempts = 0
done = False
while not done:
error = None
try:
_, done = downloader.next_chunk()
except errors.HttpError as err:
error = err
if err.resp.status < 500:
raise
logger.warning('Error downloading file, retrying', exc_info=True)
except RETRYABLE_ERRORS as err:
logger.warning('Error downloading file, retrying', exc_info=True)
error = err
if error:
attempts += 1
if attempts >= NUM_RETRIES:
raise error
else:
attempts = 0
return return_fp
class _DeleteOnCloseFile(io.FileIO):
def close(self):
super(_DeleteOnCloseFile, self).close()
os.remove(self.name)
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return True
class AtomicGCSFile(luigi.target.AtomicLocalFile):
"""
A GCS file that writes to a temp file and put to GCS on close.
"""
def __init__(self, path, gcs_client):
self.gcs_client = gcs_client
super(AtomicGCSFile, self).__init__(path)
def move_to_final_destination(self):
self.gcs_client.put(self.tmp_path, self.path)
class GCSTarget(luigi.target.FileSystemTarget):
fs = None
def __init__(self, path, format=None, client=None):
super(GCSTarget, self).__init__(path)
if format is None:
format = luigi.format.get_default_format()
self.format = format
self.fs = client or GCSClient()
def open(self, mode='r'):
if mode == 'r':
return self.format.pipe_reader(self.fs.download(self.path))
elif mode == 'w':
return self.format.pipe_writer(AtomicGCSFile(self.path, self.fs))
else:
raise ValueError("Unsupported open mode '{}'".format(mode))
class GCSFlagTarget(GCSTarget):
"""
Defines a target directory with a flag-file (defaults to `_SUCCESS`) used
to signify job success.
This checks for two things:
* the path exists (just like the GCSTarget)
* the _SUCCESS file exists within the directory.
Because Hadoop outputs into a directory and not a single file,
the path is assumed to be a directory.
This is meant to be a handy alternative to AtomicGCSFile.
The AtomicFile approach can be burdensome for GCS since there are no directories, per se.
If we have 1,000,000 output files, then we have to rename 1,000,000 objects.
"""
fs = None
def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
"""
        Initializes a GCSFlagTarget.
:param path: the directory where the files are stored.
:type path: str
:param client:
:type client:
:param flag:
:type flag: str
"""
if format is None:
format = luigi.format.get_default_format()
if path[-1] != "/":
raise ValueError("S3FlagTarget requires the path to be to a "
"directory. It must end with a slash ( / ).")
super(GCSFlagTarget, self).__init__(path)
self.format = format
self.fs = client or GCSClient()
self.flag = flag
def exists(self):
flag_target = self.path + self.flag
return self.fs.exists(flag_target)
| apache-2.0 |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/lmfit-py/doc/sphinx/numpydoc/comment_eater.py | 83 | 5181 | from cStringIO import StringIO
import compiler
import inspect
import textwrap
import tokenize
from compiler_unparse import unparse
class Comment(object):
""" A comment block.
"""
is_comment = True
def __init__(self, start_lineno, end_lineno, text):
# int : The first line number in the block. 1-indexed.
self.start_lineno = start_lineno
# int : The last line number. Inclusive!
self.end_lineno = end_lineno
# str : The text block including '#' character but not any leading spaces.
self.text = text
def add(self, string, start, end, line):
""" Add a new comment line.
"""
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
self.text += string
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
self.end_lineno, self.text)
class NonComment(object):
""" A non-comment block of code.
"""
is_comment = False
def __init__(self, start_lineno, end_lineno):
self.start_lineno = start_lineno
self.end_lineno = end_lineno
def add(self, string, start, end, line):
""" Add lines to the block.
"""
if string.strip():
# Only add if not entirely whitespace.
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
self.end_lineno)
class CommentBlocker(object):
""" Pull out contiguous comment blocks.
"""
def __init__(self):
# Start with a dummy.
self.current_block = NonComment(0, 0)
# All of the blocks seen so far.
self.blocks = []
# The index mapping lines of code to their associated comment blocks.
self.index = {}
def process_file(self, file):
""" Process a file object.
"""
for token in tokenize.generate_tokens(file.next):
self.process_token(*token)
self.make_index()
def process_token(self, kind, string, start, end, line):
""" Process a single token.
"""
if self.current_block.is_comment:
if kind == tokenize.COMMENT:
self.current_block.add(string, start, end, line)
else:
self.new_noncomment(start[0], end[0])
else:
if kind == tokenize.COMMENT:
self.new_comment(string, start, end, line)
else:
self.current_block.add(string, start, end, line)
def new_noncomment(self, start_lineno, end_lineno):
""" We are transitioning from a noncomment to a comment.
"""
block = NonComment(start_lineno, end_lineno)
self.blocks.append(block)
self.current_block = block
def new_comment(self, string, start, end, line):
""" Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
"""
prefix = line[:start[1]]
if prefix.strip():
# Oops! Trailing comment, not a comment block.
self.current_block.add(string, start, end, line)
else:
# A comment block.
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block
def make_index(self):
""" Make the index mapping lines of actual code to their associated
prefix comments.
"""
for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
if not block.is_comment:
self.index[block.start_lineno] = prev
def search_for_comment(self, lineno, default=None):
""" Find the comment block just before the given line number.
Returns None (or the specified default) if there is no such block.
"""
if not self.index:
self.make_index()
block = self.index.get(lineno, None)
text = getattr(block, 'text', default)
return text
def strip_comment_marker(text):
""" Strip # markers at the front of a block of comment text.
"""
lines = []
for line in text.splitlines():
lines.append(line.lstrip('#'))
text = textwrap.dedent('\n'.join(lines))
return text
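# A hedged example of the stripping above (the input text is assumed):
#
#   strip_comment_marker("# foo\n#   bar")
#   # -> "foo\n  bar"  (markers removed, common indentation dedented)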
def get_class_traits(klass):
""" Yield all of the documentation for trait definitions on a class object.
"""
# FIXME: gracefully handle errors here or in the caller?
source = inspect.getsource(klass)
cb = CommentBlocker()
cb.process_file(StringIO(source))
mod_ast = compiler.parse(source)
class_ast = mod_ast.node.nodes[0]
for node in class_ast.code.nodes:
# FIXME: handle other kinds of assignments?
if isinstance(node, compiler.ast.Assign):
name = node.nodes[0].name
rhs = unparse(node.expr).strip()
doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
yield name, rhs, doc
| apache-2.0 |
crocodoc/graphite | carbon/lib/carbon/util.py | 6 | 4715 | import sys
import os
import pwd
from os.path import abspath, basename, dirname, join
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import cPickle as pickle
USING_CPICKLE = True
except:
import pickle
USING_CPICKLE = False
from twisted.python.util import initgroups
from twisted.scripts.twistd import runApp
from twisted.scripts._twistd_unix import daemonize
daemonize = daemonize # Backwards compatibility
def dropprivs(user):
uid, gid = pwd.getpwnam(user)[2:4]
initgroups(uid, gid)
os.setregid(gid, gid)
os.setreuid(uid, uid)
return (uid, gid)
def run_twistd_plugin(filename):
from carbon.conf import get_parser
from twisted.scripts.twistd import ServerOptions
bin_dir = dirname(abspath(filename))
root_dir = dirname(bin_dir)
os.environ.setdefault('GRAPHITE_ROOT', root_dir)
program = basename(filename).split('.')[0]
# First, parse command line options as the legacy carbon scripts used to
# do.
parser = get_parser(program)
(options, args) = parser.parse_args()
if not args:
parser.print_usage()
return
# This isn't as evil as you might think
__builtins__["instance"] = options.instance
__builtins__["program"] = program
# Then forward applicable options to either twistd or to the plugin itself.
twistd_options = ["--no_save"]
# If no reactor was selected yet, try to use the epoll reactor if
# available.
try:
from twisted.internet import epollreactor
twistd_options.append("--reactor=epoll")
except:
pass
if options.debug:
twistd_options.extend(["--nodaemon"])
if options.profile:
twistd_options.append("--profile")
if options.pidfile:
twistd_options.extend(["--pidfile", options.pidfile])
# Now for the plugin-specific options.
twistd_options.append(program)
if options.debug:
twistd_options.append("--debug")
for option_name, option_value in vars(options).items():
if (option_value is not None and
option_name not in ("debug", "profile", "pidfile")):
twistd_options.extend(["--%s" % option_name.replace("_", "-"),
option_value])
# Finally, append extra args so that twistd has a chance to process them.
twistd_options.extend(args)
config = ServerOptions()
config.parseOptions(twistd_options)
runApp(config)
def parseDestinations(destination_strings):
destinations = []
for dest_string in destination_strings:
parts = dest_string.strip().split(':')
if len(parts) == 2:
server, port = parts
instance = None
elif len(parts) == 3:
server, port, instance = parts
else:
raise ValueError("Invalid destination string \"%s\"" % dest_string)
destinations.append( (server, int(port), instance) )
return destinations
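# A hedged illustration of the "server:port[:instance]" format parsed above;
# the addresses are assumed examples:
#
#   parseDestinations(["127.0.0.1:2004", "10.0.0.1:2104:a"])
#   # -> [("127.0.0.1", 2004, None), ("10.0.0.1", 2104, "a")]
#   parseDestinations(["badstring"])  # raises ValueError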
# This whole song & dance is due to pickle being insecure
# yet performance critical for carbon. We leave the insecure
# mode (which is faster) as an option (USE_INSECURE_UNPICKLER).
# The SafeUnpickler classes were largely derived from
# http://nadiana.com/python-pickle-insecure
if USING_CPICKLE:
class SafeUnpickler(object):
PICKLE_SAFE = {
'copy_reg' : set(['_reconstructor']),
'__builtin__' : set(['object']),
}
@classmethod
def find_class(cls, module, name):
if not module in cls.PICKLE_SAFE:
raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module)
__import__(module)
mod = sys.modules[module]
if not name in cls.PICKLE_SAFE[module]:
raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name)
return getattr(mod, name)
@classmethod
def loads(cls, pickle_string):
pickle_obj = pickle.Unpickler(StringIO(pickle_string))
pickle_obj.find_global = cls.find_class
return pickle_obj.load()
else:
class SafeUnpickler(pickle.Unpickler):
PICKLE_SAFE = {
'copy_reg' : set(['_reconstructor']),
'__builtin__' : set(['object']),
}
def find_class(self, module, name):
if not module in self.PICKLE_SAFE:
raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module)
__import__(module)
mod = sys.modules[module]
if not name in self.PICKLE_SAFE[module]:
raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name)
return getattr(mod, name)
@classmethod
def loads(cls, pickle_string):
return cls(StringIO(pickle_string)).load()
def get_unpickler(insecure=False):
if insecure:
return pickle
else:
return SafeUnpickler
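# A hedged usage sketch (the payload is an assumed example): plain builtin
# values round-trip through the safe unpickler, while pickles referencing
# modules outside PICKLE_SAFE raise pickle.UnpicklingError.
#
#   data = pickle.dumps({"metric": "foo", "value": 1})
#   get_unpickler(insecure=False).loads(data)  # -> {"metric": "foo", "value": 1}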
| apache-2.0 |
Jaccorot/django-cms | cms/south_migrations/0051_auto__add_placeholderreference.py | 63 | 17030 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PlaceholderReference'
db.create_table(u'cmsplugin_placeholderreference', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('placeholder_ref', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
))
db.send_create_signal('cms', ['PlaceholderReference'])
def backwards(self, orm):
# Deleting model 'PlaceholderReference'
db.delete_table(u'cmsplugin_placeholderreference')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', 'db_table': "u'cmsplugin_placeholderreference'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | bsd-3-clause |
aminghadersohi/airflow | tests/dags/test_default_impersonation.py | 45 | 1214 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime
from textwrap import dedent
DEFAULT_DATE = datetime(2016, 1, 1)
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE,
}
dag = DAG(dag_id='test_default_impersonation', default_args=args)
deelevated_user = 'airflow_test_user'
test_command = dedent(
"""\
if [ '{user}' != "$(whoami)" ]; then
echo current user $(whoami) is not {user}!
exit 1
fi
""".format(user=deelevated_user))
task = BashOperator(
task_id='test_deelevated_user',
bash_command=test_command,
dag=dag,
)
| apache-2.0 |
INNUENDOWEB/INNUca | src/SPAdes-3.11.0-Linux/share/spades/spades_pipeline/truspades/generate_quality.py | 12 | 1843 | ############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import re
import sys
import itertools
import sam_parser
pattern = re.compile('([0-9]*)([MIDNSHP])')
def parse(cigar, len, pos = 0):
if cigar == "=" :
for i in range(len):
yield (i, i + pos)
return
if cigar == "X":
return
    cur = 0    # current position within the read
    curr = pos # current position within the reference
for n, c in pattern.findall(cigar):
if n:
n = int(n)
else:
n = 1
        if c == 'M':
            # Match/mismatch: consumes both the read and the reference.
            for i in range(n):
                yield (cur, curr)
                cur += 1
                curr += 1
        elif c in 'DPN':
            # Deletion/padding/skip: consumes the reference only.
            curr += n
        elif c in "IS":
            # Insertion/soft clip: consumes the read only.
            cur += n
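# A hedged example of the coordinate pairs yielded above; the CIGAR string
# "3M2D2M" is an assumed input:
#
#   list(parse("3M2D2M", 5))
#   # -> [(0, 0), (1, 1), (2, 2), (3, 5), (4, 6)]
#   # (the 2-base deletion advances the reference but not the read)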
def CollectQuality(contigs, sam):
qual = [[[0,0] for i in range(len(contig))] for contig in contigs]
for rec in sam:
if rec.proper_alignment:
for seq_pos, contig_pos in parse(rec.cigar, rec.alen, rec.pos - 1):
if rec.seq[seq_pos] == contigs[rec.tid].seq[contig_pos]:
qual[rec.tid][contig_pos][1] += 1
qual[rec.tid][contig_pos][0] += ord(rec.qual[seq_pos])
return qual
def CountContigQuality(contigs, qual):
for i in range(len(contigs)):
cnt = 0
qual_list = [chr(33)] * len(contigs[i])
for pos in range(len(contigs[i])):
q = qual[i][pos]
if q[1] != 0:
qual_list[pos] = chr(q[0] / q[1])
else:
cnt += 1
contigs[i].qual = "".join(qual_list)
def GenerateQuality(contigs, sam):
qual = CollectQuality(contigs, sam)
CountContigQuality(contigs, qual)
| gpl-3.0 |
tst-ahernandez/earthenterprise | earth_enterprise/src/server/wsgi/search/plugin/federated_search_handler.py | 4 | 5020 | #!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing the Federated search."""
from search.common import exceptions
from search.common import utils
from search.plugin import coordinate_search_handler
from search.plugin import geplaces_search_handler
class FederatedSearch(object):
"""Class for performing the Federated search.
We initially submit the search against the CoordinateSearch, stopping
there if any positive results are returned. If not, we issue our search
against the GEPlacesSearch.
If there is a valid response from any of the searches, we use
it. If not, then we can assume that 'location' doesn't exist and so we
present a reasonable 'no results found' back to the caller.
"""
def __init__(self):
"""Inits FederatedSearch.
Initializes the logger "ge_search".
"""
self.utils = utils.SearchUtils()
self.logger = self.utils.logger
# Create coordinate and places search objects
self._coordinate = coordinate_search_handler.CoordinateSearch()
self._geplaces = geplaces_search_handler.PlacesSearch()
# Get Style information from Places or Coordinate search handlers.
self._style = self._geplaces.style
def HandleSearchRequest(self, environ):
"""Fetches the search tokens from form and performs the federated search.
Args:
environ: A list of environment variables as supplied by the
WSGI interface to the federated search application interface.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
response_type: Response type can be KML or JSONP, depending on the client.
"""
search_results = ""
search_status = False
# Fetch all the attributes provided by the user.
parameters = self.utils.GetParameters(environ)
self._geplaces.parameters = parameters
self._coordinate.parameters = parameters
# Retrieve the function call back name for JSONP response.
self.f_callback = self.utils.GetCallback(parameters)
# Fetch additional query parameters 'flyToFirstElement' and
# 'displayKeys' from URL.
self.fly_to_first_element = self.utils.GetValue(
parameters, "flyToFirstElement")
self.display_keys_string = self.utils.GetValue(
parameters, "displayKeys")
response_type = self.utils.GetResponseType(environ)
original_query = self.utils.GetValue(parameters, "q")
if original_query:
(search_status, search_results) = self.DoSearch(
original_query, response_type)
else:
self.logger.debug("Empty search query received")
if not search_status:
folder_name = "No results were returned."
search_results = self.utils.NoSearchResults(
folder_name, self._style, response_type, self.f_callback)
return (search_results, response_type)
def DoSearch(self, original_query, response_type):
"""Performs the federated search and return's the results.
Args:
original_query: search query as entered by the user.
response_type: Response type can be KML or JSONP, depending on the client.
Returns:
tuple containing
search_status: Whether search could be performed.
search_results: A KML/JSONP formatted string which contains search results.
"""
search_status = False
search_results = ""
self._geplaces.f_callback = self.f_callback
self._coordinate.f_callback = self.f_callback
self._geplaces.fly_to_first_element = self.fly_to_first_element
self._geplaces.display_keys_string = self.display_keys_string
self.logger.debug("Performing coordinate search on %s", original_query)
try:
(search_status, search_results) = self._coordinate.DoSearch(
original_query, response_type)
except exceptions.BadQueryException:
# If 'BadQueryException' exception occurs, ignore it
# and proceed with places search.
pass
if not search_status:
self.logger.debug(
"No search results were returned by coordinate search."
"Proceeding with places search...")
(search_status, search_results) = self._geplaces.DoSearch(
original_query, response_type)
if not search_status:
self.logger.debug(
"No search results were returned by coordinate and places search.")
return search_status, search_results
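# Illustrative sketch (assumption, not part of the original module): calling
# HandleSearchRequest directly with a minimal WSGI-style environ. The exact
# keys consumed are decided by utils.GetParameters/GetResponseType, so the
# QUERY_STRING below is a guess at that interface.
def example_wsgi_request():
  fedobj = FederatedSearch()
  environ = {"QUERY_STRING": "q=santa+clara&flyToFirstElement=true"}
  search_results, response_type = fedobj.HandleSearchRequest(environ)
  return search_results, response_type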
def main():
fedobj = FederatedSearch()
fedobj.DoSearch("santa clara", "KML")
if __name__ == "__main__":
main()
| apache-2.0 |
clobrano/personfinder | app/unidecode/x076.py | 252 | 4639 | data = (
'Yu ', # 0x00
'Cui ', # 0x01
'Ya ', # 0x02
'Zhu ', # 0x03
'Cu ', # 0x04
'Dan ', # 0x05
'Shen ', # 0x06
'Zhung ', # 0x07
'Ji ', # 0x08
'Yu ', # 0x09
'Hou ', # 0x0a
'Feng ', # 0x0b
'La ', # 0x0c
'Yang ', # 0x0d
'Shen ', # 0x0e
'Tu ', # 0x0f
'Yu ', # 0x10
'Gua ', # 0x11
'Wen ', # 0x12
'Huan ', # 0x13
'Ku ', # 0x14
'Jia ', # 0x15
'Yin ', # 0x16
'Yi ', # 0x17
'Lu ', # 0x18
'Sao ', # 0x19
'Jue ', # 0x1a
'Chi ', # 0x1b
'Xi ', # 0x1c
'Guan ', # 0x1d
'Yi ', # 0x1e
'Wen ', # 0x1f
'Ji ', # 0x20
'Chuang ', # 0x21
'Ban ', # 0x22
'Lei ', # 0x23
'Liu ', # 0x24
'Chai ', # 0x25
'Shou ', # 0x26
'Nue ', # 0x27
'Dian ', # 0x28
'Da ', # 0x29
'Pie ', # 0x2a
'Tan ', # 0x2b
'Zhang ', # 0x2c
'Biao ', # 0x2d
'Shen ', # 0x2e
'Cu ', # 0x2f
'Luo ', # 0x30
'Yi ', # 0x31
'Zong ', # 0x32
'Chou ', # 0x33
'Zhang ', # 0x34
'Zhai ', # 0x35
'Sou ', # 0x36
'Suo ', # 0x37
'Que ', # 0x38
'Diao ', # 0x39
'Lou ', # 0x3a
'Lu ', # 0x3b
'Mo ', # 0x3c
'Jin ', # 0x3d
'Yin ', # 0x3e
'Ying ', # 0x3f
'Huang ', # 0x40
'Fu ', # 0x41
'Liao ', # 0x42
'Long ', # 0x43
'Qiao ', # 0x44
'Liu ', # 0x45
'Lao ', # 0x46
'Xian ', # 0x47
'Fei ', # 0x48
'Dan ', # 0x49
'Yin ', # 0x4a
'He ', # 0x4b
'Yan ', # 0x4c
'Ban ', # 0x4d
'Xian ', # 0x4e
'Guan ', # 0x4f
'Guai ', # 0x50
'Nong ', # 0x51
'Yu ', # 0x52
'Wei ', # 0x53
'Yi ', # 0x54
'Yong ', # 0x55
'Pi ', # 0x56
'Lei ', # 0x57
'Li ', # 0x58
'Shu ', # 0x59
'Dan ', # 0x5a
'Lin ', # 0x5b
'Dian ', # 0x5c
'Lin ', # 0x5d
'Lai ', # 0x5e
'Pie ', # 0x5f
'Ji ', # 0x60
'Chi ', # 0x61
'Yang ', # 0x62
'Xian ', # 0x63
'Jie ', # 0x64
'Zheng ', # 0x65
'[?] ', # 0x66
'Li ', # 0x67
'Huo ', # 0x68
'Lai ', # 0x69
'Shaku ', # 0x6a
'Dian ', # 0x6b
'Xian ', # 0x6c
'Ying ', # 0x6d
'Yin ', # 0x6e
'Qu ', # 0x6f
'Yong ', # 0x70
'Tan ', # 0x71
'Dian ', # 0x72
'Luo ', # 0x73
'Luan ', # 0x74
'Luan ', # 0x75
'Bo ', # 0x76
'[?] ', # 0x77
'Gui ', # 0x78
'Po ', # 0x79
'Fa ', # 0x7a
'Deng ', # 0x7b
'Fa ', # 0x7c
'Bai ', # 0x7d
'Bai ', # 0x7e
'Qie ', # 0x7f
'Bi ', # 0x80
'Zao ', # 0x81
'Zao ', # 0x82
'Mao ', # 0x83
'De ', # 0x84
'Pa ', # 0x85
'Jie ', # 0x86
'Huang ', # 0x87
'Gui ', # 0x88
'Ci ', # 0x89
'Ling ', # 0x8a
'Gao ', # 0x8b
'Mo ', # 0x8c
'Ji ', # 0x8d
'Jiao ', # 0x8e
'Peng ', # 0x8f
'Gao ', # 0x90
'Ai ', # 0x91
'E ', # 0x92
'Hao ', # 0x93
'Han ', # 0x94
'Bi ', # 0x95
'Wan ', # 0x96
'Chou ', # 0x97
'Qian ', # 0x98
'Xi ', # 0x99
'Ai ', # 0x9a
'Jiong ', # 0x9b
'Hao ', # 0x9c
'Huang ', # 0x9d
'Hao ', # 0x9e
'Ze ', # 0x9f
'Cui ', # 0xa0
'Hao ', # 0xa1
'Xiao ', # 0xa2
'Ye ', # 0xa3
'Po ', # 0xa4
'Hao ', # 0xa5
'Jiao ', # 0xa6
'Ai ', # 0xa7
'Xing ', # 0xa8
'Huang ', # 0xa9
'Li ', # 0xaa
'Piao ', # 0xab
'He ', # 0xac
'Jiao ', # 0xad
'Pi ', # 0xae
'Gan ', # 0xaf
'Pao ', # 0xb0
'Zhou ', # 0xb1
'Jun ', # 0xb2
'Qiu ', # 0xb3
'Cun ', # 0xb4
'Que ', # 0xb5
'Zha ', # 0xb6
'Gu ', # 0xb7
'Jun ', # 0xb8
'Jun ', # 0xb9
'Zhou ', # 0xba
'Zha ', # 0xbb
'Gu ', # 0xbc
'Zhan ', # 0xbd
'Du ', # 0xbe
'Min ', # 0xbf
'Qi ', # 0xc0
'Ying ', # 0xc1
'Yu ', # 0xc2
'Bei ', # 0xc3
'Zhao ', # 0xc4
'Zhong ', # 0xc5
'Pen ', # 0xc6
'He ', # 0xc7
'Ying ', # 0xc8
'He ', # 0xc9
'Yi ', # 0xca
'Bo ', # 0xcb
'Wan ', # 0xcc
'He ', # 0xcd
'Ang ', # 0xce
'Zhan ', # 0xcf
'Yan ', # 0xd0
'Jian ', # 0xd1
'He ', # 0xd2
'Yu ', # 0xd3
'Kui ', # 0xd4
'Fan ', # 0xd5
'Gai ', # 0xd6
'Dao ', # 0xd7
'Pan ', # 0xd8
'Fu ', # 0xd9
'Qiu ', # 0xda
'Sheng ', # 0xdb
'Dao ', # 0xdc
'Lu ', # 0xdd
'Zhan ', # 0xde
'Meng ', # 0xdf
'Li ', # 0xe0
'Jin ', # 0xe1
'Xu ', # 0xe2
'Jian ', # 0xe3
'Pan ', # 0xe4
'Guan ', # 0xe5
'An ', # 0xe6
'Lu ', # 0xe7
'Shu ', # 0xe8
'Zhou ', # 0xe9
'Dang ', # 0xea
'An ', # 0xeb
'Gu ', # 0xec
'Li ', # 0xed
'Mu ', # 0xee
'Cheng ', # 0xef
'Gan ', # 0xf0
'Xu ', # 0xf1
'Mang ', # 0xf2
'Mang ', # 0xf3
'Zhi ', # 0xf4
'Qi ', # 0xf5
'Ruan ', # 0xf6
'Tian ', # 0xf7
'Xiang ', # 0xf8
'Dun ', # 0xf9
'Xin ', # 0xfa
'Xi ', # 0xfb
'Pan ', # 0xfc
'Feng ', # 0xfd
'Dun ', # 0xfe
'Min ', # 0xff
)
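# Illustrative sketch (assumption, not part of the original module):
# unidecode splits code points into 256-entry blocks, and this module
# appears to cover U+7600..U+76FF, so the low byte of a code point indexes
# the tuple above.
if __name__ == "__main__":
    ch = u"\u7686"                  # a CJK character in this block
    print(data[ord(ch) & 0xFF])     # -> 'Jie ' (entry 0x86 above)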
| apache-2.0 |
kapilrastogi/Impala | thirdparty/hive-1.1.0-cdh5.8.0-SNAPSHOT/lib/py/fb303/FacebookService.py | 54 | 57351 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except ImportError:
  fastbinary = None
class Iface:
"""
Standard base service
"""
def getName(self, ):
"""
Returns a descriptive name of the service
"""
pass
def getVersion(self, ):
"""
Returns the version of the service
"""
pass
def getStatus(self, ):
"""
Gets the status of this service
"""
pass
def getStatusDetails(self, ):
"""
User friendly description of status, such as why the service is in
the dead or warning state, or what is being started or stopped.
"""
pass
def getCounters(self, ):
"""
Gets the counters for this service
"""
pass
def getCounter(self, key):
"""
Gets the value of a single counter
Parameters:
- key
"""
pass
def setOption(self, key, value):
"""
Sets an option
Parameters:
- key
- value
"""
pass
def getOption(self, key):
"""
Gets an option
Parameters:
- key
"""
pass
def getOptions(self, ):
"""
Gets all options
"""
pass
def getCpuProfile(self, profileDurationInSec):
"""
Returns a CPU profile over the given time interval (client and server
must agree on the profile format).
Parameters:
- profileDurationInSec
"""
pass
def aliveSince(self, ):
"""
Returns the unix time that the server has been running since
"""
pass
def reinitialize(self, ):
"""
Tell the server to reload its configuration, reopen log files, etc
"""
pass
def shutdown(self, ):
"""
Suggest a shutdown to the server
"""
pass
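# Illustrative sketch (not part of the generated file): a minimal handler
# implementing a few Iface methods, suitable for the Processor class defined
# further below. The return values are placeholders; 2 is assumed to be
# fb_status.ALIVE in fb303's ttypes.
class ExampleHandler(Iface):
  def getName(self):
    return "example-service"
  def getStatus(self):
    return 2  # assumed fb_status.ALIVE
  def aliveSince(self):
    import time
    return int(time.time())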
class Client(Iface):
"""
Standard base service
"""
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def getName(self, ):
"""
Returns a descriptive name of the service
"""
self.send_getName()
return self.recv_getName()
def send_getName(self, ):
self._oprot.writeMessageBegin('getName', TMessageType.CALL, self._seqid)
args = getName_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getName(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getName_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getName failed: unknown result");
def getVersion(self, ):
"""
Returns the version of the service
"""
self.send_getVersion()
return self.recv_getVersion()
def send_getVersion(self, ):
self._oprot.writeMessageBegin('getVersion', TMessageType.CALL, self._seqid)
args = getVersion_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getVersion(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getVersion_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getVersion failed: unknown result");
def getStatus(self, ):
"""
Gets the status of this service
"""
self.send_getStatus()
return self.recv_getStatus()
def send_getStatus(self, ):
self._oprot.writeMessageBegin('getStatus', TMessageType.CALL, self._seqid)
args = getStatus_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getStatus(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getStatus_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getStatus failed: unknown result");
def getStatusDetails(self, ):
"""
User friendly description of status, such as why the service is in
the dead or warning state, or what is being started or stopped.
"""
self.send_getStatusDetails()
return self.recv_getStatusDetails()
def send_getStatusDetails(self, ):
self._oprot.writeMessageBegin('getStatusDetails', TMessageType.CALL, self._seqid)
args = getStatusDetails_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getStatusDetails(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getStatusDetails_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getStatusDetails failed: unknown result");
def getCounters(self, ):
"""
Gets the counters for this service
"""
self.send_getCounters()
return self.recv_getCounters()
def send_getCounters(self, ):
self._oprot.writeMessageBegin('getCounters', TMessageType.CALL, self._seqid)
args = getCounters_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getCounters(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getCounters_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getCounters failed: unknown result");
def getCounter(self, key):
"""
Gets the value of a single counter
Parameters:
- key
"""
self.send_getCounter(key)
return self.recv_getCounter()
def send_getCounter(self, key):
self._oprot.writeMessageBegin('getCounter', TMessageType.CALL, self._seqid)
args = getCounter_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getCounter(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getCounter_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getCounter failed: unknown result");
def setOption(self, key, value):
"""
Sets an option
Parameters:
- key
- value
"""
self.send_setOption(key, value)
self.recv_setOption()
def send_setOption(self, key, value):
self._oprot.writeMessageBegin('setOption', TMessageType.CALL, self._seqid)
args = setOption_args()
args.key = key
args.value = value
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setOption(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setOption_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def getOption(self, key):
"""
Gets an option
Parameters:
- key
"""
self.send_getOption(key)
return self.recv_getOption()
def send_getOption(self, key):
self._oprot.writeMessageBegin('getOption', TMessageType.CALL, self._seqid)
args = getOption_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getOption(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getOption_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getOption failed: unknown result");
def getOptions(self, ):
"""
Gets all options
"""
self.send_getOptions()
return self.recv_getOptions()
def send_getOptions(self, ):
self._oprot.writeMessageBegin('getOptions', TMessageType.CALL, self._seqid)
args = getOptions_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getOptions(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getOptions_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getOptions failed: unknown result");
def getCpuProfile(self, profileDurationInSec):
"""
Returns a CPU profile over the given time interval (client and server
must agree on the profile format).
Parameters:
- profileDurationInSec
"""
self.send_getCpuProfile(profileDurationInSec)
return self.recv_getCpuProfile()
def send_getCpuProfile(self, profileDurationInSec):
self._oprot.writeMessageBegin('getCpuProfile', TMessageType.CALL, self._seqid)
args = getCpuProfile_args()
args.profileDurationInSec = profileDurationInSec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getCpuProfile(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getCpuProfile_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getCpuProfile failed: unknown result");
def aliveSince(self, ):
"""
Returns the unix time that the server has been running since
"""
self.send_aliveSince()
return self.recv_aliveSince()
def send_aliveSince(self, ):
self._oprot.writeMessageBegin('aliveSince', TMessageType.CALL, self._seqid)
args = aliveSince_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_aliveSince(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = aliveSince_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "aliveSince failed: unknown result");
def reinitialize(self, ):
"""
Tell the server to reload its configuration, reopen log files, etc
"""
self.send_reinitialize()
def send_reinitialize(self, ):
self._oprot.writeMessageBegin('reinitialize', TMessageType.CALL, self._seqid)
args = reinitialize_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def shutdown(self, ):
"""
Suggest a shutdown to the server
"""
self.send_shutdown()
def send_shutdown(self, ):
self._oprot.writeMessageBegin('shutdown', TMessageType.CALL, self._seqid)
args = shutdown_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
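# Illustrative sketch (not part of the generated file): wiring the Client
# above to a running server over a buffered socket transport. Host and port
# are placeholders.
def example_client():
  from thrift.transport import TSocket
  transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
  protocol = TBinaryProtocol.TBinaryProtocol(transport)
  client = Client(protocol)
  transport.open()
  try:
    return client.getName(), client.getStatus()
  finally:
    transport.close()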
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["getName"] = Processor.process_getName
self._processMap["getVersion"] = Processor.process_getVersion
self._processMap["getStatus"] = Processor.process_getStatus
self._processMap["getStatusDetails"] = Processor.process_getStatusDetails
self._processMap["getCounters"] = Processor.process_getCounters
self._processMap["getCounter"] = Processor.process_getCounter
self._processMap["setOption"] = Processor.process_setOption
self._processMap["getOption"] = Processor.process_getOption
self._processMap["getOptions"] = Processor.process_getOptions
self._processMap["getCpuProfile"] = Processor.process_getCpuProfile
self._processMap["aliveSince"] = Processor.process_aliveSince
self._processMap["reinitialize"] = Processor.process_reinitialize
self._processMap["shutdown"] = Processor.process_shutdown
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_getName(self, seqid, iprot, oprot):
args = getName_args()
args.read(iprot)
iprot.readMessageEnd()
result = getName_result()
result.success = self._handler.getName()
oprot.writeMessageBegin("getName", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getVersion(self, seqid, iprot, oprot):
args = getVersion_args()
args.read(iprot)
iprot.readMessageEnd()
result = getVersion_result()
result.success = self._handler.getVersion()
oprot.writeMessageBegin("getVersion", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getStatus(self, seqid, iprot, oprot):
args = getStatus_args()
args.read(iprot)
iprot.readMessageEnd()
result = getStatus_result()
result.success = self._handler.getStatus()
oprot.writeMessageBegin("getStatus", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getStatusDetails(self, seqid, iprot, oprot):
args = getStatusDetails_args()
args.read(iprot)
iprot.readMessageEnd()
result = getStatusDetails_result()
result.success = self._handler.getStatusDetails()
oprot.writeMessageBegin("getStatusDetails", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getCounters(self, seqid, iprot, oprot):
args = getCounters_args()
args.read(iprot)
iprot.readMessageEnd()
result = getCounters_result()
result.success = self._handler.getCounters()
oprot.writeMessageBegin("getCounters", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getCounter(self, seqid, iprot, oprot):
args = getCounter_args()
args.read(iprot)
iprot.readMessageEnd()
result = getCounter_result()
result.success = self._handler.getCounter(args.key)
oprot.writeMessageBegin("getCounter", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setOption(self, seqid, iprot, oprot):
args = setOption_args()
args.read(iprot)
iprot.readMessageEnd()
result = setOption_result()
self._handler.setOption(args.key, args.value)
oprot.writeMessageBegin("setOption", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getOption(self, seqid, iprot, oprot):
args = getOption_args()
args.read(iprot)
iprot.readMessageEnd()
result = getOption_result()
result.success = self._handler.getOption(args.key)
oprot.writeMessageBegin("getOption", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getOptions(self, seqid, iprot, oprot):
args = getOptions_args()
args.read(iprot)
iprot.readMessageEnd()
result = getOptions_result()
result.success = self._handler.getOptions()
oprot.writeMessageBegin("getOptions", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getCpuProfile(self, seqid, iprot, oprot):
args = getCpuProfile_args()
args.read(iprot)
iprot.readMessageEnd()
result = getCpuProfile_result()
result.success = self._handler.getCpuProfile(args.profileDurationInSec)
oprot.writeMessageBegin("getCpuProfile", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_aliveSince(self, seqid, iprot, oprot):
args = aliveSince_args()
args.read(iprot)
iprot.readMessageEnd()
result = aliveSince_result()
result.success = self._handler.aliveSince()
oprot.writeMessageBegin("aliveSince", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_reinitialize(self, seqid, iprot, oprot):
args = reinitialize_args()
args.read(iprot)
iprot.readMessageEnd()
self._handler.reinitialize()
return
def process_shutdown(self, seqid, iprot, oprot):
args = shutdown_args()
args.read(iprot)
iprot.readMessageEnd()
self._handler.shutdown()
return
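# Illustrative sketch (not part of the generated file): serving the Processor
# with Thrift's simple single-threaded server, backed by a handler such as
# the ExampleHandler sketched earlier. Port 9090 is a placeholder.
def example_server():
  from thrift.transport import TSocket
  from thrift.server import TServer
  processor = Processor(ExampleHandler())
  server = TServer.TSimpleServer(
      processor,
      TSocket.TServerSocket(port=9090),
      TTransport.TBufferedTransportFactory(),
      TBinaryProtocol.TBinaryProtocolFactory())
  server.serve()  # blocks, handling one connection at a time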
# HELPER FUNCTIONS AND STRUCTURES
class getName_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getName_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getName_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getName_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVersion_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVersion_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVersion_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVersion_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getStatus_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getStatus_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getStatus_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getStatus_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getStatusDetails_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getStatusDetails_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getStatusDetails_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getStatusDetails_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCounters_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCounters_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCounters_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING,None,TType.I64,None), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
for _i4 in xrange(_size0):
_key5 = iprot.readString();
_val6 = iprot.readI64();
self.success[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCounters_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.I64, len(self.success))
for kiter7,viter8 in self.success.items():
oprot.writeString(kiter7)
oprot.writeI64(viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCounter_args:
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCounter_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCounter_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCounter_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setOption_args:
"""
Attributes:
- key
- value
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
(2, TType.STRING, 'value', None, None, ), # 2
)
def __init__(self, key=None, value=None,):
self.key = key
self.value = value
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.value = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setOption_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
if self.value != None:
oprot.writeFieldBegin('value', TType.STRING, 2)
oprot.writeString(self.value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setOption_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setOption_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getOption_args:
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getOption_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getOption_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getOption_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getOptions_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getOptions_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getOptions_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING,None,TType.STRING,None), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype10, _vtype11, _size9 ) = iprot.readMapBegin()
for _i13 in xrange(_size9):
_key14 = iprot.readString();
_val15 = iprot.readString();
self.success[_key14] = _val15
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getOptions_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
for kiter16,viter17 in self.success.items():
oprot.writeString(kiter16)
oprot.writeString(viter17)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCpuProfile_args:
"""
Attributes:
- profileDurationInSec
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'profileDurationInSec', None, None, ), # 1
)
def __init__(self, profileDurationInSec=None,):
self.profileDurationInSec = profileDurationInSec
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.profileDurationInSec = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCpuProfile_args')
if self.profileDurationInSec != None:
oprot.writeFieldBegin('profileDurationInSec', TType.I32, 1)
oprot.writeI32(self.profileDurationInSec)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCpuProfile_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCpuProfile_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class aliveSince_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('aliveSince_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class aliveSince_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('aliveSince_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class reinitialize_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('reinitialize_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class shutdown_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('shutdown_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| apache-2.0 |
sun1991/lvsys | lvsys/env_lvsys/Lib/site-packages/jinja2/sandbox.py | 324 | 13327 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import types
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, PY2
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
if PY2:
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
else:
# On Python 3 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
UNSAFE_FUNCTION_ATTRIBUTES = set()
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_sequence_types += (UserList,)  # UserList is a sequence, not a set
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
rng = range(*args)
if len(rng) > MAX_RANGE:
raise OverflowError('range too big, maximum size for range is %d' %
MAX_RANGE)
return rng
def unsafe(f):
"""Marks a function or method as unsafe.
::
@unsafe
def delete(self):
pass
"""
f.unsafe_callable = True
return f
def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(str, "mro")
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, types.FunctionType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == 'mro':
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
return attr.startswith('__')
def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code a :exc:`SecurityError` is
raised. However, other exceptions may also occur during rendering, so
the caller has to ensure that all exceptions are caught.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table = {
'+': operator.pos,
'-': operator.neg
}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops = frozenset()
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
The following unary operators are interceptable: ``+`` and ``-``
Intercepted calls are always slower than the native operator call,
so make sure only to intercept the ones you are interested in.
.. versionadded:: 2.6
"""
return False
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self.globals['range'] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. By default all attributes
starting with an underscore are considered private, as are the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith('_') or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
considered safe unless the `unsafe_callable` attribute exists and is
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
return not (getattr(obj, 'unsafe_callable', False) or
getattr(obj, 'alters_data', False))
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context, operator, arg):
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
def getitem(self, obj, argument):
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
return self.undefined('access to attribute %r of %r '
'object is unsafe.' % (
attribute,
obj.__class__.__name__
), name=attribute, obj=obj, exc=SecurityError)
def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj, attr, value):
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
| mit |
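A short usage sketch of the sandbox defined above; the template strings are illustrative, and only names exported by this module are assumed.

from jinja2.sandbox import SandboxedEnvironment, ImmutableSandboxedEnvironment
from jinja2.exceptions import SecurityError

env = SandboxedEnvironment()
# Ordinary templating is unaffected by the sandbox.
print(env.from_string('{{ [3, 1, 2] | sort | join(", ") }}').render())  # 1, 2, 3

# safe_range is installed as the template-global `range` and caps its size.
try:
    env.from_string('{{ range(100001) | list }}').render()
except OverflowError as exc:
    print(exc)  # range too big, maximum size for range is 100000

# The immutable variant turns mutating attributes into unsafe undefineds,
# so calling them raises SecurityError.
imm = ImmutableSandboxedEnvironment()
try:
    imm.from_string('{{ items.append(4) }}').render(items=[1, 2, 3])
except SecurityError as exc:
    print(exc)  # access to attribute 'append' of 'list' object is unsafe.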
Juraci/tempest | tempest/common/compute.py | 8 | 5021 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_utils import excutils
from tempest_lib.common.utils import data_utils
from tempest.common import fixed_network
from tempest.common import waiters
from tempest import config
CONF = config.CONF
LOG = logging.getLogger(__name__)
def create_test_server(clients, validatable, validation_resources=None,
tenant_network=None, **kwargs):
"""Common wrapper utility returning a test server.
This method is a common wrapper returning a test server that can be
pingable or sshable.
:param clients: Client manager which provides OpenStack Tempest clients.
:param validatable: Whether the server will be pingable or sshable.
:param validation_resources: Resources created for the connection to the
server. Includes a keypair, a security group and an IP.
:returns: a tuple of the create-server response body and the list of
created servers
"""
# TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE
if 'name' in kwargs:
name = kwargs.pop('name')
else:
name = data_utils.rand_name(__name__ + "-instance")
flavor = kwargs.get('flavor', CONF.compute.flavor_ref)
image_id = kwargs.get('image_id', CONF.compute.image_ref)
kwargs = fixed_network.set_networks_kwarg(
tenant_network, kwargs) or {}
if CONF.validation.run_validation and validatable:
# As a first implementation, multiple pingable or sshable servers will
# not be supported
if 'min_count' in kwargs or 'max_count' in kwargs:
msg = ("Multiple pingable or sshable servers not supported at "
"this stage.")
raise ValueError(msg)
if 'security_groups' in kwargs:
kwargs['security_groups'].append(
{'name': validation_resources['security_group']['name']})
else:
try:
kwargs['security_groups'] = [
{'name': validation_resources['security_group']['name']}]
except KeyError:
LOG.debug("No security group provided.")
if 'key_name' not in kwargs:
try:
kwargs['key_name'] = validation_resources['keypair']['name']
except KeyError:
LOG.debug("No key provided.")
if CONF.validation.connect_method == 'floating':
if 'wait_until' not in kwargs:
kwargs['wait_until'] = 'ACTIVE'
body = clients.servers_client.create_server(name, image_id, flavor,
**kwargs)
# handle the case of multiple servers
servers = [body]
if 'min_count' in kwargs or 'max_count' in kwargs:
# Get servers created which name match with name param.
body_servers = clients.servers_client.list_servers()
servers = \
[s for s in body_servers['servers'] if s['name'].startswith(name)]
# The name of the method that associates a floating IP to a server is too
# long for PEP8 compliance, so:
assoc = clients.floating_ips_client.associate_floating_ip_to_server
if 'wait_until' in kwargs:
for server in servers:
try:
waiters.wait_for_server_status(
clients.servers_client, server['id'], kwargs['wait_until'])
# Multiple validatable servers are not supported for now. Their
# creation will fail with the condition above (l.58).
if CONF.validation.run_validation and validatable:
if CONF.validation.connect_method == 'floating':
assoc(floating_ip=validation_resources[
'floating_ip']['ip'],
server_id=servers[0]['id'])
except Exception:
with excutils.save_and_reraise_exception():
if ('preserve_server_on_error' not in kwargs
or kwargs['preserve_server_on_error'] is False):
for server in servers:
try:
clients.servers_client.delete_server(
server['id'])
except Exception:
LOG.exception('Deleting server %s failed'
% server['id'])
return body, servers
| apache-2.0 |
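A hedged sketch of a call site for create_test_server; the `clients` fixture and the helper name are assumptions, while the signature and keyword handling come from the module above.

from tempest.common import compute

def boot_smoke_server(clients):
    # `clients` is assumed to be a Tempest client manager supplied by a
    # test fixture; only the create_test_server signature is taken as given.
    body, servers = compute.create_test_server(
        clients,
        validatable=False,       # skip ping/ssh validation resources
        wait_until='ACTIVE',     # forwarded to waiters.wait_for_server_status
        name='smoke-test-server',
    )
    return servers[0]['id']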
Alwnikrotikz/stoqs | loaders/MarMenor/loadMarMenor_nov2011.py | 5 | 1210 | #!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2011'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Master loader for all MarMenor activities
Mike McCann
MBARI 22 April 2012
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE']='settings'
project_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../")) # settings.py is one dir up
from MarMenor import MarMenorLoader
try:
stride = int(sys.argv[1])
except IndexError:
stride = 100
try:
dbAlias = sys.argv[2]
except IndexError:
dbAlias = 'stoqs_marmenor_nov2011_s100'
# ----------------------------------------------------------------------------------
mml = MarMenorLoader(dbAlias, 'MarMenor - October 2011')
##mml.sparus_base='http://odss.mbari.org/thredds/dodsC/'
##mml.sparus_files='marmenor/insitu/UniversityOfGirona/'
##mml.sparus_parms=['
mml.castaway_base='http://odss.mbari.org/thredds/dodsC/'
mml.castaway_files=['agg/Castaway/20111110']
mml.castaway_parms=['temperature', 'salinity']
mml.stride = stride
mml.loadAll()
| gpl-3.0 |
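For illustration, the same load can be driven without command-line arguments; this sketch reuses only the MarMenorLoader attributes exercised above, with a hypothetical database alias.

# Hypothetical direct invocation, bypassing the sys.argv parsing above.
mml = MarMenorLoader('stoqs_marmenor_nov2011_s10', 'MarMenor - October 2011')
mml.castaway_base = 'http://odss.mbari.org/thredds/dodsC/'
mml.castaway_files = ['agg/Castaway/20111110']
mml.castaway_parms = ['temperature', 'salinity']
mml.stride = 10
mml.loadAll()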
rue89-tech/edx-platform | common/djangoapps/course_about/views.py | 28 | 2124 | """
Implementation of the RESTful endpoints for the Course About API.
"""
from rest_framework.throttling import UserRateThrottle
from rest_framework.views import APIView
from course_about import api
from rest_framework import status
from rest_framework.response import Response
from course_about.errors import CourseNotFoundError, CourseAboutError
class CourseAboutThrottle(UserRateThrottle):
"""Limit the number of requests users can make to the Course About API."""
# TODO Limit based on expected throughput # pylint: disable=fixme
rate = '50/second'
class CourseAboutView(APIView):
""" RESTful Course About API view.
Used to retrieve JSON serialized Course About information.
"""
authentication_classes = []
permission_classes = []
throttle_classes = (CourseAboutThrottle,)
def get(self, request, course_id=None): # pylint: disable=unused-argument
"""Read course information.
HTTP endpoint for the course info API.
Args:
course_id: URI element specifying the course location. Course information
will be returned for this particular course.
Returns:
A JSON serialized representation of the course information.
"""
try:
return Response(api.get_course_about_details(course_id))
except CourseNotFoundError:
return Response(
status=status.HTTP_404_NOT_FOUND,
data={
"message": (
u"An error occurred while retrieving course information"
u" for course '{course_id}' no course found"
).format(course_id=course_id)
}
)
except CourseAboutError:
return Response(
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
data={
"message": (
u"An error occurred while retrieving course information"
u" for course '{course_id}'"
).format(course_id=course_id)
}
)
| agpl-3.0 |
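A hypothetical URLconf entry for the view above; the route regex and names are assumptions, and only CourseAboutView comes from this module (Django 1.x-era style to match the codebase).

from django.conf.urls import url
from course_about.views import CourseAboutView

urlpatterns = [
    # The course_id pattern is an assumption; real edX course ids are richer.
    url(r'^course_about/(?P<course_id>[^/]+)/$',
        CourseAboutView.as_view(), name='course_about_detail'),
]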
sedden/django-basic-apps | basic/groups/views/topics.py | 10 | 3555 | from django.shortcuts import get_object_or_404
from basic.groups.decorators import *
from basic.groups.models import *
from basic.groups.forms import *
from basic.tools.shortcuts import render, redirect
def topic_list(request, slug, template_name='groups/topics/topic_list.html'):
"""
Returns a group topic list page.
Templates: ``groups/topics/topic_list.html``
Context:
group
Group object
topic_list
GroupTopic object list
"""
group = get_object_or_404(Group, slug=slug, is_active=True)
topic_list = GroupTopic.objects.filter(group=group, is_active=True)
return render(request, template_name, {
'group': group,
'topic_list': topic_list
})
@membership_required
def topic_create(request, slug, template_name='groups/topics/topic_form.html'):
"""
Returns a group topic form page.
Templates: ``groups/topics/topic_form.html``
Context:
form
GroupTopicForm object
"""
group = get_object_or_404(Group, slug=slug)
if request.method == 'POST':
form = GroupTopicForm(request.POST)
if form.is_valid():
topic = form.save(commit=False)
topic.user = request.user
topic.group = group
topic.save()
return redirect(request, topic)
else:
form = GroupTopicForm()
return render(request, template_name, {
'form': form,
'group': group
})
def topic_detail(request, slug, topic_id,
template_name='groups/topics/topic_detail.html'):
"""
Returns a group topic detail page.
Templates: ``groups/topics/topic_detail.html``
Context:
topic
GroupTopic object
group
Group object
"""
group = get_object_or_404(Group, slug=slug, is_active=True)
topic = get_object_or_404(GroupTopic, pk=topic_id, is_active=True)
message_form = GroupMessageForm()
return render(request, template_name, {
'group': group,
'topic': topic,
'message_form': message_form,
})
@membership_required
def topic_edit(request, slug, topic_id,
template_name='groups/topics/topic_form.html'):
"""
Returns a group topic form page.
Templates: ``groups/topics/topic_form.html``
Context:
form
GroupTopicForm object
topic
GroupTopic object
"""
group = get_object_or_404(Group, slug=slug)
topic = get_object_or_404(GroupTopic, pk=topic_id, group=group, user=request.user)
if request.method == 'POST':
form = GroupTopicForm(request.POST, instance=topic)
if form.is_valid():
form.save()
return redirect(request, topic)
else:
form = GroupTopicForm(instance=topic)
return render(request, template_name, {
'form': form,
'group': group,
'topic': topic
})
@membership_required
def topic_remove(request, slug, topic_id,
template_name='groups/topics/topic_remove_confirm.html'):
"""
Returns a group topic delete confirmation page.
Templates: ``groups/topics/topic_remove_confirm.html``
Context:
topic
GroupTopic object
"""
group = get_object_or_404(Group, slug=slug)
topic = get_object_or_404(GroupTopic, pk=topic_id, group=group, user=request.user)
if request.method == 'POST':
topic.is_active = False
topic.save()
return redirect(request, group)
return render(request, template_name, {'topic': topic})
| bsd-3-clause |
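A hypothetical urls.py wiring for the topic views above; the URL names and patterns are assumptions, while the view signatures (slug, topic_id) come from this module.

from django.conf.urls import url
from basic.groups.views import topics

urlpatterns = [
    url(r'^(?P<slug>[-\w]+)/topics/$', topics.topic_list, name='group_topic_list'),
    url(r'^(?P<slug>[-\w]+)/topics/create/$', topics.topic_create, name='group_topic_create'),
    url(r'^(?P<slug>[-\w]+)/topics/(?P<topic_id>\d+)/$', topics.topic_detail, name='group_topic_detail'),
    url(r'^(?P<slug>[-\w]+)/topics/(?P<topic_id>\d+)/edit/$', topics.topic_edit, name='group_topic_edit'),
    url(r'^(?P<slug>[-\w]+)/topics/(?P<topic_id>\d+)/remove/$', topics.topic_remove, name='group_topic_remove'),
]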