code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M)
---|---|---|---|---|---|
from django.contrib.auth.decorators import login_required, permission_required,\
user_passes_test
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from project.tramitacao.forms import FormTipoProcesso
from project.tramitacao.models import Tbtipoprocesso, AuthUser
from django.http.response import HttpResponseRedirect, HttpResponse
from django.contrib import messages
from project.tramitacao.admin import verificar_permissao_grupo
from project.tramitacao.relatorio_base import relatorio_csv_base, relatorio_ods_base,\
relatorio_ods_base_header, relatorio_pdf_base,\
relatorio_pdf_base_header_title, relatorio_pdf_base_header
from odslib import ODS
nome_relatorio = "relatorio_tipo_processo"
response_consulta = "/tramitacao/tipo_processo/consulta/"
titulo_relatorio = "Relatorio dos Tipos de Processos"
planilha_relatorio = "Tipos de Processos"
@permission_required('sicop.tipo_processo_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def consulta(request):
if request.method == "POST":
nome = request.POST['nome']
lista = Tbtipoprocesso.objects.all().filter( nome__icontains=nome, tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
else:
lista = Tbtipoprocesso.objects.all().filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
lista = lista.order_by( 'id' )
# store the query result in the session to prepare for the report/pdf
request.session['relatorio_tipo_processo'] = lista
return render_to_response('sicop/tipo_processo/consulta.html' ,{'lista':lista}, context_instance = RequestContext(request))
@permission_required('sicop.tipo_processo_cadastro', login_url='/excecoes/permissao_negada/', raise_exception=True)
def cadastro(request):
if request.method == "POST":
if validacao(request):
f_tipoprocesso = Tbtipoprocesso(
nome = request.POST['nome'],
tabela = request.POST['tabela'],
coridentificacao = request.POST['coridentificacao'],
tbdivisao = AuthUser.objects.get( pk = request.user.id ).tbdivisao
)
f_tipoprocesso.save()
return HttpResponseRedirect("/tramitacao/tipo_processo/consulta/")
return render_to_response('sicop/tipo_processo/cadastro.html', context_instance = RequestContext(request))
@permission_required('sicop.tipo_processo_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def edicao(request, id):
instance = get_object_or_404(Tbtipoprocesso, id=id)
if request.method == "POST":
if not request.user.has_perm('sicop.tipo_processo_edicao'):
return HttpResponseRedirect('/excecoes/permissao_negada/')
if validacao(request):
f_tipoprocesso = Tbtipoprocesso(
id = instance.id,
nome = request.POST['nome'],
tabela = request.POST['tabela'],
coridentificacao = request.POST['coridentificacao'],
tbdivisao = AuthUser.objects.get( pk = request.user.id ).tbdivisao
)
f_tipoprocesso.save()
return HttpResponseRedirect("/tramitacao/tipo_processo/edicao/"+str(id)+"/")
return render_to_response('sicop/tipo_processo/edicao.html', {"tipoprocesso":instance}, context_instance = RequestContext(request))
@permission_required('sicop.tipo_processo_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_pdf(request):
# build the lista object with the fields to show in the report/pdf
lista = request.session[nome_relatorio]
if lista:
response = HttpResponse(content_type='application/pdf')
doc = relatorio_pdf_base_header(response, nome_relatorio)
elements=[]
dados = relatorio_pdf_base_header_title(titulo_relatorio)
dados.append( ('NOME','') )
for obj in lista:
dados.append( ( obj.nome, '' ) )
return relatorio_pdf_base(response, doc, elements, dados)
else:
return HttpResponseRedirect(response_consulta)
@permission_required('sicop.tipo_processo_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_ods(request):
# build the lista object with the fields to show in the report/ods
lista = request.session[nome_relatorio]
if lista:
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, ods)
# subtitle
sheet.getCell(0, 1).setAlignHorizontal('center').stringValue( 'Nome' ).setFontSize('14pt')
sheet.getRow(1).setHeight('20pt')
# SECTION CUSTOMIZED FOR EACH QUERY
# DATA
x = 0
for obj in lista:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.nome)
x += 1
# SECTION CUSTOMIZED FOR EACH QUERY
relatorio_ods_base(ods, planilha_relatorio)
# generating response
response = HttpResponse(content_type=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
else:
return HttpResponseRedirect( response_consulta )
@permission_required('sicop.tipo_processo_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_csv(request):
# build the lista object with the fields to show in the report/csv
lista = request.session[nome_relatorio]
if lista:
response = HttpResponse(content_type='text/csv')
writer = relatorio_csv_base(response, nome_relatorio)
writer.writerow(['Nome'])
for obj in lista:
writer.writerow([obj.nome])
return response
else:
return HttpResponseRedirect( response_consulta )
def validacao(request_form):
warning = True
if request_form.POST['nome'] == '':
messages.add_message(request_form,messages.WARNING,'Informe o nome do tipo processo')
warning = False
if request_form.POST['tabela'] == '':
messages.add_message(request_form,messages.WARNING,'Informe a tabela do tipo processo')
warning = False
return warning
| waldenilson/TerraLegal | project/tramitacao/restrito/tipo_processo.py | Python | gpl-2.0 | 6,716 |
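The three report views above hand off all CSV/ODS/PDF formatting to helpers in project.tramitacao.relatorio_base, which is not included in this file. As a rough sketch only, a CSV helper consistent with the call sites above (the real relatorio_csv_base may differ) could simply mark the response as an attachment and return a csv writer:

import csv

def relatorio_csv_base(response, nome_relatorio):
    # Assumed behaviour, inferred from the calls above: flag the Django
    # HttpResponse as a downloadable CSV and hand back a writer that the
    # view then uses for its own header and data rows.
    response['Content-Disposition'] = 'attachment; filename=%s.csv' % nome_relatorio
    return csv.writer(response)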
# -*- coding: utf-8 -*-
"""Tests for mac notes plugin."""
from __future__ import unicode_literals
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import mac_notes
from tests.parsers.sqlite_plugins import test_lib
class MacNotesTest(test_lib.SQLitePluginTestCase):
"""Tests for mac notes database plugin."""
def testProcess(self):
"""Test the Process function on a Mac Notes file."""
plugin_object = mac_notes.MacNotesPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['NotesV7.storedata'], plugin_object)
self.assertEqual(storage_writer.number_of_events, 6)
self.assertEqual(storage_writer.number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the first note.
event = events[0]
self.CheckTimestamp(event.timestamp, '2014-02-11 02:38:27.097813')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_title = 'building 4th brandy gibs'
self.assertEqual(event_data.title, expected_title)
expected_text = (
'building 4th brandy gibs microsoft office body soul and peace '
'example.com 3015555555: plumbing and heating claim#123456 Small '
'business ')
self.assertEqual(event_data.text, expected_text)
expected_short_message = 'title:{0:s}'.format(expected_title)
expected_message = 'title:{0:s} note_text:{1:s}'.format(
expected_title, expected_text)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| rgayon/plaso | tests/parsers/sqlite_plugins/mac_notes.py | Python | apache-2.0 | 1,683 |
from PyQt5 import QtCore, QtWidgets
import chigger
import peacock
from peacock.ExodusViewer.plugins.ExodusPlugin import ExodusPlugin
from MeshBlockSelectorWidget import MeshBlockSelectorWidget
class BlockHighlighterPlugin(peacock.base.PeacockCollapsibleWidget, ExodusPlugin):
"""
Widget for controlling the visible blocks/nodesets/sidesets of the mesh.
Mirrored off of peacock.ExodusViewer.plugins.BlockPlugin
"""
#: pyqtSignal: Emitted when window needs to change
windowRequiresUpdate = QtCore.pyqtSignal()
highlight = QtCore.pyqtSignal(object, object, object)
def __init__(self, collapsible_layout=QtWidgets.QHBoxLayout, **kwargs):
peacock.base.PeacockCollapsibleWidget.__init__(self, collapsible_layout=collapsible_layout)
ExodusPlugin.__init__(self, **kwargs)
self.setTitle('Highlight')
self.setEnabled(False)
self.MainLayout = self.collapsibleLayout()
# Block, nodeset, and sideset selector widgets
self.BlockSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.BLOCK, 'Blocks:')
self.SidesetSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.SIDESET, 'Boundaries:')
self.NodesetSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.NODESET, 'Nodesets:')
self.MainLayout.addWidget(self.BlockSelector)
self.MainLayout.addWidget(self.SidesetSelector)
self.MainLayout.addWidget(self.NodesetSelector)
self.BlockSelector.selectionChanged.connect(self.setBlock)
self.SidesetSelector.selectionChanged.connect(self.setSideset)
self.NodesetSelector.selectionChanged.connect(self.setNodeset)
self.setup()
def onWindowCreated(self, *args):
"""
Initializes the selector widgets for the supplied reader/results.
"""
super(BlockHighlighterPlugin, self).onWindowCreated(*args)
self.BlockSelector.updateBlocks(self._reader, True)
self.SidesetSelector.updateBlocks(self._reader, True)
self.NodesetSelector.updateBlocks(self._reader, True)
self.__updateVariableState()
def onWindowUpdated(self):
"""
Update boundary/nodeset visibility when window is updated.
"""
if self._reader:
self.blockSignals(True)
self.BlockSelector.updateBlocks(self._reader)
self.SidesetSelector.updateBlocks(self._reader)
self.NodesetSelector.updateBlocks(self._reader)
self.blockSignals(False)
self.__updateVariableState()
def setBlock(self):
"""
Highlights a block and resets nodesets/sidesets
"""
block = self.BlockSelector.getBlocks()
self.SidesetSelector.reset()
self.NodesetSelector.reset()
self.highlight.emit(block, None, None)
def setSideset(self):
"""
Highlights a sideset and resets nodesets/blocks
"""
sideset = self.SidesetSelector.getBlocks()
self.BlockSelector.reset()
self.NodesetSelector.reset()
self.highlight.emit(None, sideset, None)
def setNodeset(self):
"""
Highlights a nodeset and resets sidesets/blocks
"""
nodeset = self.NodesetSelector.getBlocks()
self.BlockSelector.reset()
self.SidesetSelector.reset()
self.highlight.emit(None, None, nodeset)
def __updateVariableState(self):
"""
Enable/disable the nodeset/sideset selection based on variable type.
"""
varinfo = self._result[0].getCurrentVariableInformation()
if varinfo:
if varinfo.object_type == chigger.exodus.ExodusReader.ELEMENTAL:
self.SidesetSelector.setEnabled(False)
self.NodesetSelector.setEnabled(False)
else:
self.SidesetSelector.setEnabled(True)
self.NodesetSelector.setEnabled(True)
| yipenggao/moose | python/peacock/Input/BlockHighlighterPlugin.py | Python | lgpl-2.1 | 3,928 |
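Each of the setBlock/setSideset/setNodeset slots above resets the other two selectors and emits the highlight signal with exactly one non-None argument. A hypothetical consumer holding an already-constructed plugin instance (the handler name below is illustrative, not part of Peacock) could observe those selections like this:

def on_highlight(block, sideset, nodeset):
    # Exactly one argument is non-None, matching whichever selector the
    # user changed last; the other two selectors were just reset.
    print(block, sideset, nodeset)

# 'plugin' is assumed to be an existing BlockHighlighterPlugin instance
plugin.highlight.connect(on_highlight)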
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import resource_options
from keystone.common.resource_options import options as ro_opt
ROLE_OPTIONS_REGISTRY = resource_options.ResourceOptionRegistry('ROLE')
# NOTE(morgan): wrap this in a function for testing purposes.
# This is called on import by design.
def register_role_options():
for opt in [
ro_opt.IMMUTABLE_OPT,
]:
ROLE_OPTIONS_REGISTRY.register_option(opt)
register_role_options()
| mahak/keystone | keystone/assignment/role_backends/resource_options.py | Python | apache-2.0 | 990 |
""" Peripheral On Demand global define
"""
import periphondemand
#global
POD_CONFIG="~/.podrc"
POD_PATH = periphondemand.__path__[0]
PLATFORMPATH = "/platforms"
BUSPATH = "/busses/"
TEMPLATESPATH = "/templates"
TOOLCHAINPATH = "/toolchains"
SIMULATIONPATH = "/simulation"
SYNTHESISPATH = "/synthesis"
DRIVERSPATH = "/drivers"
# extension
TCLEXT = ".tcl"
ARCHIVEEXT = ".zip"
XMLEXT = ".xml"
VHDLEXT = ".vhd"
UCFEXT = ".ucf"
BITSTREAMEXT = ".bit"
PODSCRIPTEXT = ".pod"
HDLEXT = ["vhdl","vhd","v"]
#for components
LIBRARYPATH = "/library"
COMPONENTSPATH = "/components"
HDLDIR = "hdl"
DRIVERS_TEMPLATES_PATH = "/drivers_templates"
# for project
BINARYPROJECTPATH = "/binaries"
OBJSPATH = "/objs"
BINARY_PREFIX = "top_"
BINARY_SUFFIX = ".bit"
# template
HEADERTPL = "/headervhdl.tpl"
# color (see VT100 console manual for more details)
COLOR_DEBUG="\033[32;7m" # White on green
COLOR_ERROR="\033[31;7m" # white on red
COLOR_ERROR_MESSAGE="\033[31;1m" # red on white
COLOR_WARNING="\033[32;7m" # white on green
COLOR_WARNING_MESSAGE="\033[32;1m" # green on white
COLOR_INFO="\033[34;7m" # white on blue
COLOR_INFO_MESSAGE="\033[34;1m" # blue on white
COLOR_SHELL="\033[33;3m" # yellow, italic
COLOR_END="\033[0m"
| magyarm/periphondemand-code | src/bin/define.py | Python | lgpl-2.1 | 1,265 |
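The COLOR_* constants above are raw VT100/ANSI escape sequences, so wrapping any text between one of them and COLOR_END renders it with that attribute on a capable terminal. A small usage sketch, assuming the module is importable as 'define' (not part of the original file):

from define import COLOR_ERROR_MESSAGE, COLOR_END

print(COLOR_ERROR_MESSAGE + "synthesis failed" + COLOR_END)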
from django.shortcuts import render_to_response
from django.template import RequestContext
def index(request):
return render_to_response('index.html', {}, context_instance=RequestContext(request))
| fabioz/django-tornado-websockets-openshift | mysite/views.py | Python | mit | 202 |
import os
import time
from collections import Counter
from re import findall
from unittest import skip
from cassandra import ConsistencyLevel
from ccmlib.common import is_win
from ccmlib.node import Node
from nose.plugins.attrib import attr
from assertions import assert_almost_equal, assert_one
from dtest import Tester, debug
from tools import insert_c1c2, known_failure, since
class TestIncRepair(Tester):
def __init__(self, *args, **kwargs):
kwargs['cluster_options'] = {'start_rpc': 'true'}
# Ignore these log patterns:
self.ignore_log_patterns = [
r'Can\'t send migration request: node.*is down',
]
Tester.__init__(self, *args, **kwargs)
def sstable_marking_test(self):
"""
* Launch a three node cluster
* Stop node3
* Write 10K rows with stress
* Start node3
* Issue an incremental repair, and wait for it to finish
* Run sstablemetadata on every node, assert that all sstables are marked as repaired
"""
cluster = self.cluster
# hinted handoff can create SSTables that we don't need after node3 is restarted
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
node3.stop(gently=True)
node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])
node1.flush()
node2.flush()
node3.start(wait_other_notice=True)
if node3.get_cassandra_version() < '2.2':
log_file = 'system.log'
else:
log_file = 'debug.log'
node3.watch_log_for("Initializing keyspace1.standard1", filename=log_file)
# wait for things to settle before starting repair
time.sleep(1)
if cluster.version() >= "2.2":
node3.repair()
else:
node3.nodetool("repair -par -inc")
with open('sstables.txt', 'w') as f:
node1.run_sstablemetadata(output_file=f, keyspace='keyspace1')
node2.run_sstablemetadata(output_file=f, keyspace='keyspace1')
node3.run_sstablemetadata(output_file=f, keyspace='keyspace1')
with open("sstables.txt", 'r') as r:
output = r.read().replace('\n', '')
self.assertNotIn('Repaired at: 0', output)
os.remove('sstables.txt')
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11268',
flaky=True,
notes='windows')
def multiple_repair_test(self):
"""
* Launch a three node cluster
* Create a keyspace with RF 3 and a table
* Insert 49 rows
* Stop node3
* Insert 50 more rows
* Restart node3
* Issue an incremental repair on node3
* Stop node2
* Insert a final 50 rows
* Restart node2
* Issue an incremental repair on node2
* Replace node3 with a new node
* Verify data integrity
# TODO: Several more verifications of data need to be interspersed throughout the test. The final assertion is insufficient.
@jira_ticket CASSANDRA-10644
"""
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
session = self.patient_cql_connection(node1)
self.create_ks(session, 'ks', 3)
self.create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
debug("insert data")
insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)
node1.flush()
debug("bringing down node 3")
node3.flush()
node3.stop(gently=False)
debug("inserting additional data into node 1 and 2")
insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)
node1.flush()
node2.flush()
debug("restarting and repairing node 3")
node3.start(wait_for_binary_proto=True)
if cluster.version() >= "2.2":
node3.repair()
else:
node3.nodetool("repair -par -inc")
# wait for stream handlers to be closed on Windows
# after session is finished (See CASSANDRA-10644)
if is_win:
time.sleep(2)
debug("stopping node 2")
node2.stop(gently=False)
debug("inserting data in nodes 1 and 3")
insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)
node1.flush()
node3.flush()
debug("start and repair node 2")
node2.start(wait_for_binary_proto=True)
if cluster.version() >= "2.2":
node2.repair()
else:
node2.nodetool("repair -par -inc")
debug("replace node and check data integrity")
node3.stop(gently=False)
node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
cluster.add(node5, False)
node5.start(replace_address='127.0.0.3', wait_other_notice=True)
assert_one(session, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12006',
flaky=True)
def sstable_repairedset_test(self):
"""
* Launch a two node cluster
* Insert data with stress
* Stop node2
* Run sstablerepairedset against node2
* Start node2
* Run sstablemetadata on both nodes, pipe to a file
* Verify the output of sstablemetadata shows no repairs have occurred
* Stop node1
* Insert more data with stress
* Start node1
* Issue an incremental repair
* Run sstablemetadata on both nodes again, pipe to a new file
* Verify repairs occurred and repairedAt was updated
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])
node1.flush()
node2.flush()
node2.stop(gently=False)
node2.run_sstablerepairedset(keyspace='keyspace1')
node2.start(wait_for_binary_proto=True)
with open('initial.txt', 'w') as f:
node2.run_sstablemetadata(output_file=f, keyspace='keyspace1')
node1.run_sstablemetadata(output_file=f, keyspace='keyspace1')
with open('initial.txt', 'r') as r:
initialoutput = r.read()
matches = findall('(?<=Repaired at:).*', initialoutput)
debug("Repair timestamps are: {}".format(matches))
uniquematches = set(matches)
matchcount = Counter(matches)
self.assertGreaterEqual(len(uniquematches), 2, uniquematches)
self.assertGreaterEqual(max(matchcount.values()), 1, matchcount)
self.assertIn('Repaired at: 0', initialoutput)
node1.stop()
node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])
node2.flush()
node1.start(wait_for_binary_proto=True)
if cluster.version() >= "2.2":
node1.repair()
else:
node1.nodetool("repair -par -inc")
with open('final.txt', 'w') as h:
node1.run_sstablemetadata(output_file=h, keyspace='keyspace1')
node2.run_sstablemetadata(output_file=h, keyspace='keyspace1')
with open('final.txt', 'r') as r:
finaloutput = r.read()
matches = findall('(?<=Repaired at:).*', finaloutput)
debug(matches)
uniquematches = set(matches)
matchcount = Counter(matches)
self.assertGreaterEqual(len(uniquematches), 2)
self.assertGreaterEqual(max(matchcount.values()), 2)
self.assertNotIn('Repaired at: 0', finaloutput)
os.remove('initial.txt')
os.remove('final.txt')
def compaction_test(self):
"""
Test we can major compact after an incremental repair
* Launch a three node cluster
* Create a keyspace with RF 3 and a table
* Stop node3
* Insert 100 rows
* Restart node3
* Issue an incremental repair
* Insert 50 more rows
* Perform a major compaction on node3
* Verify all data is present
# TODO: I have no idea what this is testing. The assertions do not verify anything meaningful.
# TODO: Fix all the string formatting
"""
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
session = self.patient_cql_connection(node1)
self.create_ks(session, 'ks', 3)
session.execute("create table tab(key int PRIMARY KEY, val int);")
node3.stop()
for x in range(0, 100):
session.execute("insert into tab(key,val) values(" + str(x) + ",0)")
node1.flush()
node3.start(wait_for_binary_proto=True)
if cluster.version() >= "2.2":
node3.repair()
else:
node3.nodetool("repair -par -inc")
for x in range(0, 150):
session.execute("insert into tab(key,val) values(" + str(x) + ",1)")
cluster.flush()
node3.nodetool('compact')
for x in range(0, 150):
assert_one(session, "select val from tab where key =" + str(x), [1])
@since("2.2")
def multiple_full_repairs_lcs_test(self):
"""
@jira_ticket CASSANDRA-11172 - repeated full repairs should not cause infinite loop in getNextBackgroundTask
"""
cluster = self.cluster
cluster.populate(2).start(wait_for_binary_proto=True)
node1, node2 = cluster.nodelist()
for x in xrange(0, 10):
node1.stress(['write', 'n=100k', 'no-warmup', '-rate', 'threads=10', '-schema', 'compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=10)', 'replication(factor=2)'])
cluster.flush()
cluster.wait_for_compactions()
node1.nodetool("repair -full keyspace1 standard1")
@attr('long')
@skip('hangs CI')
def multiple_subsequent_repair_test(self):
"""
@jira_ticket CASSANDRA-8366
There is an issue with subsequent inc repairs increasing load size.
So we perform several repairs and check that the expected amount of data exists.
* Launch a three node cluster
* Write 5M rows with stress
* Wait for minor compactions to finish
* Issue an incremental repair on each node, sequentially
* Issue major compactions on each node
* Sleep for a while so load size can be propagated between nodes
* Verify the correct amount of data is on each node
"""
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
debug("Inserting data with stress")
node1.stress(['write', 'n=5M', 'no-warmup', '-rate', 'threads=10', '-schema', 'replication(factor=3)'])
debug("Flushing nodes")
cluster.flush()
debug("Waiting compactions to finish")
cluster.wait_for_compactions()
if self.cluster.version() >= '2.2':
debug("Repairing node1")
node1.nodetool("repair")
debug("Repairing node2")
node2.nodetool("repair")
debug("Repairing node3")
node3.nodetool("repair")
else:
debug("Repairing node1")
node1.nodetool("repair -par -inc")
debug("Repairing node2")
node2.nodetool("repair -par -inc")
debug("Repairing node3")
node3.nodetool("repair -par -inc")
# Using "print" instead of debug() here is on purpose. The compactions
# take a long time and don't print anything by default, which can result
# in the test being timed out after 20 minutes. These print statements
# prevent it from being timed out.
print "compacting node1"
node1.compact()
print "compacting node2"
node2.compact()
print "compacting node3"
node3.compact()
# wait some time to be sure the load size is propagated between nodes
debug("Waiting for load size info to be propagated between nodes")
time.sleep(45)
load_size_in_kb = float(sum(map(lambda n: n.data_size(), [node1, node2, node3])))
load_size = load_size_in_kb / 1024 / 1024
debug("Total Load size: {}GB".format(load_size))
# There is still some overhead, but it's a lot better. We tolerate 25%.
expected_load_size = 4.5 # In GB
assert_almost_equal(load_size, expected_load_size, error=0.25)
def sstable_marking_test_not_intersecting_all_ranges(self):
"""
@jira_ticket CASSANDRA-10299
* Launch a four node cluster
* Insert data with stress
* Issue an incremental repair on each node sequentially
* Assert no extra, unrepaired sstables are generated
"""
cluster = self.cluster
cluster.populate(4).start(wait_for_binary_proto=True)
node1, node2, node3, node4 = cluster.nodelist()
debug("Inserting data with stress")
node1.stress(['write', 'n=3', 'no-warmup', '-rate', 'threads=1', '-schema', 'replication(factor=3)'])
debug("Flushing nodes")
cluster.flush()
repair_options = '' if self.cluster.version() >= '2.2' else '-inc -par'
debug("Repairing node 1")
node1.nodetool("repair {}".format(repair_options))
debug("Repairing node 2")
node2.nodetool("repair {}".format(repair_options))
debug("Repairing node 3")
node3.nodetool("repair {}".format(repair_options))
debug("Repairing node 4")
node4.nodetool("repair {}".format(repair_options))
with open("final.txt", "w") as h:
node1.run_sstablemetadata(output_file=h, keyspace='keyspace1')
node2.run_sstablemetadata(output_file=h, keyspace='keyspace1')
node3.run_sstablemetadata(output_file=h, keyspace='keyspace1')
node4.run_sstablemetadata(output_file=h, keyspace='keyspace1')
with open("final.txt", "r") as r:
output = r.read()
self.assertNotIn('Repaired at: 0', output)
os.remove('final.txt')
| thobbs/cassandra-dtest | repair_tests/incremental_repair_test.py | Python | apache-2.0 | 14,693 |
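The closing assertion of multiple_subsequent_repair_test allows the measured load to drift 25% around the expected 4.5 GB. Assuming assert_almost_equal interprets error as a relative tolerance (the helper itself is not shown here), the accepted band works out as follows:

expected_load_size = 4.5                  # GB, as set in the test above
error = 0.25                              # relative tolerance passed to assert_almost_equal
low = expected_load_size * (1 - error)    # 3.375 GB
high = expected_load_size * (1 + error)   # 5.625 GB
# a measured load_size passes when low <= load_size <= high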
#!/usr/bin/env python3
import sys
import math
import json
import gps_to_mileage
from PIL import Image, ImageDraw, ImageOps
import plate_c as aar_plate
import class_i as fra_class
WIDTH=500
HEIGHT=700
LIDAR_X = WIDTH / 2
LIDAR_Y = .8 * HEIGHT
LIDAR_RADIUS = .4 * WIDTH
SCALE=10
DIRECTION=-1 # -1=backwards, 1=forwards
ANGLE_OFFSET = 0
TOTAL_SLOPE = 0
TOTAL_SLOPE_COUNT = 0
MIN_SPEED = 0.5
GHOST = 3
ERROR=1
ERROR=0.954316968
def to_mph(speed):
return speed * 2.23694 # convert to mph
def cvt_point(point):
return (int(LIDAR_X - DIRECTION*(point[0] / SCALE)),
int(LIDAR_Y - (point[1] / SCALE)))
# average 113.665, 241.342
#LOW_RANGE = range(120, 110, -1)
#HIGH_RANGE = range(240, 250)
# Front Mount
#LOW_RANGE = range(115, 111, -1)
#HIGH_RANGE = range(239, 243)
# Rear Mount
#LOW_RANGE = range(123, 119, -1)
#HIGH_RANGE = range(238, 242)
LOW_CENTER = 119
HIGH_CENTER = 241
DELTA = 2
LOW_RANGE = range(LOW_CENTER + DELTA, LOW_CENTER - DELTA, -1)
HIGH_RANGE = range(HIGH_CENTER - DELTA, HIGH_CENTER + DELTA)
def calc_gauge(data):
min_dist_left = min_dist_right = 999999
min_dist_left_i = min_dist_right_i = -1
for i in LOW_RANGE:
if data[i][0] <= min_dist_left and data[i][0] > 0:
min_dist_left = data[i][0]
min_dist_left_i = i
for i in HIGH_RANGE:
if data[i][0] <= min_dist_right and data[i][0] > 0:
min_dist_right = data[i][0]
min_dist_right_i = i
if min_dist_right_i == -1 or min_dist_left_i == -1:
return (0, 0, (0, 0), (0, 0))
p1 = (data[min_dist_left_i][1], data[min_dist_left_i][2])
p2 = (data[min_dist_right_i][1], data[min_dist_right_i][2])
x = p1[0] - p2[0]
y = p1[1] - p2[1]
z = (1/ERROR) * math.sqrt(x*x + y*y) / 25.4 # Convert to inches
slope = math.degrees(math.atan(y / x))
#if 56 < z < 57:
# print("A %d %d" % (min_dist_left_i, min_dist_right_i))
return (z, slope, p1,p2)
def score_points(point, box):
score = 0
(px,py) = point
(min_x, min_y, max_x, max_y) = box
if min_x < px and px < max_x and min_y < py and py < max_y:
score = 1
else:
score = 0
return score
def plot(data, timestamp, latitude, longitude, mileage, speed, slice):
global TOTAL_SLOPE, TOTAL_SLOPE_COUNT
if latitude != 0 and longitude != 0:
labels = True
else:
labels = False
image = Image.new("RGB", (WIDTH, HEIGHT), "white")
draw = ImageDraw.Draw(image)
draw.ellipse((int(LIDAR_X-LIDAR_RADIUS),int(LIDAR_Y-LIDAR_RADIUS),int(LIDAR_X+LIDAR_RADIUS),int(LIDAR_Y+LIDAR_RADIUS)), outline=(0,0,255))
draw.line((0,int(LIDAR_Y),WIDTH,int(LIDAR_Y)),fill=(0,0,255))
draw.line((int(LIDAR_X),0,int(LIDAR_X),HEIGHT),fill=(0,0,255))
draw.text((int(LIDAR_X),int(LIDAR_Y-LIDAR_RADIUS)), "0", fill=(0,0,255))
draw.text((int(WIDTH-20),int(LIDAR_Y)), "90", fill=(0,0,255))
draw.text((0,int(LIDAR_Y)), "270", fill=(0,0,255))
draw.text((int(LIDAR_X),int(HEIGHT-10)), "180", fill=(0,0,255))
# Clearance Plate
offset = 30
width = int(aar_plate.width / (2 * SCALE))
height = int(aar_plate.height / SCALE)
x_margin = int(width * 0.05)
y_margin = int(height * 0.05)
min_x = LIDAR_X-width
max_x = LIDAR_X+width
max_y = LIDAR_Y+offset
min_y = LIDAR_Y+offset-height
min_x_margin = min_x + x_margin
max_x_margin = max_x - x_margin
min_y_margin = min_y + y_margin
max_y_margin = max_y - y_margin
mid_y = (min_y + max_y) / 2
# TC Outline
width = int(1545.4 / (2 * SCALE))
height = int(1676.4 / SCALE)
min_tc_x = LIDAR_X-width
max_tc_x = LIDAR_X+width
max_tc_y = LIDAR_Y+offset
min_tc_y = LIDAR_Y+offset-height
# Draw Data
new_data = [(0,0,0)] * len(data)
plate_error = gauge_error = False
score = 0
for angle in range(0,len(data)):
adjusted_angle = math.radians(angle+ANGLE_OFFSET)
x = data[angle] * math.sin(adjusted_angle)
y = data[angle] * math.cos(adjusted_angle)
new_data[angle] = (data[angle], x, y)
if data[angle] < 200:
continue
(px, py) = cvt_point((x, y))
this_score = score_points((px,py), (min_x, min_y, max_x, max_y))
this_score += score_points((px,py), (min_x_margin, min_y_margin, max_x_margin, max_y_margin))
this_score += score_points((px,py), (min_x_margin, min_y_margin, max_x_margin, mid_y))
score += this_score
if this_score > 0:
pc = (255, 0, 0)
plate_error = True
draw.line((px-5,py-5,px+5,py+5), fill=pc)
draw.line((px+5,py-5,px-5,py+5), fill=pc)
elif (px <= min_x or px >= max_x) and py < max_y:
pc = (0, 255, 0)
draw.ellipse((px-5,py-5,px+5,py+5), outline=pc)
else:
pc = (0, 0, 0)
draw.point((px, py), fill=pc)
# Trackcar
plate_c=(128,128,128)
draw.line((min_tc_x,max_tc_y,max_tc_x,max_tc_y), fill=plate_c)
draw.line((min_tc_x,min_tc_y,max_tc_x,min_tc_y), fill=plate_c)
draw.line((min_tc_x,max_tc_y,min_tc_x,min_tc_y), fill=plate_c)
draw.line((max_tc_x,max_tc_y,max_tc_x,min_tc_y), fill=plate_c)
draw.text((min_tc_x+5,min_tc_y+5), "Trackcar", fill=plate_c)
# Draw the clearance box
if plate_error:
plate_c = (255,0,0)
draw.text((min_x+5,min_y+15), "Score = %d" % score, fill=plate_c)
else:
plate_c = (0,255,0)
draw.line((min_x,max_y,max_x,max_y), fill=plate_c)
draw.line((min_x,min_y,max_x,min_y), fill=plate_c)
draw.line((min_x,max_y,min_x,min_y), fill=plate_c)
draw.line((max_x,max_y,max_x,min_y), fill=plate_c)
draw.text((min_x+5,min_y+5), aar_plate.full_name, fill=plate_c)
# Calculate Gage
gauge,slope,p1,p2 = calc_gauge(new_data)
TOTAL_SLOPE += slope
TOTAL_SLOPE_COUNT += 1
p1 = cvt_point(p1)
p2 = cvt_point(p2)
if fra_class.min_gauge <= gauge <= fra_class.max_gauge:
gauge_c = (0,0,0)
if labels:
draw.text((5,55), "GAGE: %0.2f in" % gauge, fill=gauge_c)
#elif gauge < fra_class.min_gauge-1 or gauge > fra_class.max_gauge+1:
# gauge_c = (0,0,0)
# draw.text((5,55), "GAGE: *ERROR*", fill=(255,0,0))
elif gauge == 0:
gauge_c = (0,0,0)
if labels:
draw.text((5,55), "GAGE: *ERROR*", fill=(255,0,0))
else:
gauge_c = (255,0,0)
gauge_error = True
if labels:
draw.line((p1,p2), fill=gauge_c)
draw.text((5,55), "GAGE: %0.2f in" % gauge, fill=gauge_c)
if labels:
draw.text((5,5), "UTC: %s" % timestamp, fill=(0,0,0))
draw.text((5,15), "LAT: %0.6f" % latitude, fill=(0,0,0))
draw.text((5,25), "LONG: %0.6f" % longitude, fill=(0,0,0))
draw.text((5,35), "MILEAGE: %0.2f" % mileage, fill=(0,0,0))
if speed < 10:
draw.text((5,45), "SPEED: %0.1f mph" % speed, fill=(0,0,0))
else:
draw.text((5,45), "SPEED: %d mph" % int(speed), fill=(0,0,0))
if OUTPUT:
image.save("slices/slice_%08d.png" % slice)
return {'gauge_error': gauge_error, 'plate_error': plate_error, 'gauge': gauge, 'plate_score': score}
def clearance(filename):
data = [99999]*360
print(data)
latitude = longitude = mileage = speed = 0
with open(filename, "r") as f:
for line in f:
if line[0] == "#" or line[-2] != "*":
continue
fields = line.split(" ")
if fields[1] == "LIDAR":
lidar = json.loads(" ".join(fields[2:-1]))
timestamp = lidar['time']
for angle, distance in lidar['scan']:
i = round(float(angle)) % 360
if distance > 1000 and speed > .5:
data[i] = min(data[i], float(distance))
elif fields[1] == "L":
timestamp, datatype, scan_data = line.split(" ", 2)
scan_data = eval(scan_data.replace('*', ''))
for angle, distance in scan_data:
i = round(float(angle)) % 360
if distance > 1000 and speed > .5:
data[i] = min(data[i], float(distance))
elif fields[1] == "TPV":
obj = json.loads(" ".join(fields[2:-1]))
if 'speed' in obj:
speed = to_mph(obj['speed'])
print(speed)
report = plot(data, timestamp, latitude, longitude, mileage, speed, 0)
print(report)
def main(filename, known):
G = gps_to_mileage.Gps2Miles(known)
#G.sanity_check(update=True)
last_lat = last_lon = speed = latitude = longitude = mileage = 0
slice = 0
data = [0] * 360
ghost = [0] * 360
with open(filename.split('.')[0]+".kml","w") as k:
k.write('<?xml version="1.0" encoding="UTF-8"?>\n')
k.write('<kml xmlns="http://www.opengis.net/kml/2.2">\n')
k.write('<Document>\n')
with open(filename, "r") as f:
count = 0
for line in f:
if line[0] == "#":
continue
if line[-2] != "*":
continue
fields = line.split(" ")
if fields[1] == "A":
pass
elif fields[1] == "TPV":
obj = json.loads(" ".join(fields[2:-1]))
try:
latitude = obj['lat']
except KeyError:
pass
try:
longitude = obj['lon']
except KeyError:
pass
try:
altitude = obj['alt']
except KeyError:
pass
try:
speed = to_mph(obj['speed'])
except KeyError:
pass
mileage, certainty = G.find_mileage(latitude, longitude)
elif fields[1] == "G":
fields = line.split(' ')
try:
latitude = float(fields[2])
except ValueError:
pass
try:
longitude = float(fields[3])
except ValueError:
pass
try:
altitude = float(fields[4])
except ValueError:
pass
try:
speed = to_mph(float(fields[8]))
except ValueError:
pass
mileage, certainty = G.find_mileage(latitude, longitude)
elif (fields[1] == "L" or fields[1] == "LIDAR") and speed >= MIN_SPEED:
#print(line)
if fields[1] == "LIDAR":
lidar = json.loads(" ".join(fields[2:-1]))
timestamp = lidar['time']
scan_data = lidar['scan']
else:
timestamp, datatype, scan_data = line.split(" ", 2)
scan_data = eval(scan_data.replace('*', ''))
data = [0]*360
for i in range(0,360):
ghost[i] -= 1
if ghost[i] == 0:
data[i] = 0
for angle, distance in scan_data:
i = round(float(angle)) % 360
data[i] = float(distance)
ghost[i] = GHOST
report = plot(data, timestamp, latitude, longitude, mileage, speed, slice)
if report['gauge_error'] or report['plate_error']:
count += 1
if OUTPUT:
if last_lat != latitude or last_lon != longitude:
print("%d %0.6f %0.6f %0.6f %0.2f %d" % (slice, latitude, longitude, mileage, report['gauge'], report['plate_score']))
k.write('<Placemark>\n')
k.write('<name>Point %d</name>\n' % slice)
k.write('<description>\n')
k.write('Mileage = %0.2f\n' % mileage)
if report['gauge_error']:
k.write('Gage = %0.2f in\n' % report['gauge'])
if report['plate_error']:
k.write('Plate F obstruction')
k.write('</description>\n')
k.write('<Point>\n')
k.write('<coordinates>%0.6f,%0.6f,%0.6f</coordinates>\n' % (longitude,latitude,altitude))
k.write('</Point>\n')
k.write('</Placemark>\n')
last_lat = latitude
last_lon = longitude
else:
count = 0
slice += 1
else:
ghost = [0] * 360
k.write('</Document>\n')
k.write('</kml>\n')
#OUTPUT=True
#clearance(sys.argv[1])
#sys.exit(0)
try:
datafile = sys.argv[1]
known = sys.argv[2]
except IndexError:
print("USAGE: %s data.csv know.csv" % sys.argv[0])
sys.exit(1)
if ANGLE_OFFSET == 0:
print("Calculating Angle Offset...")
OUTPUT = False
main(datafile, known)
try:
ANGLE_OFFSET = TOTAL_SLOPE/TOTAL_SLOPE_COUNT
except ZeroDivisionError:
ANGLE_OFFSET = 0
print("Generating Images...")
OUTPUT = True
TOTAL_SLOPE = TOTAL_SLOPE_COUNT = 0
main(datafile, known)
print("Angle Offset = %0.2f" % ANGLE_OFFSET)
| cpn18/track-chart | desktop/archive/plot_lidar.py | Python | gpl-3.0 | 13,916 |
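calc_gauge boils down to the Euclidean distance between the nearest lidar returns on the left and right rails, corrected by the empirical ERROR factor and converted from millimetres to inches by the /25.4 division. A quick sanity check of those numbers with the calibration factor left out (standard track gauge is 1435 mm, roughly 56.5 in; the points below are made up for illustration):

import math

p1 = (-717.5, 0.0)   # hypothetical left-rail return, millimetres
p2 = (717.5, 0.0)    # hypothetical right-rail return, millimetres
dx, dy = p1[0] - p2[0], p1[1] - p2[1]
gauge_in = math.sqrt(dx * dx + dy * dy) / 25.4
print("%.2f in" % gauge_in)   # ~56.50, inside the FRA limits checked in plot()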
# repowidget.py - TortoiseHg repository widget
#
# Copyright (C) 2007-2010 Logilab. All rights reserved.
# Copyright (C) 2010 Adrian Buehlmann <adrian@cadifra.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
import binascii
import os
import shlex, subprocess # used by runCustomCommand
import cStringIO
from mercurial import error, patch, phases, util, ui
from tortoisehg.util import hglib, shlib, paths
from tortoisehg.util.i18n import _
from tortoisehg.hgqt import infobar, qtlib, repomodel
from tortoisehg.hgqt.qtlib import QuestionMsgBox, InfoMsgBox, WarningMsgBox
from tortoisehg.hgqt.qtlib import DemandWidget
from tortoisehg.hgqt import cmdcore, cmdui, update, tag, backout, merge, visdiff
from tortoisehg.hgqt import archive, thgimport, thgstrip, purge, bookmark
from tortoisehg.hgqt import bisect, rebase, resolve, compress, mq
from tortoisehg.hgqt import prune, settings, shelve
from tortoisehg.hgqt import matching, graft, hgemail, postreview, revdetails
from tortoisehg.hgqt import sign
from tortoisehg.hgqt.repofilter import RepoFilterBar
from tortoisehg.hgqt.repoview import HgRepoView
from tortoisehg.hgqt.commit import CommitWidget
from tortoisehg.hgqt.sync import SyncWidget
from tortoisehg.hgqt.grep import SearchWidget
from tortoisehg.hgqt.pbranch import PatchBranchWidget
from tortoisehg.hgqt.docklog import ConsoleWidget
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class RepoWidget(QWidget):
currentTaskTabChanged = pyqtSignal()
showMessageSignal = pyqtSignal(str)
toolbarVisibilityChanged = pyqtSignal(bool)
# TODO: progress can be removed if all actions are run as hg command
progress = pyqtSignal(str, object, str, str, object)
makeLogVisible = pyqtSignal(bool)
revisionSelected = pyqtSignal(object)
titleChanged = pyqtSignal(str)
"""Emitted when changed the expected title for the RepoWidget tab"""
busyIconChanged = pyqtSignal()
repoLinkClicked = pyqtSignal(str)
"""Emitted when clicked a link to open repository"""
def __init__(self, repoagent, parent=None, bundle=None):
QWidget.__init__(self, parent, acceptDrops=True)
self._repoagent = repoagent
self.bundlesource = None # source URL of incoming bundle [unicode]
self.outgoingMode = False
self._busyIconNames = []
self._namedTabs = {}
self.destroyed.connect(self.repo.thginvalidate)
self.currentMessage = ''
self.setupUi()
self.createActions()
self.loadSettings()
self._initModel()
if bundle:
self.setBundle(bundle)
self._dialogs = qtlib.DialogKeeper(
lambda self, dlgmeth, *args: dlgmeth(self, *args), parent=self)
# listen to change notification after initial settings are loaded
repoagent.repositoryChanged.connect(self.repositoryChanged)
repoagent.configChanged.connect(self.configChanged)
QTimer.singleShot(0, self._initView)
def setupUi(self):
self.repotabs_splitter = QSplitter(orientation=Qt.Vertical)
self.setLayout(QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
self.layout().setSpacing(0)
# placeholder to shift repoview while infobar is overlaid
self._repoviewFrame = infobar.InfoBarPlaceholder(self._repoagent, self)
self._repoviewFrame.linkActivated.connect(self._openLink)
self.filterbar = RepoFilterBar(self._repoagent, self)
self.layout().addWidget(self.filterbar)
self.filterbar.branchChanged.connect(self.setBranch)
self.filterbar.showHiddenChanged.connect(self.setShowHidden)
self.filterbar.showGraftSourceChanged.connect(self.setShowGraftSource)
self.filterbar.setRevisionSet.connect(self.setRevisionSet)
self.filterbar.filterToggled.connect(self.filterToggled)
self.filterbar.visibilityChanged.connect(self.toolbarVisibilityChanged)
self.filterbar.hide()
self.layout().addWidget(self.repotabs_splitter)
cs = ('workbench', _('Workbench Log Columns'))
self.repoview = view = HgRepoView(self._repoagent, 'repoWidget', cs,
self)
view.clicked.connect(self._clearInfoMessage)
view.revisionSelected.connect(self.onRevisionSelected)
view.revisionActivated.connect(self.onRevisionActivated)
view.showMessage.connect(self.showMessage)
view.menuRequested.connect(self.viewMenuRequest)
self._repoviewFrame.setView(view)
self.repotabs_splitter.addWidget(self._repoviewFrame)
self.repotabs_splitter.setCollapsible(0, True)
self.repotabs_splitter.setStretchFactor(0, 1)
self.taskTabsWidget = tt = QTabWidget()
self.repotabs_splitter.addWidget(self.taskTabsWidget)
self.repotabs_splitter.setStretchFactor(1, 1)
tt.setDocumentMode(True)
self.updateTaskTabs()
tt.currentChanged.connect(self.currentTaskTabChanged)
w = revdetails.RevDetailsWidget(self._repoagent, self)
self.revDetailsWidget = w
self.revDetailsWidget.filelisttbar.setStyleSheet(qtlib.tbstylesheet)
w.linkActivated.connect(self._openLink)
w.revisionSelected.connect(self.repoview.goto)
w.grepRequested.connect(self.grep)
w.showMessage.connect(self.showMessage)
w.revsetFilterRequested.connect(self.setFilter)
w.runCustomCommandRequested.connect(
self.handleRunCustomCommandRequest)
idx = tt.addTab(w, qtlib.geticon('hg-log'), '')
self._namedTabs['log'] = idx
tt.setTabToolTip(idx, _("Revision details", "tab tooltip"))
self.commitDemand = w = DemandWidget('createCommitWidget', self)
idx = tt.addTab(w, qtlib.geticon('hg-commit'), '')
self._namedTabs['commit'] = idx
tt.setTabToolTip(idx, _("Commit", "tab tooltip"))
self.grepDemand = w = DemandWidget('createGrepWidget', self)
idx = tt.addTab(w, qtlib.geticon('hg-grep'), '')
self._namedTabs['grep'] = idx
tt.setTabToolTip(idx, _("Search", "tab tooltip"))
w = ConsoleWidget(self._repoagent, self)
self.consoleWidget = w
w.closeRequested.connect(self.switchToPreferredTaskTab)
idx = tt.addTab(w, qtlib.geticon('thg-console'), '')
self._namedTabs['console'] = idx
tt.setTabToolTip(idx, _("Console log", "tab tooltip"))
self.syncDemand = w = DemandWidget('createSyncWidget', self)
idx = tt.addTab(w, qtlib.geticon('thg-sync'), '')
self._namedTabs['sync'] = idx
tt.setTabToolTip(idx, _("Synchronize", "tab tooltip"))
if 'pbranch' in self.repo.extensions():
self.pbranchDemand = w = DemandWidget('createPatchBranchWidget', self)
idx = tt.addTab(w, qtlib.geticon('hg-branch'), '')
tt.setTabToolTip(idx, _("Patch Branch", "tab tooltip"))
self._namedTabs['pbranch'] = idx
@pyqtSlot()
def _initView(self):
self._updateRepoViewForModel()
# restore column widths when model is initially loaded. For some
# reason, this needs to be deferred after updating the view. Otherwise
# repoview.HgRepoView.resizeEvent() fires as the vertical scrollbar is
# added, which causes the last column to grow by the scrollbar width on
# each restart (and steal from the description width).
QTimer.singleShot(0, self.repoview.resizeColumns)
# select the widget chosen by the user
name = self.repo.ui.config('tortoisehg', 'defaultwidget')
if name:
name = {'revdetails': 'log', 'search': 'grep'}.get(name, name)
self.taskTabsWidget.setCurrentIndex(self._namedTabs.get(name, 0))
def currentTaskTabName(self):
indexmap = dict((idx, name)
for name, idx in self._namedTabs.iteritems())
return indexmap.get(self.taskTabsWidget.currentIndex())
@pyqtSlot(str)
def switchToNamedTaskTab(self, tabname):
tabname = str(tabname)
if tabname in self._namedTabs:
idx = self._namedTabs[tabname]
# refresh status even if current widget is already a 'commit'
if (tabname == 'commit'
and self.taskTabsWidget.currentIndex() == idx):
self._refreshCommitTabIfNeeded()
self.taskTabsWidget.setCurrentIndex(idx)
# restore default splitter position if task tab is invisible
if self.repotabs_splitter.sizes()[1] == 0:
self.repotabs_splitter.setSizes([1, 1])
@property
def repo(self):
return self._repoagent.rawRepo()
def repoRootPath(self):
return self._repoagent.rootPath()
def repoDisplayName(self):
return self._repoagent.displayName()
def title(self):
"""Returns the expected title for this widget [unicode]"""
name = self._repoagent.shortName()
if self._repoagent.overlayUrl():
return _('%s <incoming>') % name
elif self.repomodel.branch():
return u'%s [%s]' % (name, self.repomodel.branch())
else:
return name
def busyIcon(self):
if self._busyIconNames:
return qtlib.geticon(self._busyIconNames[-1])
else:
return QIcon()
def filterBar(self):
return self.filterbar
def filterBarVisible(self):
return self.filterbar.isVisible()
@pyqtSlot(bool)
def toggleFilterBar(self, checked):
"""Toggle display repowidget filter bar"""
if self.filterbar.isVisibleTo(self) == checked:
return
self.filterbar.setVisible(checked)
if checked:
self.filterbar.setFocus()
def _openRepoLink(self, upath):
path = hglib.fromunicode(upath)
if not os.path.isabs(path):
path = self.repo.wjoin(path)
self.repoLinkClicked.emit(hglib.tounicode(path))
@pyqtSlot(str)
def _openLink(self, link):
link = unicode(link)
handlers = {'cset': self.goto,
'log': lambda a: self.makeLogVisible.emit(True),
'repo': self._openRepoLink,
'shelve' : self.shelve}
if ':' in link:
scheme, param = link.split(':', 1)
hdr = handlers.get(scheme)
if hdr:
return hdr(param)
if os.path.isabs(link):
qtlib.openlocalurl(link)
else:
QDesktopServices.openUrl(QUrl(link))
def setInfoBar(self, cls, *args, **kwargs):
return self._repoviewFrame.setInfoBar(cls, *args, **kwargs)
def clearInfoBar(self, priority=None):
return self._repoviewFrame.clearInfoBar(priority)
def createCommitWidget(self):
pats, opts = {}, {}
cw = CommitWidget(self._repoagent, pats, opts, self, rev=self.rev)
cw.buttonHBox.addWidget(cw.commitSetupButton())
cw.loadSettings(QSettings(), 'workbench')
cw.progress.connect(self.progress)
cw.linkActivated.connect(self._openLink)
cw.showMessage.connect(self.showMessage)
cw.grepRequested.connect(self.grep)
cw.runCustomCommandRequested.connect(
self.handleRunCustomCommandRequest)
QTimer.singleShot(0, self._initCommitWidgetLate)
return cw
@pyqtSlot()
def _initCommitWidgetLate(self):
cw = self.commitDemand.get()
cw.reload()
# auto-refresh should be enabled after initial reload(); otherwise
# refreshWctx() can be doubled
self.taskTabsWidget.currentChanged.connect(
self._refreshCommitTabIfNeeded)
def createSyncWidget(self):
sw = SyncWidget(self._repoagent, self)
sw.newCommand.connect(self._handleNewSyncCommand)
sw.outgoingNodes.connect(self.setOutgoingNodes)
sw.showMessage.connect(self.showMessage)
sw.showMessage.connect(self._repoviewFrame.showMessage)
sw.incomingBundle.connect(self.setBundle)
sw.pullCompleted.connect(self.onPullCompleted)
sw.pushCompleted.connect(self.clearRevisionSet)
sw.refreshTargets(self.rev)
sw.switchToRequest.connect(self.switchToNamedTaskTab)
return sw
@pyqtSlot(cmdcore.CmdSession)
def _handleNewSyncCommand(self, sess):
self._handleNewCommand(sess)
if sess.isFinished():
return
sess.commandFinished.connect(self._onSyncCommandFinished)
self._setBusyIcon('thg-sync')
@pyqtSlot()
def _onSyncCommandFinished(self):
self._clearBusyIcon('thg-sync')
def _setBusyIcon(self, iconname):
self._busyIconNames.append(iconname)
self.busyIconChanged.emit()
def _clearBusyIcon(self, iconname):
if iconname in self._busyIconNames:
self._busyIconNames.remove(iconname)
self.busyIconChanged.emit()
@pyqtSlot(str)
def setFilter(self, filter):
self.filterbar.setQuery(filter)
self.filterbar.setVisible(True)
self.filterbar.runQuery()
@pyqtSlot(str, str)
def setBundle(self, bfile, bsource=None):
if self._repoagent.overlayUrl():
self.clearBundle()
self.bundlesource = bsource and unicode(bsource) or None
oldlen = len(self.repo)
# no "bundle:<bfile>" because bfile may contain "+" separator
self._repoagent.setOverlay(bfile)
self.filterbar.setQuery('bundle()')
self.filterbar.runQuery()
self.titleChanged.emit(self.title())
newlen = len(self.repo)
w = self.setInfoBar(infobar.ConfirmInfoBar,
_('Found %d incoming changesets') % (newlen - oldlen))
assert w
w.acceptButton.setText(_('Pull'))
w.acceptButton.setToolTip(_('Pull incoming changesets into '
'your repository'))
w.rejectButton.setText(_('Cancel'))
w.rejectButton.setToolTip(_('Reject incoming changesets'))
w.accepted.connect(self.acceptBundle)
w.rejected.connect(self.clearBundle)
@pyqtSlot()
def clearBundle(self):
self.clearRevisionSet()
self.bundlesource = None
self._repoagent.clearOverlay()
self.titleChanged.emit(self.title())
@pyqtSlot()
def onPullCompleted(self):
if self._repoagent.overlayUrl():
self.clearBundle()
@pyqtSlot()
def acceptBundle(self):
bundle = self._repoagent.overlayUrl()
if bundle:
w = self.syncDemand.get()
w.pullBundle(bundle, None, self.bundlesource)
@pyqtSlot()
def pullBundleToRev(self):
bundle = self._repoagent.overlayUrl()
if bundle:
# manually remove infobar to work around unwanted clearBundle
# during pull operation (issue #2596)
self._repoviewFrame.discardInfoBar()
w = self.syncDemand.get()
w.pullBundle(bundle, self.repo[self.rev].hex(), self.bundlesource)
@pyqtSlot()
def clearRevisionSet(self):
self.filterbar.setQuery('')
self.setRevisionSet('')
def setRevisionSet(self, revspec):
self.repomodel.setRevset(revspec)
if not revspec:
self.outgoingMode = False
@pyqtSlot(bool)
def filterToggled(self, checked):
self.repomodel.setFilterByRevset(checked)
def setOutgoingNodes(self, nodes):
self.filterbar.setQuery('outgoing()')
revs = [self.repo[n].rev() for n in nodes]
self.setRevisionSet(hglib.compactrevs(revs))
self.outgoingMode = True
numnodes = len(nodes)
numoutgoing = numnodes
if self.syncDemand.get().isTargetSelected():
# Outgoing preview is already filtered by target selection
defaultpush = None
else:
# Read the tortoisehg.defaultpush setting to determine what to push
# by default, and set the button label and action accordingly
defaultpush = self.repo.ui.config('tortoisehg', 'defaultpush',
'all')
rev = None
branch = None
pushall = False
# note that we assume that none of the revisions
# on the nodes/revs lists is secret
if defaultpush == 'branch':
branch = self.repo['.'].branch()
ubranch = hglib.tounicode(branch)
# Get the list of revs that will be actually pushed
outgoingrevs = self.repo.revs('%ld and branch(.)', revs)
numoutgoing = len(outgoingrevs)
elif defaultpush == 'revision':
rev = self.repo['.'].rev()
# Get the list of revs that will be actually pushed
# excluding (potentially) the current rev
outgoingrevs = self.repo.revs('%ld and ::.', revs)
numoutgoing = len(outgoingrevs)
maxrev = rev
if numoutgoing > 0:
maxrev = max(outgoingrevs)
else:
pushall = True
# Set the default acceptbuttontext
# Note that the pushall case uses the default accept button text
if branch is not None:
acceptbuttontext = _('Push current branch (%s)') % ubranch
elif rev is not None:
if maxrev == rev:
acceptbuttontext = _('Push up to current revision (#%d)') % rev
else:
acceptbuttontext = _('Push up to revision #%d') % maxrev
else:
acceptbuttontext = _('Push all')
if numnodes == 0:
msg = _('no outgoing changesets')
elif numoutgoing == 0:
if branch:
msg = _('no outgoing changesets in current branch (%s) '
'/ %d in total') % (ubranch, numnodes)
elif rev is not None:
if maxrev == rev:
msg = _('no outgoing changesets up to current revision '
'(#%d) / %d in total') % (rev, numnodes)
else:
msg = _('no outgoing changesets up to revision #%d '
'/ %d in total') % (maxrev, numnodes)
elif numoutgoing == numnodes:
# This case includes 'Push all' among others
msg = _('%d outgoing changesets') % numoutgoing
elif branch:
msg = _('%d outgoing changesets in current branch (%s) '
'/ %d in total') % (numoutgoing, ubranch, numnodes)
elif rev:
if maxrev == rev:
msg = _('%d outgoing changesets up to current revision (#%d) '
'/ %d in total') % (numoutgoing, rev, numnodes)
else:
msg = _('%d outgoing changesets up to revision #%d '
'/ %d in total') % (numoutgoing, maxrev, numnodes)
else:
# This should never happen but we leave this else clause
# in case there is a flaw in the logic above (e.g. due to
# a future change in the code)
msg = _('%d outgoing changesets') % numoutgoing
w = self.setInfoBar(infobar.ConfirmInfoBar, msg.strip())
assert w
if numoutgoing == 0:
acceptbuttontext = _('Nothing to push')
w.acceptButton.setEnabled(False)
w.acceptButton.setText(acceptbuttontext)
w.accepted.connect(lambda: self.push(False,
rev=rev, branch=branch, pushall=pushall)) # TODO: to the same URL
w.rejected.connect(self.clearRevisionSet)
def createGrepWidget(self):
upats = {}
gw = SearchWidget(self._repoagent, upats, self)
gw.setRevision(self.repoview.current_rev)
gw.showMessage.connect(self.showMessage)
gw.progress.connect(self.progress)
gw.revisionSelected.connect(self.goto)
return gw
def createPatchBranchWidget(self):
pbw = PatchBranchWidget(self._repoagent, parent=self)
return pbw
@property
def rev(self):
"""Returns the current active revision"""
return self.repoview.current_rev
def showMessage(self, msg):
self.currentMessage = msg
if self.isVisible():
self.showMessageSignal.emit(msg)
def keyPressEvent(self, event):
if self._repoviewFrame.activeInfoBar() and event.key() == Qt.Key_Escape:
self.clearInfoBar(infobar.INFO)
else:
QWidget.keyPressEvent(self, event)
def showEvent(self, event):
QWidget.showEvent(self, event)
self.showMessageSignal.emit(self.currentMessage)
if not event.spontaneous():
# RepoWidget must be the main widget in any window, so grab focus
# when it gets visible at start-up or by switching tabs.
self.repoview.setFocus()
def createActions(self):
self._mqActions = None
if 'mq' in self.repo.extensions():
self._mqActions = mq.PatchQueueActions(self)
self._mqActions.setRepoAgent(self._repoagent)
self.generateUnappliedPatchMenu()
self.generateSingleMenu()
self.generatePairMenu()
self.generateMultipleSelectionMenu()
self.generateBundleMenu()
self.generateOutgoingMenu()
def detectPatches(self, paths):
filepaths = []
for p in paths:
if not os.path.isfile(p):
continue
try:
pf = open(p, 'rb')
earlybytes = pf.read(4096)
if '\0' in earlybytes:
continue
pf.seek(0)
data = patch.extract(self.repo.ui, pf)
filename = data.get('filename')
if filename:
filepaths.append(p)
os.unlink(filename)
except EnvironmentError:
pass
return filepaths
def dragEnterEvent(self, event):
paths = [unicode(u.toLocalFile()) for u in event.mimeData().urls()]
if self.detectPatches(paths):
event.setDropAction(Qt.CopyAction)
event.accept()
def dropEvent(self, event):
paths = [unicode(u.toLocalFile()) for u in event.mimeData().urls()]
patches = self.detectPatches(paths)
if not patches:
return
event.setDropAction(Qt.CopyAction)
event.accept()
self.thgimport(patches)
## Begin Workbench event forwards
def back(self):
self.repoview.back()
def forward(self):
self.repoview.forward()
def bisect(self):
self._dialogs.open(RepoWidget._createBisectDialog)
def _createBisectDialog(self):
dlg = bisect.BisectDialog(self._repoagent, self)
dlg.newCandidate.connect(self.gotoParent)
return dlg
def resolve(self):
dlg = resolve.ResolveDialog(self._repoagent, self)
dlg.exec_()
def thgimport(self, paths=None):
dlg = thgimport.ImportDialog(self._repoagent, self)
if paths:
dlg.setfilepaths(paths)
if dlg.exec_() == 0:
self.gotoTip()
def unbundle(self):
w = self.syncDemand.get()
w.unbundle()
def shelve(self, arg=None):
self._dialogs.open(RepoWidget._createShelveDialog)
def _createShelveDialog(self):
dlg = shelve.ShelveDialog(self._repoagent)
dlg.finished.connect(self._refreshCommitTabIfNeeded)
return dlg
def verify(self):
cmdline = ['verify', '--verbose']
dlg = cmdui.CmdSessionDialog(self)
dlg.setWindowIcon(qtlib.geticon('hg-verify'))
dlg.setWindowTitle(_('%s - verify repository') % self.repoDisplayName())
dlg.setWindowFlags(dlg.windowFlags() | Qt.WindowMaximizeButtonHint)
dlg.setSession(self._repoagent.runCommand(cmdline, self))
dlg.exec_()
def recover(self):
cmdline = ['recover', '--verbose']
dlg = cmdui.CmdSessionDialog(self)
dlg.setWindowIcon(qtlib.geticon('hg-recover'))
dlg.setWindowTitle(_('%s - recover repository')
% self.repoDisplayName())
dlg.setWindowFlags(dlg.windowFlags() | Qt.WindowMaximizeButtonHint)
dlg.setSession(self._repoagent.runCommand(cmdline, self))
dlg.exec_()
def rollback(self):
desc, oldlen = hglib.readundodesc(self.repo)
if not desc:
InfoMsgBox(_('No transaction available'),
_('There is no rollback transaction available'))
return
elif desc == 'commit':
if not QuestionMsgBox(_('Undo last commit?'),
_('Undo most recent commit (%d), preserving file changes?') %
oldlen):
return
else:
if not QuestionMsgBox(_('Undo last transaction?'),
_('Rollback to revision %d (undo %s)?') %
(oldlen - 1, desc)):
return
try:
rev = self.repo['.'].rev()
except error.LookupError, e:
InfoMsgBox(_('Repository Error'),
_('Unable to determine working copy revision\n') +
hglib.tounicode(e))
return
if rev >= oldlen and not QuestionMsgBox(
_('Remove current working revision?'),
_('Your current working revision (%d) will be removed '
'by this rollback, leaving uncommitted changes.\n '
'Continue?') % rev):
return
cmdline = ['rollback', '--verbose']
sess = self._runCommand(cmdline)
sess.commandFinished.connect(self._notifyWorkingDirChanges)
def purge(self):
dlg = purge.PurgeDialog(self._repoagent, self)
dlg.setWindowFlags(Qt.Sheet)
dlg.setWindowModality(Qt.WindowModal)
dlg.showMessage.connect(self.showMessage)
dlg.progress.connect(self.progress)
dlg.exec_()
# ignores result code of PurgeDialog because it's unreliable
self._refreshCommitTabIfNeeded()
## End workbench event forwards
@pyqtSlot(str, dict)
def grep(self, pattern='', opts={}):
"""Open grep task tab"""
opts = dict((str(k), str(v)) for k, v in opts.iteritems())
self.taskTabsWidget.setCurrentIndex(self._namedTabs['grep'])
self.grepDemand.setSearch(pattern, **opts)
self.grepDemand.runSearch()
def _initModel(self):
self.repomodel = repomodel.HgRepoListModel(self._repoagent, self)
self.repomodel.setBranch(self.filterbar.branch(),
self.filterbar.branchAncestorsIncluded())
self.repomodel.setFilterByRevset(self.filterbar.filtercb.isChecked())
self.repomodel.setShowGraftSource(self.filterbar.getShowGraftSource())
self.repomodel.showMessage.connect(self.showMessage)
self.repomodel.showMessage.connect(self._repoviewFrame.showMessage)
self.repoview.setModel(self.repomodel)
self.repomodel.revsUpdated.connect(self._updateRepoViewForModel)
@pyqtSlot()
def _updateRepoViewForModel(self):
model = self.repoview.model()
selmodel = self.repoview.selectionModel()
index = selmodel.currentIndex()
if not (index.flags() & Qt.ItemIsEnabled):
index = model.defaultIndex()
f = QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows
selmodel.setCurrentIndex(index, f)
self.repoview.scrollTo(index)
self.repoview.enablefilterpalette(bool(model.revset()))
self.clearInfoBar(infobar.INFO) # clear progress message
@pyqtSlot()
def _clearInfoMessage(self):
self.clearInfoBar(infobar.INFO)
@pyqtSlot()
def switchToPreferredTaskTab(self):
tw = self.taskTabsWidget
rev = self.rev
ctx = self.repo.changectx(rev)
if rev is None or ('mq' in self.repo.extensions() and 'qtip' in ctx.tags()
and self.repo['.'].rev() == rev):
# Clicking on working copy or on the topmost applied patch
# (_if_ it is also the working copy parent) switches to the commit tab
tw.setCurrentIndex(self._namedTabs['commit'])
else:
# Clicking on a normal revision switches from commit tab
tw.setCurrentIndex(self._namedTabs['log'])
def onRevisionSelected(self, rev):
'View selection changed, could be a reload'
self.showMessage('')
try:
self.revDetailsWidget.onRevisionSelected(rev)
self.revisionSelected.emit(rev)
if type(rev) != str:
# Regular patch or working directory
self.grepDemand.forward('setRevision', rev)
self.syncDemand.forward('refreshTargets', rev)
self.commitDemand.forward('setRev', rev)
except (IndexError, error.RevlogError, error.Abort), e:
self.showMessage(hglib.tounicode(str(e)))
cw = self.taskTabsWidget.currentWidget()
if cw.canswitch():
self.switchToPreferredTaskTab()
@pyqtSlot()
def gotoParent(self):
self.goto('.')
def gotoTip(self):
self.repoview.clearSelection()
self.goto('tip')
def goto(self, rev):
self.repoview.goto(rev)
def onRevisionActivated(self, rev):
qgoto = False
if isinstance(rev, basestring):
qgoto = True
else:
ctx = self.repo.changectx(rev)
if 'qparent' in ctx.tags() or ctx.thgmqappliedpatch():
qgoto = True
if 'qtip' in ctx.tags():
qgoto = False
if qgoto:
self.qgotoSelectedRevision()
else:
self.visualDiffRevision()
def reload(self, invalidate=True):
'Initiate a refresh of the repo model, rebuild graph'
try:
if invalidate:
self.repo.thginvalidate()
self.rebuildGraph()
self.reloadTaskTab()
except EnvironmentError, e:
self.showMessage(hglib.tounicode(str(e)))
def rebuildGraph(self):
'Called by repositoryChanged signals, and during reload'
self.showMessage('')
self.filterbar.refresh()
self.repoview.saveSettings()
def reloadTaskTab(self):
w = self.taskTabsWidget.currentWidget()
w.reload()
@pyqtSlot()
def repositoryChanged(self):
'Repository has detected a changelog / dirstate change'
try:
self.rebuildGraph()
except (error.RevlogError, error.RepoError), e:
self.showMessage(hglib.tounicode(str(e)))
@pyqtSlot()
def configChanged(self):
'Repository is reporting its config files have changed'
self.revDetailsWidget.reload()
self.titleChanged.emit(self.title())
self.updateTaskTabs()
def updateTaskTabs(self):
val = self.repo.ui.config('tortoisehg', 'tasktabs', 'off').lower()
if val == 'east':
self.taskTabsWidget.setTabPosition(QTabWidget.East)
self.taskTabsWidget.tabBar().show()
elif val == 'west':
self.taskTabsWidget.setTabPosition(QTabWidget.West)
self.taskTabsWidget.tabBar().show()
else:
self.taskTabsWidget.tabBar().hide()
@pyqtSlot(str, bool)
def setBranch(self, branch, allparents):
self.repomodel.setBranch(branch, allparents=allparents)
self.titleChanged.emit(self.title())
@pyqtSlot(bool)
def setShowHidden(self, showhidden):
self._repoagent.setHiddenRevsIncluded(showhidden)
@pyqtSlot(bool)
def setShowGraftSource(self, showgraftsource):
self.repomodel.setShowGraftSource(showgraftsource)
##
## Workbench methods
##
def canGoBack(self):
return self.repoview.canGoBack()
def canGoForward(self):
return self.repoview.canGoForward()
def loadSettings(self):
s = QSettings()
repoid = hglib.shortrepoid(self.repo)
self.revDetailsWidget.loadSettings(s)
self.filterbar.loadSettings(s)
self._repoagent.setHiddenRevsIncluded(self.filterbar.getShowHidden())
self.repotabs_splitter.restoreState(
s.value('repowidget/splitter-'+repoid).toByteArray())
def okToContinue(self):
if self._repoagent.isBusy():
r = QMessageBox.question(self, _('Confirm Exit'),
_('Mercurial command is still running.\n'
'Are you sure you want to terminate?'),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if r == QMessageBox.Yes:
self._repoagent.abortCommands()
return False
for i in xrange(self.taskTabsWidget.count()):
w = self.taskTabsWidget.widget(i)
if w.canExit():
continue
self.taskTabsWidget.setCurrentWidget(w)
self.showMessage(_('Tab cannot exit'))
return False
return True
def closeRepoWidget(self):
'''returns False if close should be aborted'''
if not self.okToContinue():
return False
s = QSettings()
if self.isVisible():
try:
repoid = hglib.shortrepoid(self.repo)
s.setValue('repowidget/splitter-'+repoid,
self.repotabs_splitter.saveState())
except EnvironmentError:
pass
self.revDetailsWidget.saveSettings(s)
self.commitDemand.forward('saveSettings', s, 'workbench')
self.grepDemand.forward('saveSettings', s)
self.filterbar.saveSettings(s)
self.repoview.saveSettings(s)
return True
def setSyncUrl(self, url):
"""Change the current peer-repo url of the sync widget; url may be
a symbolic name defined in [paths] section"""
self.syncDemand.get().setUrl(url)
def incoming(self):
self.syncDemand.get().incoming()
def pull(self):
self.syncDemand.get().pull()
def outgoing(self):
self.syncDemand.get().outgoing()
def push(self, confirm=None, **kwargs):
"""Call sync push.
If confirm is False, the user will not be prompted for
confirmation. If confirm is True, the prompt might be used.
"""
self.syncDemand.get().push(confirm, **kwargs)
self.outgoingMode = False
def syncBookmark(self):
self.syncDemand.get().syncBookmark()
##
## Repoview context menu
##
def viewMenuRequest(self, point, selection):
'User requested a context menu in repo view widget'
# selection is a list of the currently selected revisions.
# Integers for changelog revisions, None for the working copy,
# or strings for unapplied patches.
if len(selection) == 0:
return
self.menuselection = selection
if self._repoagent.overlayUrl():
if len(selection) == 1:
self.bundlemenu.exec_(point)
return
if self.outgoingMode:
if len(selection) == 1:
self.outgoingcmenu.exec_(point)
return
allunapp = False
if 'mq' in self.repo.extensions():
for rev in selection:
if not self.repo.changectx(rev).thgmqunappliedpatch():
break
else:
allunapp = True
if allunapp:
self.unappliedPatchMenu(point, selection)
elif len(selection) == 1:
self.singleSelectionMenu(point, selection)
elif len(selection) == 2:
self.doubleSelectionMenu(point, selection)
else:
self.multipleSelectionMenu(point, selection)
def singleSelectionMenu(self, point, selection):
ctx = self.repo.changectx(self.rev)
applied = ctx.thgmqappliedpatch()
working = self.rev is None
tags = ctx.tags()
for item in self.singlecmenuitems:
enabled = item.enableFunc(applied, working, tags)
item.setEnabled(enabled)
self.singlecmenu.exec_(point)
def doubleSelectionMenu(self, point, selection):
for r in selection:
# No pair menu if working directory or unapplied patch
if type(r) is not int:
return
self.paircmenu.exec_(point)
def multipleSelectionMenu(self, point, selection):
for r in selection:
# No multi menu if working directory or unapplied patch
if type(r) is not int:
return
self.multicmenu.exec_(point)
def unappliedPatchMenu(self, point, selection):
q = self.repo.mq
ispushable = False
unapplied = 0
for i in xrange(q.seriesend(), len(q.series)):
pushable, reason = q.pushable(i)
if pushable:
if unapplied == 0:
qnext = q.series[i]
if self.rev == q.series[i]:
ispushable = True
unapplied += 1
self.unappacts[0].setEnabled(ispushable and len(selection) == 1)
self.unappacts[1].setEnabled(ispushable and len(selection) == 1)
self.unappacts[2].setEnabled(ispushable and len(selection) == 1 and \
self.rev != qnext)
self.unappacts[3].setEnabled('qtip' in self.repo.tags())
self.unappacts[4].setEnabled(True)
self.unappacts[5].setEnabled(len(selection) == 1)
self.unappcmenu.exec_(point)
def generateSingleMenu(self, mode=None):
items = []
# This menu will never be opened for an unapplied patch, they
# have their own menu.
#
# iswd = working directory
# isrev = the changeset has an integer revision number
# isctx = changectx or workingctx
# fixed = the changeset is considered permanent
# applied = an applied patch
# qgoto = applied patch or qparent
isrev = lambda ap, wd, tags: not wd
iswd = lambda ap, wd, tags: bool(wd)
isctx = lambda ap, wd, tags: True
fixed = lambda ap, wd, tags: not (ap or wd)
applied = lambda ap, wd, tags: ap
qgoto = lambda ap, wd, tags: ('qparent' in tags) or \
(ap)
exs = self.repo.extensions()
def entry(menu, ext=None, func=None, desc=None, icon=None, cb=None):
if ext and ext not in exs:
return
if desc is None:
return menu.addSeparator()
act = QAction(desc, self)
if cb:
act.triggered.connect(cb)
if icon:
act.setIcon(qtlib.geticon(icon))
act.enableFunc = func
menu.addAction(act)
items.append(act)
return act
menu = QMenu(self)
if mode == 'outgoing':
pushtypeicon = {'all': None, 'branch': None, 'revision': None}
defaultpush = self.repo.ui.config(
'tortoisehg', 'defaultpush', 'all')
pushtypeicon[defaultpush] = 'hg-push'
submenu = menu.addMenu(_('Pus&h'))
entry(submenu, None, isrev, _('Push to &Here'),
pushtypeicon['revision'], self.pushToRevision)
entry(submenu, None, isrev, _('Push Selected &Branch'),
pushtypeicon['branch'], self.pushBranch)
entry(submenu, None, isrev, _('Push &All'),
pushtypeicon['all'], self.pushAll)
entry(menu)
entry(menu, None, isrev, _('&Update...'), 'hg-update',
self.updateToRevision)
entry(menu)
entry(menu, None, isctx, _('&Diff to Parent'), 'visualdiff',
self.visualDiffRevision)
entry(menu, None, isrev, _('Diff to &Local'), 'ldiff',
self.visualDiffToLocal)
entry(menu, None, isctx, _('Bro&wse at Revision'), 'hg-annotate',
self.manifestRevision)
act = self._createFilterBySelectedRevisionsMenu()
act.enableFunc = isrev
menu.addAction(act)
items.append(act)
entry(menu)
entry(menu, None, fixed, _('&Merge with Local...'), 'hg-merge',
self.mergeWithRevision)
entry(menu)
entry(menu, None, fixed, _('&Tag...'), 'hg-tag',
self.tagToRevision)
entry(menu, None, isrev, _('Boo&kmark...'), 'hg-bookmarks',
self.bookmarkRevision)
entry(menu, 'gpg', fixed, _('Sig&n...'), 'hg-sign',
self.signRevision)
entry(menu)
entry(menu, None, fixed, _('&Backout...'), 'hg-revert',
self.backoutToRevision)
entry(menu, None, isctx, _('Revert &All Files...'), 'hg-revert',
self.revertToRevision)
entry(menu)
entry(menu, None, isrev, _('Copy &Hash'), 'copy-hash',
self.copyHash)
entry(menu)
submenu = menu.addMenu(_('E&xport'))
entry(submenu, None, isrev, _('E&xport Patch...'), 'hg-export',
self.exportRevisions)
entry(submenu, None, isrev, _('&Email Patch...'), 'mail-forward',
self.emailSelectedRevisions)
entry(submenu, None, isrev, _('&Archive...'), 'hg-archive',
self.archiveRevision)
entry(submenu, None, isrev, _('&Bundle Rev and Descendants...'),
'hg-bundle', self.bundleRevisions)
entry(submenu, None, isctx, _('&Copy Patch'), 'copy-patch',
self.copyPatch)
entry(menu)
submenu = menu.addMenu(_('Change &Phase to'))
submenu.triggered.connect(self._changePhaseByMenu)
for pnum, pname in enumerate(phases.phasenames):
entry(submenu, None, isrev, pname).setData(pnum)
entry(menu)
entry(menu, None, isrev, _('&Graft to Local...'), 'hg-transplant',
self.graftRevisions)
if 'mq' in exs or 'rebase' in exs or 'strip' in exs or 'evolve' in exs:
submenu = menu.addMenu(_('Modi&fy History'))
entry(submenu, 'mq', applied, _('&Unapply Patch'), 'hg-qgoto',
self.qgotoParentRevision)
entry(submenu, 'mq', fixed, _('Import to &MQ'), 'qimport',
self.qimportRevision)
entry(submenu, 'mq', applied, _('&Finish Patch'), 'qfinish',
self.qfinishRevision)
entry(submenu, 'mq', applied, _('Re&name Patch...'), None,
self.qrename)
entry(submenu, 'mq')
if self._mqActions:
entry(submenu, 'mq', isctx, _('MQ &Options'), None,
self._mqActions.launchOptionsDialog)
entry(submenu, 'mq')
entry(submenu, 'rebase', isrev, _('&Rebase...'), 'hg-rebase',
self.rebaseRevision)
entry(submenu, 'rebase')
entry(submenu, 'evolve', fixed, _('&Prune...'), 'edit-cut',
self._pruneSelected)
if 'mq' in exs or 'strip' in exs:
entry(submenu, None, fixed, _('&Strip...'), 'hg-strip',
self.stripRevision)
entry(menu, 'reviewboard', isrev, _('Post to Re&view Board...'), 'reviewboard',
self.sendToReviewBoard)
entry(menu, 'rupdate', fixed, _('&Remote Update...'), 'hg-update',
self.rupdate)
def _setupCustomSubmenu(menu):
tools, toollist = hglib.tortoisehgtools(self.repo.ui,
selectedlocation='workbench.revdetails.custom-menu')
if not tools:
return
istrue = lambda ap, wd, tags: True
enablefuncs = {
'istrue': istrue, 'iswd': iswd, 'isrev': isrev, 'isctx': isctx,
'fixed': fixed, 'applied': applied, 'qgoto': qgoto
}
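            # e.g. a tool configured with enable = 'isrev' is only enabled when
            # the selected row is a real revision, not the working directory entry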
entry(menu)
submenu = menu.addMenu(_('Custom Tools'))
submenu.triggered.connect(self._runCustomCommandByMenu)
for name in toollist:
if name == '|':
entry(submenu)
continue
info = tools.get(name, None)
if info is None:
continue
command = info.get('command', None)
if not command:
continue
workingdir = info.get('workingdir', '')
showoutput = info.get('showoutput', False)
label = info.get('label', name)
icon = info.get('icon', 'tools-spanner-hammer')
enable = info.get('enable', 'istrue').lower()
if enable in enablefuncs:
enable = enablefuncs[enable]
else:
continue
a = entry(submenu, None, enable, label, icon)
a.setData((command, showoutput, workingdir))
_setupCustomSubmenu(menu)
if mode == 'outgoing':
self.outgoingcmenu = menu
self.outgoingcmenuitems = items
else:
self.singlecmenu = menu
self.singlecmenuitems = items
def _gotoAncestor(self):
ancestor = self.repo[self.menuselection[0]]
for rev in self.menuselection[1:]:
ctx = self.repo[rev]
ancestor = ancestor.ancestor(ctx)
self.goto(ancestor.rev())
def generatePairMenu(self):
def dagrange():
revA, revB = self.menuselection
if revA > revB:
B, A = self.menuselection
else:
A, B = self.menuselection
# simply disable lazy evaluation as we won't handle slow query
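            # e.g. a selection of revisions 7 and 3 queries the revset '3::7'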
return list(self.repo.revs('%s::%s' % (A, B)))
def exportPair():
self.exportRevisions(self.menuselection)
def exportDiff():
root = self.repo.root
filename = '%s_%d_to_%d.diff' % (os.path.basename(root),
self.menuselection[0],
self.menuselection[1])
file = QFileDialog.getSaveFileName(self, _('Write diff file'),
hglib.tounicode(os.path.join(root, filename)))
if not file:
return
f = QFile(file)
if not f.open(QIODevice.WriteOnly | QIODevice.Truncate):
WarningMsgBox(_('Repository Error'),
_('Unable to write diff file'))
return
sess = self._buildPatch('diff')
sess.setOutputDevice(f)
def exportDagRange():
l = dagrange()
if l:
self.exportRevisions(l)
def diffPair():
revA, revB = self.menuselection
dlg = visdiff.visualdiff(self.repo.ui, self.repo, [],
{'rev':(str(revA), str(revB))})
if dlg:
dlg.exec_()
def emailPair():
self._emailRevisions(self.menuselection)
def emailDagRange():
l = dagrange()
if l:
self._emailRevisions(l)
def bundleDagRange():
l = dagrange()
if l:
self.bundleRevisions(base=l[0], tip=l[-1])
def bisectNormal():
revA, revB = self.menuselection
dlg = self._dialogs.open(RepoWidget._createBisectDialog)
dlg.restart(str(revA), str(revB))
def bisectReverse():
revA, revB = self.menuselection
dlg = self._dialogs.open(RepoWidget._createBisectDialog)
dlg.restart(str(revB), str(revA))
def compressDlg():
ctxa, ctxb = map(self.repo.hgchangectx, self.menuselection)
if ctxa.ancestor(ctxb) == ctxb:
revs = self.menuselection[:]
elif ctxa.ancestor(ctxb) == ctxa:
revs = [self.menuselection[1], self.menuselection[0]]
else:
InfoMsgBox(_('Unable to compress history'),
_('Selected changeset pair not related'))
return
dlg = compress.CompressDialog(self._repoagent, revs, self)
dlg.exec_()
def rebaseDlg():
opts = {'source': self.menuselection[0],
'dest': self.menuselection[1]}
dlg = rebase.RebaseDialog(self._repoagent, self, **opts)
dlg.exec_()
exs = self.repo.extensions()
menu = QMenu(self)
for name, cb, icon, ext in (
(_('Visual Diff...'), diffPair, 'visualdiff', None),
(_('Export Diff...'), exportDiff, 'hg-export', None),
(None, None, None, None),
(_('Export Selected...'), exportPair, 'hg-export', None),
(_('Email Selected...'), emailPair, 'mail-forward', None),
(_('Copy Selected as Patch'), self.copyPatch, 'copy-patch', None),
(None, None, None, None),
(_('Export DAG Range...'), exportDagRange, 'hg-export', None),
(_('Email DAG Range...'), emailDagRange, 'mail-forward', None),
(_('Bundle DAG Range...'), bundleDagRange, 'hg-bundle', None),
(None, None, None, None),
(_('Bisect - Good, Bad...'), bisectNormal, 'hg-bisect-good-bad', None),
(_('Bisect - Bad, Good...'), bisectReverse, 'hg-bisect-bad-good', None),
(_('Compress History...'), compressDlg, 'hg-compress', None),
(_('Rebase...'), rebaseDlg, 'hg-rebase', 'rebase'),
(None, None, None, None),
(_('Goto common ancestor'), self._gotoAncestor, 'hg-merge', None),
(self._createFilterBySelectedRevisionsMenu, None, None, None),
(None, None, None, None),
(_('Graft Selected to local...'), self.graftRevisions, 'hg-transplant', None),
(None, None, None, None),
(_('&Prune Selected...'), self._pruneSelected, 'edit-cut',
'evolve'),
):
if name is None:
menu.addSeparator()
continue
if ext and ext not in exs:
continue
if callable(name):
a = name()
else:
a = QAction(name, self)
if icon:
a.setIcon(qtlib.geticon(icon))
if cb:
a.triggered.connect(cb)
menu.addAction(a)
if 'reviewboard' in self.repo.extensions():
menu.addSeparator()
a = QAction(_('Post Selected to Review Board...'), self)
a.triggered.connect(self.sendToReviewBoard)
menu.addAction(a)
self.paircmenu = menu
def generateUnappliedPatchMenu(self):
def qdeleteact():
"""Delete unapplied patch(es)"""
patches = map(hglib.tounicode, self.menuselection)
self._mqActions.deletePatches(patches)
def qfoldact():
patches = map(hglib.tounicode, self.menuselection)
self._mqActions.foldPatches(patches)
menu = QMenu(self)
acts = []
for name, cb, icon in (
(_('Apply patch'), self.qpushRevision, 'hg-qpush'),
(_('Apply onto original parent'), self.qpushExactRevision, None),
(_('Apply only this patch'), self.qpushMoveRevision, None),
(_('Fold patches...'), qfoldact, 'hg-qfold'),
(_('Delete patches...'), qdeleteact, 'hg-qdelete'),
(_('Rename patch...'), self.qrename, None)):
act = QAction(name, self)
act.triggered.connect(cb)
if icon:
act.setIcon(qtlib.geticon(icon))
acts.append(act)
menu.addAction(act)
menu.addSeparator()
acts.append(menu.addAction(_('MQ &Options'),
self._mqActions.launchOptionsDialog))
self.unappcmenu = menu
self.unappacts = acts
def generateMultipleSelectionMenu(self):
def exportSel():
self.exportRevisions(self.menuselection)
def emailSel():
self._emailRevisions(self.menuselection)
menu = QMenu(self)
for name, cb, icon in (
(_('Export Selected...'), exportSel, 'hg-export'),
(_('Email Selected...'), emailSel, 'mail-forward'),
(_('Copy Selected as Patch'), self.copyPatch, 'copy-patch'),
(None, None, None),
(_('Goto common ancestor'), self._gotoAncestor, 'hg-merge'),
(self._createFilterBySelectedRevisionsMenu, None, None),
(None, None, None),
(_('Graft Selected to local...'), self.graftRevisions, 'hg-transplant'),
):
if name is None:
menu.addSeparator()
continue
if callable(name):
a = name()
else:
a = QAction(name, self)
if icon:
a.setIcon(qtlib.geticon(icon))
if cb:
a.triggered.connect(cb)
menu.addAction(a)
if 'evolve' in self.repo.extensions():
menu.addSeparator()
a = QAction(_('&Prune Selected...'), self)
a.setIcon(qtlib.geticon('edit-cut'))
a.triggered.connect(self._pruneSelected)
menu.addAction(a)
if 'reviewboard' in self.repo.extensions():
a = QAction(_('Post Selected to Review Board...'), self)
a.triggered.connect(self.sendToReviewBoard)
menu.addAction(a)
self.multicmenu = menu
def generateBundleMenu(self):
menu = QMenu(self)
for name, cb, icon in (
(_('Pull to here...'), self.pullBundleToRev, 'hg-pull-to-here'),
(_('Visual diff...'), self.visualDiffRevision, 'visualdiff'),
):
a = QAction(name, self)
a.triggered.connect(cb)
if icon:
a.setIcon(qtlib.geticon(icon))
menu.addAction(a)
self.bundlemenu = menu
def generateOutgoingMenu(self):
self.generateSingleMenu(mode='outgoing')
def exportRevisions(self, revisions):
if not revisions:
revisions = [self.rev]
if len(revisions) == 1:
if isinstance(self.rev, int):
defaultpath = os.path.join(self.repoRootPath(),
'%d.patch' % self.rev)
else:
defaultpath = self.repoRootPath()
ret = QFileDialog.getSaveFileName(self, _('Export patch'),
defaultpath,
_('Patch Files (*.patch)'))
if not ret:
return
epath = unicode(ret)
udir = os.path.dirname(epath)
custompath = True
else:
udir = QFileDialog.getExistingDirectory(self, _('Export patch'),
hglib.tounicode(self.repo.root))
if not udir:
return
udir = unicode(udir)
ename = self._repoagent.shortName() + '_%r.patch'
epath = os.path.join(udir, ename)
custompath = False
cmdline = hglib.buildcmdargs('export', verbose=True, output=epath,
rev=hglib.compactrevs(sorted(revisions)))
existingRevisions = []
for rev in revisions:
if custompath:
path = epath
else:
path = epath % rev
if os.path.exists(path):
if os.path.isfile(path):
existingRevisions.append(rev)
else:
QMessageBox.warning(self,
_('Cannot export revision'),
(_('Cannot export revision %s into the file named:'
'\n\n%s\n') % (rev, epath % rev)) + \
_('There is already an existing folder '
'with that same name.'))
return
if existingRevisions:
buttonNames = [_("Replace"), _("Append"), _("Abort")]
warningMessage = \
_('There are existing patch files for %d revisions (%s) '
'in the selected location (%s).\n\n') \
% (len(existingRevisions),
" ,".join([str(rev) for rev in existingRevisions]),
udir)
warningMessage += \
_('What do you want to do?\n') + u'\n' + \
u'- ' + _('Replace the existing patch files.\n') + \
u'- ' + _('Append the changes to the existing patch files.\n') + \
u'- ' + _('Abort the export operation.\n')
res = qtlib.CustomPrompt(_('Patch files already exist'),
warningMessage,
self,
buttonNames, 0, 2).run()
if buttonNames[res] == _("Replace"):
# Remove the existing patch files
for rev in existingRevisions:
if custompath:
os.remove(epath)
else:
os.remove(epath % rev)
elif buttonNames[res] == _("Abort"):
return
self._runCommand(cmdline)
if len(revisions) == 1:
# Show a message box with a link to the export folder and to the
# exported file
rev = revisions[0]
patchfilename = os.path.normpath(epath)
patchdirname = os.path.normpath(os.path.dirname(epath))
patchshortname = os.path.basename(patchfilename)
if patchdirname.endswith(os.path.sep):
patchdirname = patchdirname[:-1]
qtlib.InfoMsgBox(_('Patch exported'),
_('Revision #%d (%s) was exported to:<p>'
'<a href="file:///%s">%s</a>%s'
'<a href="file:///%s">%s</a>') \
% (rev, str(self.repo[rev]),
patchdirname, patchdirname, os.path.sep,
patchfilename, patchshortname))
else:
# Show a message box with a link to the export folder
qtlib.InfoMsgBox(_('Patches exported'),
_('%d patches were exported to:<p>'
'<a href="file:///%s">%s</a>') \
% (len(revisions), udir, udir))
def visualDiffRevision(self):
opts = dict(change=self.rev)
dlg = visdiff.visualdiff(self.repo.ui, self.repo, [], opts)
if dlg:
dlg.exec_()
def visualDiffToLocal(self):
if self.rev is None:
return
opts = dict(rev=['rev(%d)' % self.rev])
dlg = visdiff.visualdiff(self.repo.ui, self.repo, [], opts)
if dlg:
dlg.exec_()
@pyqtSlot()
def updateToRevision(self):
rev = None
if isinstance(self.rev, int):
rev = hglib.getrevisionlabel(self.repo, self.rev)
dlg = update.UpdateDialog(self._repoagent, rev, self)
r = dlg.exec_()
if r in (0, 1):
self.gotoParent()
@pyqtSlot()
def lockTool(self):
from locktool import LockDialog
dlg = LockDialog(self._repoagent, self)
if dlg:
dlg.exec_()
@pyqtSlot()
def revertToRevision(self):
if not qtlib.QuestionMsgBox(
_('Confirm Revert'),
_('Reverting all files will discard changes and '
'leave affected files in a modified state.<br>'
'<br>Are you sure you want to use revert?<br><br>'
'(use update to checkout another revision)'),
parent=self):
return
cmdline = hglib.buildcmdargs('revert', all=True, rev=self.rev)
sess = self._runCommand(cmdline)
sess.commandFinished.connect(self._refreshCommitTabIfNeeded)
def _createFilterBySelectedRevisionsMenu(self):
menu = QMenu(_('Filter b&y'), self)
menu.setIcon(qtlib.geticon('view-filter'))
menu.triggered.connect(self._filterBySelectedRevisions)
for t, r in [(_('&Ancestors and Descendants'),
"ancestors({revs}) or descendants({revs})"),
(_('A&uthor'), "matching({revs}, 'author')"),
(_('&Branch'), "branch({revs})"),
]:
a = menu.addAction(t)
a.setData(r)
menu.addSeparator()
menu.addAction(_('&More Options...'))
return menu.menuAction()
@pyqtSlot(QAction)
def _filterBySelectedRevisions(self, action):
revs = hglib.compactrevs(sorted(self.repoview.selectedRevisions()))
expr = str(action.data().toString())
if not expr:
self._filterByMatchDialog(revs)
return
self.setFilter(expr.format(revs=revs))
def _filterByMatchDialog(self, revlist):
dlg = matching.MatchDialog(self._repoagent, revlist, self)
if dlg.exec_():
self.setFilter(dlg.revsetexpression)
def pushAll(self):
self.syncDemand.forward('push', False, pushall=True)
def pushToRevision(self):
# Do not ask for confirmation
self.syncDemand.forward('push', False, rev=self.rev)
def pushBranch(self):
# Do not ask for confirmation
self.syncDemand.forward('push', False,
branch=self.repo[self.rev].branch())
def manifestRevision(self):
if QApplication.keyboardModifiers() & Qt.ShiftModifier:
self._dialogs.openNew(RepoWidget._createManifestDialog)
else:
dlg = self._dialogs.open(RepoWidget._createManifestDialog)
dlg.setRev(self.rev)
def _createManifestDialog(self):
return revdetails.createManifestDialog(self._repoagent, self.rev)
def mergeWithOtherHead(self):
"""Open dialog to merge with the other head of the current branch"""
cmdline = hglib.buildcmdargs('merge', preview=True,
config='ui.logtemplate={rev}\n')
sess = self._runCommand(cmdline)
sess.setCaptureOutput(True)
sess.commandFinished.connect(self._onMergePreviewFinished)
@qtlib.senderSafeSlot(int)
def _onMergePreviewFinished(self, ret):
sess = self.sender()
if ret == 255 and 'hg heads' in sess.errorString():
# multiple heads
self.filterbar.setQuery('head() - .')
self.filterbar.runQuery()
msg = '\n'.join(sess.errorString().splitlines()[:-1]) # drop hint
w = self.setInfoBar(infobar.ConfirmInfoBar, msg)
assert w
w.acceptButton.setText(_('Merge'))
w.accepted.connect(self.mergeWithRevision)
w.finished.connect(self.clearRevisionSet)
return
if ret != 0:
return
revs = map(int, str(sess.readAll()).splitlines())
if not revs:
return
self._dialogs.open(RepoWidget._createMergeDialog, revs[-1])
@pyqtSlot()
def mergeWithRevision(self):
pctx = self.repo['.']
octx = self.repo[self.rev]
if pctx == octx:
QMessageBox.warning(self, _('Unable to merge'),
_('You cannot merge a revision with itself'))
return
self._dialogs.open(RepoWidget._createMergeDialog, self.rev)
def _createMergeDialog(self, rev):
return merge.MergeDialog(self._repoagent, rev, self)
def tagToRevision(self):
dlg = tag.TagDialog(self._repoagent, rev=str(self.rev), parent=self)
dlg.exec_()
def bookmarkRevision(self):
dlg = bookmark.BookmarkDialog(self._repoagent, self.rev, self)
dlg.exec_()
def signRevision(self):
dlg = sign.SignDialog(self._repoagent, self.rev, self)
dlg.exec_()
def graftRevisions(self):
"""Graft selected revision on top of working directory parent"""
revlist = []
for rev in sorted(self.repoview.selectedRevisions()):
revlist.append(str(rev))
if not revlist:
revlist = [self.rev]
dlg = graft.GraftDialog(self._repoagent, self, source=revlist)
if dlg.valid:
dlg.exec_()
def backoutToRevision(self):
msg = backout.checkrev(self._repoagent.rawRepo(), self.rev)
if msg:
qtlib.InfoMsgBox(_('Unable to backout'), msg, parent=self)
return
dlg = backout.BackoutDialog(self._repoagent, self.rev, self)
dlg.finished.connect(dlg.deleteLater)
dlg.exec_()
@pyqtSlot()
def _pruneSelected(self):
revspec = hglib.compactrevs(sorted(self.repoview.selectedRevisions()))
dlg = prune.createPruneDialog(self._repoagent, revspec, self)
dlg.exec_()
def stripRevision(self):
'Strip the selected revision and all descendants'
dlg = thgstrip.createStripDialog(self._repoagent, rev=str(self.rev),
parent=self)
dlg.exec_()
def sendToReviewBoard(self):
self._dialogs.open(RepoWidget._createPostReviewDialog,
tuple(self.repoview.selectedRevisions()))
def _createPostReviewDialog(self, revs):
return postreview.PostReviewDialog(self.repo.ui, self._repoagent, revs)
def rupdate(self):
import rupdate
dlg = rupdate.createRemoteUpdateDialog(self._repoagent, self.rev, self)
dlg.exec_()
@pyqtSlot()
def emailSelectedRevisions(self):
self._emailRevisions(self.repoview.selectedRevisions())
def _emailRevisions(self, revs):
self._dialogs.open(RepoWidget._createEmailDialog, tuple(revs))
def _createEmailDialog(self, revs):
return hgemail.EmailDialog(self._repoagent, revs)
def archiveRevision(self):
rev = hglib.getrevisionlabel(self.repo, self.rev)
dlg = archive.createArchiveDialog(self._repoagent, rev, self)
dlg.exec_()
def bundleRevisions(self, base=None, tip=None):
root = self.repoRootPath()
if base is None or base is False:
base = self.rev
data = dict(name=os.path.basename(root), base=base)
if tip is None:
filename = '%(name)s_%(base)s_and_descendants.hg' % data
else:
data.update(rev=tip)
filename = '%(name)s_%(base)s_to_%(rev)s.hg' % data
file = QFileDialog.getSaveFileName(self, _('Write bundle'),
os.path.join(root, filename))
if not file:
return
cmdline = ['bundle', '--verbose']
parents = [hglib.escaperev(r.rev()) for r in self.repo[base].parents()]
for p in parents:
cmdline.extend(['--base', p])
if tip:
cmdline.extend(['--rev', str(tip)])
else:
cmdline.extend(['--rev', 'heads(descendants(%s))' % base])
cmdline.append(unicode(file))
self._runCommand(cmdline)
def _buildPatch(self, command=None):
if not command:
# workingdir revision cannot be exported
if self.rev is None:
command = 'diff'
else:
command = 'export'
assert command in ('export', 'diff')
if command == 'export':
# patches should be in chronological order
revs = sorted(self.menuselection)
cmdline = hglib.buildcmdargs('export', rev=hglib.compactrevs(revs))
else:
revs = self.rev and self.menuselection or None
cmdline = hglib.buildcmdargs('diff', rev=revs)
return self._runCommand(cmdline)
@pyqtSlot()
def copyPatch(self):
sess = self._buildPatch()
sess.setCaptureOutput(True)
sess.commandFinished.connect(self._copyPatchOutputToClipboard)
@qtlib.senderSafeSlot(int)
def _copyPatchOutputToClipboard(self, ret):
if ret == 0:
sess = self.sender()
output = sess.readAll()
mdata = QMimeData()
mdata.setData('text/x-diff', output) # for lossless import
mdata.setText(hglib.tounicode(str(output)))
QApplication.clipboard().setMimeData(mdata)
def copyHash(self):
clip = QApplication.clipboard()
clip.setText(binascii.hexlify(self.repo[self.rev].node()))
def changePhase(self, phase):
currentphase = self.repo[self.rev].phase()
if currentphase == phase:
# There is nothing to do, we are already in the target phase
return
phasestr = phases.phasenames[phase]
cmdline = ['phase', '--rev', '%s' % self.rev, '--%s' % phasestr]
if currentphase < phase:
# Ask the user if he wants to force the transition
title = _('Backwards phase change requested')
if currentphase == phases.draft and phase == phases.secret:
# Here we are sure that the current phase is draft and the target phase is secret
# Nevertheless we will not hard-code those phase names on the dialog strings to
# make sure that the proper phase name translations are used
main = _('Do you really want to make this revision <i>secret</i>?')
text = _('Making a "<i>draft</i>" revision "<i>secret</i>" '
'is generally a safe operation.\n\n'
'However, there are a few caveats:\n\n'
'- "secret" revisions are not pushed. '
'This can cause you trouble if you\n'
'refer to a secret subrepo revision.\n\n'
'- If you pulled this revision from '
'a non publishing server it may be\n'
'moved back to "<i>draft</i>" if you pull '
'again from that particular server.\n\n'
'Please be careful!')
labels = ((QMessageBox.Yes, _('&Make secret')),
(QMessageBox.No, _('&Cancel')))
else:
main = _('Do you really want to <i>force</i> a backwards phase transition?')
text = _('You are trying to move the phase of revision %d backwards,\n'
'from "<i>%s</i>" to "<i>%s</i>".\n\n'
'However, "<i>%s</i>" is a lower phase level than "<i>%s</i>".\n\n'
'Moving the phase backwards is not recommended.\n'
'For example, it may result in having multiple heads\nif you '
'modify a revision that you have already pushed\nto a server.\n\n'
'Please be careful!') % (self.rev, phases.phasenames[currentphase], phasestr, phasestr,
phases.phasenames[currentphase])
labels = ((QMessageBox.Yes, _('&Force')),
(QMessageBox.No, _('&Cancel')))
if not qtlib.QuestionMsgBox(title, main, text,
labels=labels, parent=self):
return
cmdline.append('--force')
self._runCommand(cmdline)
@pyqtSlot(QAction)
def _changePhaseByMenu(self, action):
phasenum, _ok = action.data().toInt()
self.changePhase(phasenum)
def rebaseRevision(self):
"""Rebase selected revision on top of working directory parent"""
opts = {'source' : self.rev, 'dest': self.repo['.'].rev()}
dlg = rebase.RebaseDialog(self._repoagent, self, **opts)
dlg.exec_()
def qimportRevision(self):
"""QImport revision and all descendents to MQ"""
if 'qparent' in self.repo.tags():
endrev = 'qparent'
else:
endrev = ''
# Check whether there are existing patches in the MQ queue whose name
# collides with the revisions that are going to be imported
revList = self.repo.revs('%s::%s and not hidden()' % (self.rev, endrev))
if endrev and not revList:
# There is a qparent but the revision list is empty
# This means that the qparent is not a descendant of the
# selected revision
QMessageBox.warning(self, _('Cannot import selected revision'),
_('The selected revision (rev #%d) cannot be imported '
                  "because it is not a descendant of 'qparent' (rev #%d)") \
% (self.rev, self.repo['qparent'].rev()))
return
patchdir = self.repo.join('patches')
def patchExists(p):
return os.path.exists(os.path.join(patchdir, p))
# Note that the following two arrays are both ordered by "rev"
defaultPatchNames = ['%d.diff' % rev for rev in revList]
defaultPatchesExist = [patchExists(p) for p in defaultPatchNames]
if any(defaultPatchesExist):
# We will qimport each revision one by one, starting from the newest
# To do so, we will find a valid and unique patch name for each
# revision that we must qimport (i.e. a filename that does not
# already exist)
# and then we will import them one by one starting from the newest
# one, using these unique names
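            # For example, if '12.diff' already exists in the patch queue,
            # revision 12 is imported under a free name such as '12_01.diff'.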
def getUniquePatchName(baseName):
maxRetries = 99
for n in range(1, maxRetries):
patchName = baseName + '_%02d.diff' % n
if not patchExists(patchName):
return patchName
return baseName
patchNames = {}
for n, rev in enumerate(revList):
if defaultPatchesExist[n]:
patchNames[rev] = getUniquePatchName(str(rev))
else:
# The default name is safe
patchNames[rev] = defaultPatchNames[n]
# qimport each revision individually, starting from the topmost one
revList.reverse()
cmdlines = []
for rev in revList:
cmdlines.append(['qimport', '--rev', '%s' % rev,
'--name', patchNames[rev]])
self._runCommandSequence(cmdlines)
else:
# There were no collisions with existing patch names, we can
# simply qimport the whole revision set in a single go
cmdline = ['qimport', '--rev', '%s::%s' % (self.rev, endrev)]
self._runCommand(cmdline)
def qfinishRevision(self):
"""Finish applied patches up to and including selected revision"""
self._mqActions.finishRevision(hglib.tounicode(str(self.rev)))
@pyqtSlot()
def qgotoParentRevision(self):
"""Apply an unapplied patch, or qgoto the parent of an applied patch"""
self.qgotoRevision(self.repo[self.rev].p1().rev())
@pyqtSlot()
def qgotoSelectedRevision(self):
self.qgotoRevision(self.rev)
def qgotoRevision(self, rev):
"""Make REV the top applied patch"""
mqw = self._mqActions
ctx = self.repo.changectx(rev)
        if 'qparent' in ctx.tags():
mqw.popAllPatches()
else:
mqw.gotoPatch(hglib.tounicode(ctx.thgmqpatchname()))
def qrename(self):
sel = self.menuselection[0]
if not isinstance(sel, str):
sel = self.repo.changectx(sel).thgmqpatchname()
self._mqActions.renamePatch(hglib.tounicode(sel))
def _qpushRevision(self, move=False, exact=False):
"""QPush REV with the selected options"""
ctx = self.repo.changectx(self.rev)
patchname = hglib.tounicode(ctx.thgmqpatchname())
self._mqActions.pushPatch(patchname, move=move, exact=exact)
def qpushRevision(self):
"""Call qpush with no options"""
self._qpushRevision(move=False, exact=False)
def qpushExactRevision(self):
"""Call qpush using the exact flag"""
self._qpushRevision(exact=True)
def qpushMoveRevision(self):
"""Make REV the top applied patch"""
self._qpushRevision(move=True)
def runCustomCommand(self, command, showoutput=False, workingdir='',
files=None):
"""Execute 'custom commands', on the selected repository"""
# Perform variable expansion
# This is done in two steps:
# 1. Expand environment variables
command = os.path.expandvars(command).strip()
if not command:
InfoMsgBox(_('Invalid command'),
_('The selected command is empty'))
return
if workingdir:
workingdir = os.path.expandvars(workingdir).strip()
# 2. Expand internal workbench variables
def filelist2str(filelist):
return ' '.join(util.shellquote(
os.path.normpath(self.repo.wjoin(filename)))
for filename in filelist)
if files is None:
files = []
vars = {
'ROOT': self.repo.root,
'REVID': str(self.repo[self.rev]),
'REV': self.rev,
'FILES': filelist2str(self.repo[self.rev].files()),
'ALLFILES': filelist2str(self.repo[self.rev]),
'SELECTEDFILES': filelist2str(files),
}
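        # Illustrative example: a custom tool whose configured command is, say,
        # 'hg log -v -r {REV} --cwd {ROOT}' has the {REV} and {ROOT} placeholders
        # replaced with the selected revision and the repository root by the loop
        # below.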
for var in vars:
command = command.replace('{%s}' % var, str(vars[var]))
if workingdir:
workingdir = workingdir.replace('{%s}' % var, str(vars[var]))
if not workingdir:
workingdir = self.repo.root
# Show the Output Log if configured to do so
if showoutput:
self.makeLogVisible.emit(True)
# If the user wants to run mercurial,
# do so via our usual runCommand method
cmd = shlex.split(command)
cmdtype = cmd[0].lower()
if cmdtype == 'hg':
sess = self._runCommand(map(hglib.tounicode, cmd[1:]))
sess.commandFinished.connect(self._notifyWorkingDirChanges)
return
elif cmdtype == 'thg':
cmd = cmd[1:]
if '--repository' in cmd:
_ui = ui.ui()
else:
cmd += ['--repository', self.repo.root]
_ui = self.repo.ui.copy()
_ui.ferr = cStringIO.StringIO()
# avoid circular import of hgqt.run by importing it inplace
from tortoisehg.hgqt import run
res = run.dispatch(cmd, u=_ui)
if res:
errormsg = _ui.ferr.getvalue().strip()
if errormsg:
errormsg = \
_('The following error message was returned:'
'\n\n<b>%s</b>') % hglib.tounicode(errormsg)
errormsg +=\
_('\n\nPlease check that the "thg" command is valid.')
qtlib.ErrorMsgBox(
_('Failed to execute custom TortoiseHg command'),
_('The command "%s" failed (code %d).')
% (hglib.tounicode(command), res), errormsg)
return res
# Otherwise, run the selected command in the background
try:
res = subprocess.Popen(command, cwd=workingdir, shell=True)
except OSError, ex:
res = 1
qtlib.ErrorMsgBox(_('Failed to execute custom command'),
_('The command "%s" could not be executed.') % hglib.tounicode(command),
_('The following error message was returned:\n\n"%s"\n\n'
'Please check that the command path is valid and '
'that it is a valid application') % hglib.tounicode(ex.strerror))
return res
@pyqtSlot(QAction)
def _runCustomCommandByMenu(self, action):
command, showoutput, workingdir = action.data().toPyObject()
self.runCustomCommand(command, showoutput, workingdir)
@pyqtSlot(str, list)
def handleRunCustomCommandRequest(self, toolname, files):
tools, toollist = hglib.tortoisehgtools(self.repo.ui)
if not tools or toolname not in toollist:
return
toolname = str(toolname)
command = tools[toolname].get('command', '')
showoutput = tools[toolname].get('showoutput', False)
workingdir = tools[toolname].get('workingdir', '')
self.runCustomCommand(command, showoutput, workingdir, files)
def _runCommand(self, cmdline):
sess = self._repoagent.runCommand(cmdline, self)
self._handleNewCommand(sess)
return sess
def _runCommandSequence(self, cmdlines):
sess = self._repoagent.runCommandSequence(cmdlines, self)
self._handleNewCommand(sess)
return sess
def _handleNewCommand(self, sess):
self.clearInfoBar()
sess.outputReceived.connect(self._repoviewFrame.showOutput)
@pyqtSlot()
def _notifyWorkingDirChanges(self):
shlib.shell_notify([self.repo.root])
@pyqtSlot()
def _refreshCommitTabIfNeeded(self):
"""Refresh the Commit tab if the user settings require it"""
if self.taskTabsWidget.currentIndex() != self._namedTabs['commit']:
return
refreshwd = self.repo.ui.config('tortoisehg', 'refreshwdstatus', 'auto')
# Valid refreshwd values are 'auto', 'always' and 'alwayslocal'
if refreshwd != 'auto':
if refreshwd == 'always' \
or paths.is_on_fixed_drive(self.repo.root):
self.commitDemand.forward('refreshWctx')
class LightRepoWindow(QMainWindow):
def __init__(self, repoagent):
super(LightRepoWindow, self).__init__()
self._repoagent = repoagent
self.setIconSize(qtlib.smallIconSize())
repo = repoagent.rawRepo()
val = repo.ui.config('tortoisehg', 'tasktabs', 'off').lower()
if val not in ('east', 'west'):
repo.ui.setconfig('tortoisehg', 'tasktabs', 'east')
rw = RepoWidget(repoagent, self)
self.setCentralWidget(rw)
self._edittbar = tbar = self.addToolBar(_('&Edit Toolbar'))
tbar.setObjectName('edittbar')
a = tbar.addAction(qtlib.geticon('view-refresh'), _('&Refresh'))
a.setShortcuts(QKeySequence.Refresh)
a.triggered.connect(self.refresh)
tbar = rw.filterBar()
tbar.setObjectName('filterbar')
tbar.setWindowTitle(_('&Filter Toolbar'))
self.addToolBar(tbar)
s = QSettings()
s.beginGroup('LightRepoWindow')
self.restoreGeometry(s.value('geometry').toByteArray())
self.restoreState(s.value('windowState').toByteArray())
s.endGroup()
def createPopupMenu(self):
menu = super(LightRepoWindow, self).createPopupMenu()
assert menu # should have toolbar
menu.addSeparator()
menu.addAction(_('&Settings'), self._editSettings)
return menu
def closeEvent(self, event):
rw = self.centralWidget()
if not rw.closeRepoWidget():
event.ignore()
return
s = QSettings()
s.beginGroup('LightRepoWindow')
s.setValue('geometry', self.saveGeometry())
s.setValue('windowState', self.saveState())
s.endGroup()
event.accept()
@pyqtSlot()
def refresh(self):
self._repoagent.pollStatus()
rw = self.centralWidget()
rw.reload()
def setSyncUrl(self, url):
rw = self.centralWidget()
rw.setSyncUrl(url)
@pyqtSlot()
def _editSettings(self):
dlg = settings.SettingsDialog(parent=self)
dlg.exec_()
| seewindcn/tortoisehg | src/tortoisehg/hgqt/repowidget.py | Python | gpl-2.0 | 84,333 |
#
# Python bindings for libparted (built on top of the _ped Python module).
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
from collections import Sequence
class CachedList(Sequence):
"""CachedList()
Provides an immutable list that is constructed from a function that
could take a while to run. This is basically the same concept as
memoization, except that the function does not take any parameters
and therefore there is nothing to use as a memo.
The constructor function is provided to __init__, must not take any
parameters, and must return a list. The invalidate() method indicates
    that the list is no longer valid and should be reconstructed by
calling the function again. It is up to client code to call invalidate.
The rest of the procedure is handled by this class.
In all ways, this should appear to be just like a list."""
def __init__(self, lstFn):
"""Construct a new CachedList. The lstFn is a function that takes
no parameters and returns a list. It will be called lazily - the
list is not constructed until the first access, which could be
quite a while after this method is called."""
self._invalid = True
self._lst = []
self._lstFn = lstFn
def __rebuildList(self):
if self._invalid:
self._lst = self._lstFn()
self._invalid = False
def __contains__(self, value):
self.__rebuildList()
return self._lst.__contains__(value)
def __getitem__(self, index):
self.__rebuildList()
return self._lst.__getitem__(index)
def __iter__(self):
self.__rebuildList()
return self._lst.__iter__()
def __len__(self):
self.__rebuildList()
return len(self._lst)
def __repr__(self):
self.__rebuildList()
return repr(self._lst)
def __str__(self):
self.__rebuildList()
return str(self._lst)
def __hash__(self):
return hash(str(self))
def count(self, value):
self.__rebuildList()
return self._lst.count(value)
def index(self, value, *args, **kwargs):
self.__rebuildList()
return self._lst.index(value, *args, **kwargs)
def invalidate(self):
"""Indicate that the list is no longer valid, due to some external
changes. The next access to the list will result in the provided
list construction function being called to build a new list."""
self._invalid = True
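# Illustrative usage sketch (not part of the original module): the construction
# function passed to CachedList is only called lazily on first access, and again
# only after invalidate() marks the cached list as stale.
if __name__ == "__main__":
    calls = []

    def _buildList():
        calls.append(1)
        return [1, 2, 3]

    cached = CachedList(_buildList)
    assert len(calls) == 0            # nothing built yet
    assert list(cached) == [1, 2, 3]  # first access triggers the build
    assert cached[0] == 1             # cached result is reused
    assert len(calls) == 1
    cached.invalidate()
    assert len(cached) == 3           # next access rebuilds the list
    assert len(calls) == 2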
| hpfn/pyparted_debian_dir | src/parted/cachedlist.py | Python | gpl-2.0 | 3,527 |
# Dialog for creating new encryption passphrase
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
from gi.repository import Gtk, Gdk
import pwquality
from pyanaconda.ui.gui import GUIObject
from pyanaconda.i18n import _, N_
__all__ = ["PassphraseDialog"]
ERROR_WEAK = N_("You have provided a weak passphrase: %s")
ERROR_NOT_MATCHING = N_("Passphrases do not match.")
class PassphraseDialog(GUIObject):
builderObjects = ["passphrase_dialog"]
mainWidgetName = "passphrase_dialog"
uiFile = "spokes/lib/passphrase.glade"
def __init__(self, data):
GUIObject.__init__(self, data)
self._confirm_entry = self.builder.get_object("confirm_pw_entry")
self._passphrase_entry = self.builder.get_object("passphrase_entry")
self._save_button = self.builder.get_object("passphrase_save_button")
self._strength_bar = Gtk.LevelBar()
self._strength_label = self.builder.get_object("strength_label")
# These will be set up later.
self._pwq = None
self._pwq_error = None
self.passphrase = ""
def refresh(self):
super(PassphraseDialog, self).refresh()
# disable input methods for the passphrase Entry widgets and make sure
# the focus change mask is enabled
self._passphrase_entry.set_property("im-module", "")
self._passphrase_entry.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, "")
self._passphrase_entry.add_events(Gdk.EventMask.FOCUS_CHANGE_MASK)
self._confirm_entry.set_property("im-module", "")
self._confirm_entry.add_events(Gdk.EventMask.FOCUS_CHANGE_MASK)
self._save_button.set_can_default(True)
# add the passphrase strength meter
self._strength_bar.set_mode(Gtk.LevelBarMode.DISCRETE)
self._strength_bar.set_min_value(0)
self._strength_bar.set_max_value(4)
box = self.builder.get_object("strength_box")
box.pack_start(self._strength_bar, False, True, 0)
box.show_all()
# set up passphrase quality checker
self._pwq = pwquality.PWQSettings()
self._pwq.read_config()
# initialize with the previously set passphrase
self.passphrase = self.data.autopart.passphrase
if not self.passphrase:
self._save_button.set_sensitive(False)
self._passphrase_entry.set_text(self.passphrase)
self._confirm_entry.set_text(self.passphrase)
self._update_passphrase_strength()
def run(self):
self.refresh()
self.window.show_all()
rc = self.window.run()
self.window.destroy()
return rc
def _update_passphrase_strength(self):
passphrase = self._passphrase_entry.get_text()
strength = 0
self._pwq_error = ""
try:
strength = self._pwq.check(passphrase, None, None)
except pwquality.PWQError as e:
self._pwq_error = e.args[1]
if strength < 50:
val = 1
text = _("Weak")
elif strength < 75:
val = 2
text = _("Fair")
elif strength < 90:
val = 3
text = _("Good")
else:
val = 4
text = _("Strong")
self._strength_bar.set_value(val)
self._strength_label.set_text(text)
def _set_entry_icon(self, entry, icon, msg):
entry.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, icon)
entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, msg)
def on_passphrase_changed(self, entry):
self._update_passphrase_strength()
if entry.get_text() and entry.get_text() == self._confirm_entry.get_text():
self._set_entry_icon(self._confirm_entry, "", "")
self._save_button.set_sensitive(True)
else:
self._save_button.set_sensitive(False)
if not self._pwq_error:
self._set_entry_icon(entry, "", "")
def on_passphrase_editing_done(self, entry, *args):
if self._pwq_error:
icon = "gtk-dialog-error"
msg = _(ERROR_WEAK) % self._pwq_error
self._set_entry_icon(entry, icon, msg)
def on_confirm_changed(self, entry):
if entry.get_text() and entry.get_text() == self._passphrase_entry.get_text():
self._set_entry_icon(entry, "", "")
self._save_button.set_sensitive(True)
else:
self._save_button.set_sensitive(False)
def on_confirm_editing_done(self, entry, *args):
passphrase = self._passphrase_entry.get_text()
confirm = self._confirm_entry.get_text()
if passphrase != confirm:
icon = "gtk-dialog-error"
msg = ERROR_NOT_MATCHING
self._set_entry_icon(entry, icon, _(msg))
self._save_button.set_sensitive(False)
else:
self._set_entry_icon(entry, "", "")
def on_save_clicked(self, button):
self.passphrase = self._passphrase_entry.get_text()
def on_entry_activated(self, entry):
if self._save_button.get_sensitive() and \
entry.get_text() == self._passphrase_entry.get_text():
self._save_button.emit("clicked")
| gautamMalu/XenInBox | pyanaconda/ui/gui/spokes/lib/passphrase.py | Python | gpl-2.0 | 6,169 |
from tkinter import *
from tksimpledialog import Dialog
from amcquestion import AMCQuestion
class EditQuestionDialog(Dialog):
def __init__(self, current_question, parent, title = None):
self.updated_question = None
self.question = current_question
Dialog.__init__(self, parent, title)
def body(self, master):
top_label = Label(master, text="Edit AMC question")
top_label.grid(row=0, columnspan=3)
self.question_label = StringVar()
self.question_label.set(self.question.get_label())
label_label = Label(master, text="Question label:")
label_label.grid(row=1, sticky=W)
label_entry = Entry(master, textvariable=self.question_label)
label_entry.grid(row=1, column=1, sticky=W)
self.question_text = StringVar()
self.question_text.set(self.question.get_question())
question_label = Label(master, text="Question text:")
question_label.grid(row=2, sticky=W)
question_entry = Entry(master, textvariable=self.question_text)
question_entry.grid(row=2, column=1, sticky=W)
self.correct_ans = IntVar()
self.correct_ans.set(self.question.get_correct())
self.ans_texts = []
ans_labels = []
ans_entries = []
ans_radios = []
rows_before_ans = 3
self.answer_count = 4
for n in range(self.answer_count):
self.ans_texts.append(StringVar())
if n < len(self.question.get_answers()):
self.ans_texts[n].set(self.question.get_answers()[n])
ans_labels.append(Label(master, text="Answer " + str(n + 1) + ":"))
ans_labels[n].grid(row=rows_before_ans + n, sticky=W)
ans_entries.append(Entry(master, textvariable=self.ans_texts[n]))
ans_entries[n].grid(row=rows_before_ans + n, column=1, sticky=W)
ans_radios.append(Radiobutton(master, variable=self.correct_ans,
value=n))
ans_radios[n].grid(row=rows_before_ans + n, column=2, sticky=W)
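        # Set the focus on the label Entry box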
return label_entry
def buttonbox(self):
box = Frame(self)
write_button = Button(box, text="Commit changes",
command=self.ok)
write_button.pack(side=LEFT, padx=5, pady=5)
cancel_button = Button(box, text="Cancel", command=self.cancel)
cancel_button.pack(side=LEFT, padx=5, pady=5)
box.pack()
def apply(self):
label = self.question_label.get()
question = self.question_text.get()
answers = [ans_text.get() for ans_text in self.ans_texts]
correct = self.correct_ans.get()
self.updated_question = AMCQuestion(label, question, answers, correct)
class CreateQuestionDialog(Dialog):
def __init__(self, parent, title = None):
self.new_question = None
Dialog.__init__(self, parent, title)
def body(self, master):
top_label = Label(master, text="New AMC question")
top_label.grid(row=0, columnspan=3)
self.question_label = StringVar()
label_label = Label(master, text="Question label:")
label_label.grid(row=1, sticky=W)
label_entry = Entry(master, textvariable=self.question_label)
label_entry.grid(row=1, column=1, sticky=W)
self.question_text = StringVar()
question_label = Label(master, text="Question text:")
question_label.grid(row=2, sticky=W)
question_entry = Entry(master, textvariable=self.question_text)
question_entry.grid(row=2, column=1, sticky=W)
self.correct_ans = IntVar()
self.ans_texts = []
ans_labels = []
ans_entries = []
ans_radios = []
rows_before_ans = 3
self.answer_count = 4
for n in range(self.answer_count):
self.ans_texts.append(StringVar())
ans_labels.append(Label(master, text="Answer " + str(n + 1) + ":"))
ans_labels[n].grid(row=rows_before_ans + n, sticky=W)
ans_entries.append(Entry(master, textvariable=self.ans_texts[n]))
ans_entries[n].grid(row=rows_before_ans + n, column=1, sticky=W)
ans_radios.append(Radiobutton(master, variable=self.correct_ans,
value=n))
ans_radios[n].grid(row=rows_before_ans + n, column=2, sticky=W)
# Set the focus on the label Entry box
return label_entry
def buttonbox(self):
box = Frame(self)
write_button = Button(box, text="Generate question",
command=self.ok, default='active')
write_button.pack(side=LEFT, padx=5, pady=5)
cancel_button = Button(box, text="Cancel", command=self.cancel)
cancel_button.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
def apply(self):
label = self.question_label.get()
question = self.question_text.get()
answers = [ans_text.get() for ans_text in self.ans_texts]
correct = self.correct_ans.get()
self.new_question = AMCQuestion(label, question, answers, correct)
| jarthurgross/amc_question_creator | amc_question_dialogs.py | Python | mit | 4,693 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper around
third_party/WebKit/Tools/Scripts/new-run-webkit-httpd"""
import os
import subprocess
import sys
def main():
cmd = [sys.executable]
  src_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(sys.argv[0]))))))
  script_dir = os.path.join(src_dir, "third_party", "WebKit", "Tools",
"Scripts")
script = os.path.join(script_dir, 'new-run-webkit-httpd')
cmd.append(script)
cmd.extend(sys.argv[1:])
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(main())
| aYukiSekiguchi/ACCESS-Chromium | webkit/tools/layout_tests/run_http_server.py | Python | bsd-3-clause | 800 |
import json
import requests
__author__ = 'tony petrov'
import datetime
import twython
from twython import TwythonStreamer, TwythonError
import random
import string
import constants
import constants as c
from utils import user_filter
import storage as st
import test_struct as test
import pytumblr
import threading
import facebook
import time
import copy
class TwitterStreamer(TwythonStreamer):
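    """TwythonStreamer subclass that forwards each received tweet to a user-supplied callback."""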
def set_callback(self, callback):
self._callback = callback
def set_error_handler(self, handler):
self._error_handler = handler
def on_success(self, data):
if data:
self._callback(data)
def on_error(self, status_code, data):
print status_code
self._error_handler()
class Credentials_Error(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Rate_Limit_Error(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TwitterHandler:
def __init__(self, acc_details, read_only=False, **kwargs):
self.id = kwargs['id']
if not acc_details or not 'app_key' in acc_details:
raise Credentials_Error('Missing account credentials')
if constants.TESTING:
self._twitter = test.MockTwitter()
else:
try:
if read_only:
self._twitter = twython.Twython(acc_details['app_key'], acc_details['app_secret'], oauth_version=2)
acc_details['oauth_token'] = self._twitter.obtain_access_token()
else:
if 'client_args' in kwargs:
self._twitter = twython.Twython(acc_details['app_key'], acc_details['app_secret'],
acc_details['oauth_token'], acc_details['oauth_token_secret'],
client_args=kwargs['client_args'])
else:
self._twitter = twython.Twython(acc_details['app_key'], acc_details['app_secret'],
acc_details['oauth_token'], acc_details['oauth_token_secret'])
self.acc_details = acc_details
except Exception as e:
print "Unable to login to twitter cause " + e.message
import sys
sys.exit()
self._scheduler = kwargs.get('scheduler', None)
self._list_attempts = 15
# Tries to fit new twitter accounts into either the twitter lists or the bulk lists
def fit_to_lists(self, candidates, lists, max_list_num, max_list_size, is_twitter_list):
""" Tries to fit the given candidate users in to either a twitter list or in to a bulk list """
# check if lists are maxed out
if (len(lists) > max_list_num) and lists[-1]['count'] >= max_list_size:
return candidates
if len(candidates) == 0:
return []
# check if we have any lists and if the last list has enough space to fit any of the candidates
if len(lists) > 0 and lists[-1]['count'] < max_list_size:
# determine the max amount of users the list can take have no more than the remaining capacity of the list
take = min(max_list_size - lists[-1]['count'], len(candidates))
# determine whether the candidates should go to the twitter lists or bulk lists
if is_twitter_list:
if self._list_attempts <= 0:
lists[-1]['count'] = self._twitter.get_specific_list(list_id=lists[-1]['id'])['member_count']
return candidates
try:
# determine how many users to add to the list
take = min(constants.TWITTER_ADD_TO_LIST_LIMIT, take)
# use the api to add the new users to the list
self._list_attempts -= 1
self._twitter.create_list_members(list_id=lists[-1]['id'],
user_id=','.join(map(lambda x: str(x), candidates[:take])))
# update the size of the list
lists[-1]['count'] = lists[-1]['count'] + take
except Exception as e:
print "Users could not be added to list in bulk cause: %s" % e.message
# twitter's bulk adding is bugged
print "Trying to add users 1 by 1"
try:
self._twitter.add_list_member(list_id=lists[-1]['id'], user_id=candidates[0])
lists[-1]['count'] = lists[-1]['count'] + 1
except Exception as e:
print "Adding users to list failed cause: %s" % e.message
self._list_attempts = 0
# error occurred can not put anything into the list return the remaining number of candidates
return candidates
else:
# put users into the last bulk list
lists[-1]['ids'] += candidates[:take]
# update the bulk list's size
lists[-1]['count'] = len(lists[-1]['ids'])
# remove the users we just added
candidates = candidates[take:]
# recurse and attempt to fit in more users
return self.fit_to_lists(candidates, lists, max_list_num, max_list_size, is_twitter_list)
# either no free list was found or no lists exist
else:
if is_twitter_list:
try:
# create a new twitter list
list_id = \
self._twitter.create_list(name=''.join(random.choice(string.lowercase) for i in range(12)),
mode='public')['id']
# add it to the crawler data base
lists.append({'id': list_id, 'count': 0})
if self._scheduler:
self._scheduler.__put_data__(constants.TASK_FETCH_LISTS, {'id': list_id, 'count': 0})
# recurse and try to fill the new list
return self.fit_to_lists(candidates, lists, max_list_num, max_list_size, is_twitter_list)
except Exception as e:
print "Twitter List could not be created " + e.message
else:
# calculate how many users we can take into a bulk list
take = min(max_list_size, len(candidates))
new_list = {'ids': candidates[:take], 'count': take}
# create a new bulk list for our users and put them in it
lists.append(new_list)
if self._scheduler:
self._scheduler.__put_data__(constants.TASK_BULK_RETRIEVE, new_list)
# remove the users we just added
candidates = candidates[take:]
# recurse and attempt to fit them into the same or new list
return self.fit_to_lists(candidates, lists, max_list_num, max_list_size, is_twitter_list)
# return the remaining users which we could not add to any list
return candidates
def __follow_users__(self, candidates, users, following):
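        """Follow as many candidates as the follow limits allow; queue the rest to be crawled without following."""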
try:
if candidates and following < constants.MAX_FOLLOWABLE_USERS:
# follow all the remaining users which we couldn't find a place for
import time
import random
take = min(constants.TWITTER_MAX_FOLLOW_REQUESTS, len(candidates))
for i in xrange(take):
try:
self._twitter.create_friendship(user_id=candidates[i], follow=True)
time.sleep(random.randint(1,
15)) # sleep for random amount of seconds to avoid twitter thinking that we're automated
except:
print "Could not follow user " + str(candidates[i])
return candidates
candidates = candidates[take:]
if candidates:
# if there are some users remaining see if we have space to follow their timeline without following them
take = min(constants.TWITTER_MAX_NUMBER_OF_NON_FOLLOWED_USERS, len(candidates))
members = candidates[:take]
users += members
if self._scheduler:
self._scheduler.__put_data__(constants.TASK_FETCH_USER, members)
candidates = candidates[take:]
except Exception as e:
print e.message
return candidates
def __get_friends_and_followers__(self, id, rqs_remaining):
""" Fetches the friends or followers of a specific user """
users = []
try:
# check if we have any get id requests remaining if so get the data and decrement
if (rqs_remaining[0] > 0):
users += self._twitter.get_friends_ids(user_id=id)["ids"]
users += self._twitter.get_followers_ids(user_id=id)["ids"]
rqs_remaining[0] -= 1
# check if we have any get list of user requests remaining
elif (rqs_remaining[1] > 0):
# remove all users with less than 150 tweets or who have private accounts and add the remaining users' ids to the list
users += [x["id"] for x in user_filter(self._twitter.get_friends_list(user_id=id)["users"])]
users += [x["id"] for x in user_filter(self._twitter.get_followers_list(user_id=id)["users"])]
rqs_remaining[1] -= 1
except TwythonError as e:
if e.error_code == 404:
print "User " + str(id) + "could not be found"
else:
print e.message
except Exception as e1:
print "An error occurred while getting friends and followers " + e1.message
rqs_remaining[0] = 0
rqs_remaining[1] = 0
return users
# attempts to find new accounts to follow
def explore(self, args):
"""Find new users to follow"""
remaining = args.get('remaining', [])
candidates = []
self._list_attempts = 15
user_lists = args.get('user_lists', [])
bulk_lists = args.get('bulk_lists', [])
users = args.get('total_followed', [])
total_followed = copy.deepcopy(users)
try:
# get our suggested categories
print "collecting slugs"
slugs = self._twitter.get_user_suggestions()
data = self._twitter.verify_credentials()
# get the people who we are following
following = self._twitter.get_friends_ids(screen_name=data['screen_name'])
print "%d users followed online" % len(following['ids'])
total_followed += following['ids'] + ([i for sl in bulk_lists for i in sl['ids']] if bulk_lists else [])
print "%d total users followed offline" % (len(total_followed) - len(following['ids']))
# get the total number of twitter users that we follow including bulk list users and twitter list users as well as non followed users
ff_requests = [14, 15] # friends_ids and followers_ids requests remaining
for s in [random.choice(slugs) for i in xrange(15)]:
# get some suggested users in the given category
new_users = list()
try:
new_users = self._twitter.get_user_suggestions_by_slug(slug=s['slug'])['users']
except TwythonError as e:
if e.error_code == 404:
print "Slug " + s['name'] + " could not be found"
else:
print "Could not retrieve data for slug = " + str(s['name']) + " due to " + e.message
continue
new_users = sorted(new_users, key=lambda k: k['friends_count'])
new_users.reverse()
new_users = [u['id'] for u in user_filter(new_users)]
friends = []
if ff_requests[0] > 0 or ff_requests[1] > 0:
for u in new_users:
friends += self.__get_friends_and_followers__(u, ff_requests)
# get the users which we currently do not follow only
candidates += list(set(new_users + friends) - set(total_followed + remaining))
# try to fit some of the candidates into the twitter lists
print str(len(candidates)) + " new users found"
candidates = candidates + remaining
candidates_total = len(candidates)
candidates = self.fit_to_lists(candidates, user_lists,
constants.TWITTER_MAX_NUMBER_OF_LISTS * constants.TWITTER_CYCLES_PER_HOUR,
constants.TWITTER_MAX_LIST_SIZE, True)
print str(candidates_total - len(candidates)) + " users added to twitter lists"
candidates_total = len(candidates)
# try to fit some users into the bulk lists
candidates = self.fit_to_lists(candidates, bulk_lists,
constants.TWITTER_MAX_NUM_OF_BULK_LISTS_PER_REQUEST_CYCLE * c.TWITTER_CYCLES_PER_HOUR
* c.TWITTER_ACCOUNTS_COUNT,
constants.TWITTER_BULK_LIST_SIZE, False)
print str(candidates_total - len(candidates)) + " users added to bulk lists"
candidates_total = len(candidates)
self.__follow_users__(candidates, users, len(following))
print "%d users added to offline following" % (candidates_total - len(candidates))
print str(len(candidates)) + " users left unallocated"
except Exception as e:
print "Error while exploring " + e.message
# an error occurred see if we have any candidates
if candidates:
# if yes then store their credentials offline since the error might be from Twitter
candidates = self.fit_to_lists(candidates, bulk_lists,
constants.TWITTER_MAX_NUM_OF_BULK_LISTS_PER_REQUEST_CYCLE * c.TWITTER_CYCLES_PER_HOUR
* c.TWITTER_ACCOUNTS_COUNT,
constants.TWITTER_BULK_LIST_SIZE, False)
self.__follow_users__(candidates, users, constants.MAX_FOLLOWABLE_USERS)
if constants.TESTING:
return [] # we are testing so just exit
# save the data
st.save_data(user_lists, constants.TWITTER_LIST_STORAGE)
st.save_data(bulk_lists, constants.TWITTER_BULK_LIST_STORAGE)
st.save_data(users, constants.TWITTER_USER_STORAGE)
return candidates
# fetches the crawler's timeline id specifies the id of the last returned tweet, if since is true
# crawler returns tweets that were posted after the given id
def fetch_home_timeline(self, task_data):
data = []
max_attempts = 15 # 15 is the max number of requests we can send for our timeline
current_id = task_data["id"]
update_fq = 60
while max_attempts > 0:
temp_data = []
if current_id == 0:
# if first run
# get timeline
temp_data += self._twitter.get_home_timeline(count=200)
# set current_id to the last id since we will request the old timeline
current_id = temp_data[-1]["id"]
# set task id to the top id since next time we will request the up to date timeline
task_data["id"] = temp_data[0]["id"]
elif task_data.get('since', False):
# requesting only new tweets
# sleep for 1 min to allow the timeline to refresh
time.sleep(update_fq)
# get data
temp_data += self._twitter.get_home_timeline(since_id=current_id)
# update current id and task id
current_id = temp_data[0]["id"]
task_data["id"] = current_id
else:
# collecting data for old tweets
temp_data += self._twitter.get_home_timeline(max_id=current_id)
current_id = temp_data[-1]["id"]
data += temp_data
max_attempts -= 1
# tell worker that next time we want the new tweets only
task_data['since'] = True
if not c.TESTING:
st.save_data(task_data, constants.TWITTER_WALL_STORAGE)
return data
# fetches the specified user's timeline
def fetch_user_timeline(self, user):
# count is set to 200 since that's the max that twitter would actually return despite the docs saying 3200
if c.FRESH_TWEETS_ONLY:
return self._twitter.get_user_timeline(user_id=user)
else:
return self._twitter.get_user_timeline(user_id=user, count=200) # user = 900 , app = 1500
# fetches the tweets in the specified list of _users
def fetch_list_tweets(self, list):
return self._twitter.get_list_statuses(list_id=list['id']) # 900
# fetches the latest tweets of up to 100 _users
# as specified in the user_ids list
def fetch_bulk_tweets(self, user_ids):
data = self._twitter.lookup_user(user_id=','.join(map(lambda x: str(x), user_ids['ids'])),
include_entities=True) # user = 900, app = 300
# convert data from user info to tweet
ret_data = []
if isinstance(data,list):
for i in data:
if 'status' in i:
try:
tweet = i['status']
i.pop('status')
tweet['user'] = i
ret_data.append(tweet)
except:
print i
elif isinstance(data,dict):
if 'status' in data:
tweet = data['status']
data.pop('status')
tweet['user'] = data
ret_data.append(tweet)
return ret_data
def search(self, q_params):
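        """Search recent tweets for the given keywords, packing them into OR-joined queries of up to 500 characters."""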
max_attempts = 180 # can't make more than 180 requests ref:https://dev.twitter.com/rest/reference/get/search/tweets
query_max_length = 500 # twitter won't accept queries with more than 500 chars
data = []
keywords = q_params['keywords']
keywords.reverse() # reverse since we will be using the list as a stack
while max_attempts > 0 and len(keywords) > 0:
query = keywords.pop()
if len(keywords) > 1: #if we have more than 1 keyword then we have to make a query
current_length = len(query)
# keep adding words to our query string until the length of the query is equal to query_max_length
while current_length < query_max_length and len(keywords) > 0:
op = ' OR '
if current_length + len(op) + len(keywords[-1]) < query_max_length:
query += op + keywords.pop()
current_length = len(query)
else:
break
# keywords = ' OR '.join(keywords)
max_attempts -= 1
try:
data += self._twitter.search(q=query, count=100, include_entities=True).get('statuses', [])
except Exception as e:
print query
print "Search failed cause: %s" % e.message
return data
def get_trends(self, args):
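        """Collect trending topic names, optionally restricted to given WOEIDs, a lat/long point or a named location."""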
trends = []
trend_filter = lambda data: map(lambda x: x['name'], data)
max_attempts = args.get('attempts', constants.MAX_TWITTER_TRENDS_REQUESTS)
if max_attempts == 0:
return []
if 'woeid' in args:
# search_queries for trends at a location with a specific where on earth id
try:
# go through left over woeids
for w in args['woeid'][:max_attempts]:
tmp = self._twitter.get_place_trends(id=w)
for t in tmp:
# filter each group of trends
trends += trend_filter(t['trends'])
max_attempts -= 1
except:
return trends
elif 'lat' in args and 'long' in args:
# search_queries for trends in a given geo box based on lat and long
try:
tmp = self._twitter.get_closest_trends(lat=args['lat'], long=args['long'])
for t in tmp:
trends += trend_filter(t['trends'])
except:
return trends
else:
# no location specified or the location is specified by name
# get relevant locations
locations = self._twitter.get_available_trends()
if 'location' in args:
# if any locations were specified by name filter the woeids based on region
loc = args['location'].lower()
locations = filter(lambda x: x['country'].lower() == loc or
x['name'].lower() == loc or
x['countryCode'].lower() == loc,
locations)
# get woeids
locations = map(lambda x: x['woeid'], locations)
# get trends
trends += self.get_trends(dict(woeid=locations, attempts=max_attempts))
return trends
class TumblrHandler:
def __init__(self, credentials):
self._client = pytumblr.TumblrRestClient(
credentials['oauth_key'], credentials['oauth_secret'], credentials['token'], credentials['token_secret']
)
self._rqs_per_cycle_remaining = constants.TUMBLR_MAX_REQUESTS_PER_HOUR
self._rqs_for_the_day = constants.TUMBLR_MAX_REQUESTS_PER_DAY
self._lock = threading.Lock()
def __dec_remaining_rqs__(self):
with self._lock:
self._rqs_per_cycle_remaining -= 1
self._rqs_for_the_day -= 1
def reset_requests(self):
"""Resets the hourly request quota to be called only by timer"""
if self._rqs_for_the_day > 0:
with self._lock:
self._rqs_per_cycle_remaining = constants.TUMBLR_MAX_REQUESTS_PER_HOUR
    def reset_daily_requests(self):
        """Resets the daily request quota; to be called only by timer"""
with self._lock:
self._rqs_for_the_day = constants.TUMBLR_MAX_REQUESTS_PER_DAY
def __get_rqs_remaining__(self):
with self._lock:
ret = self._rqs_per_cycle_remaining
return ret
def get_dashboard(self, last_id):
""" Get the posts on the authenticated user's wall"""
if self.__get_rqs_remaining__() <= 0:
return []
        data = self._client.dashboard()
        self.__dec_remaining_rqs__()
        # keep requesting until the last seen post id is reached or the hourly quota runs out
        while data[-1]['id'] != last_id and self.__get_rqs_remaining__() > 0:
            data += self._client.dashboard()
            self.__dec_remaining_rqs__()
        return data
def follow(self, user_id):
self.__dec_remaining_rqs__()
self._client.follow(user_id)
def get_post_with_tag(self, tag_info):
"""Gets posts containing a specific tag or group of tags"""
if self.__get_rqs_remaining__() <= 0:
return []
self.__dec_remaining_rqs__()
return self._client.tagged(tag_info['tags'], filter=tag_info['filter'])
def get_blog_posts(self, blog):
"""Gets the posts from a specific blog"""
if self.__get_rqs_remaining__() <= 0:
return []
self.__dec_remaining_rqs__()
return self._client.posts(blog)
class FacebookHandler:
def __init__(self, token):
self._graph = facebook.GraphAPI(access_token=token, version='2.1')
self._rqs_per_cycle_remaining = constants.FACEBOOK_MAX_REQUESTS_PER_HOUR
self._rqs_for_the_day = constants.FACEBOOK_MAX_REQUESTS_PER_HOUR * 24
self._lock = threading.Lock()
def __dec_remaining_rqs__(self):
with self._lock:
self._rqs_per_cycle_remaining -= 1
self._rqs_for_the_day -= 1
def __dec_remaining_rqs_by__(self, amount):
with self._lock:
self._rqs_per_cycle_remaining -= amount
self._rqs_for_the_day -= amount
def __get_rqs_remaining__(self):
ret = 0
with self._lock:
ret = self._rqs_per_cycle_remaining
return ret
def reset_requests(self):
"""Resets the hourly facebook request quota to be called only by timer"""
if self._rqs_for_the_day > 0:
with self._lock:
self._rqs_per_cycle_remaining = constants.FACEBOOK_MAX_REQUESTS_PER_HOUR
    def reset_daily_requests(self):
        """Resets the daily request quota; to be called only by timer"""
with self._lock:
self._rqs_for_the_day = constants.FACEBOOK_MAX_REQUESTS_PER_HOUR * 24
def get_posts_for_users(self, user_ids):
"""Fetches the posts made by a specific group of users"""
if len(user_ids) > self.__get_rqs_remaining__():
raise Rate_Limit_Error('Request will exceed Facebook rate limit')
# must decrease by number of all ids we requested cause facebook counts every id as a request
self.__dec_remaining_rqs_by__(len(user_ids))
url = 'https://graph.facebook.com/posts?ids=%s' % ','.join(user_ids)
parameters = {'access_token': self._graph.access_token}
r = requests.get(url, params=parameters)
result = json.loads(r.text)
return result
    def get_my_wall(self, **kwargs):
        """Fetches the posts on the authenticated user's wall"""
if self.__get_rqs_remaining__() <= 0:
raise Rate_Limit_Error('Facebook rate limit reached')
self.__dec_remaining_rqs__()
return self._graph.get_object('me/posts')['data']
| 2087829p/smores | smores/handlers.py | Python | mit | 26,442 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
("""
Usage:
img2txt.py <imgfile> [--maxLen=<n>] [--fontSize=<n>] [--color] [--ansi]"""
"""[--bgcolor=<#RRGGBB>] [--targetAspect=<n>] [--antialias] [--dither]
img2txt.py (-h | --help)
Options:
-h --help show this screen.
--ansi output an ANSI rendering of the image
--color output a colored HTML rendering of the image.
--antialias causes any resizing of the image to use antialiasing
--dither dither the colors to web palette. Useful when converting
images to ANSI (which has a limited color palette)
--fontSize=<n> sets font size (in pixels) when outputting HTML,
default: 7
--maxLen=<n> resize image so that larger of width or height matches
maxLen, default: 100px
--bgcolor=<#RRGGBB> if specified, is blended with transparent pixels to
produce the output. In ansi case, if no bgcolor set, a
fully transparent pixel is not drawn at all, partially
transparent pixels drawn as if opaque
--targetAspect=<n> resize image to this ratio of width to height. Default is
1.0 (no resize). For a typical terminal where height of a
character is 2x its width, you might want to try 0.5 here
""")
import sys
from docopt import docopt
from PIL import Image
import ansi
from graphics_util import alpha_blend
def HTMLColorToRGB(colorstring):
""" convert #RRGGBB to an (R, G, B) tuple """
colorstring = colorstring.strip()
if colorstring[0] == '#':
colorstring = colorstring[1:]
if len(colorstring) != 6:
raise ValueError(
"input #{0} is not in #RRGGBB format".format(colorstring))
r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]
r, g, b = [int(n, 16) for n in (r, g, b)]
return (r, g, b)
def generate_HTML_for_image(pixels, width, height):
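    """Render the pixels as HTML: one colored '▇' span per pixel, with a newline after each row."""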
string = ""
# first go through the height, otherwise will rotate
for h in range(height):
for w in range(width):
rgba = pixels[w, h]
# TODO - could optimize output size by keeping span open until we
# hit end of line or different color/alpha
# Could also output just rgb (not rgba) when fully opaque - if
# fully opaque is prevalent in an image
# those saved characters would add up
string += ("<span style=\"color:rgba({0}, {1}, {2}, {3});\">"
"▇</span>").format(
rgba[0], rgba[1], rgba[2], rgba[3] / 255.0)
string += "\n"
return string
def generate_grayscale_for_image(pixels, width, height, bgcolor):
# grayscale
color = "MNHQ$OC?7>!:-;. "
string = ""
# first go through the height, otherwise will rotate
for h in range(height):
for w in range(width):
rgba = pixels[w, h]
# If partial transparency and we have a bgcolor, combine with bg
# color
if rgba[3] != 255 and bgcolor is not None:
rgba = alpha_blend(rgba, bgcolor)
# Throw away any alpha (either because bgcolor was partially
# transparent or had no bg color)
# Could make a case to choose character to draw based on alpha but
# not going to do that now...
rgb = rgba[:3]
string += color[int(sum(rgb) / 3.0 / 256.0 * 16)]
string += "\n"
return string
def load_and_resize_image(imgname, antialias, maxLen, aspectRatio):
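    """Open imgname as RGBA, stretch the height by aspectRatio, then scale so the longer side equals maxLen."""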
if aspectRatio is None:
aspectRatio = 1.0
img = Image.open(imgname)
# force image to RGBA - deals with palettized images (e.g. gif) etc.
if img.mode != 'RGBA':
img = img.convert('RGBA')
# need to change the size of the image?
if maxLen is not None or aspectRatio != 1.0:
native_width, native_height = img.size
new_width = native_width
new_height = native_height
# First apply aspect ratio change (if any) - just need to adjust one axis
# so we'll do the height.
if aspectRatio != 1.0:
new_height = int(float(aspectRatio) * new_height)
# Now isotropically resize up or down (preserving aspect ratio) such that
# longer side of image is maxLen
if maxLen is not None:
rate = float(maxLen) / max(new_width, new_height)
new_width = int(rate * new_width)
new_height = int(rate * new_height)
if native_width != new_width or native_height != new_height:
img = img.resize((new_width, new_height), Image.ANTIALIAS if antialias else Image.NEAREST)
return img
def floydsteinberg_dither_to_web_palette(img):
# Note that alpha channel is thrown away - if you want to keep it you need to deal with it yourself
#
# Here's how it works:
# 1. Convert to RGB if needed - we can't go directly from RGBA because Image.convert will not dither in this case
# 2. Convert to P(alette) mode - this lets us kick in dithering.
# 3. Convert back to RGBA, which is where we want to be
#
# Admittedly converting back and forth requires more memory than just dithering directly
# in RGBA but that's how the library works and it isn't worth writing it ourselves
# or looking for an alternative given current perf needs.
if img.mode != 'RGB':
img = img.convert('RGB')
img = img.convert(mode="P", matrix=None, dither=Image.FLOYDSTEINBERG, palette=Image.WEB, colors=256)
img = img.convert('RGBA')
return img
def dither_image_to_web_palette(img, bgcolor):
if bgcolor is not None:
# We know the background color so flatten the image and bg color together, thus getting rid of alpha
# This is important because as discussed below, dithering alpha doesn't work correctly.
img = Image.alpha_composite(Image.new("RGBA", img.size, bgcolor), img) # alpha blend onto image filled with bgcolor
dithered_img = floydsteinberg_dither_to_web_palette(img)
else:
"""
It is not possible to correctly dither in the presence of transparency without knowing the background
that the image will be composed onto. This is because dithering works by propagating error that is introduced
when we select _available_ colors that don't match the _desired_ colors. Without knowing the final color value
for a pixel, it is not possible to compute the error that must be propagated FROM it. If a pixel is fully or
partially transparent, we must know the background to determine the final color value. We can't even record
the incoming error for the pixel, and then later when/if we know the background compute the full error and
propagate that, because that error needs to propagate into the original color selection decisions for the other
pixels. Those decisions absorb error and are lossy. You can't later apply more error on top of those color
decisions and necessarily get the same results as applying that error INTO those decisions in the first place.
So having established that we could only handle transparency correctly at final draw-time, shouldn't we just
dither there instead of here? Well, if we don't know the background color here we don't know it there either.
So we can either not dither at all if we don't know the bg color, or make some approximation. We've chosen
the latter. We'll handle it here to make the drawing code simpler. So what is our approximation? We basically
just ignore any color changes dithering makes to pixels that have transparency, and prevent any error from being
propagated from those pixels. This is done by setting them all to black before dithering (using an exact-match
color in Floyd Steinberg dithering with a web-safe-palette will never cause a pixel to receive enough inbound error
to change color and thus will not propagate error), and then afterwards we set them back to their original values.
This means that transparent pixels are essentially not dithered - they ignore (and absorb) inbound error but they
keep their original colors. We could alternately play games with the alpha channel to try to propagate the error
values for transparent pixels through to when we do final drawing but it only works in certain cases and just isn't
worth the effort (which involves writing the dithering code ourselves for one thing).
"""
# Force image to RGBA if it isn't already - simplifies the rest of the code
if img.mode != 'RGBA':
img = img.convert('RGBA')
rgb_img = img.convert('RGB')
orig_pixels = img.load()
rgb_pixels = rgb_img.load()
width, height = img.size
for h in range(height): # set transparent pixels to black
for w in range(width):
if (orig_pixels[w, h])[3] != 255:
rgb_pixels[w, h] = (0, 0, 0) # bashing in a new value changes it!
dithered_img = floydsteinberg_dither_to_web_palette(rgb_img)
dithered_pixels = dithered_img.load() # must do it again
for h in range(height): # restore original RGBA for transparent pixels
for w in range(width):
if (orig_pixels[w, h])[3] != 255:
dithered_pixels[w, h] = orig_pixels[w, h] # bashing in a new value changes it!
return dithered_img
if __name__ == '__main__':
dct = docopt(__doc__)
imgname = dct['<imgfile>']
maxLen = dct['--maxLen']
clr = dct['--color']
do_ansi = dct['--ansi']
fontSize = dct['--fontSize']
bgcolor = dct['--bgcolor']
antialias = dct['--antialias']
dither = dct['--dither']
target_aspect_ratio = dct['--targetAspect']
try:
maxLen = float(maxLen)
except:
maxLen = 100.0 # default maxlen: 100px
try:
fontSize = int(fontSize)
except:
fontSize = 7
try:
# add fully opaque alpha value (255)
bgcolor = HTMLColorToRGB(bgcolor) + (255, )
except:
bgcolor = None
try:
target_aspect_ratio = float(target_aspect_ratio)
except:
target_aspect_ratio = 1.0 # default target_aspect_ratio: 1.0
try:
img = load_and_resize_image(imgname, antialias, maxLen, target_aspect_ratio)
except IOError:
exit("File not found: " + imgname)
# Dither _after_ resizing
if dither:
img = dither_image_to_web_palette(img, bgcolor)
# get pixels
pixel = img.load()
width, height = img.size
if do_ansi:
# Since the "current line" was not established by us, it has been
# filled with the current background color in the
# terminal. We have no ability to read the current background color
# so we want to refill the line with either
# the specified bg color or if none specified, the default bg color.
if bgcolor is not None:
# Note that we are making the assumption that the viewing terminal
# supports BCE (Background Color Erase) otherwise we're going to
# get the default bg color regardless. If a terminal doesn't
# support BCE you can output spaces but you'd need to know how many
# to output (too many and you get linewrap)
fill_string = ansi.getANSIbgstring_for_ANSIcolor(
ansi.getANSIcolor_for_rgb(bgcolor))
else:
# reset bg to default (if we want to support terminals that can't
# handle this will need to instead use 0m which clears fg too and
# then when using this reset prior_fg_color to None too
fill_string = "\x1b[49m"
fill_string += "\x1b[K" # does not move the cursor
sys.stdout.write(fill_string)
sys.stdout.write(
ansi.generate_ANSI_from_pixels(pixel, width, height, bgcolor)[0])
# Undo residual color changes, output newline because
# generate_ANSI_from_pixels does not do so
# removes all attributes (formatting and colors)
sys.stdout.write("\x1b[0m\n")
else:
if clr:
# TODO - should handle bgcolor - probably by setting it as BG on
# the CSS for the pre
string = generate_HTML_for_image(pixel, width, height)
else:
string = generate_grayscale_for_image(
pixel, width, height, bgcolor)
# wrap with html
template = """<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<style type="text/css" media="all">
pre {
white-space: pre-wrap; /* css-3 */
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
word-wrap: break-word; /* Internet Explorer 5.5+ */
font-family: 'Menlo', 'Courier New', 'Consola';
line-height: 1.0;
font-size: %dpx;
}
</style>
</head>
<body>
<pre>%s</pre>
</body>
</html>
"""
html = template % (fontSize, string)
sys.stdout.write(html)
sys.stdout.flush()
| mackay/ble_detector | img2txt/_img2txt.py | Python | mit | 13,718 |
# ======================================================================
import numpy
vb = False
# Age of the Universe:
t0 = 13.73 # Gyr, XXX et al XXXX
t0err = 0.15
# Units etc:
Gyr2sec = 1e9*365.25*24*3600
Mpc2km = 1e6*3.0856e16/1e3
kms2MpcperGyr = Gyr2sec/Mpc2km
# print "To convert km/s to Mpc/Gyr, multiply by",kms2MpcperGyr
G = 4.30e-9 # km^2 s^-2 Mpc Msun^-1
# Solver limits:
tiny = numpy.pi/100.0
# Radial approach:
# Not setting upper limit too close to 2pi was key...
hardxlimits = numpy.array([numpy.pi+tiny,1.9*numpy.pi-tiny])
# Non-radial approach:
# e between 0 and 1 for a closed orbit:
hardelimits = numpy.array([tiny,1.0-tiny])
# ======================================================================
# The timing argument Local Group mass estimate.
def mass(D,vr,vt=None,approach='radial',t0scatter=False):
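    """Timing-argument mass estimate: D in Mpc, vr (and optional vt) in km/s; returns (M/Msun, a/Mpc, chi, e)."""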
global fr_constant,ft_constant
n = len(vr)
# Add scatter to t0?
if t0scatter:
t = numpy.random.normal(t0,t0err,n)
else:
t = t0
if (approach == 'radial'):
        fr_constant = (vr * kms2MpcperGyr) * t / D  # use the (possibly scattered) age t
xlimits = numpy.outer(numpy.ones(n),hardxlimits)
x = solve(fr,xlimits)
# No solution - hide low masses when plotting:
M = numpy.ones(n)*10.0**9.5
a = numpy.ones(n)*999.0
e = numpy.ones(n)
solved = numpy.where(x > hardxlimits[0])
# Sub back for M and a:
M[solved] = vr[solved]*vr[solved]*D[solved]/(G*(1.0+numpy.cos(x[solved])))
a[solved] = D[solved]/(1.0 - numpy.cos(x[solved]))
else:
# Include tangential speed vt in orbit calculation...
        ft_constant = numpy.sqrt(vr*vr + vt*vt) * kms2MpcperGyr * t / D
elimits = numpy.outer(numpy.ones(n),hardelimits)
e = solve(ft,elimits)
# No solution:
M = numpy.ones(n)*10.0**10.5
a = numpy.ones(n)*999.0
x = numpy.zeros(n)
        sinx = numpy.ones(n)
ecosx = numpy.ones(n)
solved = numpy.where(e > hardelimits[0])
# Sub back for chi, M and a:
sinx[solved] = (vr[solved]/vt[solved])*numpy.sqrt(1.0-e[solved]*e[solved])/e[solved]
ecosx[solved] = e[solved]*numpy.sqrt(1.0-sinx[solved]*sinx[solved])
        x[solved] = numpy.arcsin(sinx[solved])
a[solved] = D[solved]/(1.0 - ecosx[solved])
M[solved] = (vr[solved]*vr[solved]+vt[solved]*vt[solved])*D[solved]/(G*(1.0+ecosx[solved]))
return M,a,x,e
# ----------------------------------------------------------------------
# Function (of chi) to solve in radial approach case.
def fr(x,i):
global fr_constant
t = fr_constant[i] - numpy.sin(x[i])*(x[i]-numpy.sin(x[i]))/(1.0-numpy.cos(x[i]))**2
if vb: print " In fr: x,f = ",x[i],t
return t
# ----------------------------------------------------------------------
# Function (of e) to solve in non-radial approach case.
# UNTESTED
def ft(e,i):
global ft_constant,vr,vt
sinx = (vr/vt)*numpy.sqrt(1.0-e*e)/e # Closed orbit requires |sinx| < 1
cosx = numpy.sqrt(1.0-sinx*sinx)
# Warning - no guarantee |sinx| < 1...
    x = numpy.arcsin(sinx) + 2.0*numpy.pi    # to ensure [pi:2pi]
    t = ft_constant[i] - (x[i] - (e[i]*sinx[i])/(1.0-e[i]*numpy.cos(x[i])))*numpy.sqrt((1.0+e[i]*cosx[i])/(1.0-e[i]*cosx[i]))
if vb: print " In ft: e,f = ",e[i],t
return t
# ----------------------------------------------------------------------
# Simple bisection rootfinder. Note that x is a vector.
def solve(func,xlimits,tol=1e-5):
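    """Vectorised root finder: xlimits is an (n,2) array of brackets; each system is iterated until |f| < tol."""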
# Initialise arrays:
x0 = xlimits[:,0]
x1 = xlimits[:,1]
n = 1.0*len(x0)
fb = numpy.ones(n)*tol*1000
u = numpy.where(fb > tol) # index marking unsolved systems, here
# used to pick out all systems at start.
f0 = func(x0,u)
f1 = func(x1,u)
df = f1-f0
xb = numpy.zeros(n)
u = numpy.where(f0*f1 > 0.0) # index marking unsolved systems, here
# used to flag unsolvable systems.
fb[u] = 0.0
xb[u] = x0[u]*f1[u]/df[u] - x1[u]*f0[u]/df[u]
u = numpy.where(numpy.abs(fb) > tol) # index marking unsolved systems
if vb: print " solve: fb = ",fb, "tol = ",tol
if vb: print " solve: still working on",u,u[0]
# Now do bisections, updating either x0 or x1 until fb hits tol in
# each case - only have to calculate for the u-indexed systems:
i = 0
while len(u[0]) > 0:
i += 1
if vb: print " solve: iteration",i,", unsolved count = ",len(u[0])
# Evaluate function at current limits, and reset unfinished index:
f0[u] = func(x0,u)
f1[u] = func(x1,u)
df[u] = f1[u] - f0[u]
xb[u] = x0[u]*f1[u]/df[u] - x1[u]*f0[u]/df[u]
fb[u] = func(xb,u)
un = (numpy.abs(fb) > tol)
u = numpy.where(un)
if vb: print " solve: xb = ",xb
if vb: print " solve: fb = ",fb, "tol = ",tol
if vb: print " solve: still working on",u,u[0]
# Update limits to better bracket the roots:
m0 = numpy.where((un) & (numpy.sign(fb) == numpy.sign(f0)))
m1 = numpy.where((un) & (numpy.sign(fb) == numpy.sign(f1)))
x0[m0] = xb[m0]
x1[m1] = xb[m1]
if vb: print " solve: updated x0,x1 = ",x0,x1
return xb
# ======================================================================
# Testing:
if __name__ == '__main__':
vr = numpy.array([-120.0,-130.0,-140.0])
D = numpy.array([700.0,800.0,900.0])/1000.0
vb = False
print "Radial velocity vr/kms^-1 = ",vr
print "Distance D/Mpc = ",D
    M,a,chi,e = mass(D,vr,t0scatter=True)
print "Mass estimates M/Msun = ",M
print "Semi-major axes a/Mpc = ",a
print "Eccentric anomalies chi/pi = ",chi/numpy.pi
| drphilmarshall/LocalGroupHaloProps | localgroup/timingargument.py | Python | gpl-2.0 | 5,676 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Create a CSV with each race since 1990, with the rid and the Wikidata qid.
"""
import os
import requests
from bs4 import BeautifulSoup
import re
import csv
race_qids= {}
root_dir = os.environ['HOME'] + "/Dropbox/finnmarkslopet/"
with open(root_dir + 'finnmarkslopet-qid-temp.csv', 'r') as csv_in:
reader = csv.reader(csv_in)
for row in reader:
race_qids[row[1]] = row[0]
csv_in.closed
root_url = 'http://www.finnmarkslopet.no'
index_url = root_url + '/rhist/results.jsp?lang=en'
response = requests.get(index_url)
soup = BeautifulSoup(response.text)
table = soup.select('.winners tr')
table.pop(0)
with open(root_dir + 'finnmarkslopet-qid.csv', 'w') as csv_out:
fieldnames = ["race", "rid", "qid"]
writer = csv.DictWriter(csv_out, fieldnames=fieldnames)
writer.writeheader()
for row in table:
year = row.strong.string
links = row.find_all("a")
writer.writerow({
'race': year + ' FL1000',
'rid': re.search("openRaceWnd\('(?P<id>[0-9]*)'\)", links[0].get('href')).group("id"),
'qid':race_qids[year + " FL1000"]
});
if len(links) > 1:
writer.writerow({
'race': year + ' FL500',
'rid': re.search("openRaceWnd\('(?P<id>[0-9]*)'\)", links[1].get('href')).group("id"),
'qid':race_qids[year + " FL500"]
});
if len(links) == 3:
writer.writerow({
'race': year + ' FL Junior',
'rid': re.search("openRaceWnd\('(?P<id>[0-9]*)'\)", links[2].get('href')).group("id"),
'qid':race_qids[year + " FL Junior"]
});
csv_out.closed | Ash-Crow/mush-bot | finnmarkslopet-scraper.py | Python | bsd-3-clause | 1,539 |
"""
pyphoto.flickr_uploader
~~~~~~~~~~~~~~~~
Uploads photos to flickr.
:copyright: (c) 2013 by Michael Luckeneder.
"""
from __future__ import absolute_import
import flickrapi
import logging
from urllib2 import HTTPError
from flickrapi.exceptions import FlickrError
from .retrier import retrier
from functools import partial
LOG = logging.getLogger()
class FlickrUploader(object):
"""represents a flickr instance"""
def __init__(self, **kwargs):
self.sets = None
self.photos = []
self.flickr_key = kwargs.pop('flickr_key')
self.flickr_secret = kwargs.pop('flickr_secret')
self.flickr_namespace = kwargs.get('flickr_namespace', "pyphoto")
self.pyphoto_setid = None
# get auth token and update sets, photos
self.flickr = self._authorize_flickr()
self.update_from_server()
def update_from_server(self):
"""update set sand photos from flickr"""
self.sets = self.get_sets()
self.photos = self.get_photos()
@retrier
def get_sets(self):
"""get a dict of sets
:return: dict()
"""
# get response
sets_response = self.flickr.photosets_getList()
set_list = list(sets_response)[0]
sets = {}
# generate set list
for photo_set in set_list.iter('photoset'):
title = photo_set.find('title').text
description = photo_set.find('description').text
set_info = {'id': photo_set.attrib['id'],
'date_update': photo_set.attrib['date_update'],
'description': description}
sets[title] = set_info
return sets
@retrier
def upload_file(self, filename, guid):
"""uploads an image with filename and guid"""
res = self.flickr.upload(filename=filename,
title=guid,
description=guid,
is_public=0,
is_family=0,
is_friend=0)
# return the flickr id of the photo
return res.find('photoid').text
def get_photos(self, items_per_page=500):
"""get a list of photos
:return: dict()
"""
# TODO compress
if self.flickr_namespace not in self.sets:
return []
# monkey patch for flickrapi problem
pages = partial(self.flickr.photosets_getPhotos,
photoset_id=self.sets[self.flickr_namespace]["id"],
per_page=items_per_page)
pages = retrier(pages)
num_pages = page = 1
photos = {}
while page <= num_pages:
LOG.debug("Retrieving page %i of set %s",
page, self.flickr_namespace)
res = pages(page=page)
num_pages = int(res.find('photoset').get('pages'))
page += 1
for photo in list(res)[0].iter('photo'):
photos[photo.attrib['title']] = photo.attrib
# photo_list.append(list(res.find('photo')))
# for photo in res.find('photos'):
# if photo.get('title') == guid:
# return photo.get('id')
# return None
# photo_list = list(photos)[0]
# photos = {}
# for photo in photo_list:
# photos[photo.attrib['title']] = photo.attrib
return photos
def get_photos_for_set(self, photoset_id):
"""get a list of photos
:return: dict()
"""
# monkey patch for flickrapi problem
pages = partial(self.flickr.photosets_getPhotos,
photoset_id=photoset_id,
per_page=500)
pages = retrier(pages)
num_pages = page = 1
photos = {}
while page <= num_pages:
LOG.debug("Retrieving page %i of set %s",
page, photoset_id)
res = pages(page=page)
num_pages = int(res.find('photoset').get('pages'))
page += 1
for photo in list(res)[0].iter('photo'):
photos[photo.attrib['title']] = photo.attrib
# photo_list.append(list(res.find('photo')))
# for photo in res.find('photos'):
# if photo.get('title') == guid:
# return photo.get('id')
# return None
# photo_list = list(photos)[0]
# photos = {}
# for photo in photo_list:
# photos[photo.attrib['title']] = photo.attrib
return photos
@retrier
    def create_set(self, title, photo_id):
        """Create a new photoset with the given title and primary photo; return the new set id."""
if not (title and photo_id):
return False
res = self.flickr.photosets_create(title=title,
primary_photo_id=photo_id)
self.update_from_server()
return res.find('photoset').attrib['id']
@retrier
    def add_photo_to_set(self, setid, photoid):
        """Add the photo with id photoid to the set with id setid."""
self.flickr.photosets_addPhoto(photoset_id=setid,
photo_id=photoid)
return True
    def get_photo_by_guid(self, guid):
        """Return the flickr photo id for the photo titled with this GUID, or None if unknown."""
        if guid not in self.photos:
return None
return self.photos[guid]["id"]
# monkey patch for flickrapi problem
# pages = partial(self.flickr.photos_search, user_id="me", per_page=500)
# num_pages = page = 1
# while page <= num_pages:
# LOG.debug("Retrieving page %i" % (page))
# res = pages(page=page)
# num_pages = int(res.find('photos').get('pages'))
# page += 1
# for photo in res.find('photos'):
# if photo.get('title') == guid:
# return photo.get('id')
# return None
def delete_orphaned_photos(self):
"""delete photos that were uploaded but don't exist
in any set
"""
LOG.info("deleting orphaned photos")
set_photos = []
for k, v in self.sets.items():
if k == self.flickr_namespace:
continue
set_photos.extend(self.get_photos_for_set(v["id"]))
orphaned_photos = [p for p in self.photos if p not in set_photos]
for photoid in orphaned_photos:
LOG.info("delete photo %s", photoid)
self.delete_photo_by_guid(photoid)
if len(orphaned_photos) > 0:
self.update_from_server()
def delete_photo_by_guid(self, guid):
"""deletes a photo by GUID string"""
photo_id = self.get_photo_by_guid(guid)
if not photo_id:
return None
retval = self.flickr.photos_delete(photo_id=photo_id)
return retval
def _authorize_flickr(self, perms="delete"):
"""taken from flickrapi source, generates auth token and authorizes
everything
"""
flickr = flickrapi.FlickrAPI(self.flickr_key, self.flickr_secret)
(token, frob) = flickr.get_token_part_one(perms=perms)
if not token:
raw_input("Press ENTER after you authorized this program")
token = flickr.get_token_part_two((token, frob))
return flickrapi.FlickrAPI(self.flickr_key, self.flickr_secret,
token=token)
# def _initialize_main_set(self):
def upload_photo(self, guid, setname, photopath):
"""upload a photo and handle set issues"""
photoid = None
photo_exists = False
if guid not in self.photos:
LOG.info(" Uploading Image: %s",
guid)
photoid = self.upload_file(photopath, guid)
else:
LOG.info(" Image exists: %s", guid)
photo_exists = True
photoid = self.get_photo_by_guid(guid)
# if pyphoto set doesn't exist yet
if self.flickr_namespace not in self.sets.keys():
self.pyphoto_setid = self.create_set(
self.flickr_namespace, photoid)
elif not photo_exists:
if not self.pyphoto_setid:
self.pyphoto_setid = self.sets[self.flickr_namespace]["id"]
self.add_photo_to_set(self.pyphoto_setid, photoid)
if setname not in self.sets.keys():
LOG.debug("Creating new set: %s", setname)
setid = self.create_set(setname, photoid)
elif not photo_exists:
setid = self.sets[setname]["id"]
self.add_photo_to_set(setid, photoid)
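# Minimal usage sketch (not part of the original module): the key/secret/path values below are
# placeholders, and constructing FlickrUploader runs the interactive Flickr authorization flow.
#
#     uploader = FlickrUploader(flickr_key='API_KEY', flickr_secret='API_SECRET',
#                               flickr_namespace='pyphoto')
#     uploader.upload_photo('photo-guid-0001', 'Holidays 2013', '/path/to/photo.jpg')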
| optiminimalist/pyphoto | pyphoto/flickr_uploader.py | Python | mit | 8,573 |
#!/usr/bin/python
'''
Overlay classes and satellite data
'''
import os,csv,json
import utils.gdal_rasterize as gdal_rasterize
from PIL import Image
from functools import partial
from multiprocessing import Pool
from utils.coordinate_converter import CoordConvert
from utils.getImageCoordinates import imageCoordinates
from utils.project import project,projectRev
from modules.getFeatures import latLon,find_between,find_before
from modules.get_stats import get_stats
from libs.colorlist import colorlist
from libs.foldernames import *
def rasterLayer(i,stats,subpath,size,te):
'''converts feature geojson to png image'''
feature=str(stats[i])
i+=1
print "Layer "+str(i)+"/"+str(len(stats))+'\t Processing feature: '+feature
outFile=subpath+"/f_"+str(i)+".png"
featureFile =subpath+"/"+featureDataFolder+"/f_"+str(i)+".json"
try:
os.remove(outFile)
except OSError:
pass
gdal_rasterize.rasterize( \
featureFile,
outFile,
ts=size, # set resolution
out_format='PNG',
init=0,
te=te,
burn_values=[i]) # set image limits in te
#make 0s transparent to prepare for merge
img = Image.open(outFile)
img = img.convert("RGBA")
datas = img.getdata()
newData = []
for item in datas:
if item[0] == 0 and item[1] == 0 and item[2] == 0:
newData.append((0, 0, 0, 0))
else:
newData.append(item)
img.putdata(newData)
img.save(outFile, "PNG")
#os.remove(featureFile)
def createLayer(i,stats,subpath,inputFile,key):
'''creates sub geojson files with only one feature property'''
feature=stats[i]
i+=1
print "Processing feature:",feature,i,"/",len(stats)
featureFile = subpath+"/"+featureDataFolder+"/f_"+str(i)+".json"
if not os.path.isfile(featureFile):
print "\tOpening input file... ",
with open(inputFile,'r') as f:
elementsInstance=json.load(f)
print "Done."
#delete every item that is not feature
cntdel=0
print "\tExtracting layers... ",
for i in range(0,len(elementsInstance['features'])):
#print elementsInstance['features'][cntdel]['properties']
if str(elementsInstance['features'][cntdel]['properties'][key])!=feature:
#print "del", elementsInstance['features'][cnt_featureelement]
del elementsInstance['features'][cntdel]
else:
cntdel+=1
#for i in range(0,len(elementsInstance['features'])):
# print elementsInstance['features'][i]['properties']
print "Done."
print "\tStoring layer file... ",
try:
os.remove(featureFile)
except OSError:
pass
with open(featureFile,'w+') as f:
json.dump(elementsInstance,f)
print "Done."
else:
print "\tFile",featureFile,"already exists."
def overlay(outputFolder,inputFile,xpixel=480,ypixel=360,zoomLevel=None,lonshift=0,latshift=0,
shiftformat=1,top=10,stats=None,count=None,key='Descriptio',epsg=None,
elements=None,randomImages=False,sat=None):
    '''
    Overlays the satellite images in the sat image folder
    with the GIS data in inputFile and writes class-label training images.
    '''
if not zoomLevel:
print "Warning: Zoom level not set. Assuming zoom level 17."
zoomLevel = 17
#Get list of images
samples_data = {}
#set outputFolder to directory above the /sat directory
if outputFolder[-1]=="/":
outputFolder=outputFolder[0:-1]
if outputFolder[-3:]==satDataFolder[1:-1]:
outputFolder=outputFolder[0:-4]
#Make directory for subfiles
subpath=outputFolder+"/"+os.path.split(inputFile)[-1][:-5]
for subsubpath in ['',trainingDataFolder,checkDataFolder,testDataFolder,featureDataFolder]:
if not os.path.isdir(subpath+subsubpath):
os.mkdir(subpath+subsubpath)
print 'Directory',subpath+subsubpath,'created'
#load data and check if images in folder
#has to be png image and start with input filename
if sat: #if sat folder externally provided
sat = sat+'/'
else:
sat = subpath+satDataFolder
listImages=os.listdir(sat)
if epsg!=9999:
image_files = [f for f in listImages if f.endswith('.png') \
and f.startswith(os.path.split(inputFile)[-1][:-5])]
else:
image_files = [f for f in listImages if f.endswith('.png')]
if len(image_files)==0:
print "Error: No images found in",sat[0:-1]
exit()
else:
print "Number of images found:",len(image_files)
if count:
print "Create",count,"images"
else:
print "Create",len(image_files),"images"
print 'Get GIS elements...'
if not elements:
print 'Opening %s...' % inputFile
with open(inputFile, 'r') as f:
elements = json.load(f)
#Get statistics if not in input
if not stats:
stats,freq,_=get_stats(inputFile,top,verbose=True,key=key,\
elements=elements)
else:
freq=None
#Create json-file for each layer
print "Create layer files..."
if len(stats)>0:
if os.path.getsize(inputFile)>220000000:
print "Very large input file of size ~",\
int(os.path.getsize(inputFile)/1000000),"MB"
print "Clearing memory...",
elements=True
for i in range(0,len(stats)):
createLayer(i,stats,subpath,inputFile,key)
print 'Reopening %s...' % inputFile
with open(inputFile, 'r') as f:
elements = json.load(f)
#initialize multi-core processing if file size not too large
else:
pool = Pool()
print 'Map to cores...'
#create subfile for each feature
#pool only takes 1-argument functions
partial_createLayer=partial\
(createLayer,stats=stats,subpath=subpath,inputFile=inputFile,key=key)
pool.map(partial_createLayer, range(0,len(stats)))
pool.close()
pool.join()
else: #empty feature map
print "No features found. Exiting."
#exit()
stats = [0] #set feature length to 1
featureFile = subpath+"/"+featureDataFolder+"/f_1.json"
emptyjson=\
'''{
"type": "FeatureCollection",
"features": []
}'''
with open(featureFile,"w+") as f:
f.write(emptyjson)
print "Layer files created..."
#Get coordinate system
myCoordConvert = CoordConvert()
code=myCoordConvert.getCoordSystem(elements,epsg)
#Get imageconverter
myImageCoord=imageCoordinates(xpixel,ypixel,'libs/zoomLevelResolution.csv',zoomLevel)
av_lats=[]
av_lons=[]
cnt = 1
for image in image_files:
#rasterize corresponding data
print ''
if count: #abort if maximum limit set and cnt above maximum limit
if cnt>count:
break
# The index is between the last underscore and the extension dot
print str(cnt)+'/'+str(len(image_files))
index = int(find_between(image,"_",".png"))
if randomImages: #get coordinates for random images
av_lon=None
with open(sat+"meta.csv","rb") as csvfile:
coordFile = list(csv.reader(csvfile,delimiter=",",quotechar='"'))
for coord in coordFile:
if coord[0]==str(index):
av_lon=coord[1]
av_lat=coord[2]
break
if not av_lon:
av_lon,av_lat=latLon(elements['features'][index]) # get center points
else:
if code != 9999:
av_lon,av_lat=latLon(elements['features'][index]) # get center points
else:
image_index=find_before(image,'___')
av_lon=int(find_between(image,'___','_',True))
av_lat=int(find_between(image,'_','.png',False))
#Convert to standard format
print code
if code != 4319: # if not already in wgs84 standard format
if code != 9999:
lotlan= myCoordConvert.convert(av_lon,av_lat)
longitude=lotlan[0]
latitude=lotlan[1]
else:
with open(imgsizefile,"rb") as csvfile:
imgSizes= list(csv.reader(csvfile,delimiter=",",quotechar='"'))
for imgSize in imgSizes:
if imgSize[0]==image_index:
W=int(imgSize[1])
H=int(imgSize[2])
break
lotlan_init= projectRev(av_lon,av_lat,image_index,'.',W,H)
longitude=lotlan_init[0]
latitude=lotlan_init[1]
lotlan_b= projectRev(av_lon+xpixel,av_lat+ypixel,image_index,'.',W,H)
longitude_b=lotlan_b[0]
latitude_b=lotlan_b[1]
else: #if already in wgs84 format
latitude= av_lat
            longitude= av_lon
print "Coordinates WSG84: "+str(longitude)+','+str(latitude)
if (av_lon != longitude) and (av_lat != latitude):
print "Coordinates Native: "+str(av_lon)+','+str(av_lat)
#Calculate image coordinates in WSG 84
if code!=9999:
image_box_lat,image_box_lon= myImageCoord.getImageCoord(latitude,longitude)
else:
image_box_lat=[latitude,latitude,latitude_b,latitude_b]
image_box_lon=[longitude,longitude_b,longitude,longitude_b]
#print 'Coordinates:',latitude,longitude
#Convert back to original format
if code != 4319: # if not already in wgs84 standard format
if code != 9999:
image_box=\
myCoordConvert.convertBack(image_box_lon,image_box_lat)
else:
image_box=[image_box_lon,image_box_lat]
else:
image_box=[image_box_lon,image_box_lat]
image_lat = image_box[1]
image_lon = image_box[0]
#print "Coordinates Native corner: "+str(image_lon[0])+','+str(image_lat[0])
#print "Coordinates WSG84 corner: "+str(image_box_lon[0])+','+str(image_box_lat[0])
cnt+=1
tifile=subpath+trainingDataFolder+\
os.path.split(image)[-1][0:-4]+"train.png" #path for raster tif file
print 'Converting',image,'to',os.path.split(tifile)[-1]
#shift factor
west=(image_lon[0]+image_lon[2])/2
south=min(image_lat)
east=(image_lon[1]+image_lon[3])/2
north=max(image_lat)
if shiftformat == 0: #if fraction as shift unit
lonshift_calc=lonshift*abs(east-west)
latshift_calc=latshift*abs(north-south)
else:
lonshift_calc=lonshift
latshift_calc=latshift
print "Shift applied:"
print "Longitudinal \t",lonshift_calc
print "Lateral \t",latshift_calc
#set rasterize settings
size=[xpixel,ypixel]
te=[west-lonshift_calc,south-latshift_calc,\
east-lonshift_calc,north-latshift_calc] #image bounderies
print te
#print te
print "Image bounderies:"
print str(image_box_lon[0])[:-5],'\t',\
str(image_box_lat[0])[:-5],'\t----\t----\t----\t----\t----\t----',\
str(image_box_lon[1])[:-5],'\t',str(image_box_lat[1])[:-5]
print '\t|\t\t\t\t\t\t\t\t\t\t|\t'
print '\t|\t\t\t\t\t\t\t\t\t\t|\t'
print str(image_box_lon[2])[:-5],'\t',\
str(image_box_lat[2])[:-5],'\t----\t----\t----\t----\t----\t----',\
str(image_box_lon[3])[:-5],'\t',str(image_box_lat[3])[:-5]
#rasterize
#rasterLayer(0,stats,subpath,size,te)
if os.path.getsize(inputFile)>500000000:
print "Very large input file size ~",\
int(os.path.getsize(inputFile)/1000000),"MB"
for i in range(0,len(stats)):
rasterLayer(i,stats,subpath,size,te)
else:
pool = Pool()
print 'Map to cores...'
#pool only takes 1-argument functions, so create partial function
partial_rasterLayer=\
partial(rasterLayer,stats=stats,subpath=subpath,size=size,te=te)
pool.map(partial_rasterLayer, range(0,len(stats)))
pool.close()
pool.join()
#create output file
try: #remove output file, if it already exists
os.remove(tifile)
except OSError:
pass
#merge first two pictures
print "Merging images..."
imgFile=subpath+"/f_"+str(1)+".png"
background = Image.open(imgFile)
if len(stats)>1:
imgFile=subpath+"/f_"+str(2)+".png"
foreground = Image.open(imgFile)
background.paste(foreground, (0, 0), foreground)
background.save(tifile)
if len(stats)>2:
for i in range(3,len(stats)+1):
imgFile=subpath+"/f_"+str(i)+".png"
background = Image.open(tifile)
foreground = Image.open(imgFile)
background.paste(foreground, (0, 0), foreground)
background.save(tifile)
#Create test images for visual checks
checkfile=subpath+checkDataFolder+os.path.split(image)[-1][0:-4]+"check.png" #path for check files
try:
os.remove(checkfile)
except OSError:
pass
background = Image.open(sat+image)
brightened = Image.open(tifile)
#brighten up visual images for check file
#make 0s transparent to prepare for merge
datas = brightened.getdata()
newData = []
print "Creating check image..."
try:
for item in datas:
for i in range(0,len(stats)):
if item[0] == 0 and item[1] == 0 and item[2] == 0 and item[3]==0:
newData.append(item)
break
elif item[0] == i and item[1] == i and item[2] == i and item[3]==255:
newData.append(colorlist[i-1])
break
brightened.putdata(newData)
except IndexError:
print "Warning: Color list for visual check file too short"
background.paste(brightened, (0, 0), brightened)
background.save(checkfile)
#convert back to grayscale
img = Image.open(tifile)
img = img.convert("L")
img.save(tifile, "PNG")
print "Class label image",tifile," and check image created."
#Clean up side data
print "Cleanup..."
for i in range(1,len(stats)+1):
# try:
# os.remove(subpath+"/f_"+str(i)+".json")
# except OSError:
# pass
try:
os.remove(subpath+"/f_"+str(i)+".png")
except OSError:
pass
try:
os.remove(subpath+"/f_"+str(i)+".png.aux.xml")
except OSError:
pass
print "Overlaying done."
return stats,freq
# try:
# os.rmdir(subpath)
# except OSError:
# pass
| worldbank/cv4ag | modules/overlay.py | Python | mit | 12,673 |
__all__ = ["GentooServiceProvider"]
from kokki.providers.service import ServiceProvider
class GentooServiceProvider(ServiceProvider):
def enable_runlevel(self, runlevel):
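        # No-op: runlevel handling is not implemented for this provider.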
pass
| samuel/kokki | kokki/providers/service/gentoo.py | Python | bsd-3-clause | 191 |
import requests
try:
from json.decoder import JSONDecodeError
except ImportError:
class JSONDecodeError(ValueError):
pass
from ..constants import OK, WARNING, ERROR
from ..decorators import timed
from . import Resource
class Service(Resource):
"""
Checks the status of a service with an HTTP GET request.
:param url: The URL of the service's status endpoint.
:param headers: A dictionary of headers to include with the check.
:param timeout: The number of seconds before an error is returned.
"""
def __init__(self, name, url, headers=None, timeout=1.0, **kwargs):
self.url = url
self.timeout = timeout
self.headers = headers
super(Service, self).__init__(name=name, description=url, **kwargs)
@timed
def check(self):
result = super(Service, self).check()
try:
self.last_response = None
self.last_response = self.get_http_response()
self.last_response.raise_for_status()
return dict(result, status=OK)
except requests.RequestException as e:
return dict(result, status=ERROR, error=str(e))
def get_http_response(self):
return requests.get(
self.url,
headers=self.headers,
timeout=self.timeout
)
class ServiceWithCanary(Service):
"""
Checks the status of a service with a canary endpoint.
Reports an error if no JSON can be decoded from the remote canary.
:param url: The URL of the service's canary endpoint.
"""
def check(self):
result = super(ServiceWithCanary, self).check()
status = result.get('status')
try:
service_result = self.last_response.json()
service_status = service_result.get('status')
if status == OK and service_status == WARNING:
status = WARNING
elif service_status == ERROR:
status = ERROR
return dict(result, status=status, result=service_result)
except JSONDecodeError:
error_message = 'No JSON object could be decoded'
return dict(result, status=ERROR, result=None, error=error_message)
except (AttributeError, ValueError) as e:
return dict(result, status=ERROR, result=None, error=str(e))
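if __name__ == '__main__':  # pragma: no cover
    # Hedged usage sketch, not part of the original module: the URLs are
    # placeholders, and the module is normally imported rather than executed
    # (run it with ``python -m`` so the relative imports resolve).
    demo_service = Service(name='upstream-api',
                           url='https://example.com/status/', timeout=2.0)
    print(demo_service.check())
    demo_canary = ServiceWithCanary(name='upstream-canary',
                                    url='https://example.com/canary/')
    print(demo_canary.check())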
| TabbedOut/django_canary_endpoint | canary_endpoint/resources/services.py | Python | mit | 2,339 |
# Yahoo! OAuth Credentials - http://developer.yahoo.com/dashboard/
# Credentials for an App hunter registered to be used only for test
CONSUMER_KEY = 'dj0yJmk9clJFN2N6bTRSM3E4JmQ9WVdrOWJHSkdhamRFTm1VbWNHbzlNQS0tJnM9Y29uc3VtZXJzZWNyZXQmeD0zZQ--'
CONSUMER_SECRET = 'f9545904f2cc39f67104031c32ef83dfe37c6570'
APPLICATION_ID = 'lbFj7D6e'
CALLBACK_URL = 'localhost.owens.coffee'
from rauth import OAuth1Service
yahoo = OAuth1Service(
consumer_key=CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET,
name='yahoo', access_token_url= 'https://api.login.yahoo.com/oauth/v2/get_token',
authorize_url= "https://api.login.yahoo.com/oauth/v2/request_auth" ,
request_token_url= "https://api.login.yahoo.com/oauth/v2/get_request_token",
base_url='https://api.login.yahoo.com/oauth/v2/')
request_token, request_token_secret = yahoo.get_request_token(data = { 'oauth_callback': "oob" })
print "Request Token:"
print " - oauth_token = %s" % request_token
print " - oauth_token_secret = %s" % request_token_secret
auth_url = yahoo.get_authorize_url(request_token)
print 'Visit this URL in your browser: ' + auth_url
pin = raw_input('Enter PIN from browser: ')
session = yahoo.get_auth_session(request_token, request_token_secret, method='POST', data={'oauth_verifier': pin})
| nsmader/fantasy-manager-bot | make_connection.py | Python | gpl-3.0 | 1,281
from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from skin import parseColor, parseFont
from enigma import eListboxServiceContent, eListbox, eServiceCenter, eServiceReference, gFont, eRect
from Tools.LoadPixmap import LoadPixmap
from Tools.TextBoundary import getTextBoundarySize
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from Components.Renderer.Picon import getPiconName
from Components.config import config
def refreshServiceList(configElement = None):
from Screens.InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance is not None:
servicelist = InfoBarInstance.servicelist
if servicelist:
servicelist.setMode()
class ServiceList(HTMLComponent, GUIComponent):
MODE_NORMAL = 0
MODE_FAVOURITES = 1
def __init__(self, serviceList):
self.serviceList = serviceList
GUIComponent.__init__(self)
self.l = eListboxServiceContent()
pic = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/folder.png"))
pic and self.l.setPixmap(self.l.picFolder, pic)
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/marker.png"))
pic and self.l.setPixmap(self.l.picMarker, pic)
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_dvb_s.png"))
pic and self.l.setPixmap(self.l.picDVB_S, pic)
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_dvb_c.png"))
pic and self.l.setPixmap(self.l.picDVB_C, pic)
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_dvb_t.png"))
pic and self.l.setPixmap(self.l.picDVB_T, pic)
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_stream.png"))
pic and self.l.setPixmap(self.l.picStream, pic)
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_service_group.png"))
pic and self.l.setPixmap(self.l.picServiceGroup, pic)
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/icon_crypt.png"))
pic and self.l.setPixmap(self.l.picCrypto, pic)
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/record.png"))
pic and self.l.setPixmap(self.l.picRecord, pic)
self.root = None
self.mode = self.MODE_NORMAL
self.ItemHeight = 28
self.ServiceNameFont = parseFont("Regular;22", ((1,1),(1,1)))
self.ServiceInfoFont = parseFont("Regular;18", ((1,1),(1,1)))
self.ServiceNumberFont = parseFont("Regular;20", ((1,1),(1,1)))
self.progressBarWidth = 52
self.progressPercentWidth = 0
self.fieldMargins = 10
self.onSelectionChanged = [ ]
def applySkin(self, desktop, parent):
def foregroundColorMarked(value):
self.l.setColor(eListboxServiceContent.markedForeground, parseColor(value))
def foregroundColorMarkedSelected(value):
self.l.setColor(eListboxServiceContent.markedForegroundSelected, parseColor(value))
def backgroundColorMarked(value):
self.l.setColor(eListboxServiceContent.markedBackground, parseColor(value))
def backgroundColorMarkedSelected(value):
self.l.setColor(eListboxServiceContent.markedBackgroundSelected, parseColor(value))
def foregroundColorServiceNotAvail(value):
self.l.setColor(eListboxServiceContent.serviceNotAvail, parseColor(value))
def foregroundColorEvent(value):
self.l.setColor(eListboxServiceContent.eventForeground, parseColor(value))
def colorServiceDescription(value):
self.l.setColor(eListboxServiceContent.eventForeground, parseColor(value))
def foregroundColorEventSelected(value):
self.l.setColor(eListboxServiceContent.eventForegroundSelected, parseColor(value))
def colorServiceDescriptionSelected(value):
self.l.setColor(eListboxServiceContent.eventForegroundSelected, parseColor(value))
def foregroundColorEventborder(value):
self.l.setColor(eListboxServiceContent.eventborderForeground, parseColor(value))
def foregroundColorEventborderSelected(value):
self.l.setColor(eListboxServiceContent.eventborderForegroundSelected, parseColor(value))
def colorEventProgressbar(value):
self.l.setColor(eListboxServiceContent.serviceEventProgressbarColor, parseColor(value))
def colorEventProgressbarSelected(value):
self.l.setColor(eListboxServiceContent.serviceEventProgressbarColorSelected, parseColor(value))
def colorEventProgressbarBorder(value):
self.l.setColor(eListboxServiceContent.serviceEventProgressbarBorderColor, parseColor(value))
def colorEventProgressbarBorderSelected(value):
self.l.setColor(eListboxServiceContent.serviceEventProgressbarBorderColorSelected, parseColor(value))
def colorServiceRecorded(value):
self.l.setColor(eListboxServiceContent.serviceRecorded, parseColor(value))
def colorFallbackItem(value):
self.l.setColor(eListboxServiceContent.serviceItemFallback, parseColor(value))
def colorServiceSelectedFallback(value):
self.l.setColor(eListboxServiceContent.serviceSelectedFallback, parseColor(value))
def colorServiceDescriptionFallback(value):
self.l.setColor(eListboxServiceContent.eventForegroundFallback, parseColor(value))
def colorServiceDescriptionSelectedFallback(value):
self.l.setColor(eListboxServiceContent.eventForegroundSelectedFallback, parseColor(value))
def picServiceEventProgressbar(value):
pic = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, value))
pic and self.l.setPixmap(self.l.picServiceEventProgressbar, pic)
def serviceItemHeight(value):
self.ItemHeight = int(value)
def serviceNameFont(value):
self.ServiceNameFont = parseFont(value, ((1,1),(1,1)))
def serviceInfoFont(value):
self.ServiceInfoFont = parseFont(value, ((1,1),(1,1)))
def serviceNumberFont(value):
self.ServiceNumberFont = parseFont(value, ((1,1),(1,1)))
def progressbarHeight(value):
self.l.setProgressbarHeight(int(value))
def progressbarBorderWidth(value):
self.l.setProgressbarBorderWidth(int(value))
def progressBarWidth(value):
self.progressBarWidth = int(value)
def progressPercentWidth(value):
self.progressPercentWidth = int(value)
def fieldMargins(value):
self.fieldMargins = int(value)
def nonplayableMargins(value):
self.l.setNonplayableMargins(int(value))
def itemsDistances(value):
self.l.setItemsDistances(int(value))
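        # Dispatch every skin attribute to the local setter function of the
        # same name via locals(); attributes without a matching setter raise,
        # stay in skinAttributes and are handled by the base class applySkin().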
for (attrib, value) in list(self.skinAttributes):
try:
locals().get(attrib)(value)
self.skinAttributes.remove((attrib, value))
except:
pass
return GUIComponent.applySkin(self, desktop, parent)
def connectSelChanged(self, fnc):
if not fnc in self.onSelectionChanged:
self.onSelectionChanged.append(fnc)
def disconnectSelChanged(self, fnc):
if fnc in self.onSelectionChanged:
self.onSelectionChanged.remove(fnc)
def selectionChanged(self):
for x in self.onSelectionChanged:
x()
def setCurrent(self, ref, adjust=True):
if self.l.setCurrent(ref):
return None
from Components.ServiceEventTracker import InfoBarCount
if adjust and config.usage.multibouquet.value and InfoBarCount == 1 and ref and ref.type != 8192:
print "[servicelist] search for service in userbouquets"
if self.serviceList:
revert_mode = config.servicelist.lastmode.value
revert_root = self.getRoot()
self.serviceList.setModeTv()
revert_tv_root = self.getRoot()
bouquets = self.serviceList.getBouquetList()
for bouquet in bouquets:
self.serviceList.enterUserbouquet(bouquet[1])
if self.l.setCurrent(ref):
config.servicelist.lastmode.save()
self.serviceList.saveChannel(ref)
return True
self.serviceList.enterUserbouquet(revert_tv_root)
self.serviceList.setModeRadio()
revert_radio_root = self.getRoot()
bouquets = self.serviceList.getBouquetList()
for bouquet in bouquets:
self.serviceList.enterUserbouquet(bouquet[1])
if self.l.setCurrent(ref):
config.servicelist.lastmode.save()
self.serviceList.saveChannel(ref)
return True
self.serviceList.enterUserbouquet(revert_radio_root)
print "[servicelist] service not found in any userbouquets"
if revert_mode == "tv":
self.serviceList.setModeTv()
elif revert_mode == "radio":
self.serviceList.setModeRadio()
self.serviceList.enterUserbouquet(revert_root)
return False
def getCurrent(self):
r = eServiceReference()
self.l.getCurrent(r)
return r
def getPrev(self):
r = eServiceReference()
self.l.getPrev(r)
return r
def getNext(self):
r = eServiceReference()
self.l.getNext(r)
return r
def atBegin(self):
return self.instance.atBegin()
def atEnd(self):
return self.instance.atEnd()
def moveUp(self):
self.instance.moveSelection(self.instance.moveUp)
def moveDown(self):
self.instance.moveSelection(self.instance.moveDown)
def moveToChar(self, char):
# TODO fill with life
print "Next char: "
index = self.l.getNextBeginningWithChar(char)
indexup = self.l.getNextBeginningWithChar(char.upper())
if indexup != 0:
if index > indexup or index == 0:
index = indexup
self.instance.moveSelectionTo(index)
print "Moving to character " + str(char)
def moveToNextMarker(self):
idx = self.l.getNextMarkerPos()
self.instance.moveSelectionTo(idx)
def moveToPrevMarker(self):
idx = self.l.getPrevMarkerPos()
self.instance.moveSelectionTo(idx)
def moveToIndex(self, index):
self.instance.moveSelectionTo(index)
def getCurrentIndex(self):
return self.instance.getCurrentIndex()
GUI_WIDGET = eListbox
def postWidgetCreate(self, instance):
instance.setWrapAround(True)
instance.setContent(self.l)
instance.selectionChanged.get().append(self.selectionChanged)
self.setMode(self.mode)
def preWidgetRemove(self, instance):
instance.setContent(None)
instance.selectionChanged.get().remove(self.selectionChanged)
def getRoot(self):
return self.root
def getRootServices(self):
serviceHandler = eServiceCenter.getInstance()
list = serviceHandler.list(self.root)
dest = [ ]
if list is not None:
while 1:
s = list.getNext()
if s.valid():
dest.append(s.toString())
else:
break
return dest
def setPlayableIgnoreService(self, ref):
self.l.setIgnoreService(ref)
def setRoot(self, root, justSet=False):
self.root = root
self.l.setRoot(root, justSet)
if not justSet:
self.l.sort()
self.selectionChanged()
def resetRoot(self):
index = self.instance.getCurrentIndex()
self.l.setRoot(self.root, False)
self.l.sort()
self.instance.moveSelectionTo(index)
def removeCurrent(self):
self.l.removeCurrent()
def addService(self, service, beforeCurrent=False):
self.l.addService(service, beforeCurrent)
def finishFill(self):
self.l.FillFinished()
self.l.sort()
# stuff for multiple marks (edit mode / later multiepg)
def clearMarks(self):
self.l.initMarked()
def isMarked(self, ref):
return self.l.isMarked(ref)
def addMarked(self, ref):
self.l.addMarked(ref)
def removeMarked(self, ref):
self.l.removeMarked(ref)
def getMarked(self):
i = self.l
i.markedQueryStart()
ref = eServiceReference()
marked = [ ]
while i.markedQueryNext(ref) == 0:
marked.append(ref.toString())
ref = eServiceReference()
return marked
#just for movemode.. only one marked entry..
def setCurrentMarked(self, state):
self.l.setCurrentMarked(state)
def setMode(self, mode):
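        # Derive the item height and fonts either straight from the skin or by
        # scaling them to the configured number of visible services, then lay
        # out the service number, progress bar and service name cells.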
if config.usage.servicelist_number_of_services.value == "by skin":
ItemHeight = self.ItemHeight
ServiceNameFont = self.ServiceNameFont
ServiceNumberFont = self.ServiceNumberFont
ServiceInfoFont = self.ServiceInfoFont
else:
ItemHeight = int(self.instance.size().height() / int(config.usage.servicelist_number_of_services.value))
FontFactor = ItemHeight * 100 / self.ItemHeight
ServiceNameFont = gFont(self.ServiceNameFont.family, int(self.ServiceNameFont.pointSize * FontFactor/100))
ServiceNumberFont = gFont(self.ServiceNumberFont.family, int(self.ServiceNumberFont.pointSize * FontFactor/100))
ServiceInfoFont = gFont(self.ServiceInfoFont.family, int(self.ServiceInfoFont.pointSize * FontFactor/100))
self.mode = mode
self.l.setItemHeight(ItemHeight)
self.l.setVisualMode(eListboxServiceContent.visModeComplex)
if config.usage.service_icon_enable.value:
self.l.setGetPiconNameFunc(getPiconName)
else:
self.l.setGetPiconNameFunc(None)
rowWidth = self.instance.size().width() - 30 #scrollbar is fixed 20 + 10 Extra marge
if mode == self.MODE_NORMAL or not config.usage.show_channel_numbers_in_servicelist.value:
channelNumberWidth = 0
channelNumberSpace = 0
else:
channelNumberWidth = config.usage.alternative_number_mode.value and getTextBoundarySize(self.instance, ServiceNumberFont, self.instance.size(), "0000").width() or getTextBoundarySize(self.instance, self.ServiceNumberFont, self.instance.size(), "00000").width()
channelNumberSpace = self.fieldMargins
self.l.setElementPosition(self.l.celServiceNumber, eRect(0, 0, channelNumberWidth, ItemHeight))
progressWidth = self.progressBarWidth
if "perc" in config.usage.show_event_progress_in_servicelist.value:
progressWidth = self.progressPercentWidth or self.progressBarWidth
if "left" in config.usage.show_event_progress_in_servicelist.value:
self.l.setElementPosition(self.l.celServiceEventProgressbar, eRect(channelNumberWidth+channelNumberSpace, 0, progressWidth , ItemHeight))
self.l.setElementPosition(self.l.celServiceName, eRect(channelNumberWidth+channelNumberSpace + progressWidth + self.fieldMargins, 0, rowWidth - (channelNumberWidth+channelNumberSpace + progressWidth + self.fieldMargins), ItemHeight))
elif "right" in config.usage.show_event_progress_in_servicelist.value:
self.l.setElementPosition(self.l.celServiceEventProgressbar, eRect(rowWidth - progressWidth, 0, progressWidth, ItemHeight))
self.l.setElementPosition(self.l.celServiceName, eRect(channelNumberWidth+channelNumberSpace, 0, rowWidth - (channelNumberWidth+channelNumberSpace + progressWidth + self.fieldMargins), ItemHeight))
else:
self.l.setElementPosition(self.l.celServiceEventProgressbar, eRect(0, 0, 0, 0))
self.l.setElementPosition(self.l.celServiceName, eRect(channelNumberWidth+channelNumberSpace, 0, rowWidth - (channelNumberWidth+channelNumberSpace), ItemHeight))
self.l.setElementFont(self.l.celServiceName, ServiceNameFont)
self.l.setElementFont(self.l.celServiceNumber, ServiceNumberFont)
self.l.setElementFont(self.l.celServiceInfo, ServiceInfoFont)
if "perc" in config.usage.show_event_progress_in_servicelist.value:
self.l.setElementFont(self.l.celServiceEventProgressbar, ServiceInfoFont)
self.l.setHideNumberMarker(config.usage.hide_number_markers.value)
self.l.setServiceTypeIconMode(int(config.usage.servicetype_icon_mode.value))
self.l.setCryptoIconMode(int(config.usage.crypto_icon_mode.value))
self.l.setRecordIndicatorMode(int(config.usage.record_indicator_mode.value))
self.l.setColumnWidth(int(config.usage.servicelist_column.value))
def selectionEnabled(self, enabled):
if self.instance is not None:
self.instance.setSelectionEnable(enabled)
| openmips/stbgui | lib/python/Components/ServiceList.py | Python | gpl-2.0 | 15,015 |
# -*- coding: utf-8 -*-
#
# Copyright 2016 edX PDR Lab, National Central University, Taiwan.
#
# http://edxpdrlab.ncu.cc/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created By: yrchen@ATCity.org
# Maintained By: yrchen@ATCity.org
#
"""
View configurations for Groups in Common Repository project.
"""
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import CreateView, DetailView, ListView, UpdateView
from django.shortcuts import redirect, get_object_or_404
from actstream import action
from actstream import actions
from braces.views import LoginRequiredMixin, OrderableListMixin
from notifications.signals import notify
from commonrepo.users.models import User as User
from .models import Group
from .forms import GroupForm, GroupUpdateForm, GroupAddForm, GroupLeaveForm
__author__ = 'yrchen@ATCity.org (Xaver Y.R. Chen)'
class GroupsAbortView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
"""
View of group aborting actions.
* Requires authentication.
"""
model = Group
form_class = GroupLeaveForm
query_pk_and_slug = True
template_name = 'groups/groups_abort.html'
success_message = "You aborted Group %(name)s successfully"
def form_valid(self, form):
# remove request user from the members of group
form.instance.members.remove(self.request.user)
form.save()
return super(GroupsAbortView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(GroupsAbortView, self).get_form_kwargs()
kwargs.update({'request_user': self.request.user})
kwargs.update(self.kwargs) # self.kwargs contains all url conf params
return kwargs
def get_success_url(self):
action.send(self.request.user, verb='aborted', target=self.object)
actions.unfollow(self.request.user, self.object, send_action=False)
notify.send(
self.request.user,
recipient=self.object.creator,
verb=u'has aborted from your Group',
level='success')
return reverse("groups:groups-detail",
kwargs={'pk': self.kwargs['pk']})
class GroupsCreateView(LoginRequiredMixin, SuccessMessageMixin, CreateView):
"""
View of group creating actions.
* Requires authentication.
"""
model = Group
form_class = GroupForm
template_name = "groups/groups_create.html"
success_message = "%(name)s was created successfully"
def get_form_kwargs(self):
kwargs = super(GroupsCreateView, self).get_form_kwargs()
kwargs.update({'request_user': self.request.user})
kwargs.update(self.kwargs) # self.kwargs contains all url conf params
return kwargs
def get_success_url(self):
action.send(self.request.user, verb='created', target=self.object)
return super(GroupsCreateView, self).get_success_url()
class GroupsDetailView(LoginRequiredMixin, DetailView):
"""
View of group details.
* Requires authentication.
"""
model = Group
query_pk_and_slug = True
template_name = 'groups/groups_detail.html'
class GroupsJoinView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
"""
View of group joining actions.
* Requires authentication.
"""
model = Group
form_class = GroupAddForm
query_pk_and_slug = True
template_name = 'groups/groups_join.html'
success_message = "You joined Group %(name)s successfully"
def form_valid(self, form):
# add request user to the members of group
form.instance.members.add(self.request.user)
form.save()
return super(GroupsJoinView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(GroupsJoinView, self).get_form_kwargs()
kwargs.update({'request_user': self.request.user})
kwargs.update(self.kwargs) # self.kwargs contains all url conf params
return kwargs
def get_success_url(self):
action.send(self.request.user, verb='joined', target=self.object)
actions.follow(self.request.user, self.object, send_action=True)
notify.send(
self.request.user,
recipient=self.object.creator,
verb=u'has joined to your Group',
level='success')
return reverse("groups:groups-detail",
kwargs={'pk': self.kwargs['pk']})
class GroupsListView(LoginRequiredMixin, ListView):
"""
View of group list actions.
* Requires authentication.
"""
template_name = 'groups/groups_list.html'
paginate_by = settings.GROUPS_MAX_ITEMS_PER_PAGE
def get_queryset(self):
return Group.objects.all()
class GroupsMyListView(LoginRequiredMixin, ListView):
"""
View of user related group list actions.
* Requires authentication.
"""
template_name = 'groups/groups_my_list.html'
paginate_by = settings.GROUPS_MAX_ITEMS_PER_PAGE
def get_queryset(self):
return Group.objects.filter(creator=self.request.user)
class GroupsFollowingListView(
OrderableListMixin,
LoginRequiredMixin,
ListView):
"""
View of group following list actions.
* Requires authentication.
"""
template_name = 'groups/groups_following_list.html'
paginate_by = settings.GROUPS_MAX_ITEMS_PER_PAGE
orderable_columns = ("id", "create_update", "update_date")
orderable_columns_default = "id"
def get_queryset(self):
user = get_object_or_404(User, username=self.request.user.username)
unordered_queryset = user.userprofile.follow_groups.all()
return self.get_ordered_queryset(unordered_queryset)
class GroupsUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
"""
View of group updating actions.
* Requires authentication.
"""
model = Group
form_class = GroupUpdateForm
query_pk_and_slug = True
template_name = 'groups/groups_update.html'
success_message = "%(name)s was updated successfully"
def dispatch(self, request, *args, **kwargs):
group = get_object_or_404(Group, pk=self.kwargs['pk'])
if not group.creator == request.user and not request.user.is_staff:
messages.error(request, 'Permission denied.')
return redirect('groups:groups-alllist')
else:
            return super(GroupsUpdateView, self).dispatch(
                request, *args, **kwargs)
def form_valid(self, form):
# self.object.version += 1
return super(GroupsUpdateView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(GroupsUpdateView, self).get_form_kwargs()
kwargs.update({'request_user': self.request.user})
kwargs.update(self.kwargs) # self.kwargs contains all url conf params
return kwargs
def get_success_url(self):
action.send(self.request.user, verb='updated', target=self.object)
return reverse("groups:groups-detail",
kwargs={'pk': self.kwargs['pk']})
@login_required
@csrf_exempt
def follow_group(request, pk):
"""
Creates the follow relationship between ``request.user`` and the ``Group``
"""
group = get_object_or_404(Group, id=pk)
# Check user is not member of the group
if not group.members.filter(id=request.user.id).exists():
actions.follow(request.user, group, send_action=True)
notify.send(
request.user,
recipient=group.creator,
verb=u'has followed your Group',
level='success')
request.user.userprofile.follow_groups.add(group)
messages.success(
request,
            'Success! You are now following this Group.')
else:
actions.follow(request.user, group, send_action=False)
messages.success(
request,
            'You are a member of this Group, so you automatically follow it.')
return redirect('groups:groups-detail', pk)
@login_required
@csrf_exempt
def unfollow_group(request, pk):
"""
Deletes the follow relationship between ``request.user`` and the ``Group``
"""
group = get_object_or_404(Group, id=pk)
# Check user is not member of the group
if not group.members.filter(id=request.user.id).exists():
actions.unfollow(request.user, group, send_action=False)
request.user.userprofile.follow_groups.remove(group)
messages.warning(
request,
            'Success! You are no longer following this Group.')
    # group members can choose to stop following the group while still
    # remaining members
else:
actions.unfollow(request.user, group, send_action=False)
messages.warning(
request,
            'Success! You are no longer following this Group, but you are still one of its members.')
return redirect('groups:groups-detail', pk)
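# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module) of how these views might be
# wired up in the app's urls.py.  The route names are taken from the
# reverse()/redirect() calls above; the URL patterns themselves and the
# 'groups' namespace are assumptions:
#
#     from django.conf.urls import url
#     from . import views
#
#     urlpatterns = [
#         url(r'^$', views.GroupsListView.as_view(), name='groups-alllist'),
#         url(r'^(?P<pk>\d+)/$', views.GroupsDetailView.as_view(), name='groups-detail'),
#         url(r'^(?P<pk>\d+)/follow/$', views.follow_group, name='groups-follow'),
#         url(r'^(?P<pk>\d+)/unfollow/$', views.unfollow_group, name='groups-unfollow'),
#     ]
# ---------------------------------------------------------------------------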
| yrchen/CommonRepo | commonrepo/groups/views.py | Python | apache-2.0 | 9,831 |
import xml.etree.cElementTree as ET
from time import sleep
from django.utils import timezone
from datetime import timedelta
from random import shuffle
from django.contrib.gis.geos import Point, Polygon
from django.contrib.gis.db.models import Extent
from bustimes.models import Trip
from ..import_live_vehicles import ImportLiveVehiclesCommand
from ...models import VehicleLocation, VehicleJourney, Service
NS = {
'a': 'http://www.acishorizon.com/',
's': 'http://www.w3.org/2003/05/soap-envelope'
}
OPERATORS = {
'Ulsterbus': 'ULB',
'Translink Metro': 'MET',
'Translink Glider': 'GDR'
}
def items_from_response(response):
try:
items = ET.fromstring(response.text)
except ET.ParseError:
print(response)
return ()
return items.findall(
's:Body/a:GetVehiclesNearPointResponse/a:GetVehiclesNearPointResult/a:Vehicles/a:VehicleRealtime',
NS
)
class Command(ImportLiveVehiclesCommand):
source_name = 'acis'
url = 'http://belfastapp.acishorizon.com/DataService.asmx'
def get_response(self, latitute=None, longitude=None):
if latitute and longitude:
latlong = '<latitude>{}</latitude><longitude>{}</longitude>'.format(latitute, longitude)
else:
latlong = ''
data = """
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<GetVehiclesNearPoint xmlns="http://www.acishorizon.com/">
{}
<maxResults>100</maxResults>
</GetVehiclesNearPoint>
</soap12:Body>
</soap12:Envelope>
""".format(latlong)
return self.session.post(self.url, data=data, timeout=5, headers={'content-type': 'application/soap+xml'})
def get_points(self):
points = []
now = self.source.datetime
services = Service.objects.filter(current=True, operator__in=['MET', 'GDR', 'ULB', 'GLE'])
extent = services.aggregate(Extent('geometry'))['geometry__extent']
if extent:
longitude = extent[0]
time_since_midnight = timedelta(hours=now.hour, minutes=now.minute, seconds=now.second,
microseconds=now.microsecond)
trips = Trip.objects.filter(start__lte=time_since_midnight + timedelta(minutes=5),
end__gte=time_since_midnight - timedelta(minutes=30))
services = services.filter(route__trip__in=trips)
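            # Scan the services' bounding box on a 0.1 degree grid; each probe
            # queries a 0.1 x 0.1 degree box (0.05 degree half-width), so
            # neighbouring probes tile the extent without gaps.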
while longitude <= extent[2]:
latitute = extent[1]
while latitute <= extent[3]:
bbox = Polygon.from_bbox(
(longitude - 0.05, latitute - 0.05, longitude + 0.05, latitute + 0.05)
)
if services.filter(geometry__bboverlaps=bbox).exists():
points.append((latitute, longitude))
latitute += 0.1
longitude += 0.1
shuffle(points)
return points
def get_items(self):
for latitute, longitude in self.get_points():
self.source.datetime = timezone.now()
for item in items_from_response(self.get_response(latitute, longitude)):
yield item
self.save()
sleep(1)
def get_vehicle(self, item):
operator = item.find('a:VehicleOperatorName', NS).text
if operator in OPERATORS:
operator = OPERATORS[operator]
else:
operator = 'MET'
vehicle = item.find('a:VehicleId', NS).text
defaults = {}
notes = item.find('a:VehicleType', NS)
if notes is not None:
defaults['notes'] = notes.text
defaults['operator_id'] = operator
vehicle, created = self.vehicles.get_or_create(defaults, code=vehicle, source=self.source)
return vehicle, created
def get_journey(self, item, vehicle):
journey = VehicleJourney()
journey.code = item.find('a:VehicleJourneyId', NS).text
journey.route_name = item.find('a:VehiclePublicServiceCode', NS).text
latest_journey = vehicle.latest_journey
if latest_journey:
if latest_journey.code == journey.code and latest_journey.route_name == journey.route_name:
return latest_journey
journey.destination = item.find('a:VehicleDestination', NS).text
operator = item.find('a:VehicleOperatorName', NS).text
if operator in OPERATORS:
operator = OPERATORS[operator]
else:
operator = 'MET'
print(operator, journey.route_name)
try:
try:
journey.service = Service.objects.get(line_name__iexact=journey.route_name, operator=operator)
except Service.DoesNotExist:
operator = 'GLE'
journey.service = Service.objects.get(line_name__iexact=journey.route_name, operator=operator)
except (Service.MultipleObjectsReturned, Service.DoesNotExist) as e:
print(e, journey.route_name)
return journey
def create_vehicle_location(self, item):
lat = item.find('a:VehicleLatitude', NS).text
lon = item.find('a:VehicleLongitude', NS).text
latlong = Point(float(lon), float(lat))
return VehicleLocation(
latlong=latlong,
)
| jclgoodwin/bustimes.org.uk | vehicles/management/commands/import_live_acis.py | Python | mpl-2.0 | 5,585 |
# encoding: utf-8
import datetime
__all__ = [
'info',
]
def info():
return {
'birthday': datetime.date(1990, 12, 10),
'class': 7,
'family_name_en': u'matsui',
'family_name_kana': u'まつい',
'first_name_en': u'sakiko',
'first_name_kana': u'さきこ',
'graduate_date': None,
'hometown': u'埼玉',
'name_en': u'Matsui Sakiko',
'name_ja': u'松井咲子',
'name_kana': u'まつい さきこ',
'nick': u'さきっぺ',
'team': u'K',
}
| moriyoshi/pyakb48 | akb48/member/matsui_sakiko.py | Python | mit | 646 |
# -*- coding: utf-8 -*-
"""
===============================================
.. module::
:platform: Unix, Windows
:synopsis:
:deprecated:
.. moduleauthor:: (C) 2014 Oliver Gutiérrez
"""
from pyevo.http import nvp_request
def translate(apikey,text,from_lang,to_lang):
"""
    Translate text from ``from_lang`` to ``to_lang`` using the Google Translate API.
"""
if text:
data={
"key": apikey,
"q": text,
"source": from_lang,
"target": to_lang,
}
try:
resp=nvp_request("https://www.googleapis.com/language/translate/v2",data,json=True)
return resp['data']['translations'][0]['translatedText']
except:
pass
    return None
| olivergs/pyevo | pyevo/api/googletranslate.py | Python | mit | 730
# -*- coding: utf-8 -*-
# $Id: search_engine_query_parser.py,v 1.12 2008/06/13 15:35:13 rivanov Exp $
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine query parsers."""
__lastupdated__ = """$Date: 2008/06/13 15:35:13 $"""
__revision__ = "$Id: search_engine_query_parser.py,v 1.12 2008/06/13 15:35:13 rivanov Exp $"
from invenio.bibtask import write_message
# imports used in FieldExporter class
from invenio import search_engine
from invenio import bibrecord
from invenio import bibdocfile
import os
# imports used in perform_request_... methods
from invenio.config import CFG_SITE_LANG
from invenio import bibexport_method_fieldexporter_dblayer as fieldexporter_dblayer
from invenio import template
fieldexporter_templates = template.load('bibexport_method_fieldexporter')
from invenio.messages import gettext_set_language
def run_export_method(jobname):
"""Main function, reading params and running the task."""
write_message("bibexport_fieldexporter: job %s started." % jobname)
job = fieldexporter_dblayer.get_job_by_name(jobname)
job_result = _run_job(job)
if job_result.STATUS_CODE_OK != job_result.get_status():
error_message = job_result.get_status_message()
write_message("Error during %s execution. Error message: %s" % (jobname, error_message) )
write_message("bibexport_fieldexporter: job %s started." % jobname)
def _run_job(job):
"""Execute a job and saves the results
@param job: Job object containing inforamtion about the job
@return: JobResult object containing informatoin about the result
of job execution
"""
exporter = FieldExporter()
job_result = exporter.execute_job(job)
fieldexporter_dblayer.save_job_result(job_result)
return job_result
class FieldExporter:
"""Provides mothods for exporting given fields from
records corresponding to a given search criteria.
It provides also methods for transforming the resulting
MARC XML into other formats.
"""
def __init__(self):
"""Nothing to init"""
pass
def _export_fields(self, search_criteria, output_fields):
"""Export fields that are among output_fields from
all the records that match the search criteria.
@param search_criteria: combination of search terms in Invenio
@param output_fields: list of fields that should remain in the records
@return: MARC XML with records containing only the fields that are
among output fields
"""
records = self._get_records(search_criteria)
filtered_xml = self._filter_records_fields(records, output_fields)
return filtered_xml
def execute_query(self, query):
"""Executes a query and returns the result of execution.
@param query: Query object containing information about the query.
@return: QueryResult object containing the result.
"""
search_criteria = query.get_search_criteria()
output_fields = query.get_output_fields()
xml_result = self._export_fields(search_criteria, output_fields)
query_result = fieldexporter_dblayer.QueryResult(query, xml_result)
return query_result
def execute_job(self, job):
"""Executes a job and returns the result of execution.
@param job: Job object containing information about the job.
@return: JobResult object containing the result.
"""
job_result = fieldexporter_dblayer.JobResult(job)
job_queries = fieldexporter_dblayer.get_job_queries(job.get_id())
for current_query in job_queries:
current_query_result = self.execute_query(current_query)
job_result.add_query_result(current_query_result)
return job_result
def _get_records(self, search_criteria):
"""Creates MARC XML containing all the records corresponding
to a given search criteria.
@param search_criteria: combination of search terms in Invenio
@return: MARC XML containing all the records corresponding
to the search criteria"""
record_IDs = search_engine.perform_request_search(p = search_criteria)
records_XML = self._create_records_xml(record_IDs)
return records_XML
def _filter_records_fields(self, records_xml, output_fields):
"""Leaves in the records only fields that are necessary.
All the other fields are removed from the records.
@param records_xml: MARC XML containing all the information about the records
@param output_fields: list of fields that should remain in the records
@return: MARC XML with records containing only fields that are
in output_fields list.
"""
# Add 001/970 to the output fields. 970 is necessary for system number
# extraction when exporting in aleph marc. When we add more formats,
# we can add it optionally only when exporting aleph marc.
output_fields.append("001")
output_fields.append("970")
records = bibrecord.create_records(records_xml)
output_records = []
for (record, status_code, list_of_errors) in records:
record = self._filter_fields(record, output_fields)
# do not return empty records
if not self._is_record_empty(record):
output_records.append(record)
output_xml = bibrecord.print_recs(output_records)
return output_xml
def _is_record_empty(self, record):
"""Check if a record is empty.
We assume that record is empty if all the values of the
tags are empty lists or the record dictionary itself is empty.
@param record: record structure (@see: bibrecord.py for details)
@return True if the record is empty
"""
for value in record.values():
if len(value) > 0:
return False
return True
def _filter_fields(self, record, output_fields):
"""Removes from the record all the fields
that are not output_fields.
@param record: record structure (@see: bibrecord.py for details)
@param output_fields: list of fields that should remain in the record
@return: record containing only fields among output_fields
"""
# Tibor's new implementation:
for tag in record.keys():
if tag not in output_fields:
bibrecord.record_delete_fields(record, tag)
return record
# Rado's old implementation that leads to bibrecord-related
# bug, see <https://savannah.cern.ch/task/?10267>:
record_keys = record.keys()
# Check if any of the tags, fields or subfields match
# any value in output_fields. In case of match we leave
# the element and its children in the record.
#
# If the element and all its children are not among the
# output fields, it is deleted
for tag in record_keys:
tag = tag.lower()
if tag not in output_fields:
for (subfields, ind1, ind2, value, field_number) in record[tag]:
current_field = tag + ind1.strip() + ind2.strip()
current_field = current_field.lower()
if current_field not in output_fields:
delete_parents = True
for (code, value) in subfields:
current_subfield = current_field + code
current_subfield = current_subfield.lower()
if current_subfield not in output_fields:
bibrecord.record_delete_subfield(record, tag, code, ind1, ind2)
else:
delete_parents = False
if delete_parents:
bibrecord.record_delete_field(record, tag, ind1, ind2)
return record
def _create_records_xml(self, record_IDs):
"""Creates XML containing all the information
for the records with the given identifiers
@param record_IDs: list of identifiers of records
@return: MARC XML containing all the information about the records
"""
output_xml = "<collection>"
for record_id in record_IDs:
record_xml = search_engine.print_record(recID = record_id, format = "xm")
output_xml += record_xml
output_xml += "</collection>"
return output_xml
def get_css():
"""Returns the CSS for field exporter pages."""
return fieldexporter_templates.tmpl_styles()
def get_navigation_menu(language = CFG_SITE_LANG):
"""Returns HTML reresenting the navigation menu
of field exporter
@param language: language of the page
"""
return fieldexporter_templates.tmpl_navigation_menu(language)
def perform_request_new_job(language = CFG_SITE_LANG):
"""Displays a page for creation of a new job.
@param language: language of the page
"""
job = fieldexporter_dblayer.Job()
return fieldexporter_templates.tmpl_edit_job(job, language = language)
def perform_request_edit_job(job_id, user_id, language = CFG_SITE_LANG):
"""Displays a page where the user can edit information
about a job.
@param job_id: identifier of the job that will be edited
@param user_id: identifier of the user
@param language: language of the page
"""
_check_user_ownership_on_job(user_id, job_id, language)
job = fieldexporter_dblayer.get_job(job_id)
return fieldexporter_templates.tmpl_edit_job(job, language = language)
def perform_request_save_job(job, user_id, language = CFG_SITE_LANG):
"""Saves a job.
@param job: Object containing information about the job
@param user_id: identifier of the user saving the job
@param language: language of the page
@return: identifier of the job
"""
job_id = job.get_id()
_check_user_ownership_on_job(user_id, job_id, language)
return fieldexporter_dblayer.save_job(user_id, job)
def perform_request_delete_jobs(job_ids, user_id, language = CFG_SITE_LANG):
"""Deletes all the jobs which ids are given as a parameter.
@param job_ids: list with identifiers of jobs that have to be deleted
@param user_id: identifier of the user deleting the jobs
@param language: language of the page
"""
for job_id in job_ids:
_check_user_ownership_on_job(user_id, job_id, language)
fieldexporter_dblayer.delete_job(job_id)
def perform_request_run_jobs(job_ids, user_id, language = CFG_SITE_LANG):
"""Runs all the jobs which ids are given as a parameter
@param job_ids: list with identifiers of jobs that have to be run
@param user_id: identifier of the user running the jobs
@param language: language of the page
"""
for current_job_id in job_ids:
_check_user_ownership_on_job(user_id, current_job_id, language)
current_job = fieldexporter_dblayer.get_job(current_job_id)
_run_job(current_job)
def perform_request_jobs(user_id, language = CFG_SITE_LANG):
"""Displays a page containing list of all
jobs of the current user
@param user_id: identifier of the user owning the jobs
@param language: language of the page
"""
all_jobs = fieldexporter_dblayer.get_all_jobs(user_id)
return fieldexporter_templates.tmpl_display_jobs(jobs = all_jobs, language = language)
def perform_request_job_queries(job_id, user_id, language = CFG_SITE_LANG):
"""Displays a page containing list of all
all queries for a given job
@param job_id: identifier of the job containing the queries
@param user_id: identifier of the current user
@param language: language of the page
"""
_check_user_ownership_on_job(user_id, job_id, language)
queries = fieldexporter_dblayer.get_job_queries(job_id)
return fieldexporter_templates.tmpl_display_job_queries(job_queries = queries,
job_id = job_id,
language = language)
def perform_request_new_query(job_id, user_id, language = CFG_SITE_LANG):
"""Displays a page for creation of new query.
@param job_id: identifier of the job containing the query
@param user_id: identifier of user creating the query
@param language: language of the page
"""
_check_user_ownership_on_job(user_id, job_id, language)
query = fieldexporter_dblayer.Query()
return fieldexporter_templates.tmpl_edit_query(query, job_id, language)
def perform_request_edit_query(query_id, job_id, user_id, language = CFG_SITE_LANG):
"""Displays a page where the user can edit information
about a job.
@param query_id: identifier of the query that will be edited
@param job_id: identifier of the job containing the query
@param user_id: identifier of the user editing the query
@param language: language of the page
"""
_check_user_ownership_on_job(user_id, job_id, language)
_check_user_ownership_on_query(user_id, query_id, language)
query = fieldexporter_dblayer.get_query(query_id)
return fieldexporter_templates.tmpl_edit_query(query, job_id, language)
def perform_request_save_query(query, job_id, user_id, language = CFG_SITE_LANG):
"""Saves a query in database.
@param query: Query objectect containing the necessary informatoin
@param job_id: identifier of the job containing the query
@param user_id: identifier of the user saving the query
@param language: language of the page
"""
_check_user_ownership_on_job(user_id, job_id, language)
_check_user_ownership_on_query(user_id, query.get_id(), language)
fieldexporter_dblayer.save_query(query, job_id)
def perform_request_delete_queries(query_ids, user_id, language = CFG_SITE_LANG):
"""Deletes all the queries which ids are given as a parameter.
@param query_ids: list with identifiers of queries that have to be deleted
@param user_id: identifier of the user deleting the queries
@param language: language of the page
"""
for query_id in query_ids:
_check_user_ownership_on_query(user_id, query_id, language)
fieldexporter_dblayer.delete_query(query_id)
def perform_request_run_queries(query_ids, user_id, job_id, language = CFG_SITE_LANG):
"""Displays a page contining results from execution of given queries.
@param query_ids: list of query identifiers
@param user_id: identifier of the user running the queries
@param language: language of the page
"""
exporter = FieldExporter()
_check_user_ownership_on_job(user_id, job_id, language)
job = fieldexporter_dblayer.get_job(job_id)
job_result = fieldexporter_dblayer.JobResult(job)
queries_results = []
for current_id in query_ids:
_check_user_ownership_on_query(user_id, current_id, language)
current_query = fieldexporter_dblayer.get_query(current_id)
current_result = exporter.execute_query(current_query)
job_result.add_query_result(current_result)
return fieldexporter_templates.tmpl_display_queries_results(job_result, language)
def perform_request_job_history(user_id, language = CFG_SITE_LANG):
"""Displays a page containing information about the executed jobs.
@param user_id: identifier of the user owning the reuslts
@param language: language of the page
"""
job_result_identifiers = fieldexporter_dblayer.get_all_job_result_ids(user_id = user_id)
job_results = fieldexporter_dblayer.get_job_results(job_result_identifiers)
return fieldexporter_templates.tmpl_display_job_history(job_results, language)
def perform_request_job_results(job_result_id, user_id, language = CFG_SITE_LANG):
"""Displays a page with information about the results of a particular job.
@param job_result_id: identifier of the job result that should be displayed
@param user_id: identifier of the current user
@param language: language of the page
"""
_check_user_ownership_on_job_result(user_id, job_result_id, language)
job_result = fieldexporter_dblayer.get_job_result(job_result_id)
return fieldexporter_templates.tmpl_display_job_result_information(job_result, language)
def perform_request_download_job_result(req, job_result_id, output_format, user_id, language = CFG_SITE_LANG):
"""
Returns to the browser zip file containing the content of the job result
@param req: request as received from apache
@param job_result_id: identifier of the job result that should be displayed
@param user_id: identifier of the current user
@param language: language of the page
@param output_format: format for downloading the result
"""
_check_user_ownership_on_job_result(user_id, job_result_id, language)
job_result = fieldexporter_dblayer.get_job_result(job_result_id)
if output_format != fieldexporter_dblayer.Job.OUTPUT_FORMAT_MISSING:
job_result.get_job().set_output_format(output_format)
download_file_name = "result.zip"
temp_zip_file_path = ""
try:
temp_zip_file_path = fieldexporter_dblayer.create_temporary_zip_file_with_job_result(job_result)
bibdocfile.stream_file(req, temp_zip_file_path, download_file_name)
finally:
if os.path.exists(temp_zip_file_path):
os.remove(temp_zip_file_path)
def perform_request_display_job_result(job_result_id, output_format, user_id, language = CFG_SITE_LANG):
"""Displays a page with the results of a particular job.
@param job_result_id: identifier of the job result that should be displayed
@param user_id: identifier of the current user
@param language: language of the page
"""
_check_user_ownership_on_job_result(user_id, job_result_id, language)
job_result = fieldexporter_dblayer.get_job_result(job_result_id)
if output_format != fieldexporter_dblayer.Job.OUTPUT_FORMAT_MISSING:
job_result.get_job().set_output_format(output_format)
return fieldexporter_templates.tmpl_display_queries_results(job_result, language)
def _check_user_ownership_on_job(user_id, job_id, language = CFG_SITE_LANG):
"""Check if user owns a job. In case user is not the owner, exception is thrown.
@param user_id: identifier of the user
@param job_id: identifier of the job
@param language: language of the page
"""
if fieldexporter_dblayer.Job.ID_MISSING == job_id:
return
if not fieldexporter_dblayer.is_user_owner_of_job(user_id, job_id):
_ = gettext_set_language(language)
error_message = _("You are not authorised to access this resource.")
raise AccessDeniedError(error_message)
def _check_user_ownership_on_job_result(user_id, job_result_id, language = CFG_SITE_LANG):
"""Check if user owns a job result. In case user is not the owner, exception is thrown.
@param user_id: identifier of the user
@param job_result_id: identifier of the job result
@param language: language of the page
"""
if fieldexporter_dblayer.JobResult.ID_MISSING == job_result_id:
return
if not fieldexporter_dblayer.is_user_owner_of_job_result(user_id, job_result_id):
_ = gettext_set_language(language)
error_message = _("You are not authorised to access this resource.")
raise AccessDeniedError(error_message)
def _check_user_ownership_on_query(user_id, query_id, language = CFG_SITE_LANG):
"""Check if user owns a job result. In case user is not the owner, exception is thrown.
@param user_id: identifier of the user
@param job_result_id: identifier of the job result
@param language: language of the page
"""
if fieldexporter_dblayer.Query.ID_MISSING == query_id:
return
if not fieldexporter_dblayer.is_user_owner_of_query(user_id, query_id):
_ = gettext_set_language(language)
error_message = _("You are not authorised to access this resource.")
raise AccessDeniedError(error_message)
class AccessDeniedError(Exception):
"""Exception indicating an error during exportting for Google scholar."""
_error_message = ""
_inner_exception = None
def __init__(self, error_message, inner_exception = None):
"""Constructor of the exception"""
Exception.__init__(self, error_message, inner_exception)
self._error_message = error_message
self._inner_exception = inner_exception
def get_error_message(self):
"""Returns the error message that explains the reason for the exception"""
return self._error_message
def get_inner_exception(self):
"""Returns the inner exception that is the cause for the current exception"""
return self._inner_exception
def __str__(self):
"""Returns string representation"""
return self._error_message
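# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module).  It assumes a running
# Invenio instance; the search pattern, MARC tags and job name below are
# placeholders only:
#
#     exporter = FieldExporter()
#     marcxml = exporter._export_fields('collection:ARTICLE', ['100__a', '245'])
#
#     job = fieldexporter_dblayer.get_job_by_name('weekly_export')
#     job_result = exporter.execute_job(job)
#     print job_result.get_status()
# ---------------------------------------------------------------------------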
| CERNDocumentServer/invenio | modules/bibexport/lib/bibexport_method_fieldexporter.py | Python | gpl-2.0 | 21,705 |
from collections import OrderedDict as dict
from ...core import Component
class CameraComponent(Component):
""" Camera Component class
"""
# Defaut type/name the component will have
DEFAULT_TYPE = "camera"
defaults = dict( {"camera": None} )
def __init__(self, *args, **kwargs):
""" Camera initialization
"""
super(CameraComponent,self).__init__(*args, **kwargs)
| jsa4000/OpenGL-Python | zero/components/geometry/camera.py | Python | apache-2.0 | 427 |
# coding: utf-8
from __future__ import unicode_literals
from collections import defaultdict
from datetime import datetime
from debug_toolbar.panels import Panel
from django.conf import settings
from django.core.cache import cache
from django.db.models.loading import get_models
from django.utils.translation import ugettext_lazy as _
from django.utils.timesince import timesince
from .utils import _get_table_cache_key
class CachalotPanel(Panel):
title = 'Cachalot'
template = 'cachalot/panel.html'
def __init__(self, *args, **kwargs):
self.last_invalidation = None
super(CachalotPanel, self).__init__(*args, **kwargs)
@property
def enabled(self):
enabled = super(CachalotPanel, self).enabled
if enabled:
self.enable_instrumentation()
else:
self.disable_instrumentation()
return enabled
def enable_instrumentation(self):
settings.CACHALOT_ENABLED = True
def disable_instrumentation(self):
settings.CACHALOT_ENABLED = False
def process_response(self, request, response):
self.collect_invalidations()
def collect_invalidations(self):
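        # For each configured database, map cachalot's per-table cache keys
        # back to their models, read the stored invalidation timestamps and
        # keep the most recent one for the panel subtitle.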
models = get_models()
data = defaultdict(list)
for db_alias in settings.DATABASES:
model_cache_keys = dict(
[(_get_table_cache_key(db_alias, model._meta.db_table), model)
for model in models])
cached_items = cache.get_many(model_cache_keys.keys()) or {}
for cache_key, timestamp in cached_items.items():
invalidation = datetime.fromtimestamp(timestamp)
model = model_cache_keys[cache_key]
data[db_alias].append(
(model._meta.app_label, model.__name__, invalidation))
if self.last_invalidation is None \
or invalidation > self.last_invalidation:
self.last_invalidation = invalidation
data[db_alias].sort(key=lambda row: row[2], reverse=True)
self.record_stats({'invalidations_per_db': data.items()})
@property
def nav_subtitle(self):
if self.enabled and self.last_invalidation is not None:
return (_('Last invalidation: %s')
% timesince(self.last_invalidation))
return ''
| GetAmbassador/django-cachalot | cachalot/panels.py | Python | bsd-3-clause | 2,344 |
import datetime, os
from sqlalchemy import *
from sqlalchemy import event
from sqlalchemy import sql
from sqlalchemy.orm import *
from sqlalchemy.ext.horizontal_shard import ShardedSession
from sqlalchemy.sql import operators
from test.lib import *
from test.lib.engines import testing_engine
from test.lib.testing import eq_
from nose import SkipTest
# TODO: ShardTest can be turned into a base for further subclasses
class ShardTest(fixtures.TestBase):
def setUp(self):
global db1, db2, db3, db4, weather_locations, weather_reports
try:
db1 = testing_engine('sqlite:///shard1.db', options=dict(pool_threadlocal=True))
except ImportError:
raise SkipTest('Requires sqlite')
db2 = testing_engine('sqlite:///shard2.db')
db3 = testing_engine('sqlite:///shard3.db')
db4 = testing_engine('sqlite:///shard4.db')
meta = MetaData()
ids = Table('ids', meta,
Column('nextid', Integer, nullable=False))
def id_generator(ctx):
# in reality, might want to use a separate transaction for this.
c = db1.contextual_connect()
nextid = c.execute(ids.select(for_update=True)).scalar()
c.execute(ids.update(values={ids.c.nextid : ids.c.nextid + 1}))
return nextid
weather_locations = Table("weather_locations", meta,
Column('id', Integer, primary_key=True, default=id_generator),
Column('continent', String(30), nullable=False),
Column('city', String(50), nullable=False)
)
weather_reports = Table(
'weather_reports',
meta,
Column('id', Integer, primary_key=True),
Column('location_id', Integer,
ForeignKey('weather_locations.id')),
Column('temperature', Float),
Column('report_time', DateTime,
default=datetime.datetime.now),
)
for db in (db1, db2, db3, db4):
meta.create_all(db)
db1.execute(ids.insert(), nextid=1)
self.setup_session()
self.setup_mappers()
def tearDown(self):
clear_mappers()
for db in (db1, db2, db3, db4):
db.connect().invalidate()
for i in range(1,5):
os.remove("shard%d.db" % i)
@classmethod
def setup_session(cls):
global create_session
shard_lookup = {
'North America': 'north_america',
'Asia': 'asia',
'Europe': 'europe',
'South America': 'south_america',
}
def shard_chooser(mapper, instance, clause=None):
if isinstance(instance, WeatherLocation):
return shard_lookup[instance.continent]
else:
return shard_chooser(mapper, instance.location)
def id_chooser(query, ident):
return ['north_america', 'asia', 'europe', 'south_america']
def query_chooser(query):
ids = []
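            # Inspect the query's WHERE clause for comparisons against the
            # continent column; every continent found narrows the shard list.
            # If nothing matches, all shards are queried.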
class FindContinent(sql.ClauseVisitor):
def visit_binary(self, binary):
if binary.left.shares_lineage(
weather_locations.c.continent):
if binary.operator == operators.eq:
ids.append(shard_lookup[binary.right.value])
elif binary.operator == operators.in_op:
for bind in binary.right.clauses:
ids.append(shard_lookup[bind.value])
if query._criterion is not None:
FindContinent().traverse(query._criterion)
if len(ids) == 0:
return ['north_america', 'asia', 'europe',
'south_america']
else:
return ids
create_session = sessionmaker(class_=ShardedSession,
autoflush=True, autocommit=False)
create_session.configure(shards={
'north_america': db1,
'asia': db2,
'europe': db3,
'south_america': db4,
}, shard_chooser=shard_chooser, id_chooser=id_chooser,
query_chooser=query_chooser)
@classmethod
def setup_mappers(cls):
global WeatherLocation, Report
class WeatherLocation(object):
def __init__(self, continent, city):
self.continent = continent
self.city = city
class Report(object):
def __init__(self, temperature):
self.temperature = temperature
mapper(WeatherLocation, weather_locations, properties={
'reports':relationship(Report, backref='location'),
'city': deferred(weather_locations.c.city),
})
mapper(Report, weather_reports)
def _fixture_data(self):
tokyo = WeatherLocation('Asia', 'Tokyo')
newyork = WeatherLocation('North America', 'New York')
toronto = WeatherLocation('North America', 'Toronto')
london = WeatherLocation('Europe', 'London')
dublin = WeatherLocation('Europe', 'Dublin')
brasilia = WeatherLocation('South America', 'Brasila')
quito = WeatherLocation('South America', 'Quito')
tokyo.reports.append(Report(80.0))
newyork.reports.append(Report(75))
quito.reports.append(Report(85))
sess = create_session()
for c in [
tokyo,
newyork,
toronto,
london,
dublin,
brasilia,
quito,
]:
sess.add(c)
sess.commit()
sess.close()
return sess
def test_roundtrip(self):
sess = self._fixture_data()
tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").one()
tokyo.city # reload 'city' attribute on tokyo
sess.expunge_all()
eq_(db2.execute(weather_locations.select()).fetchall(), [(1,
'Asia', 'Tokyo')])
eq_(db1.execute(weather_locations.select()).fetchall(), [(2,
'North America', 'New York'), (3, 'North America', 'Toronto'
)])
eq_(sess.execute(weather_locations.select(), shard_id='asia'
).fetchall(), [(1, 'Asia', 'Tokyo')])
t = sess.query(WeatherLocation).get(tokyo.id)
eq_(t.city, tokyo.city)
eq_(t.reports[0].temperature, 80.0)
north_american_cities = \
sess.query(WeatherLocation).filter(WeatherLocation.continent
== 'North America')
eq_(set([c.city for c in north_american_cities]),
set(['New York', 'Toronto']))
asia_and_europe = \
sess.query(WeatherLocation).filter(
WeatherLocation.continent.in_(['Europe', 'Asia']))
eq_(set([c.city for c in asia_and_europe]), set(['Tokyo',
'London', 'Dublin']))
def test_shard_id_event(self):
canary = []
def load(instance, ctx):
canary.append(ctx.attributes["shard_id"])
event.listen(WeatherLocation, "load", load)
sess = self._fixture_data()
tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").set_shard("asia").one()
sess.query(WeatherLocation).all()
eq_(
canary,
['asia', 'north_america', 'north_america',
'europe', 'europe', 'south_america',
'south_america']
) | ioram7/keystone-federado-pgid2013 | build/sqlalchemy/test/ext/test_horizontal_shard.py | Python | apache-2.0 | 7,477 |
# -*- coding: utf-8 -*-
"""
blohg.vcs_backends.git.filectx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Model with classes to represent Git file context.
:copyright: (c) 2010-2013 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
import os
import time
from flask.helpers import locked_cached_property
from pygit2 import GIT_OBJ_BLOB, GIT_SORT_REVERSE, GIT_SORT_TIME, \
GIT_SORT_TOPOLOGICAL
from blohg.vcs import FileCtx as _FileCtx
class FileCtx(_FileCtx):
"""Base class that represents a file context."""
def __init__(self, repo, changectx, path, use_index=False):
self._repo = repo
self._changectx = changectx
self._path = path
self._use_index = use_index
try:
oid = self._changectx.oid
except AttributeError:
oid = self._changectx.target
self._ctx = self.get_fileobj_from_basetree(
self._repo[oid].tree, self._path)
if not self._ctx or self._ctx.type != GIT_OBJ_BLOB or use_index:
try:
self._ctx = self._repo[self._repo.index[self._path].oid]
except:
raise RuntimeError('Invalid file: %s' % self._path)
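# Walk the tree one path segment at a time ('posts/a.rst' -> tree['posts']['a.rst'])
# and return the final object, or None when any segment is missing.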
def get_fileobj_from_basetree(self, basetree, path):
tree = [basetree]
for piece in path.split('/'):
try:
tree.append(self._repo[tree.pop()[piece].oid])
except KeyError:
return None
return tree.pop()
@locked_cached_property
def _first_changeset(self):
try:
ref = self._repo.lookup_reference('refs/heads/master')
except Exception:
raise RuntimeError('Branch "master" not found!')
for commit in self._repo.walk(ref.target,
GIT_SORT_TOPOLOGICAL |
GIT_SORT_TIME |
GIT_SORT_REVERSE):
obj = self.get_fileobj_from_basetree(commit.tree, self._path)
if obj is not None:
return commit
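# _last_changeset walks history newest-first, diffing each commit against the one
# seen just before it; when a diff touches self._path, the newer commit of the pair
# is the last one that modified the file.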
@locked_cached_property
def _last_changeset(self):
try:
ref = self._repo.lookup_reference('refs/heads/master')
except Exception:
return
head = ref.get_object()
for commit in self._repo.walk(ref.target,
GIT_SORT_TOPOLOGICAL |
GIT_SORT_TIME):
diff = self._repo.diff(head, commit)
for patch in diff:
if patch.new_file_path == self._path:
return head
head = commit
@locked_cached_property
def path(self):
"""UTF-8 encoded file path, relative to the repository root."""
return self._path.decode('utf-8')
@locked_cached_property
def data(self):
"""Raw data of the file."""
# This hack avoids 'git add'ing files after every edit.
# File must be added to index once though.
if self._use_index:
real_file = os.path.join(self._repo.workdir, self._path)
if os.path.isfile(real_file):
with open(real_file, 'r') as fp:
return fp.read()
return self._ctx.data
@locked_cached_property
def content(self):
"""UTF-8 encoded content of the file."""
return self.data.decode('utf-8')
@locked_cached_property
def date(self):
"""Unix timestamp of the creation date of the file (date of the first
commit).
"""
try:
date = self._first_changeset.author.time
except:
date = time.time()
return int(date)
@locked_cached_property
def mdate(self):
"""Unix timestamp of the last modification date of the file (date of
the most recent commit).
"""
if self._last_changeset and \
self._last_changeset.oid != self._first_changeset.oid:
return int(self._last_changeset.author.time)
@locked_cached_property
def author(self):
"""The creator of the file (commiter of the first revision of the
file)."""
if self._first_changeset:
name = self._first_changeset.author.name
email = self._first_changeset.author.email
else:
name = self._repo.config['user.name']
email = self._repo.config['user.email']
return ('%s <%s>' % (name, email)).decode('utf-8')
| liuyxpp/blohg | blohg/vcs_backends/git/filectx.py | Python | gpl-2.0 | 4,508 |
#!/usr/bin/env python
"""
i3t.py -- list i3wm windows, get next window id, wrap/loop around
Configuration (``.i3/config``)::
set $i3t_alt_tab ~/-dotfiles/src/i3t/i3t.py n
set $i3t_alt_shift_tab ~/-dotfiles/src/i3t/i3t.py p
bindsym Mod1+Tab exec exec $i3t_alt_tab
bindsym Mod1+Shift+Tab exec $i3t_alt_shift_tab
History
===========
Version 0.0.1
+++++++++++++++
* The original source of this script is this answer to "How can I
configure i3wm to make Alt+Tab action just like in Windows?" by
@michaelschaefer:
https://faq.i3wm.org/question/1773/how-can-i-configure-i3wm-to-make-alttab-action-just-like-in-windows/?answer=1807#post-id-1807
License
=========
This code is licensed with CC-By-SA 3.0:
https://creativecommons.org/licenses/by-sa/3.0/legalcode
"""
import collections
import json
import subprocess
import logging
logging.basicConfig()
log = logging.getLogger()
def command_output(cmd, shell=False):
"""
Execute the given command and return the
output as a list of lines
Args:
cmd (str or list): subprocess.Popen(cmd=cmd)
Kwargs:
shell (bool): subprocess.Popen(shell=shell)
Returns:
list: list of strings from stdout
"""
output = []
if (cmd):
p = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in p.stdout.readlines():
output.append(line.rstrip())
return output
def output_to_dict(output_list):
"""
Args:
output_list:
Returns:
dict: tree_dict #TODO: ordered_pairs_hook
"""
output_string = ""
for line in output_list:
output_string += line
return json.loads(output_string)
def find_windows(tree_dict, window_list, i3barnameprefix='i3bar for output'):
"""
Args:
tree_dict: dict of i3 nodes
window_list: window list to append to
Returns:
list: list of windows nodes
"""
if ("nodes" in tree_dict and len(tree_dict["nodes"]) > 0):
for node in tree_dict["nodes"]:
find_windows(node, window_list)
else:
if (tree_dict["layout"] != "dockarea"
and not tree_dict["name"].startswith(i3barnameprefix)
and not tree_dict["window"] is None):
window_list.append(tree_dict)
return window_list
def get_i3_window_state():
"""
determine the window ids of the 'previous' and 'next' windows relative
to the currently focused window, wrapping around at both ends
Returns:
OrderedDict: {prev: <window id>, current: <list index>, next: <window id>}
"""
cmd = ("i3-msg", "-t", "get_tree")
output = subprocess.check_output(cmd)
tree = output_to_dict(output)
window_list = find_windows(tree, [])
# find the current window
next_index = None
prev_index = None
cur_index = None
for i in range(len(window_list)):
if window_list[i]["focused"] is True:
cur_index = i
next_index = i+1
prev_index = i-1
break
if next_index == len(window_list):
next_index = 0
if prev_index == -1:
prev_index = len(window_list)-1
next_id = window_list[next_index]["window"]
prev_id = window_list[prev_index]["window"]
state = collections.OrderedDict((
('prev', prev_id),
('current', cur_index),
('next', next_id)))
log.debug(('state', state))
return state
def i3_change_window(window_id):
"""
Args:
window_id (int): i3 window id to change window to
Returns:
int: output from ``i3-msg [id="0123"] focus``
"""
cmd = ('i3-msg', '[id="{0:d}"] focus'.format(window_id))
return subprocess.check_call(cmd)
def main(argv=None):
"""
i3t main method
Kwargs:
argv (list): arguments e.g. from ``sys.argv[1:]``
Returns:
int: zero on success, nonzero return code on error
"""
argv_len = len(argv)
if argv_len == 1:
cmd = argv[0]
if cmd[0].lower() == 'n':
cmd = 'next'
elif cmd[0].lower() == 'p':
cmd = 'prev'
elif argv_len == 0:
cmd = 'next'
else:
raise ValueError("specify [n]ext or [p]rev")
retcode = 1
try:
state = get_i3_window_state()
new_window_id = state[cmd]
retcode = i3_change_window(new_window_id)
except subprocess.CalledProcessError:
retcode = 2
return retcode
if __name__ == "__main__":
import sys
sys.exit(main(argv=sys.argv[1:]))
| westurner/dotfiles | scripts/i3t.py | Python | bsd-3-clause | 4,459 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Vis utils."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.applications import efficientnet
from keras.utils import vis_utils
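# plot_model/model_to_dot rely on the optional pydot + graphviz packages, so every
# test body below is wrapped in try/except ImportError and silently passes when
# those dependencies are not installed.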
class ModelToDotFormatTest(tf.test.TestCase, parameterized.TestCase):
def test_plot_model_cnn(self):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))
model.add(keras.layers.Flatten(name='flat'))
model.add(keras.layers.Dense(5, name='dense'))
dot_img_file = 'model_1.png'
try:
vis_utils.plot_model(
model, to_file=dot_img_file, show_shapes=True, show_dtype=True)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
def test_plot_model_with_wrapped_layers_and_models(self):
inputs = keras.Input(shape=(None, 3))
lstm = keras.layers.LSTM(6, return_sequences=True, name='lstm')
x = lstm(inputs)
# Add layer inside a Wrapper
bilstm = keras.layers.Bidirectional(
keras.layers.LSTM(16, return_sequences=True, name='bilstm'))
x = bilstm(x)
# Add model inside a Wrapper
submodel = keras.Sequential(
[keras.layers.Dense(32, name='dense', input_shape=(None, 32))]
)
wrapped_dense = keras.layers.TimeDistributed(submodel)
x = wrapped_dense(x)
# Add shared submodel
outputs = submodel(x)
model = keras.Model(inputs, outputs)
dot_img_file = 'model_2.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
def test_plot_model_with_add_loss(self):
inputs = keras.Input(shape=(None, 3))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.add_loss(tf.reduce_mean(outputs))
dot_img_file = 'model_3.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
model = keras.Sequential([
keras.Input(shape=(None, 3)), keras.layers.Dense(1)])
model.add_loss(tf.reduce_mean(model.output))
dot_img_file = 'model_4.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
@parameterized.parameters({
'show_shapes': False,
'show_dtype': False
}, {
'show_shapes': False,
'show_dtype': True
}, {
'show_shapes': True,
'show_dtype': False
}, {
'show_shapes': True,
'show_dtype': True
})
def test_plot_model_cnn_with_activations(self, show_shapes, show_dtype):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2, kernel_size=2, input_shape=(9, 9, 3), activation='relu'))
model.add(
keras.layers.Conv2D(
filters=4, kernel_size=2, strides=(2, 2), activation='relu'))
model.add(keras.layers.Flatten(name='flat'))
model.add(keras.layers.Dense(5, name='head', activation='softmax'))
dot_img_file = 'model_5.png'
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=show_shapes,
show_dtype=show_dtype,
show_layer_activations=True)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
@parameterized.parameters(
{'layer_range': ['block1a_project_conv', 'block1a_activation']},
{'layer_range': ['block1a_activation', 'block1a_project_conv']},
{'layer_range': [r'block*', 'block2a_se_excite']},
{'layer_range': [r'block\da_activation', r'block\da_project_bn']})
def test_dot_layer_range(self, layer_range):
model = efficientnet.EfficientNetB0(weights=None)
layer_ids_from_model = get_layer_ids_from_model(model, layer_range)
try:
dot = vis_utils.model_to_dot(model, layer_range=layer_range)
dot_edges = dot.get_edges()
layer_ids_from_dot = get_layer_ids_from_dot(dot_edges)
self.assertAllEqual(
sorted(layer_ids_from_model), sorted(layer_ids_from_dot))
except ImportError:
pass
@parameterized.parameters(
{'layer_range': ['block1a_project_conv', 'block1a_activation']},
{'layer_range': ['block1a_activation', 'block1a_project_conv']},
{'layer_range': [r'block*', 'block2a_se_excite']},
{'layer_range': [r'block\da_activation', r'block\da_project_bn']})
def test_plot_layer_range(self, layer_range):
model = efficientnet.EfficientNetB0(weights=None)
effnet_subplot = 'model_effnet.png'
try:
vis_utils.plot_model(
model, to_file=effnet_subplot, layer_range=layer_range)
self.assertTrue(tf.io.gfile.exists(effnet_subplot))
except ImportError:
pass
finally:
if tf.io.gfile.exists(effnet_subplot):
tf.io.gfile.remove(effnet_subplot)
@parameterized.parameters(
{'layer_range': ['block1a_se_squeeze', 'block2a_project_conv']},
{'layer_range': [r'block\da_se_reshape', r'block*']})
def test_layer_range_assertion_fail(self, layer_range):
model = efficientnet.EfficientNetB0(weights=None)
try:
with self.assertRaises(AssertionError):
vis_utils.model_to_dot(model, layer_range=layer_range)
with self.assertRaises(AssertionError):
vis_utils.plot_model(model, layer_range=layer_range)
except ImportError:
pass
@parameterized.parameters(
{'layer_range': ['block1a_activation']},
{'layer_range': []},
{'layer_range': ['input', 'block1a_activation', 'block1a_project_conv']},
{'layer_range': [9, 'block1a_activation']},
{'layer_range': [29, 9]},
{'layer_range': ['block8a_se_reshape', 'block*']})
def test_layer_range_value_fail(self, layer_range):
model = efficientnet.EfficientNetB0(weights=None)
try:
with self.assertRaises(ValueError):
vis_utils.model_to_dot(model, layer_range=layer_range)
with self.assertRaises(ValueError):
vis_utils.plot_model(model, layer_range=layer_range)
except ImportError:
pass
def test_model_with_tf_op(self):
# Test fix for a bug in which inputs to a TFOp layer past the 1st one
# were not connected in the Keras model plot.
a = keras.Input((2,))
b = keras.Input((2,))
model = keras.Model(inputs=[a, b], outputs=a + b)
try:
dot = vis_utils.model_to_dot(model)
self.assertLen(dot.get_edges(), 2) # This model has 2 edges.
except ImportError:
pass
def get_layer_ids_from_model(model, layer_range):
layer_range = vis_utils.get_layer_index_bound_by_layer_name(
model, layer_range)
layer_ids_from_model = []
for i, layer in enumerate(model.layers):
if i >= layer_range[0] and i <= layer_range[1]:
layer_ids_from_model.append(str(id(layer)))
return layer_ids_from_model
def get_layer_ids_from_dot(dot_edges):
layer_ids_from_dot = []
for edge in dot_edges:
for pt in edge.obj_dict['points']:
if pt not in layer_ids_from_dot:
layer_ids_from_dot.append(pt)
return layer_ids_from_dot
if __name__ == '__main__':
tf.test.main()
| keras-team/keras | keras/utils/vis_utils_test.py | Python | apache-2.0 | 8,431 |
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag import protocols
import ztag.test
class FtpKebi(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
impl_re = re.compile("^220- Kebi FTP Server", re.IGNORECASE)
version_re = re.compile(r"\(Version (\d+(?:\.\d+)*)\)", re.IGNORECASE)
def process(self, obj, meta):
banner = obj["banner"]
if self.impl_re.search(banner):
meta.local_metadata.product = "Kebi Ftpd"
match = self.version_re.search(banner)
if match:
meta.local_metadata.version = match.group(1)
return meta
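# Example: for a banner such as "220- Kebi FTP Server (Version 2.0.0)..." (see the
# samples below), process() tags the product as "Kebi Ftpd" and the version as "2.0.0".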
""" Tests
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by Cho Manik - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 SINN \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by Cho Manik - http://www.webkebi.com\r\n220 Easy FTP\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by Cho Manik - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by Cho Manik - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by Cho Manik - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by Cho Manik - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by Cho Manik - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"""
| zmap/ztag | ztag/annotations/FtpKebi.py | Python | apache-2.0 | 3,682 |
# -*- coding: utf-8 -*-
'''
XBMC ESO video add-on.
Copyright (C) 2013 José Antonio Montes (jamontes)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
This is the first trial of the ESO video add-on for XBMC.
This add-on gets the videos from the ESO web site and shows them properly ordered.
You can choose the preferred subtitled language for the videos, if it is available.
This plugin depends on the lutil library functions.
'''
import sys

import lutil
pluginhandle = int(sys.argv[1])
plugin_id = 'plugin.video.eso'
settings = lutil.get_plugin_settings(plugin_id)
lutil.set_debug_mode(settings.getSetting("debug"))
translation = settings.getLocalizedString
root_dir = settings.getAddonInfo('path')
lutil.set_fanart_file(root_dir)
st_release = settings.getSetting('version')
current_release = settings.getAddonInfo('version')
sort_url_param = '' if settings.getSetting("rated") == "true" else '?sort=-release_date'
update_settings = False
# This makes sure that settings are correctly set up on every add-on update and on first run.
if not st_release:
lutil.log("eso Warning: First run. Update settings.")
settings.openSettings()
settings.setSetting('version', current_release)
elif st_release != current_release:
lutil.log("eso Warning: updated release. Check for update settings.")
if update_settings:
settings.openSettings()
settings.setSetting('version', current_release)
# Gets the quality for videos from settings
try:
quality = int(settings.getSetting('quality'))
except:
settings.setSetting('quality', '0')
quality = 0
lutil.log('eso quality setup to "%s"' % ('SD', 'HD', 'UltraHD')[quality])
eso_url = 'http://www.eso.org'
space_url = 'http://www.spacetelescope.org'
# Entry point
def run():
lutil.log("eso.run")
# Get params
params = lutil.get_plugin_parms()
if params.get("action") is None:
create_index(params)
else:
action = params.get("action")
exec action+"(params)"
# Main menu
def create_index(params):
lutil.log("eso.create_index "+repr(params))
action = 'main_list'
# All Videos entry
url = params.get("url", 'http://www.eso.org/public/videos/list/1/') + sort_url_param
title = translation(30107)
genre = 'All the Videos'
lutil.log('eso.create_index action=["%s"] title=["All the Videos"] url=["%s"]' % (action, url))
lutil.addDir(action=action, title=title, url=url, genre=genre)
array_index = 0 if eso_url in url else 1
root_url = (eso_url, space_url)[array_index]
buffer_web = lutil.carga_web(url)
pattern_genre= ('<a href="(/public/videos/archive/category/[^"]+)">([^<]+)</a>', '<a href="(/videos/archive/category/[^"]+)">([^<]+)</a>')[array_index]
# Category list
# This is a hack to avoid repeating categories while keeping the original ordering of the list.
category_list = []
for genre_url, genre_title in lutil.find_multiple(buffer_web, pattern_genre):
url = '%s%s%s' % (root_url, genre_url, '' if 'sort=' in genre_url else sort_url_param)
title = genre_title.strip().replace('"', '"').replace(''', '´').replace('&', '&') # Cleanup the title.
if title not in category_list:
category_list.append(title)
lutil.log('eso.create_index action=["%s"] title=["%s"] url=["%s"]' % (action, title, url))
lutil.addDir(action=action, title=title, url=url, genre=title)
# Spacetelescope web site
if root_url == eso_url:
action = 'create_index'
url = 'http://www.spacetelescope.org/videos/'
title = 'Hubble Space Telescope'
lutil.log('eso.create_index action=["%s"] title=["%s"] url=["%s"]' % (action, title, url))
lutil.addDir(action=action, title=title, url=url, genre=title)
# Search
action = 'search'
url = ('http://www.eso.org/public/videos/?search=', 'http://www.spacetelescope.org/videos/?search=')[array_index]
title = translation(30104)
genre = 'Search'
lutil.log('eso.create_index action=["%s"] title=["Search"] url=["%s"]' % (action, url))
lutil.addDir(action=action, title=title, url=url, genre=genre)
lutil.close_dir(pluginhandle, updateListing=False)
# Main list menu
def main_list(params):
lutil.log("eso.main_list "+repr(params))
# Loads the web page from ESO with the video list.
page_url = params.get("url")
reset_cache = params.get("reset_cache")
genre = params.get("genre")
array_index = 0 if eso_url in page_url else 1
root_url = (eso_url, space_url)[array_index]
buffer_web = lutil.carga_web(page_url)
# Extract video items from the html content
pattern_nextpage = '<a href="([^"]*?)">Next</a>'
pattern_prevpage = '<a href="([^"]*?)">Previous</a>'
pattern_lastpage = '<a href="[^"]*?">([0-9]+)</a>'
pattern_pagenum = '/([0-9]+)/'
pattern_videos = ('</span><img src="([^"]+)" class="[^"]+" alt="([^"]+)">.*?<a href="(/public/videos/[^"]*?)">',
'</span><img src="([^"]+)" class="[^"]+" alt="([^"]+)">.*?<a href="(/videos/[^"]*?)">')[array_index]
lutil.set_content_list(pluginhandle, 'tvshows')
lutil.set_plugin_category(pluginhandle, genre)
# We must setup the previous page entry from the second page onwards.
prev_page_url = lutil.find_first(buffer_web, pattern_prevpage)
if prev_page_url:
prev_page = lutil.find_first(prev_page_url, pattern_pagenum)
lutil.log('eso.main_list Value of prev_page: %s prev_page_url: "%s%s"' % (prev_page, root_url, prev_page_url))
prev_page_url = "%s%s" % (root_url, prev_page_url.replace('&', '&').replace('"', '"'))
reset_cache = "yes"
lutil.addDir(action="main_list", title="<< %s (%s)" % (translation(30106), prev_page), url=prev_page_url, reset_cache=reset_cache, genre=genre)
# This is to force ".." option to go back to main index instead of previous page list.
updateListing = reset_cache == "yes"
for thumbnail, title, video_link in lutil.find_multiple(buffer_web, pattern_videos):
video_info = {}
url = '%s%s' % (root_url, video_link)
if not thumbnail.startswith('http'):
thumbnail = '%s%s' % (root_url, thumbnail)
title = title.strip().replace('"', '"').replace(''', '´').replace('&', '&') # Cleanup the title.
video_info['Genre'] = genre
video_info['Plot'] = title
# Appends a new item to the xbmc item list
lutil.addLink(action="play_video", title=title, url=url, thumbnail=thumbnail, video_info=video_info)
# Here we get the next page URL to add it at the end of the current video list page.
next_page_url = lutil.find_first(buffer_web, pattern_nextpage)
if next_page_url:
last_page = lutil.find_multiple(buffer_web, pattern_lastpage)[-1]
next_page = lutil.find_first(next_page_url, pattern_pagenum)
lutil.log('eso.main_list Value of next_page: %s last_page: %s next_page_url: "%s%s"' % (next_page, last_page, root_url, next_page_url))
next_page_url = "%s%s" % (root_url, next_page_url.replace('&', '&').replace('"', '"'))
lutil.addDir(action="main_list", title=">> %s (%s/%s)" % (translation(30010), next_page, last_page), url=next_page_url, reset_cache=reset_cache, genre=genre)
lutil.close_dir(pluginhandle, updateListing=updateListing)
# This function performs a search through all the videos catalogue.
def search(params):
search_string = lutil.get_keyboard_text(translation(30105))
if search_string:
params['url'] += lutil.get_url_encoded(search_string) + sort_url_param.replace('?', '&')
lutil.log("eso.search Value of search url: %s" % params['url'])
return main_list(params)
return lutil.close_dir(pluginhandle)
# This function searches the linked page to get the video link from the different sources.
def play_video(params):
lutil.log("eso.play "+repr(params))
page_url = params.get("url")
buffer_link = lutil.carga_web(page_url)
pattern_video = '<span class="archive_dl_text"><a href="([^"]*?)"'
quality_list = { 'UltraHD' : 'ultra_hd/', 'HD' : 'hd_and_apple', 'SD': 'medium_podcast' }
video_list = [url for url in lutil.find_multiple(buffer_link, pattern_video) if not url.endswith(('zip','srt','pdf','mxf','wav'))]
lutil.log("eso.play video list"+repr(video_list))
video_options = dict((vquality, url) for url in video_list for vquality in quality_list.keys() if quality_list[vquality] in url)
lutil.log("eso.play video options"+repr(video_options))
video_url = video_options.get('%s' % ('SD', 'HD', 'UltraHD')[quality], '') or video_options.get('SD', '') or video_list[0] if len(video_list) else ''
if video_url:
if video_url.startswith('//'):
video_url = "%s%s" % ('http:', video_url)
elif video_url.startswith('/'):
root_url = eso_url if eso_url in page_url else space_url
video_url = "%s%s" % (root_url, video_url)
try:
lutil.log("eso.play: We have found this video: '%s' and let's going to play it!" % video_url)
return lutil.play_resolved_url(pluginhandle = pluginhandle, url = video_url)
except:
lutil.log('eso.play ERROR: we cannot reproduce this video URL: "%s"' % video_url)
return lutil.showWarning(translation(30012))
else:
lutil.log('eso.play ERROR: we cannot play the video from this source yet: "%s"' % params.get("url"))
return lutil.showWarning(translation(30011))
run()
| jamontes/plugin.video.eso | default.py | Python | gpl-3.0 | 10,225 |
import os
import logging
import json
from wpamod.plugins.base.analysis_plugin import AnalysisPlugin, SPEED_VERY_FAST
REQUESTS_PER_MINUTE = 'Requests per minute'
class CoreStatus(AnalysisPlugin):
SPEED = SPEED_VERY_FAST
DATA_KEYS = {REQUESTS_PER_MINUTE, 'Crawl queue input speed',
'Crawl queue output speed', 'Crawl queue size',
'Audit queue input speed', 'Audit queue output speed',
'Audit queue size'}
CACHE_STATS = 'Cache stats'
def analyze(self):
"""
Show the core status data, which looks like this:
data = {'Requests per minute': s.get_rpm(),
'Crawl queue input speed': s.get_crawl_input_speed(),
'Crawl queue output speed': s.get_crawl_output_speed(),
'Crawl queue size': s.get_crawl_qsize(),
'Audit queue input speed': s.get_audit_input_speed(),
'Audit queue output speed': s.get_audit_output_speed(),
'Audit queue size': s.get_audit_qsize()}
"""
output = []
for i, core_dump in enumerate(self.get_input_files('*.core')):
logging.debug('Analyzing "%s" core status dump' % core_dump)
try:
core_stat_json = json.load(file(core_dump))
except ValueError:
logging.debug('Ignoring %s - JSON decode failed!' % core_dump)
continue
if self.CACHE_STATS in core_stat_json:
cache_stats = core_stat_json.pop(self.CACHE_STATS)
core_stats_items = core_stat_json.items()
core_stats_items.append((self.CACHE_STATS, tuple(cache_stats.items())))
else:
core_stats_items = core_stat_json.items()
core_stats = list(core_stats_items)
core_stats.sort()
dumpfname = os.path.split(core_dump)[1]
output.append(('Measurement #%s (%s)' % (i, dumpfname), core_stats))
return output
def generate_graph_data(self):
"""
:return: The data to use in the HTML graph
"""
raw_data = self.analyze()
graph_data = {}
for measurement_id, data in raw_data:
key = int(measurement_id.split('#')[1].split(' ')[0])
values = data
graph_data[key] = dict(values)
return graph_data
def get_output_name(self):
return 'Core status summary'
| andresriancho/w3af-performance-analysis | wpamod/plugins/core_status.py | Python | gpl-2.0 | 2,487 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-10 20:15
from __future__ import unicode_literals
from django.db import migrations, models
def move_catalog_number_to_work(apps, schema_editor):
Album = apps.get_model("mangaki", "Album")
# The catalog_number field is now in the Work base class, while the deprecated_catalog_number
# is in the two derived classes and contains the value of interest.
for album in Album.objects.all():
album.catalog_number = album.deprecated_catalog_number
album.save()
def move_catalog_number_from_work(apps, schema_editor):
Album = apps.get_model("mangaki", "Album")
for album in Album.objects.all():
album.deprecated_catalog_number = album.catalog_number
album.save()
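# RunPython below wires both helpers together so this data migration can be
# applied and reversed cleanly.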
class Migration(migrations.Migration):
dependencies = [
('mangaki', '0044_migrate_album_composer'),
]
operations = [
migrations.RenameField(
model_name='album',
old_name='catalog_number',
new_name='deprecated_catalog_number',
),
migrations.AddField(
model_name='work',
name='catalog_number',
field=models.CharField(blank=True, max_length=20),
),
migrations.RunPython(move_catalog_number_to_work, reverse_code=move_catalog_number_from_work),
]
| mangaki/mangaki | mangaki/mangaki/migrations/0045_migrate_catalog_number.py | Python | agpl-3.0 | 1,358 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_lazyconn
----------------------------------
Tests for `lazyconn` module.
"""
import unittest
from lazyconn import LazyConnection
from lazyconn import thread_safe
from lazyconn.globals import lg
class TService(object):
def hello(self, name):
return "hello {0}".format(name)
def create_test_client():
return TService()
class TestLazyconn(unittest.TestCase):
def setUp(self):
LazyConnection.register_factory('test', create_test_client)
def test_something(self):
with LazyConnection() as conn:
print conn.test.hello('wayhome')
print lg.conn.test.hello('wayhome2')
def test_decorator(self):
@thread_safe
def test():
print lg.conn.test.hello("wayhome3")
test()
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| youngking/lazyconn | tests/test_lazyconn.py | Python | bsd-3-clause | 914 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
from test_framework.test_framework import IonTestFramework
from test_framework.util import *
import time
import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
class PruneTest(IonTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 6
# Create nodes 0 and 1 to mine.
# Create node 2 to test pruning.
self.full_node_default_args = ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ]
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
# Create nodes 5 to test wallet in prune mode, but do not connect
self.extra_args = [self.full_node_default_args,
self.full_node_default_args,
["-maxreceivebuffer=20000", "-prune=550"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
["-prune=550"]]
def setup_network(self):
self.setup_nodes()
self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/"
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
self.log.info("Success")
self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0)
self.nodes[0]=self.start_node(0, self.options.tmpdir, self.full_node_default_args, timewait=900)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
# Add node1's wallet transactions back to the mempool, to
# avoid the mined blocks from being too small.
self.nodes[1].resendwallettransactions()
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1)
self.nodes[1] = self.start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
self.log.info("Current block height: %d" % height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
self.nodes[1].invalidateblock(badhash)
# We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
self.log.info("New best height: %d" % self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
self.stop_node(1)
self.nodes[1] = self.start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
self.log.info("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
self.log.info("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
# Get node0's wallet transactions back in its mempool, to avoid the
# mined blocks from being too small.
self.nodes[0].resendwallettransactions()
for i in range(22):
# This can be slow, so do this in multiple RPC calls to avoid
# RPC timeouts.
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
self.log.info("Will need to redownload block %d" % self.forkheight)
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
# and only the other 299 small and 220 large blocks are in the block files after it,
# it's expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
# create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
node = self.nodes[node_number] = self.start_node(node_number, self.options.tmpdir, timewait=900)
assert_equal(node.getblockcount(), 995)
assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500)
self.stop_node(node_number)
# now re-start in manual pruning mode
node = self.nodes[node_number] = self.start_node(node_number, self.options.tmpdir, ["-prune=1"], timewait=900)
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
else:
return index
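# In timestamp mode the block time is pushed forward by TIMESTAMP_WINDOW so the
# 2 hour rescan allowance doesn't pull the prune target below the intended height.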
def prune(index, expected_ret=None):
ret = node.pruneblockchain(height(index))
# Check the return value. When use_timestamp is True, just check
# that the return value is less than or equal to the expected
# value, because when more than one block is generated per second,
# a timestamp will not be granular enough to uniquely identify an
# individual block.
if expected_ret is None:
expected_ret = index
if use_timestamp:
assert_greater_than(ret, 0)
assert_greater_than(expected_ret + 1, ret)
else:
assert_equal(ret, expected_ret)
def has_block(index):
return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_jsonrpc(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# negative heights should raise an exception
assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# Does nothing
node.pruneblockchain(height(0))
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# height=500 should prune first file
prune(500)
if has_block(0):
raise AssertionError("blk00000.dat is still there, should be pruned by now")
if not has_block(1):
raise AssertionError("blk00001.dat is missing when should still be there")
# height=650 should prune second file
prune(650)
if has_block(1):
raise AssertionError("blk00001.dat is still there, should be pruned by now")
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
if not has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
if has_block(3):
raise AssertionError("blk00003.dat is still there, should be pruned by now")
# stop node, start back up with auto-prune at 550MB, make sure still runs
self.stop_node(node_number)
self.nodes[node_number] = self.start_node(node_number, self.options.tmpdir, ["-prune=550"], timewait=900)
self.log.info("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.stop_node(2)
self.start_node(2, self.options.tmpdir, ["-prune=550"])
self.log.info("Success")
# check that the wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
self.stop_node(5) #stop and start to trigger rescan
self.start_node(5, self.options.tmpdir, ["-prune=550"])
self.log.info("Success")
def run_test(self):
self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
self.log.info("Mining a big blockchain of 995 blocks")
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
self.log.info("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
self.log.info("Test wallet re-scan")
self.wallet_test()
self.log.info("Done")
if __name__ == '__main__':
PruneTest().main()
| aspaas/ion | test/functional/pruning.py | Python | mit | 21,401 |
# coding=utf-8
import datetime
import logging
import os
import tempfile
import requests
from dogpile.cache.backends.file import AbstractFileLock
from dogpile.cache.region import make_region
from dogpile.util.readwrite_lock import ReadWriteMutex
import autosubliminal
log = logging.getLogger(__name__)
# Expiration time for video scan
SCAN_VIDEO_EXPIRATION_TIME = datetime.timedelta(days=1).total_seconds()
# MutexFileLock: copied from subliminal.cli so we don't depend on subliminal for our cache
class MutexFileLock(AbstractFileLock):
""":class:`MutexFileLock` is a thread-based rw lock based on :class:`dogpile.core.ReadWriteMutex`."""
def __init__(self, filename):
self.mutex = ReadWriteMutex()
def acquire_read_lock(self, wait):
ret = self.mutex.acquire_read_lock(wait)
return wait or ret
def acquire_write_lock(self, wait):
ret = self.mutex.acquire_write_lock(wait)
return wait or ret
def release_read_lock(self):
return self.mutex.release_read_lock()
def release_write_lock(self):
return self.mutex.release_write_lock()
def cache_artwork(indexer_name, indexer_id, artwork_type, artwork_url, thumbnail=False):
"""Store the artwork in the cache."""
try:
img_data = requests.get(artwork_url).content
file_path = get_artwork_cache_path(indexer_name, indexer_id, artwork_type, thumbnail=thumbnail)
with open(file_path, mode='wb') as handler:
handler.write(img_data)
except Exception:
log.exception('Unable to store artwork in cache')
def is_artwork_cached(indexer_name, indexer_id, artwork_type, thumbnail=False):
"""Check if the artwork is cached."""
return os.path.exists(get_artwork_cache_path(indexer_name, indexer_id, artwork_type, thumbnail=thumbnail))
def get_artwork_cache_path(indexer_name, indexer_id, artwork_type, thumbnail=False):
"""Get the path of the artwork in the cache."""
# Make sure the cache path exists
cache_location = os.path.join(autosubliminal.CACHEDIR, 'artwork', indexer_name, artwork_type)
if thumbnail:
cache_location = os.path.join(cache_location, 'thumbnail')
cache_path = os.path.abspath(cache_location)
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# Return artwork cache path
return os.path.abspath(os.path.join(cache_path, str(indexer_id) + '.jpg'))
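# Example (hypothetical values): get_artwork_cache_path('tvdb', 12345, 'poster')
# resolves to <CACHEDIR>/artwork/tvdb/poster/12345.jpg, and with thumbnail=True the
# file lands in the extra 'thumbnail' sub-folder.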
def clear_cache():
"""Clear all caches."""
_clear_autosubliminal_cache()
_clear_subliminal_cache()
_clear_imdbpie_cache()
def _clear_autosubliminal_cache():
cache_file = os.path.abspath(os.path.join(autosubliminal.CACHEDIR, 'autosubliminal.cache.dbm'))
if os.path.exists(cache_file):
os.remove(cache_file)
def _clear_subliminal_cache():
cache_file = os.path.abspath(os.path.join(autosubliminal.CACHEDIR, 'subliminal.cache.dbm'))
if os.path.exists(cache_file):
os.remove(cache_file)
def _clear_imdbpie_cache():
# Cache is created by imdbpie in temp location (see auth.py in imdbpie)
# Cleanup is required when switching between python versions
# If not, 'ValueError: unsupported pickle protocol' is thrown
cache_file = os.path.abspath(os.path.join(tempfile.gettempdir(), 'cache.db'))
if os.path.exists(cache_file):
os.remove(cache_file)
# Global cache region
region = make_region()
| h3llrais3r/Auto-Subliminal | autosubliminal/core/cache.py | Python | gpl-3.0 | 3,378 |
# -*- encoding: utf-8 -*-
from psycopg2 import IntegrityError
from openerp.tests.common import TransactionCase
from openerp.exceptions import ValidationError
# from openerp.tools import mute_logger
class GlobalTestOpenAcademySession(TransactionCase):
'''
Global test to openacademy session model.
Test create session and trigger constraints.
'''
# Pseudo-constructor method
def setUp(self):
super(GlobalTestOpenAcademySession, self).setUp()
self.session = self.env['openacademy.session']
self.partner_vauxoo = self.env.ref('base.res_partner_23').id
self.course = self.env.ref('openacademy.course1')
self.partner_attendee = self.env.ref('base.res_partner_5').id
# Generic methods
# def create_session(self, name, )
# Test methods
def test_10_instructor_is_attendee(self):
'''
Check that a ValidationError is raised when a session's instructor is also an attendee.
'''
with self.assertRaisesRegexp(
ValidationError,
"A session's instructor can't be an attendee"
):
self.session.create({
'name': 'Session test 1',
'seats': 1,
'course_id': 1,
'instructor_id': self.partner_vauxoo,
'attendee_ids': [(6, 0, [self.partner_vauxoo])],
})
def test_20_wkf_done(self):
'''
Check that the session workflow moves through the draft, confirmed and done states.
'''
session_test = self.session.create({
'name': 'Session test 1',
'seats': 10,
'course_id': 1,
'instructor_id': self.partner_vauxoo,
'attendee_ids': [(6, 0, [self.partner_attendee])],
})
# Check inital state
self.assertEqual(session_test.state, 'draft',
'Initial state should be on draft')
session_test.signal_workflow('confirm')
# Check the next state
self.assertEqual(session_test.state, 'confirmed',
'State should be on confirmed')
session_test.signal_workflow('done')
# Check the next state
self.assertEqual(session_test.state, 'done', 'State should be on done')
def INACTIVE_test_30_sessions_without_course(self):
'''
Checks that a session needs to be assigned to a course.
'''
# @mute_logger('openerp.sql_db')
with self.assertRaisesRegexp(
IntegrityError,
'null value in column "course_id" violates not-null constraint'
):
self.session.create({
'name': 'Session test 1',
'seats': 10,
'instructor_id': self.partner_vauxoo,
'attendee_ids': [(6, 0, [self.partner_attendee])],
})
| keylor2906/openacademy-project | openacademy/tests/test_openacademy_session.py | Python | apache-2.0 | 2,851 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from setuptools import find_packages, setup
import versioneer
setup(
name='qiime2',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD-3-Clause',
url='https://qiime2.org',
packages=find_packages(),
entry_points={
'qiime2.plugins': [
'dummy-plugin=qiime2.core.testing.plugin:dummy_plugin'
]
},
package_data={
'qiime2.metadata.tests': ['data/*/*'],
'qiime2.core.testing': ['citations.bib'],
'qiime2.sdk.tests': ['data/*'],
'qiime2': ['citations.bib']
},
zip_safe=False,
)
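# A minimal sketch of how the 'qiime2.plugins' entry point declared above can be
# discovered at runtime (illustrative; QIIME 2 uses its own plugin manager, which
# may differ in detail):
#
#   import pkg_resources
#   for entry_point in pkg_resources.iter_entry_points(group='qiime2.plugins'):
#       plugin = entry_point.load()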
| thermokarst/qiime2 | setup.py | Python | bsd-3-clause | 958 |
#!/usr/bin/env python
import numpy as np
import os
# on Windows, we need the original PATH without Anaconda's compiler in it:
PATH = os.environ.get('PATH') + ';C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\bin'
from distutils.spawn import spawn, find_executable
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
import sys
# CUDA specific config
# nvcc is assumed to be in user's PATH
nvcc_compile_args = ['-O', '--ptxas-options=-v', '-arch=compute_35', '-code=sm_35,sm_52,sm_61', '-c', '--compiler-options=-fPIC']
nvcc_compile_args = os.environ.get('NVCCFLAGS', '').split() + nvcc_compile_args
cuda_libs = ['cublas']
nvcc_bin = 'nvcc.exe'
lib_dir = 'lib/x64'
import distutils.msvc9compiler
distutils.msvc9compiler.VERSION = 14.0
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
cudamat_ext = Extension('gpu_nms',
sources=[
'gpu_nms.cu'
],
language='c++',
libraries=cuda_libs,
extra_compile_args=nvcc_compile_args,
include_dirs = [numpy_include, 'C:\\Programming\\CUDA\\v8.0\\include'])
class CUDA_build_ext(build_ext):
"""
Custom build_ext command that compiles CUDA files.
Note that all extension source files will be processed with this compiler.
"""
def build_extensions(self):
self.compiler.src_extensions.append('.cu')
self.compiler.set_executable('compiler_so', 'nvcc')
self.compiler.set_executable('linker_so', 'nvcc --shared')
if hasattr(self.compiler, '_c_extensions'):
self.compiler._c_extensions.append('.cu') # needed for Windows
self.compiler.spawn = self.spawn
build_ext.build_extensions(self)
def spawn(self, cmd, search_path=1, verbose=0, dry_run=0):
"""
Perform any CUDA specific customizations before actually launching
compile/link etc. commands.
"""
if (sys.platform == 'darwin' and len(cmd) >= 2 and cmd[0] == 'nvcc' and
cmd[1] == '--shared' and cmd.count('-arch') > 0):
# Versions of distutils on OSX earlier than 2.7.9 inject
# '-arch x86_64' which we need to strip while using nvcc for
# linking
while True:
try:
index = cmd.index('-arch')
del cmd[index:index+2]
except ValueError:
break
elif self.compiler.compiler_type == 'msvc':
# There are several things we need to do to change the commands
# issued by MSVCCompiler into one that works with nvcc. In the end,
# it might have been easier to write our own CCompiler class for
# nvcc, as we're only interested in creating a shared library to
# load with ctypes, not in creating an importable Python extension.
# - First, we replace the cl.exe or link.exe call with an nvcc
# call. In case we're running Anaconda, we search cl.exe in the
# original search path we captured further above -- Anaconda
# inserts a MSVC version into PATH that is too old for nvcc.
cmd[:1] = ['nvcc', '--compiler-bindir',
os.path.dirname(find_executable("cl.exe", PATH))
or cmd[0]]
# - Secondly, we fix a bunch of command line arguments.
for idx, c in enumerate(cmd):
# create .dll instead of .pyd files
#if '.pyd' in c: cmd[idx] = c = c.replace('.pyd', '.dll') #20160601, by MrX
# replace /c by -c
if c == '/c': cmd[idx] = '-c'
# replace /DLL by --shared
elif c == '/DLL': cmd[idx] = '--shared'
# remove --compiler-options=-fPIC
elif '-fPIC' in c: del cmd[idx]
# replace /Tc... by ...
elif c.startswith('/Tc'): cmd[idx] = c[3:]
# replace /Fo... by -o ...
elif c.startswith('/Fo'): cmd[idx:idx+1] = ['-o', c[3:]]
# replace /LIBPATH:... by -L...
elif c.startswith('/LIBPATH:'): cmd[idx] = '-L' + c[9:]
# replace /OUT:... by -o ...
elif c.startswith('/OUT:'): cmd[idx:idx+1] = ['-o', c[5:]]
# remove /EXPORT:initlibcudamat or /EXPORT:initlibcudalearn
elif c.startswith('/EXPORT:'): del cmd[idx]
# replace cublas.lib by -lcublas
elif c == 'cublas.lib': cmd[idx] = '-lcublas'
# - Finally, we pass on all arguments starting with a '/' to the
# compiler or linker, and have nvcc handle all other arguments
if '--shared' in cmd:
pass_on = '--linker-options='
# we only need MSVCRT for a .dll, remove CMT if it sneaks in:
cmd.append('/NODEFAULTLIB:libcmt.lib')
else:
pass_on = '--compiler-options='
cmd = ([c for c in cmd if c[0] != '/'] +
[pass_on + ','.join(c for c in cmd if c[0] == '/')])
# For the future: Apart from the wrongly set PATH by Anaconda, it
# would suffice to run the following for compilation on Windows:
# nvcc -c -O -o <file>.obj <file>.cu
# And the following for linking:
# nvcc --shared -o <file>.dll <file1>.obj <file2>.obj -lcublas
# This could be done by a NVCCCompiler class for all platforms.
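            # As an illustration (using this package's own gpu_nms.cu), a call
            # such as
            #   cl.exe /c /Tcgpu_nms.cu /Fogpu_nms.obj
            # is rewritten by the substitutions above into roughly
            #   nvcc --compiler-bindir <cl.exe dir> -c gpu_nms.cu -o gpu_nms.obj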
spawn(cmd, search_path, verbose, dry_run)
setup(name="py_fast_rcnn_gpu",
description="Performs linear algebra computation on the GPU via CUDA",
ext_modules=[cudamat_ext],
cmdclass={'build_ext': CUDA_build_ext},
)
| msracver/Deformable-ConvNets | lib/nms/setup_windows_cuda.py | Python | mit | 6,031 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
DaskExecutor
.. seealso::
For more information on how the DaskExecutor works, take a look at the guide:
:ref:`executor:DaskExecutor`
"""
import subprocess
from typing import Any, Dict, Optional
from distributed import Client, Future, as_completed
from distributed.security import Security
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import NOT_STARTED_MESSAGE, BaseExecutor, CommandType
from airflow.models.taskinstance import TaskInstanceKey
# queue="default" is a special case since this is the base config default queue name;
# with respect to DaskExecutor, treat it as if no queue is provided
_UNDEFINED_QUEUES = {None, 'default'}
class DaskExecutor(BaseExecutor):
"""DaskExecutor submits tasks to a Dask Distributed cluster."""
def __init__(self, cluster_address=None):
super().__init__(parallelism=0)
if cluster_address is None:
cluster_address = conf.get('dask', 'cluster_address')
if not cluster_address:
raise ValueError('Please provide a Dask cluster address in airflow.cfg')
self.cluster_address = cluster_address
# ssl / tls parameters
self.tls_ca = conf.get('dask', 'tls_ca')
self.tls_key = conf.get('dask', 'tls_key')
self.tls_cert = conf.get('dask', 'tls_cert')
self.client: Optional[Client] = None
self.futures: Optional[Dict[Future, TaskInstanceKey]] = None
def start(self) -> None:
if self.tls_ca or self.tls_key or self.tls_cert:
security = Security(
tls_client_key=self.tls_key,
tls_client_cert=self.tls_cert,
tls_ca_file=self.tls_ca,
require_encryption=True,
)
else:
security = None
self.client = Client(self.cluster_address, security=security)
self.futures = {}
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None,
) -> None:
self.validate_command(command)
def airflow_run():
return subprocess.check_call(command, close_fds=True)
if not self.client:
raise AirflowException(NOT_STARTED_MESSAGE)
resources = None
if queue not in _UNDEFINED_QUEUES:
scheduler_info = self.client.scheduler_info()
avail_queues = {
resource for d in scheduler_info['workers'].values() for resource in d['resources']
}
if queue not in avail_queues:
raise AirflowException(f"Attempted to submit task to an unavailable queue: '{queue}'")
resources = {queue: 1}
future = self.client.submit(airflow_run, pure=False, resources=resources)
self.futures[future] = key # type: ignore
def _process_future(self, future: Future) -> None:
if not self.futures:
raise AirflowException(NOT_STARTED_MESSAGE)
if future.done():
key = self.futures[future]
if future.exception():
self.log.error("Failed to execute task: %s", repr(future.exception()))
self.fail(key)
elif future.cancelled():
self.log.error("Failed to execute task")
self.fail(key)
else:
self.success(key)
self.futures.pop(future)
def sync(self) -> None:
if self.futures is None:
raise AirflowException(NOT_STARTED_MESSAGE)
# make a copy so futures can be popped during iteration
for future in self.futures.copy():
self._process_future(future)
def end(self) -> None:
if not self.client:
raise AirflowException(NOT_STARTED_MESSAGE)
if self.futures is None:
raise AirflowException(NOT_STARTED_MESSAGE)
self.client.cancel(list(self.futures.keys()))
for future in as_completed(self.futures.copy()):
self._process_future(future)
def terminate(self):
if self.futures is None:
raise AirflowException(NOT_STARTED_MESSAGE)
self.client.cancel(self.futures.keys())
self.end()
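# The connection settings read in __init__ correspond to an airflow.cfg section
# along these lines (key names follow the conf.get('dask', ...) calls above;
# the values shown are placeholders, not defaults from this module):
#
#   [dask]
#   cluster_address = 127.0.0.1:8786
#   tls_ca =
#   tls_key =
#   tls_cert =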
| apache/incubator-airflow | airflow/executors/dask_executor.py | Python | apache-2.0 | 5,095 |
def median(lst):
lst = sorted(lst)
if len(lst) < 1:
return None
if len(lst) %2 == 1:
return lst[((len(lst)+1)/2)-1]
else:
return float(sum(lst[(len(lst)/2)-1:(len(lst)/2)+1]))/2.0
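# Worked example of the helper above: median([3, 1, 2]) == 2 and
# median([4, 1, 3, 2]) == 2.5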
infile = open('jellyhash_bucket_histo')
i=0;
bucket_sizes = []
zeros = 0
for line in infile:
n = int(line.split(" ")[1])
if i%25000000 == 0:
print i, n
if n == 0:
zeros = zeros + 1
else:
bucket_sizes.append(n)
i += 1
infile.close()
print "length including 0s:", len(bucket_sizes) + zeros
print "number non-zeros:", len(bucket_sizes)
print "NOTE: for below stats, we don't count zeros"
print "min:", min(bucket_sizes)
print "max:", max(bucket_sizes)
print "mean:", sum(bucket_sizes) * 1.0 / len(bucket_sizes)
print "median:", median(bucket_sizes) | jgao/dreamchallenge | jellyhash_stats.py | Python | gpl-3.0 | 794 |
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import os
import rtree
import numpy as np
import netCDF4 as nc4
from django.conf import settings
from pyaxiom.netcdf import EnhancedDataset, EnhancedMFDataset
from wms.utils import find_appropriate_time
from wms.models import VirtualLayer, Layer, Style
from wms import logger # noqa
def try_float(obj):
    try:
        return float(obj)
    except ValueError:
        return None
class NetCDFDataset(object):
@contextmanager
def dataset(self):
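        # Fallback chain: reuse an already-open handle if present, then try a
        # single-file EnhancedDataset, then an EnhancedMFDataset aggregated
        # along 'time'; yield None if none of these can be opened.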
try:
# Dataset is already loaded
self._dataset.variables
yield self._dataset
except AttributeError:
try:
self._dataset = EnhancedDataset(self.path())
yield self._dataset
except (OSError, RuntimeError, FileNotFoundError):
try:
self._dataset = EnhancedMFDataset(self.path(), aggdim='time')
yield self._dataset
except (OSError, IndexError, RuntimeError, FileNotFoundError):
yield None
@contextmanager
def topology(self):
try:
self._topology.variables
yield self._topology
except AttributeError:
try:
self._topology = EnhancedDataset(self.topology_file)
yield self._topology
except RuntimeError:
yield None
def close(self):
try:
self._dataset.close()
except BaseException:
pass
try:
self._topology.close()
except BaseException:
pass
@property
def topology_file(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.nc'.format(self.safe_filename))
@property
def time_cache_file(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.npy'.format(self.safe_filename))
@property
def domain_file(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.domain'.format(self.safe_filename))
@property
def node_tree_root(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.nodes').format(self.safe_filename)
@property
def node_tree_data_file(self):
return '{}.dat'.format(self.node_tree_root)
@property
def node_tree_index_file(self):
return '{}.idx'.format(self.node_tree_root)
@property
def face_tree_root(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.faces').format(self.safe_filename)
@property
def face_tree_data_file(self):
return '{}.dat'.format(self.face_tree_root)
@property
def face_tree_index_file(self):
return '{}.idx'.format(self.face_tree_root)
def setup_getfeatureinfo(self, layer, request, location=None):
location = location or 'face'
tree = None
try:
latitude = request.GET['latitude']
longitude = request.GET['longitude']
# Find closest cell or node (only node for now)
if location == 'face':
tree = rtree.index.Index(self.face_tree_root)
elif location == 'node':
tree = rtree.index.Index(self.node_tree_root)
else:
raise NotImplementedError("No RTree for location '{}'".format(location))
try:
nindex = list(tree.nearest((longitude, latitude, longitude, latitude), 1, objects=True))[0]
except IndexError:
raise ValueError("No cells in the {} tree for point {}, {}".format(location, longitude, latitude))
closest_x, closest_y = tuple(nindex.bbox[2:])
geo_index = nindex.object
except BaseException:
raise
finally:
if tree is not None:
tree.close()
all_times = self.times(layer)
start_nc_index = np.searchsorted(all_times, request.GET['starting'], side='left')
start_nc_index = min(start_nc_index, len(all_times) - 1)
end_nc_index = np.searchsorted(all_times, request.GET['ending'], side='right')
end_nc_index = max(end_nc_index, 1) # Always pull the first index
return_dates = all_times[start_nc_index:end_nc_index]
return geo_index, closest_x, closest_y, start_nc_index, end_nc_index, return_dates
def __del__(self):
self.close()
def analyze_virtual_layers(self):
with self.dataset() as nc:
if nc is not None:
# Earth Projected Sea Water Velocity
u_names = ['eastward_sea_water_velocity', 'eastward_sea_water_velocity_assuming_no_tide']
v_names = ['northward_sea_water_velocity', 'northward_sea_water_velocity_assuming_no_tide']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
VirtualLayer.make_vector_layer(us, vs, 'sea_water_velocity', 'vectors', self.id)
# Grid projected Sea Water Velocity
u_names = ['x_sea_water_velocity', 'grid_eastward_sea_water_velocity']
v_names = ['y_sea_water_velocity', 'grid_northward_sea_water_velocity']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
VirtualLayer.make_vector_layer(us, vs, 'grid_sea_water_velocity', 'vectors', self.id)
# Earth projected Winds
u_names = ['eastward_wind']
v_names = ['northward_wind']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
# Hopefully we support barbs eventually
VirtualLayer.make_vector_layer(us, vs, 'winds', 'barbs', self.id)
# Grid projected Winds
u_names = ['x_wind', 'grid_eastward_wind']
v_names = ['y_wind', 'grid_northward_wind']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
# Hopefully we support barbs eventually
VirtualLayer.make_vector_layer(us, vs, 'grid_winds', 'barbs', self.id)
# Earth projected Ice velocity
u_names = ['eastward_sea_ice_velocity']
v_names = ['northward_sea_ice_velocity']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
VirtualLayer.make_vector_layer(us, vs, 'sea_ice_velocity', 'vectors', self.id)
def update_layers(self):
with self.dataset() as nc:
if nc is not None:
for v in nc.variables:
l, _ = Layer.objects.get_or_create(dataset_id=self.id, var_name=v)
nc_var = nc.variables[v]
                    # *_min and *_max attributes take precedence over the *_range attributes
                    # scale_* attributes take precedence over valid_* attributes
# *_range
if hasattr(nc_var, 'scale_range'):
l.default_min = try_float(nc_var.scale_range[0])
l.default_max = try_float(nc_var.scale_range[-1])
elif hasattr(nc_var, 'valid_range'):
l.default_min = try_float(nc_var.valid_range[0])
l.default_max = try_float(nc_var.valid_range[-1])
# *_min
if hasattr(nc_var, 'scale_min'):
l.default_min = try_float(nc_var.scale_min)
elif hasattr(nc_var, 'valid_min'):
l.default_min = try_float(nc_var.valid_min)
# *_max
if hasattr(nc_var, 'scale_max'):
l.default_max = try_float(nc_var.scale_max)
elif hasattr(nc_var, 'valid_max'):
l.default_max = try_float(nc_var.valid_max)
# type
if hasattr(nc_var, 'scale_type'):
if nc_var.scale_type in ['logarithmic', 'log']:
l.logscale = True
elif nc_var.scale_type in ['linear']:
l.logscale = False
if hasattr(nc_var, 'standard_name'):
std_name = nc_var.standard_name
l.std_name = std_name
if len(nc_var.dimensions) > 1:
l.active = True
if hasattr(nc_var, 'long_name'):
l.description = nc_var.long_name
if hasattr(nc_var, 'units'):
l.units = nc_var.units
# Set some standard styles
l.styles.add(*Style.defaults())
l.save()
self.analyze_virtual_layers()
def nearest_time(self, layer, time):
"""
Return the time index and time value that is closest
"""
with self.dataset() as nc:
time_vars = nc.get_variables_by_attributes(standard_name='time')
if not time_vars:
return None, None
if len(time_vars) == 1:
time_var = time_vars[0]
else:
                # if there is more than one variable with standard_name = time,
                # find the appropriate one to use with the layer
var_obj = nc.variables[layer.access_name]
time_var_name = find_appropriate_time(var_obj, time_vars)
time_var = nc.variables[time_var_name]
units = time_var.units
if hasattr(time_var, 'calendar'):
calendar = time_var.calendar
else:
calendar = 'gregorian'
num_date = round(nc4.date2num(time, units=units, calendar=calendar))
times = time_var[:]
time_index = np.searchsorted(times, num_date, side='left')
time_index = min(time_index, len(times) - 1) # Don't do over the length of time
return time_index, times[time_index]
| sci-wms/sci-wms | wms/models/datasets/netcdf.py | Python | gpl-3.0 | 10,531 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gettext as gettext_module
import os
import shutil
import stat
import unittest
from subprocess import Popen
from django.core.management import (
CommandError, call_command, execute_from_command_line,
)
from django.core.management.commands.makemessages import \
Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, mock, override_settings
from django.test.utils import captured_stderr, captured_stdout
from django.utils import six, translation
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import ugettext
has_msgfmt = find_command('msgfmt')
@unittest.skipUnless(has_msgfmt, 'msgfmt is mandatory for compilation tests')
class MessageCompilationTests(SimpleTestCase):
test_dir = os.path.abspath(os.path.join(os.path.dirname(upath(__file__)), 'commands'))
def setUp(self):
self._cwd = os.getcwd()
self.addCleanup(os.chdir, self._cwd)
os.chdir(self.test_dir)
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
class PoFileTests(MessageCompilationTests):
LOCALE = 'es_AR'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def test_bom_rejection(self):
with self.assertRaises(CommandError) as cm:
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertIn("file has a BOM (Byte Order Mark)", cm.exception.args[0])
self.assertFalse(os.path.exists(self.MO_FILE))
def test_no_write_access(self):
mo_file_en = 'locale/en/LC_MESSAGES/django.mo'
err_buffer = StringIO()
# put file in read-only mode
old_mode = os.stat(mo_file_en).st_mode
os.chmod(mo_file_en, stat.S_IREAD)
try:
call_command('compilemessages', locale=['en'], stderr=err_buffer, verbosity=0)
err = err_buffer.getvalue()
self.assertIn("not writable location", force_text(err))
finally:
os.chmod(mo_file_en, old_mode)
class PoFileContentsTests(MessageCompilationTests):
# Ticket #11240
LOCALE = 'fr'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def setUp(self):
super(PoFileContentsTests, self).setUp()
self.addCleanup(os.unlink, os.path.join(self.test_dir, self.MO_FILE))
def test_percent_symbol_in_po_file(self):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE))
class MultipleLocaleCompilationTests(MessageCompilationTests):
MO_FILE_HR = None
MO_FILE_FR = None
def setUp(self):
super(MultipleLocaleCompilationTests, self).setUp()
localedir = os.path.join(self.test_dir, 'locale')
self.MO_FILE_HR = os.path.join(localedir, 'hr/LC_MESSAGES/django.mo')
self.MO_FILE_FR = os.path.join(localedir, 'fr/LC_MESSAGES/django.mo')
self.addCleanup(self.rmfile, os.path.join(localedir, self.MO_FILE_HR))
self.addCleanup(self.rmfile, os.path.join(localedir, self.MO_FILE_FR))
def test_one_locale(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=['hr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE_HR))
def test_multiple_locales(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=['hr', 'fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE_HR))
self.assertTrue(os.path.exists(self.MO_FILE_FR))
class ExcludedLocaleCompilationTests(MessageCompilationTests):
test_dir = os.path.abspath(os.path.join(os.path.dirname(upath(__file__)), 'exclude'))
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo'
def setUp(self):
super(ExcludedLocaleCompilationTests, self).setUp()
shutil.copytree('canned_locale', 'locale')
self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))
def test_command_help(self):
with captured_stdout(), captured_stderr():
# `call_command` bypasses the parser; by calling
# `execute_from_command_line` with the help subcommand we
# ensure that there are no issues with the parser itself.
execute_from_command_line(['django-admin', 'help', 'compilemessages'])
def test_one_locale_excluded(self):
call_command('compilemessages', exclude=['it'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertTrue(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_multiple_locales_excluded(self):
call_command('compilemessages', exclude=['it', 'fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_one_locale_excluded_with_locale(self):
call_command('compilemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_multiple_locales_excluded_with_locale(self):
call_command('compilemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
class CompilationErrorHandling(MessageCompilationTests):
def test_error_reported_by_msgfmt(self):
# po file contains wrong po formatting.
mo_file = 'locale/ja/LC_MESSAGES/django.mo'
self.addCleanup(self.rmfile, os.path.join(self.test_dir, mo_file))
with self.assertRaises(CommandError):
call_command('compilemessages', locale=['ja'], verbosity=0)
def test_msgfmt_error_including_non_ascii(self):
# po file contains invalid msgstr content (triggers non-ascii error content).
mo_file = 'locale/ko/LC_MESSAGES/django.mo'
self.addCleanup(self.rmfile, os.path.join(self.test_dir, mo_file))
# Make sure the output of msgfmt is unaffected by the current locale.
env = os.environ.copy()
env.update({'LANG': 'C'})
with mock.patch('django.core.management.utils.Popen', lambda *args, **kwargs: Popen(*args, env=env, **kwargs)):
if six.PY2:
# Various assertRaises on PY2 don't support unicode error messages.
try:
call_command('compilemessages', locale=['ko'], verbosity=0)
except CommandError as err:
self.assertIn("'�' cannot start a field name", six.text_type(err))
else:
cmd = MakeMessagesCommand()
if cmd.gettext_version < (0, 18, 3):
raise unittest.SkipTest("python-brace-format is a recent gettext addition.")
with self.assertRaisesMessage(CommandError, "'�' cannot start a field name"):
call_command('compilemessages', locale=['ko'], verbosity=0)
class ProjectAndAppTests(MessageCompilationTests):
LOCALE = 'ru'
PROJECT_MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
APP_MO_FILE = 'app_with_locale/locale/%s/LC_MESSAGES/django.mo' % LOCALE
def setUp(self):
super(ProjectAndAppTests, self).setUp()
self.addCleanup(self.rmfile, os.path.join(self.test_dir, self.PROJECT_MO_FILE))
self.addCleanup(self.rmfile, os.path.join(self.test_dir, self.APP_MO_FILE))
class FuzzyTranslationTest(ProjectAndAppTests):
def setUp(self):
super(FuzzyTranslationTest, self).setUp()
gettext_module._translations = {} # flush cache or test will be useless
def test_nofuzzy_compiling(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
with translation.override(self.LOCALE):
self.assertEqual(ugettext('Lenin'), force_text('Ленин'))
self.assertEqual(ugettext('Vodka'), force_text('Vodka'))
def test_fuzzy_compiling(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=[self.LOCALE], fuzzy=True, stdout=StringIO())
with translation.override(self.LOCALE):
self.assertEqual(ugettext('Lenin'), force_text('Ленин'))
self.assertEqual(ugettext('Vodka'), force_text('Водка'))
class AppCompilationTest(ProjectAndAppTests):
def test_app_locale_compiled(self):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertTrue(os.path.exists(self.PROJECT_MO_FILE))
self.assertTrue(os.path.exists(self.APP_MO_FILE))
| filias/django | tests/i18n/test_compilation.py | Python | bsd-3-clause | 9,555 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import copy
import random
from oslo_log import log
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
LOG = log.getLogger(__name__)
OVN_SCHEDULER_CHANCE = 'chance'
OVN_SCHEDULER_LEAST_LOADED = 'leastloaded'
class OVNGatewayScheduler(object, metaclass=abc.ABCMeta):
def __init__(self):
pass
@abc.abstractmethod
def select(self, nb_idl, sb_idl, gateway_name, candidates=None):
"""Schedule the gateway port of a router to an OVN chassis.
Schedule the gateway router port only if it is not already
scheduled.
"""
def filter_existing_chassis(self, nb_idl, gw_chassis,
physnet, chassis_physnets,
existing_chassis):
chassis_list = copy.copy(existing_chassis)
for chassis_name in existing_chassis:
if utils.is_gateway_chassis_invalid(chassis_name, gw_chassis,
physnet, chassis_physnets):
LOG.debug("Chassis %(chassis)s is invalid for scheduling "
"router in physnet: %(physnet)s.",
{'chassis': chassis_name,
'physnet': physnet})
chassis_list.remove(chassis_name)
return chassis_list
def _schedule_gateway(self, nb_idl, sb_idl, gateway_name, candidates,
existing_chassis):
existing_chassis = existing_chassis or []
candidates = candidates or self._get_chassis_candidates(sb_idl)
candidates = list(set(candidates) - set(existing_chassis))
# If no candidates, or gateway scheduled on MAX_GATEWAY_CHASSIS nodes
# or all candidates in existing_chassis, return existing_chassis.
# Otherwise, if more candidates present, then schedule them.
if existing_chassis:
if not candidates or (
len(existing_chassis) == ovn_const.MAX_GW_CHASSIS):
return existing_chassis
if not candidates:
return [ovn_const.OVN_GATEWAY_INVALID_CHASSIS]
chassis_count = ovn_const.MAX_GW_CHASSIS - len(existing_chassis)
# The actual binding of the gateway to a chassis via the options
# column or gateway_chassis column in the OVN_Northbound is done
# by the caller
chassis = self._select_gateway_chassis(
nb_idl, candidates)[:chassis_count]
# priority of existing chassis is higher than candidates
chassis = existing_chassis + chassis
LOG.debug("Gateway %s scheduled on chassis %s",
gateway_name, chassis)
return chassis
@abc.abstractmethod
def _select_gateway_chassis(self, nb_idl, candidates):
"""Choose a chassis from candidates based on a specific policy."""
def _get_chassis_candidates(self, sb_idl):
# TODO(azbiswas): Allow selection of a specific type of chassis when
# the upstream code merges.
# return (sb_idl.get_all_chassis('gateway_router') or
# sb_idl.get_all_chassis())
return sb_idl.get_all_chassis()
class OVNGatewayChanceScheduler(OVNGatewayScheduler):
"""Randomly select an chassis for a gateway port of a router"""
def select(self, nb_idl, sb_idl, gateway_name, candidates=None,
existing_chassis=None):
return self._schedule_gateway(nb_idl, sb_idl, gateway_name,
candidates, existing_chassis)
def _select_gateway_chassis(self, nb_idl, candidates):
candidates = copy.deepcopy(candidates)
random.shuffle(candidates)
return candidates
class OVNGatewayLeastLoadedScheduler(OVNGatewayScheduler):
"""Select the least loaded chassis for a gateway port of a router"""
def select(self, nb_idl, sb_idl, gateway_name, candidates=None,
existing_chassis=None):
return self._schedule_gateway(nb_idl, sb_idl, gateway_name,
candidates, existing_chassis)
@staticmethod
def _get_chassis_load_by_prios(chassis_info):
        Retrieve the number of ports per priority hosted in the chassis.
@param chassis_info: list of (port, prio) hosted by this chassis
@type chassis_info: []
@return: A list of (prio, number_of_ports) tuples.
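        For example (illustrative values), [('lrp1', 1), ('lrp2', 1), ('lrp3', 2)]
        yields {1: 2, 2: 1}.items().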
"""
chassis_load = {}
for lrp, prio in chassis_info:
chassis_load[prio] = chassis_load.get(prio, 0) + 1
return chassis_load.items()
@staticmethod
def _get_chassis_load(chassis):
chassis_ports_prios = chassis[1]
return sorted(
OVNGatewayLeastLoadedScheduler._get_chassis_load_by_prios(
chassis_ports_prios), reverse=True)
def _select_gateway_chassis(self, nb_idl, candidates):
chassis_bindings = nb_idl.get_all_chassis_gateway_bindings(candidates)
return [chassis for chassis, load in sorted(chassis_bindings.items(),
key=OVNGatewayLeastLoadedScheduler._get_chassis_load)]
OVN_SCHEDULER_STR_TO_CLASS = {
OVN_SCHEDULER_CHANCE: OVNGatewayChanceScheduler,
OVN_SCHEDULER_LEAST_LOADED: OVNGatewayLeastLoadedScheduler}
def get_scheduler():
return OVN_SCHEDULER_STR_TO_CLASS[ovn_conf.get_ovn_l3_scheduler()]()
| openstack/neutron | neutron/scheduler/l3_ovn_scheduler.py | Python | apache-2.0 | 5,991 |
# Sebastian Raschka, 2015 (http://sebastianraschka.com)
# Python Machine Learning - Code Examples
#
# Chapter 12 - Training Artificial Neural Networks for Image Recognition
#
# S. Raschka. Python Machine Learning. Packt Publishing Ltd., 2015.
# GitHub Repo: https://github.com/rasbt/python-machine-learning-book
#
# License: MIT
# https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt
import os
import struct
import numpy as np
from scipy.special import expit
import sys
import matplotlib.pyplot as plt
#############################################################################
print(50 * '=')
print('Obtaining the MNIST dataset')
print(50 * '-')
s = """
The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/
and consists of the following four parts:
- Training set images: train-images-idx3-ubyte.gz
(9.9 MB, 47 MB unzipped, 60,000 samples)
- Training set labels: train-labels-idx1-ubyte.gz
(29 KB, 60 KB unzipped, 60,000 labels)
- Test set images: t10k-images-idx3-ubyte.gz
(1.6 MB, 7.8 MB, 10,000 samples)
- Test set labels: t10k-labels-idx1-ubyte.gz
(5 KB, 10 KB unzipped, 10,000 labels)
In this section, we will only be working with a subset of MNIST, thus,
we only need to download the training set images and training set labels.
After downloading the files, I recommend unzipping the files using
the Unix/Linux gzip tool from
the terminal for efficiency, e.g., using the command
gzip *ubyte.gz -d
in your local MNIST download directory, or, using your
favorite unzipping tool if you are working with a machine
running on Microsoft Windows. The images are stored in byte form,
and using the following function, we will read them into NumPy arrays
that we will use to train our MLP.
"""
print(s)
_ = input("Please hit enter to continue.")
def load_mnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte' % kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte' % kind)
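    # The IDX files are read directly: '>II' unpacks the big-endian label-file
    # header (magic, n_items) and '>IIII' the image-file header
    # (magic, n_images, n_rows, n_cols); pixel data follows as unsigned bytes.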
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II',
lbpath.read(8))
labels = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(len(labels), 784)
return images, labels
X_train, y_train = load_mnist('mnist', kind='train')
print('Training rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_mnist('mnist', kind='t10k')
print('Test rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(10):
img = X_train[y_train == i][0].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_all.png', dpi=300)
plt.show()
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
img = X_train[y_train == 7][i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_7.png', dpi=300)
plt.show()
"""
Uncomment the following lines to optionally save the data in CSV format.
However, note that those CSV files will take up a
substantial amount of storage space:
- train_img.csv 1.1 GB (gigabytes)
- train_labels.csv 1.4 MB (megabytes)
- test_img.csv 187.0 MB
- test_labels 144 KB (kilobytes)
"""
# np.savetxt('train_img.csv', X_train, fmt='%i', delimiter=',')
# np.savetxt('train_labels.csv', y_train, fmt='%i', delimiter=',')
# X_train = np.genfromtxt('train_img.csv', dtype=int, delimiter=',')
# y_train = np.genfromtxt('train_labels.csv', dtype=int, delimiter=',')
# np.savetxt('test_img.csv', X_test, fmt='%i', delimiter=',')
# np.savetxt('test_labels.csv', y_test, fmt='%i', delimiter=',')
# X_test = np.genfromtxt('test_img.csv', dtype=int, delimiter=',')
# y_test = np.genfromtxt('test_labels.csv', dtype=int, delimiter=',')
#############################################################################
print(50 * '=')
print('Implementing a multi-layer perceptron')
print(50 * '-')
class NeuralNetMLP(object):
""" Feedforward neural network / Multi-layer perceptron classifier.
Parameters
------------
n_output : int
Number of output units, should be equal to the
number of unique class labels.
n_features : int
Number of features (dimensions) in the target dataset.
Should be equal to the number of columns in the X array.
n_hidden : int (default: 30)
Number of hidden units.
l1 : float (default: 0.0)
Lambda value for L1-regularization.
No regularization if l1=0.0 (default)
l2 : float (default: 0.0)
Lambda value for L2-regularization.
No regularization if l2=0.0 (default)
epochs : int (default: 500)
Number of passes over the training set.
eta : float (default: 0.001)
Learning rate.
alpha : float (default: 0.0)
Momentum constant. Factor multiplied with the
gradient of the previous epoch t-1 to improve
learning speed
w(t) := w(t) - (grad(t) + alpha*grad(t-1))
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent cycles.
minibatches : int (default: 1)
Divides training data into k minibatches for efficiency.
Normal gradient descent learning if k=1 (default).
random_state : int (default: None)
Set random state for shuffling and initializing the weights.
Attributes
-----------
cost_ : list
Sum of squared errors after each epoch.
"""
def __init__(self, n_output, n_features, n_hidden=30,
l1=0.0, l2=0.0, epochs=500, eta=0.001,
alpha=0.0, decrease_const=0.0, shuffle=True,
minibatches=1, random_state=None):
np.random.seed(random_state)
self.n_output = n_output
self.n_features = n_features
self.n_hidden = n_hidden
self.w1, self.w2 = self._initialize_weights()
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.alpha = alpha
self.decrease_const = decrease_const
self.shuffle = shuffle
self.minibatches = minibatches
def _encode_labels(self, y, k):
"""Encode labels into one-hot representation
Parameters
------------
y : array, shape = [n_samples]
Target values.
Returns
-----------
onehot : array, shape = (n_labels, n_samples)
"""
onehot = np.zeros((k, y.shape[0]))
for idx, val in enumerate(y):
onehot[val, idx] = 1.0
return onehot
def _initialize_weights(self):
"""Initialize weights with small random numbers."""
w1 = np.random.uniform(-1.0, 1.0,
size=self.n_hidden*(self.n_features + 1))
w1 = w1.reshape(self.n_hidden, self.n_features + 1)
w2 = np.random.uniform(-1.0, 1.0,
size=self.n_output*(self.n_hidden + 1))
w2 = w2.reshape(self.n_output, self.n_hidden + 1)
return w1, w2
def _sigmoid(self, z):
"""Compute logistic function (sigmoid)
Uses scipy.special.expit to avoid overflow
error for very small input values z.
"""
# return 1.0 / (1.0 + np.exp(-z))
return expit(z)
def _sigmoid_gradient(self, z):
"""Compute gradient of the logistic function"""
sg = self._sigmoid(z)
return sg * (1 - sg)
def _add_bias_unit(self, X, how='column'):
"""Add bias unit (column or row of 1s) to array at index 0"""
if how == 'column':
X_new = np.ones((X.shape[0], X.shape[1]+1))
X_new[:, 1:] = X
elif how == 'row':
X_new = np.ones((X.shape[0]+1, X.shape[1]))
X_new[1:, :] = X
else:
raise AttributeError('`how` must be `column` or `row`')
return X_new
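        # e.g. a (100, 784) input becomes (100, 785) with how='column', while a
        # (50, 100) activation matrix becomes (51, 100) with how='row'.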
def _feedforward(self, X, w1, w2):
"""Compute feedforward step
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
----------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
z3 : array, shape = [n_output_units, n_samples]
Net input of output layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
"""
a1 = self._add_bias_unit(X, how='column')
z2 = w1.dot(a1.T)
a2 = self._sigmoid(z2)
a2 = self._add_bias_unit(a2, how='row')
z3 = w2.dot(a2)
a3 = self._sigmoid(z3)
return a1, z2, a2, z3, a3
def _L2_reg(self, lambda_, w1, w2):
"""Compute L2-regularization cost"""
return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) +
np.sum(w2[:, 1:] ** 2))
def _L1_reg(self, lambda_, w1, w2):
"""Compute L1-regularization cost"""
return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() +
np.abs(w2[:, 1:]).sum())
def _get_cost(self, y_enc, output, w1, w2):
"""Compute cost function.
Parameters
----------
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
output : array, shape = [n_output_units, n_samples]
Activation of the output layer (feedforward)
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
cost : float
Regularized cost.
"""
term1 = -y_enc * (np.log(output))
term2 = (1 - y_enc) * np.log(1 - output)
cost = np.sum(term1 - term2)
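        # i.e. J = -sum(y_enc * log(a3) + (1 - y_enc) * log(1 - a3)), the
        # cross-entropy summed over output units and samples; the L1/L2
        # penalties are added below.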
L1_term = self._L1_reg(self.l1, w1, w2)
L2_term = self._L2_reg(self.l2, w1, w2)
cost = cost + L1_term + L2_term
return cost
def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
""" Compute gradient step using backpropagation.
Parameters
------------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
grad1 : array, shape = [n_hidden_units, n_features]
Gradient of the weight matrix w1.
grad2 : array, shape = [n_output_units, n_hidden_units]
Gradient of the weight matrix w2.
"""
# backpropagation
sigma3 = a3 - y_enc
z2 = self._add_bias_unit(z2, how='row')
sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
sigma2 = sigma2[1:, :]
grad1 = sigma2.dot(a1)
grad2 = sigma3.dot(a2.T)
# regularize
grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
return grad1, grad2
def predict(self, X):
"""Predict class labels
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
Returns:
----------
y_pred : array, shape = [n_samples]
Predicted class labels.
"""
if len(X.shape) != 2:
raise AttributeError('X must be a [n_samples, n_features] array.\n'
'Use X[:,None] for 1-feature classification,'
'\nor X[[i]] for 1-sample classification')
a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
y_pred = np.argmax(z3, axis=0)
return y_pred
def fit(self, X, y, print_progress=False):
""" Learn weights from training data.
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
y : array, shape = [n_samples]
Target class labels.
print_progress : bool (default: False)
Prints progress as the number of epochs
to stderr.
Returns:
----------
self
"""
self.cost_ = []
X_data, y_data = X.copy(), y.copy()
y_enc = self._encode_labels(y, self.n_output)
delta_w1_prev = np.zeros(self.w1.shape)
delta_w2_prev = np.zeros(self.w2.shape)
for i in range(self.epochs):
# adaptive learning rate
self.eta /= (1 + self.decrease_const*i)
if print_progress:
sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
sys.stderr.flush()
if self.shuffle:
idx = np.random.permutation(y_data.shape[0])
X_data, y_enc = X_data[idx], y_enc[:, idx]
mini = np.array_split(range(y_data.shape[0]), self.minibatches)
for idx in mini:
# feedforward
a1, z2, a2, z3, a3 = self._feedforward(X_data[idx],
self.w1,
self.w2)
cost = self._get_cost(y_enc=y_enc[:, idx],
output=a3,
w1=self.w1,
w2=self.w2)
self.cost_.append(cost)
# compute gradient via backpropagation
grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
a3=a3, z2=z2,
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2)
delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2
self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
return self
nn = NeuralNetMLP(n_output=10,
n_features=X_train.shape[1],
n_hidden=50,
l2=0.1,
l1=0.0,
epochs=1000,
eta=0.001,
alpha=0.001,
decrease_const=0.00001,
minibatches=50,
shuffle=True,
random_state=1)
nn.fit(X_train, y_train, print_progress=True)
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
# plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
batches = np.array_split(range(len(nn.cost_)), 1000)
cost_ary = np.array(nn.cost_)
cost_avgs = [np.mean(cost_ary[i]) for i in batches]
plt.plot(range(len(cost_avgs)), cost_avgs, color='red')
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs')
# plt.tight_layout()
# plt.savefig('./figures/cost2.png', dpi=300)
plt.show()
y_train_pred = nn.predict(X_train)
if sys.version_info < (3, 0):
acc = ((np.sum(y_train == y_train_pred, axis=0)).astype('float') /
X_train.shape[0])
else:
acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
print('Training accuracy: %.2f%%' % (acc * 100))
y_test_pred = nn.predict(X_test)
if sys.version_info < (3, 0):
acc = ((np.sum(y_test == y_test_pred, axis=0)).astype('float') /
X_test.shape[0])
else:
acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (acc * 100))
miscl_img = X_test[y_test != y_test_pred][:25]
correct_lab = y_test[y_test != y_test_pred][:25]
miscl_lab = y_test_pred[y_test != y_test_pred][:25]
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
img = miscl_img[i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i]))
ax[0].set_xticks([])
ax[0].set_yticks([])
# plt.tight_layout()
# plt.savefig('./figures/mnist_miscl.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Debugging neural networks with gradient checking')
print(50 * '-')
class MLPGradientCheck(object):
""" Feedforward neural network / Multi-layer perceptron classifier.
Parameters
------------
n_output : int
Number of output units, should be equal to the
number of unique class labels.
n_features : int
Number of features (dimensions) in the target dataset.
Should be equal to the number of columns in the X array.
n_hidden : int (default: 30)
Number of hidden units.
l1 : float (default: 0.0)
Lambda value for L1-regularization.
No regularization if l1=0.0 (default)
l2 : float (default: 0.0)
Lambda value for L2-regularization.
No regularization if l2=0.0 (default)
epochs : int (default: 500)
Number of passes over the training set.
eta : float (default: 0.001)
Learning rate.
alpha : float (default: 0.0)
Momentum constant. Factor multiplied with the
gradient of the previous epoch t-1 to improve
learning speed
w(t) := w(t) - (grad(t) + alpha*grad(t-1))
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
    shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent cycles.
minibatches : int (default: 1)
Divides training data into k minibatches for efficiency.
Normal gradient descent learning if k=1 (default).
random_state : int (default: None)
Set random state for shuffling and initializing the weights.
Attributes
-----------
cost_ : list
Sum of squared errors after each epoch.
"""
def __init__(self, n_output, n_features, n_hidden=30,
l1=0.0, l2=0.0, epochs=500, eta=0.001,
alpha=0.0, decrease_const=0.0, shuffle=True,
minibatches=1, random_state=None):
np.random.seed(random_state)
self.n_output = n_output
self.n_features = n_features
self.n_hidden = n_hidden
self.w1, self.w2 = self._initialize_weights()
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.alpha = alpha
self.decrease_const = decrease_const
self.shuffle = shuffle
self.minibatches = minibatches
def _encode_labels(self, y, k):
"""Encode labels into one-hot representation
Parameters
------------
y : array, shape = [n_samples]
Target values.
Returns
-----------
onehot : array, shape = (n_labels, n_samples)
"""
onehot = np.zeros((k, y.shape[0]))
for idx, val in enumerate(y):
onehot[val, idx] = 1.0
return onehot
def _initialize_weights(self):
"""Initialize weights with small random numbers."""
w1 = np.random.uniform(-1.0, 1.0,
size=self.n_hidden*(self.n_features + 1))
w1 = w1.reshape(self.n_hidden, self.n_features + 1)
w2 = np.random.uniform(-1.0, 1.0,
size=self.n_output*(self.n_hidden + 1))
w2 = w2.reshape(self.n_output, self.n_hidden + 1)
return w1, w2
def _sigmoid(self, z):
"""Compute logistic function (sigmoid)
Uses scipy.special.expit to avoid overflow
error for very small input values z.
"""
# return 1.0 / (1.0 + np.exp(-z))
return expit(z)
def _sigmoid_gradient(self, z):
"""Compute gradient of the logistic function"""
sg = self._sigmoid(z)
return sg * (1 - sg)
def _add_bias_unit(self, X, how='column'):
"""Add bias unit (column or row of 1s) to array at index 0"""
if how == 'column':
X_new = np.ones((X.shape[0], X.shape[1]+1))
X_new[:, 1:] = X
elif how == 'row':
X_new = np.ones((X.shape[0]+1, X.shape[1]))
X_new[1:, :] = X
else:
raise AttributeError('`how` must be `column` or `row`')
return X_new
def _feedforward(self, X, w1, w2):
"""Compute feedforward step
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
----------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
z3 : array, shape = [n_output_units, n_samples]
Net input of output layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
"""
a1 = self._add_bias_unit(X, how='column')
z2 = w1.dot(a1.T)
a2 = self._sigmoid(z2)
a2 = self._add_bias_unit(a2, how='row')
z3 = w2.dot(a2)
a3 = self._sigmoid(z3)
return a1, z2, a2, z3, a3
def _L2_reg(self, lambda_, w1, w2):
"""Compute L2-regularization cost"""
return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) +
np.sum(w2[:, 1:] ** 2))
def _L1_reg(self, lambda_, w1, w2):
"""Compute L1-regularization cost"""
return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() +
np.abs(w2[:, 1:]).sum())
def _get_cost(self, y_enc, output, w1, w2):
"""Compute cost function.
Parameters
----------
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
output : array, shape = [n_output_units, n_samples]
Activation of the output layer (feedforward)
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
cost : float
Regularized cost.
"""
term1 = -y_enc * (np.log(output))
term2 = (1 - y_enc) * np.log(1 - output)
cost = np.sum(term1 - term2)
L1_term = self._L1_reg(self.l1, w1, w2)
L2_term = self._L2_reg(self.l2, w1, w2)
cost = cost + L1_term + L2_term
return cost
def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
""" Compute gradient step using backpropagation.
Parameters
------------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
grad1 : array, shape = [n_hidden_units, n_features]
Gradient of the weight matrix w1.
grad2 : array, shape = [n_output_units, n_hidden_units]
Gradient of the weight matrix w2.
"""
# backpropagation
sigma3 = a3 - y_enc
z2 = self._add_bias_unit(z2, how='row')
sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
sigma2 = sigma2[1:, :]
grad1 = sigma2.dot(a1)
grad2 = sigma3.dot(a2.T)
# regularize
grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
return grad1, grad2
def _gradient_checking(self, X, y_enc, w1, w2, epsilon, grad1, grad2):
""" Apply gradient checking (for debugging only)
Returns
---------
relative_error : float
Relative error between the numerically
approximated gradients and the backpropagated gradients.
"""
num_grad1 = np.zeros(np.shape(w1))
epsilon_ary1 = np.zeros(np.shape(w1))
for i in range(w1.shape[0]):
for j in range(w1.shape[1]):
epsilon_ary1[i, j] = epsilon
a1, z2, a2, z3, a3 = self._feedforward(X,
w1 - epsilon_ary1, w2)
cost1 = self._get_cost(y_enc, a3, w1-epsilon_ary1, w2)
a1, z2, a2, z3, a3 = self._feedforward(X,
w1 + epsilon_ary1, w2)
cost2 = self._get_cost(y_enc, a3, w1 + epsilon_ary1, w2)
num_grad1[i, j] = (cost2 - cost1) / (2 * epsilon)
epsilon_ary1[i, j] = 0
num_grad2 = np.zeros(np.shape(w2))
epsilon_ary2 = np.zeros(np.shape(w2))
for i in range(w2.shape[0]):
for j in range(w2.shape[1]):
epsilon_ary2[i, j] = epsilon
a1, z2, a2, z3, a3 = self._feedforward(X, w1,
w2 - epsilon_ary2)
cost1 = self._get_cost(y_enc, a3, w1, w2 - epsilon_ary2)
a1, z2, a2, z3, a3 = self._feedforward(X, w1,
w2 + epsilon_ary2)
cost2 = self._get_cost(y_enc, a3, w1, w2 + epsilon_ary2)
num_grad2[i, j] = (cost2 - cost1) / (2 * epsilon)
epsilon_ary2[i, j] = 0
num_grad = np.hstack((num_grad1.flatten(), num_grad2.flatten()))
grad = np.hstack((grad1.flatten(), grad2.flatten()))
norm1 = np.linalg.norm(num_grad - grad)
norm2 = np.linalg.norm(num_grad)
norm3 = np.linalg.norm(grad)
relative_error = norm1 / (norm2 + norm3)
return relative_error
def predict(self, X):
"""Predict class labels
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
Returns:
----------
y_pred : array, shape = [n_samples]
Predicted class labels.
"""
if len(X.shape) != 2:
raise AttributeError('X must be a [n_samples, n_features] array.\n'
'Use X[:,None] for 1-feature classification,'
'\nor X[[i]] for 1-sample classification')
a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
y_pred = np.argmax(z3, axis=0)
return y_pred
def fit(self, X, y, print_progress=False):
""" Learn weights from training data.
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
y : array, shape = [n_samples]
Target class labels.
print_progress : bool (default: False)
Prints progress as the number of epochs
to stderr.
Returns:
----------
self
"""
self.cost_ = []
X_data, y_data = X.copy(), y.copy()
y_enc = self._encode_labels(y, self.n_output)
delta_w1_prev = np.zeros(self.w1.shape)
delta_w2_prev = np.zeros(self.w2.shape)
for i in range(self.epochs):
# adaptive learning rate
self.eta /= (1 + self.decrease_const*i)
if print_progress:
sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
sys.stderr.flush()
if self.shuffle:
idx = np.random.permutation(y_data.shape[0])
                X_data, y_enc = X_data[idx], y_enc[:, idx]
mini = np.array_split(range(y_data.shape[0]), self.minibatches)
for idx in mini:
# feedforward
                a1, z2, a2, z3, a3 = self._feedforward(X_data[idx],
self.w1,
self.w2)
cost = self._get_cost(y_enc=y_enc[:, idx],
output=a3,
w1=self.w1,
w2=self.w2)
self.cost_.append(cost)
# compute gradient via backpropagation
grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
a3=a3, z2=z2,
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2)
# start gradient checking
grad_diff = self._gradient_checking(X=X_data[idx],
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2,
epsilon=1e-5,
grad1=grad1,
grad2=grad2)
if grad_diff <= 1e-7:
print('Ok: %s' % grad_diff)
elif grad_diff <= 1e-4:
print('Warning: %s' % grad_diff)
else:
print('PROBLEM: %s' % grad_diff)
# update weights; [alpha * delta_w_prev] for momentum learning
delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2
self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
return self
nn_check = MLPGradientCheck(n_output=10,
n_features=X_train.shape[1],
n_hidden=10,
l2=0.0,
l1=0.0,
epochs=10,
eta=0.001,
alpha=0.0,
decrease_const=0.0,
minibatches=1,
shuffle=False,
random_state=1)
nn_check.fit(X_train[:5], y_train[:5], print_progress=False)
| 1iyiwei/pyml | code/optional-py-scripts/ch12.py | Python | mit | 33,098 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pep8
import textwrap
from senlin.hacking import checks
from senlin.tests.unit.common import base
class HackingTestCase(base.SenlinTestCase):
def test_assert_equal_none(self):
self.assertEqual(1, len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))))
self.assertEqual(1, len(list(checks.assert_equal_none(
"self.assertEqual(None, A)"))))
self.assertEqual(0, len(list(checks.assert_equal_none(
"self.assertIsNone()"))))
def test_use_jsonutils(self):
def __get_msg(fun):
msg = ("S319: jsonutils.%(fun)s must be used instead of "
"json.%(fun)s" % {'fun': fun})
return [(0, msg)]
for method in ('dump', 'dumps', 'load', 'loads'):
self.assertEqual(__get_msg(method), list(checks.use_jsonutils(
"json.%s(" % method, "./senlin/engine/cluster.py")))
self.assertEqual(0, len(list(checks.use_jsonutils(
"jsonx.%s(" % method, "./senlin/engine/cluster.py"))))
self.assertEqual(0, len(list(checks.use_jsonutils(
"json.dumb", "./senlin/engine/cluster.py"))))
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def create_cluster(mapping={}, **params)"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined = []"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined, undefined = [], {}"))))
@mock.patch("pep8._checks",
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def test_api_version_decorator(self):
code = """
@some_other_decorator
@wsgi.api_version("2.2")
def my_method():
pass
"""
lines = textwrap.dedent(code).strip().splitlines(True)
pep8.register_check(checks.check_api_version_decorator)
checker = pep8.Checker(filename=None, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
actual_error = checker.report._deferred_print[0]
self.assertEqual(2, actual_error[0])
self.assertEqual(0, actual_error[1])
self.assertEqual('S321', actual_error[2])
self.assertEqual(' The api_version decorator must be the first '
'decorator on a method.',
actual_error[3])
@mock.patch("pep8._checks",
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def test_api_version_decorator_good(self):
code = """
class SomeController():
@wsgi.api_version("2.2")
def my_method():
pass
"""
lines = textwrap.dedent(code).strip().splitlines(True)
pep8.register_check(checks.check_api_version_decorator)
checker = pep8.Checker(filename=None, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
actual_error = checker.report._deferred_print
self.assertEqual(0, len(actual_error))
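
# Illustrative sketch (not part of the original test module): the checks exercised above
# (checks.assert_equal_none, checks.use_jsonutils, ...) follow the usual hacking/pep8
# pattern of a small generator that inspects a logical line and yields (offset, message)
# pairs. The check name S999 and the function below are hypothetical, shown only to
# clarify the shape of what the tests are driving.
import re

_print_call_re = re.compile(r"\bprint\(")


def no_print_calls(logical_line):
    """S999 - print() should not be used outside of tests (hypothetical example)."""
    if _print_call_re.search(logical_line):
        yield (0, "S999: print() found; use logging instead")

# list(no_print_calls("print('debug')"))  -> [(0, "S999: ...")]
# list(no_print_calls("return value"))    -> []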
| tengqm/senlin-container | senlin/tests/unit/test_hacking.py | Python | apache-2.0 | 3,738 |
import time
import unittest
# this import must be done *BEFORE* Gtk/Glib/etc *AND* pytestshot !
from . import paperwork
import pyinsane2
import pytestshot
import gi
gi.require_version('Pango', '1.0')
gi.require_version('PangoCairo', '1.0')
gi.require_version('Poppler', '0.18')
gi.require_version('Gdk', '3.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GLib
from paperwork.frontend.util.config import load_config
class TestSettings(unittest.TestCase):
def setUp(self):
# to speed up the scanner search during tests, we do one right now
pyinsane2.get_devices()
self.pw = paperwork.PaperworkInstance()
def test_open(self):
self.pw.start()
try:
action = self.pw.main_window.actions['open_settings'][1]
GLib.idle_add(action.do)
time.sleep(3)
try:
self.pw.wait()
self.assertNotEqual(action.dialog, None)
sc = pytestshot.screenshot(action.dialog.window.get_window())
finally:
if action.dialog:
GLib.idle_add(action.dialog.window.destroy)
finally:
self.pw.stop()
pytestshot.assertScreenshot(self, "test_settings_open", sc)
def test_uncheck_ocr(self):
self.pw.start()
try:
action = self.pw.main_window.actions['open_settings'][1]
GLib.idle_add(action.do)
time.sleep(3)
try:
self.pw.wait()
self.assertNotEqual(action.dialog, None)
sc = pytestshot.screenshot(action.dialog.window.get_window())
pytestshot.assertScreenshot(self, "test_settings_open", sc)
widget = action.dialog.ocr_settings['enabled']['gui']
GLib.idle_add(widget.set_active, False)
self.pw.wait()
sc = pytestshot.screenshot(action.dialog.window.get_window())
pytestshot.assertScreenshot(
self, "test_settings_uncheck_ocr", sc
)
GLib.idle_add(
action.dialog.window.emit, 'delete-event', None
)
self.pw.wait()
finally:
if action.dialog:
GLib.idle_add(action.dialog.window.destroy)
self.pw.wait()
finally:
self.pw.stop()
config = load_config()
config.read()
self.assertFalse(config['ocr_enabled'].value)
| jflesch/paperwork-tests | tests/tests_settings.py | Python | gpl-3.0 | 2,532 |
#
# Paasmaker - Platform as a Service
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import uuid
import os
import re
import paasmaker
import tornado
import tornado.testing
import colander
class BaseSCMConfigurationSchema(colander.MappingSchema):
# No options defined.
pass
class BaseSCMParametersSchema(colander.MappingSchema):
# Must have a location parameter.
location = colander.SchemaNode(colander.String(),
title="Location of source",
description="The location to fetch the source code from - typically a URL of some kind.")
# TODO: Document that SCM plugins should emit the following into the
# context tree, and it will be stored along with the other SCM params:
# scm['revision']: the revision used.
# scm['tool_version']: the version of the tool used (eg, version of GIT)
# This dict is merged with the SCM params supplied by the user, so you
# can use this cleverly to store only a few extra details.
class BaseSCM(paasmaker.util.plugin.Plugin):
"""
SCM plugins are responsible for fetching raw application source code,
ready for it to be prepared and then packed for storage. It should end
up with a new copy that can be modified.
SCMs should cache checkouts where possible to speed up lookups. For
example, the git SCM stores a persistent checkout, and just pulls
new changes each time.
"""
# These are defaults - you should set your own.
MODES = {
paasmaker.util.plugin.MODE.SCM_EXPORT: BaseSCMParametersSchema(),
paasmaker.util.plugin.MODE.SCM_FORM: None
}
OPTIONS_SCHEMA = BaseSCMConfigurationSchema()
def _get_this_scm_path(self, postfix):
scratch_path = self.configuration.get_flat('scratch_directory')
path = os.path.join(scratch_path, 'scm', self.__class__.__name__, postfix)
if not os.path.exists(path):
os.makedirs(path, 0750)
return path
def _get_temporary_scm_dir(self):
"""
Get a temporary directory to unpack the source into.
"""
random = str(uuid.uuid4())
return self._get_this_scm_path(random)
def _get_persistent_scm_dir(self):
"""
Get a persistent directory to unpack the source into.
This is designed for SCMs that can update their code,
so can be persistent between SCM runs.
"""
# TODO: Consider how to lock this!
name = self.raw_parameters['location']
name = re.sub(r'[^.A-Za-z]', '_', name)
name = name.replace("__", "_")
name = name.replace("__", "_")
return self._get_this_scm_path(name)
def _get_persistent_output_dir(self):
"""
		Get a persistent directory to output the result of the repo
		into, which can be used by prepare commands to get ready.
"""
# TODO: Consider how to lock this!
name = self.raw_parameters['location']
name = re.sub(r'[^.A-Za-z]', '_', name)
name = name.replace("__", "_")
name = name.replace("__", "_")
return self._get_this_scm_path(name + '_output')
def create_working_copy(self, callback, error_callback):
"""
From your input parameters, create a working copy that Paasmaker can
write to and mutate. If possible, cache whatever you can and just
make a copy of it for Paasmaker. Call the callback with
the new directory, and an optional dict of output parameters.
"""
raise NotImplementedError("You must implement create_working_copy().")
def create_summary(self):
"""
Return a dict, with the keys being required or optional parameters,
and the values being a short description of what should be returned
for that value.
"""
raise NotImplementedError("You must implement create_summary().")
def abort(self):
"""
Helper function called by the code that invoked this SCM, indicating
		that it should abort its processing and clean up, if it can.
Subclasses should override ``_abort()`` instead of this function.
"""
self.aborted = True
self._abort()
def _abort(self):
# By default... do nothing.
pass
def _is_aborted(self):
		if hasattr(self, 'aborted'):
return self.aborted
else:
return False
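
# Illustrative sketch (not part of the original module): a minimal shape a concrete SCM
# plugin might take, using only the BaseSCM contract documented above. The class name,
# the local-directory behaviour and the output parameters are hypothetical; a real
# plugin would also declare its own MODES/OPTIONS_SCHEMA and handle locking/caching.
class ExampleLocalDirSCM(BaseSCM):
	"""Hypothetical SCM that copies a local directory given as the 'location' parameter."""

	def create_working_copy(self, callback, error_callback):
		import shutil
		source = self.raw_parameters['location']
		# Copy into a fresh subdirectory so Paasmaker is free to mutate it.
		destination = os.path.join(self._get_temporary_scm_dir(), 'working')
		try:
			shutil.copytree(source, destination)
		except (IOError, OSError) as ex:
			error_callback("Unable to copy %s: %s" % (source, str(ex)))
			return
		callback(destination, "Copied local directory.", {'scm': {'revision': None}})

	def create_summary(self):
		return {'location': 'Path to the local directory to copy.'}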
class BaseSCMTest(tornado.testing.AsyncTestCase):
def setUp(self):
super(BaseSCMTest, self).setUp()
self.configuration = paasmaker.common.configuration.ConfigurationStub(0, ['pacemaker'], io_loop=self.io_loop)
self.registry = self.configuration.plugins
self.path = None
self.params = {}
self.success = None
self.message = None
def tearDown(self):
self.configuration.cleanup(self.stop, self.stop)
self.wait()
super(BaseSCMTest, self).tearDown()
def success_callback(self, path, message, params={}):
self.success = True
self.message = message
self.path = path
self.params = params
self.stop()
def failure_callback(self, message):
self.success = False
self.message = message
self.path = None
		self.stop()
 | kaze/paasmaker | paasmaker/pacemaker/scm/base.py | Python | mpl-2.0 | 4,827 |
##
# .api - ABCs for database interface elements
##
"""
Application Programmer Interfaces for PostgreSQL.
``postgresql.api`` is a collection of Python APIs for the PostgreSQL DBMS. It
is designed to take full advantage of PostgreSQL's features to provide the
Python programmer with substantial convenience.
This module is used to define "PG-API". It creates a set of ABCs
that makes up the basic interfaces used to work with a PostgreSQL server.
"""
import collections
import abc
from .python.element import Element
__all__ = [
'Message',
'Statement',
'Chunks',
'Cursor',
'Connector',
'Category',
'Database',
'TypeIO',
'Connection',
'Transaction',
'Settings',
'StoredProcedure',
'Driver',
'Installation',
'Cluster',
]
class Message(Element):
"""
A message emitted by PostgreSQL.
A message being a NOTICE, WARNING, INFO, etc.
"""
_e_label = 'MESSAGE'
severities = (
'DEBUG',
'INFO',
'NOTICE',
'WARNING',
'ERROR',
'FATAL',
'PANIC',
)
sources = (
'SERVER',
'CLIENT',
)
@property
@abc.abstractmethod
def source(self) -> str:
"""
Where the message originated from. Normally, 'SERVER', but sometimes
'CLIENT'.
"""
@property
@abc.abstractmethod
def code(self) -> str:
"""
The SQL state code of the message.
"""
@property
@abc.abstractmethod
def message(self) -> str:
"""
The primary message string.
"""
@property
@abc.abstractmethod
def details(self) -> dict:
"""
The additional details given with the message. Common keys *should* be the
following:
* 'severity'
* 'context'
* 'detail'
* 'hint'
* 'file'
* 'line'
* 'function'
* 'position'
* 'internal_position'
* 'internal_query'
"""
@abc.abstractmethod
def isconsistent(self, other) -> bool:
"""
		Whether the fields of the `other` Message object are consistent with the
fields of `self`.
This *must* return the result of the comparison of code, source, message,
and details.
This method is provided as the alternative to overriding equality;
often, pointer equality is the desirable means for comparison, but
equality of the fields is also necessary.
"""
class Result(Element):
"""
A result is an object managing the results of a prepared statement.
These objects represent a binding of parameters to a given statement object.
For results that were constructed on the server and a reference passed back
to the client, statement and parameters may be None.
"""
_e_label = 'RESULT'
_e_factors = ('statement', 'parameters', 'cursor_id')
@abc.abstractmethod
def close(self) -> None:
"""
Close the Result handle.
"""
@property
@abc.abstractmethod
def cursor_id(self) -> str:
"""
The cursor's identifier.
"""
@property
@abc.abstractmethod
def sql_column_types(self) -> [str]:
"""
The type of the columns produced by the cursor.
A sequence of `str` objects stating the SQL type name::
['INTEGER', 'CHARACTER VARYING', 'INTERVAL']
"""
@property
@abc.abstractmethod
def pg_column_types(self) -> [int]:
"""
The type Oids of the columns produced by the cursor.
A sequence of `int` objects stating the SQL type name::
[27, 28]
"""
@property
@abc.abstractmethod
def column_names(self) -> [str]:
"""
The attribute names of the columns produced by the cursor.
A sequence of `str` objects stating the column name::
['column1', 'column2', 'emp_name']
"""
@property
@abc.abstractmethod
def column_types(self) -> [str]:
"""
The Python types of the columns produced by the cursor.
A sequence of type objects::
[<class 'int'>, <class 'str'>]
"""
@property
@abc.abstractmethod
def parameters(self) -> (tuple, None):
"""
The parameters bound to the cursor. `None`, if unknown and an empty tuple
`()`, if no parameters were given.
These should be the *original* parameters given to the invoked statement.
This should only be `None` when the cursor is created from an identifier,
`postgresql.api.Database.cursor_from_id`.
"""
@property
@abc.abstractmethod
def statement(self) -> ("Statement", None):
"""
The query object used to create the cursor. `None`, if unknown.
This should only be `None` when the cursor is created from an identifier,
`postgresql.api.Database.cursor_from_id`.
"""
class Chunks(
Result,
collections.Iterator,
collections.Iterable,
):
pass
class Cursor(
Result,
collections.Iterator,
collections.Iterable,
):
"""
A `Cursor` object is an interface to a sequence of tuples(rows). A result
set. Cursors publish a file-like interface for reading tuples from a cursor
declared on the database.
`Cursor` objects are created by invoking the `Statement.declare`
method or by opening a cursor using an identifier via the
`Database.cursor_from_id` method.
"""
_e_label = 'CURSOR'
_seek_whence_map = {
0 : 'ABSOLUTE',
1 : 'RELATIVE',
2 : 'FROM_END',
3 : 'FORWARD',
4 : 'BACKWARD'
}
_direction_map = {
True : 'FORWARD',
False : 'BACKWARD',
}
@abc.abstractmethod
def clone(self) -> "Cursor":
"""
Create a new cursor using the same factors as `self`.
"""
def __iter__(self):
return self
@property
@abc.abstractmethod
def direction(self) -> bool:
"""
The default `direction` argument for read().
When `True` reads are FORWARD.
When `False` reads are BACKWARD.
Cursor operation option.
"""
@abc.abstractmethod
def read(self,
quantity : "Number of rows to read" = None,
direction : "Direction to fetch in, defaults to `self.direction`" = None,
) -> ["Row"]:
"""
Read, fetch, the specified number of rows and return them in a list.
If quantity is `None`, all records will be fetched.
`direction` can be used to override the default configured direction.
This alters the cursor's position.
Read does not directly correlate to FETCH. If zero is given as the
quantity, an empty sequence *must* be returned.
"""
@abc.abstractmethod
def __next__(self) -> "Row":
"""
Get the next tuple in the cursor.
Advances the cursor position by one.
"""
@abc.abstractmethod
def seek(self, offset, whence = 'ABSOLUTE'):
"""
Set the cursor's position to the given offset with respect to the
whence parameter and the configured direction.
Whence values:
``0`` or ``"ABSOLUTE"``
Absolute.
``1`` or ``"RELATIVE"``
Relative.
``2`` or ``"FROM_END"``
Absolute from end.
``3`` or ``"FORWARD"``
Relative forward.
``4`` or ``"BACKWARD"``
Relative backward.
Direction effects whence. If direction is BACKWARD, ABSOLUTE positioning
will effectively be FROM_END, RELATIVE's position will be negated, and
FROM_END will effectively be ABSOLUTE.
"""
class Execution(metaclass = abc.ABCMeta):
"""
The abstract class of execution methods.
"""
@abc.abstractmethod
def __call__(self, *parameters : "Positional Parameters") -> ["Row"]:
"""
Execute the prepared statement with the given arguments as parameters.
Usage:
>>> p=db.prepare("SELECT column FROM ttable WHERE key = $1")
>>> p('identifier')
[...]
"""
@abc.abstractmethod
def column(self, *parameters) -> collections.Iterable:
"""
Return an iterator producing the values of first column of the
rows produced by the cursor created from the statement bound with the
given parameters.
Column iterators are never scrollable.
Supporting cursors will be WITH HOLD when outside of a transaction to
allow cross-transaction access.
`column` is designed for the situations involving large data sets.
Each iteration returns a single value.
column expressed in sibling terms::
return map(operator.itemgetter(0), self.rows(*parameters))
"""
@abc.abstractmethod
def chunks(self, *parameters) -> collections.Iterable:
"""
Return an iterator producing sequences of rows produced by the cursor
created from the statement bound with the given parameters.
Chunking iterators are *never* scrollable.
Supporting cursors will be WITH HOLD when outside of a transaction.
`chunks` is designed for moving large data sets efficiently.
Each iteration returns sequences of rows *normally* of length(seq) ==
chunksize. If chunksize is unspecified, a default, positive integer will
be filled in. The rows contained in the sequences are only required to
support the basic `collections.Sequence` interfaces; simple and quick
sequence types should be used.
"""
@abc.abstractmethod
def rows(self, *parameters) -> collections.Iterable:
"""
Return an iterator producing rows produced by the cursor
created from the statement bound with the given parameters.
Row iterators are never scrollable.
Supporting cursors will be WITH HOLD when outside of a transaction to
allow cross-transaction access.
`rows` is designed for the situations involving large data sets.
Each iteration returns a single row. Arguably, best implemented::
return itertools.chain.from_iterable(self.chunks(*parameters))
"""
@abc.abstractmethod
def column(self, *parameters) -> collections.Iterable:
"""
Return an iterator producing the values of the first column in
the cursor created from the statement bound with the given parameters.
Column iterators are never scrollable.
Supporting cursors will be WITH HOLD when outside of a transaction to
allow cross-transaction access.
`column` is designed for the situations involving large data sets.
Each iteration returns a single value. `column` is equivalent to::
return map(operator.itemgetter(0), self.rows(*parameters))
"""
@abc.abstractmethod
def declare(self, *parameters) -> Cursor:
"""
Return a scrollable cursor with hold using the statement bound with the
given parameters.
"""
@abc.abstractmethod
def first(self, *parameters) -> "'First' object that is returned by the query":
"""
Execute the prepared statement with the given arguments as parameters.
If the statement returns rows with multiple columns, return the first
row. If the statement returns rows with a single column, return the
first column in the first row. If the query does not return rows at all,
return the count or `None` if no count exists in the completion message.
Usage:
>>> db.prepare("SELECT * FROM ttable WHERE key = $1").first("somekey")
('somekey', 'somevalue')
>>> db.prepare("SELECT 'foo'").first()
'foo'
>>> db.prepare("INSERT INTO atable (col) VALUES (1)").first()
1
"""
@abc.abstractmethod
def load_rows(self,
iterable : "A iterable of tuples to execute the statement with"
):
"""
Given an iterable, `iterable`, feed the produced parameters to the
query. This is a bulk-loading interface for parameterized queries.
Effectively, it is equivalent to:
>>> q = db.prepare(sql)
>>> for i in iterable:
... q(*i)
Its purpose is to allow the implementation to take advantage of the
knowledge that a series of parameters are to be loaded so that the
operation can be optimized.
"""
@abc.abstractmethod
def load_chunks(self,
iterable : "A iterable of chunks of tuples to execute the statement with"
):
"""
Given an iterable, `iterable`, feed the produced parameters of the chunks
produced by the iterable to the query. This is a bulk-loading interface
for parameterized queries.
Effectively, it is equivalent to:
>>> ps = db.prepare(...)
>>> for c in iterable:
... for i in c:
... q(*i)
Its purpose is to allow the implementation to take advantage of the
knowledge that a series of chunks of parameters are to be loaded so
that the operation can be optimized.
"""
class Statement(
Element,
collections.Callable,
collections.Iterable,
):
"""
Instances of `Statement` are returned by the `prepare` method of
`Database` instances.
A Statement is an Iterable as well as Callable.
The Iterable interface is supported for queries that take no arguments at
all. It allows the syntax::
>>> for x in db.prepare('select * FROM table'):
... pass
"""
_e_label = 'STATEMENT'
_e_factors = ('database', 'statement_id', 'string',)
@property
@abc.abstractmethod
def statement_id(self) -> str:
"""
		The statement's identifier.
"""
@property
@abc.abstractmethod
def string(self) -> object:
"""
The SQL string of the prepared statement.
`None` if not available. This can happen in cases where a statement is
prepared on the server and a reference to the statement is sent to the
client which subsequently uses the statement via the `Database`'s
`statement` constructor.
"""
@property
@abc.abstractmethod
def sql_parameter_types(self) -> [str]:
"""
The type of the parameters required by the statement.
A sequence of `str` objects stating the SQL type name::
['INTEGER', 'VARCHAR', 'INTERVAL']
"""
@property
@abc.abstractmethod
def sql_column_types(self) -> [str]:
"""
The type of the columns produced by the statement.
A sequence of `str` objects stating the SQL type name::
['INTEGER', 'VARCHAR', 'INTERVAL']
"""
@property
@abc.abstractmethod
def pg_parameter_types(self) -> [int]:
"""
The type Oids of the parameters required by the statement.
A sequence of `int` objects stating the PostgreSQL type Oid::
[27, 28]
"""
@property
@abc.abstractmethod
def pg_column_types(self) -> [int]:
"""
The type Oids of the columns produced by the statement.
A sequence of `int` objects stating the SQL type name::
[27, 28]
"""
@property
@abc.abstractmethod
def column_names(self) -> [str]:
"""
The attribute names of the columns produced by the statement.
A sequence of `str` objects stating the column name::
['column1', 'column2', 'emp_name']
"""
@property
@abc.abstractmethod
def column_types(self) -> [type]:
"""
The Python types of the columns produced by the statement.
A sequence of type objects::
[<class 'int'>, <class 'str'>]
"""
@property
@abc.abstractmethod
def parameter_types(self) -> [type]:
"""
The Python types expected of parameters given to the statement.
A sequence of type objects::
[<class 'int'>, <class 'str'>]
"""
@abc.abstractmethod
def clone(self) -> "Statement":
"""
Create a new statement object using the same factors as `self`.
When used for refreshing plans, the new clone should replace references to
the original.
"""
@abc.abstractmethod
def close(self) -> None:
"""
Close the prepared statement releasing resources associated with it.
"""
Execution.register(Statement)
PreparedStatement = Statement
class StoredProcedure(
Element,
collections.Callable,
):
"""
A function stored on the database.
"""
_e_label = 'FUNCTION'
_e_factors = ('database',)
@abc.abstractmethod
def __call__(self, *args, **kw) -> (object, Cursor, collections.Iterable):
"""
Execute the procedure with the given arguments. If keyword arguments are
passed they must be mapped to the argument whose name matches the key.
If any positional arguments are given, they must fill in gaps created by
the stated keyword arguments. If too few or too many arguments are
given, a TypeError must be raised. If a keyword argument is passed where
the procedure does not have a corresponding argument name, then,
likewise, a TypeError must be raised.
In the case where the `StoredProcedure` references a set returning
function(SRF), the result *must* be an iterable. SRFs that return single
columns *must* return an iterable of that column; not row data. If the
SRF returns a composite(OUT parameters), it *should* return a `Cursor`.
"""
##
# Arguably, it would be wiser to isolate blocks, and savepoints, but the utility
# of the separation is not significant. It's really
# more interesting as a formality that the user may explicitly state the
# type of the transaction. However, this capability is not completely absent
# from the current interface as the configuration parameters, or lack thereof,
# help imply the expectations.
class Transaction(Element):
"""
	A `Transaction` is an element that represents a transaction in the session.
Once created, it's ready to be started, and subsequently committed or
rolled back.
Read-only transaction:
>>> with db.xact(mode = 'read only'):
... ...
Read committed isolation:
>>> with db.xact(isolation = 'READ COMMITTED'):
... ...
Savepoints are created if inside a transaction block:
>>> with db.xact():
... with db.xact():
... ...
"""
_e_label = 'XACT'
_e_factors = ('database',)
@property
@abc.abstractmethod
def mode(self) -> (None, str):
"""
The mode of the transaction block:
START TRANSACTION [ISOLATION] <mode>;
The `mode` property is a string and will be directly interpolated into the
START TRANSACTION statement.
"""
@property
@abc.abstractmethod
def isolation(self) -> (None, str):
"""
The isolation level of the transaction block:
START TRANSACTION <isolation> [MODE];
The `isolation` property is a string and will be directly interpolated into
the START TRANSACTION statement.
"""
@abc.abstractmethod
def start(self) -> None:
"""
Start the transaction.
If the database is in a transaction block, the transaction should be
configured as a savepoint. If any transaction block configuration was
applied to the transaction, raise a `postgresql.exceptions.OperationError`.
If the database is not in a transaction block, start one using the
configuration where:
`self.isolation` specifies the ``ISOLATION LEVEL``. Normally, ``READ
COMMITTED``, ``SERIALIZABLE``, or ``READ UNCOMMITTED``.
`self.mode` specifies the mode of the transaction. Normally, ``READ
ONLY`` or ``READ WRITE``.
If the transaction is already open, do nothing.
If the transaction has been committed or aborted, raise an
`postgresql.exceptions.OperationError`.
"""
begin = start
@abc.abstractmethod
def commit(self) -> None:
"""
Commit the transaction.
If the transaction is a block, issue a COMMIT statement.
If the transaction was started inside a transaction block, it should be
identified as a savepoint, and the savepoint should be released.
If the transaction has already been committed, do nothing.
"""
@abc.abstractmethod
def rollback(self) -> None:
"""
Abort the transaction.
If the transaction is a savepoint, ROLLBACK TO the savepoint identifier.
If the transaction is a transaction block, issue an ABORT.
If the transaction has already been aborted, do nothing.
"""
abort = rollback
@abc.abstractmethod
def __enter__(self):
"""
Run the `start` method and return self.
"""
@abc.abstractmethod
def __exit__(self, typ, obj, tb):
"""
If an exception is indicated by the parameters, run the transaction's
`rollback` method iff the database is still available(not closed), and
return a `False` value.
If an exception is not indicated, but the database's transaction state is
in error, run the transaction's `rollback` method and raise a
`postgresql.exceptions.InFailedTransactionError`. If the database is
unavailable, the `rollback` method should cause a
`postgresql.exceptions.ConnectionDoesNotExistError` exception to occur.
Otherwise, run the transaction's `commit` method.
		When the `commit` is ultimately unsuccessful or not run at all, the purpose
		of __exit__ is to resolve the error state of the database iff the
		database is available (not closed) so that more commands can be run after the
block's exit.
"""
class Settings(
Element,
collections.MutableMapping
):
"""
A mapping interface to the session's settings. This provides a direct
interface to ``SHOW`` or ``SET`` commands. Identifiers and values need
not be quoted specially as the implementation must do that work for the
user.
"""
_e_label = 'SETTINGS'
@abc.abstractmethod
def __getitem__(self, key):
"""
Return the setting corresponding to the given key. The result should be
consistent with what the ``SHOW`` command returns. If the key does not
exist, raise a KeyError.
"""
@abc.abstractmethod
def __setitem__(self, key, value):
"""
Set the setting with the given key to the given value. The action should
be consistent with the effect of the ``SET`` command.
"""
@abc.abstractmethod
def __call__(self, **kw):
"""
Create a context manager applying the given settings on __enter__ and
restoring the old values on __exit__.
>>> with db.settings(search_path = 'local,public'):
... ...
"""
@abc.abstractmethod
def get(self, key, default = None):
"""
Get the setting with the corresponding key. If the setting does not
exist, return the `default`.
"""
@abc.abstractmethod
def getset(self, keys):
"""
Return a dictionary containing the key-value pairs of the requested
settings. If *any* of the keys do not exist, a `KeyError` must be raised
with the set of keys that did not exist.
"""
@abc.abstractmethod
def update(self, mapping):
"""
For each key-value pair, incur the effect of the `__setitem__` method.
"""
@abc.abstractmethod
def keys(self):
"""
Return an iterator to all of the settings' keys.
"""
@abc.abstractmethod
def values(self):
"""
Return an iterator to all of the settings' values.
"""
@abc.abstractmethod
def items(self):
"""
Return an iterator to all of the setting value pairs.
"""
class Database(Element):
"""
The interface to an individual database. `Connection` objects inherit from
this
"""
_e_label = 'DATABASE'
@property
@abc.abstractmethod
def backend_id(self) -> (int, None):
"""
The backend's process identifier.
"""
@property
@abc.abstractmethod
def version_info(self) -> tuple:
"""
		A version tuple of the database software, similar to Python's `sys.version_info`.
>>> db.version_info
(8, 1, 3, '', 0)
"""
@property
@abc.abstractmethod
def client_address(self) -> (str, None):
"""
The client address that the server sees. This is obtainable by querying
the ``pg_catalog.pg_stat_activity`` relation.
`None` if unavailable.
"""
@property
@abc.abstractmethod
def client_port(self) -> (int, None):
"""
The client port that the server sees. This is obtainable by querying
the ``pg_catalog.pg_stat_activity`` relation.
`None` if unavailable.
"""
@property
@abc.abstractmethod
def xact(self,
isolation : "ISOLATION LEVEL to use with the transaction" = None,
mode : "Mode of the transaction, READ ONLY or READ WRITE" = None,
) -> Transaction:
"""
Create a `Transaction` object using the given keyword arguments as its
configuration.
"""
@property
@abc.abstractmethod
def settings(self) -> Settings:
"""
A `Settings` instance bound to the `Database`.
"""
@abc.abstractmethod
def do(language, source) -> None:
"""
Execute a DO statement using the given language and source.
Always returns `None`.
Likely to be a function of Connection.execute.
"""
@abc.abstractmethod
def execute(sql) -> None:
"""
Execute an arbitrary block of SQL. Always returns `None` and raise
an exception on error.
"""
@abc.abstractmethod
def prepare(self, sql : str) -> Statement:
"""
Create a new `Statement` instance bound to the connection
using the given SQL.
>>> s = db.prepare("SELECT 1")
>>> c = s()
>>> c.next()
(1,)
"""
@abc.abstractmethod
def statement_from_id(self,
statement_id : "The statement's identification string.",
) -> Statement:
"""
Create a `Statement` object that was already prepared on the
server. The distinction between this and a regular query is that it
must be explicitly closed if it is no longer desired, and it is
instantiated using the statement identifier as opposed to the SQL
statement itself.
"""
@abc.abstractmethod
def cursor_from_id(self,
cursor_id : "The cursor's identification string."
) -> Cursor:
"""
Create a `Cursor` object from the given `cursor_id` that was already
declared on the server.
`Cursor` objects created this way must *not* be closed when the object
is garbage collected. Rather, the user must explicitly close it for
the server resources to be released. This is in contrast to `Cursor`
objects that are created by invoking a `Statement` or a SRF
`StoredProcedure`.
"""
@abc.abstractmethod
def proc(self,
procedure_id : \
"The procedure identifier; a valid ``regprocedure`` or Oid."
) -> StoredProcedure:
"""
Create a `StoredProcedure` instance using the given identifier.
The `proc_id` given can be either an ``Oid``, or a ``regprocedure``
that identifies the stored procedure to create the interface for.
>>> p = db.proc('version()')
>>> p()
'PostgreSQL 8.3.0'
>>> qstr = "select oid from pg_proc where proname = 'generate_series'"
>>> db.prepare(qstr).first()
1069
>>> generate_series = db.proc(1069)
>>> list(generate_series(1,5))
[1, 2, 3, 4, 5]
"""
@abc.abstractmethod
def reset(self) -> None:
"""
		Reset the connection into its original state.
Issues a ``RESET ALL`` to the database. If the database supports
removing temporary tables created in the session, then remove them.
Reapply initial configuration settings such as path.
The purpose behind this method is to provide a soft-reconnect method
that re-initializes the connection into its original state. One
obvious use of this would be in a connection pool where the connection
is being recycled.
"""
@abc.abstractmethod
def notify(self, *channels, **channel_and_payload) -> int:
"""
NOTIFY the channels with the given payload.
Equivalent to issuing "NOTIFY <channel>" or "NOTIFY <channel>, <payload>"
for each item in `channels` and `channel_and_payload`. All NOTIFYs issued
*must* occur in the same transaction.
The items in `channels` can either be a string or a tuple. If a string,
no payload is given, but if an item is a `builtins.tuple`, the second item
will be given as the payload. `channels` offers a means to issue NOTIFYs
in guaranteed order.
The items in `channel_and_payload` are all payloaded NOTIFYs where the
keys are the channels and the values are the payloads. Order is undefined.
"""
@abc.abstractmethod
def listen(self, *channels) -> None:
"""
Start listening to the given channels.
Equivalent to issuing "LISTEN <x>" for x in channels.
"""
@abc.abstractmethod
def unlisten(self, *channels) -> None:
"""
Stop listening to the given channels.
Equivalent to issuing "UNLISTEN <x>" for x in channels.
"""
@abc.abstractmethod
def listening_channels(self) -> ["channel name", ...]:
"""
Return an *iterator* to all the channels currently being listened to.
"""
@abc.abstractmethod
def iternotifies(self, timeout = None) -> collections.Iterator:
"""
Return an iterator to the notifications received by the connection. The
iterator *must* produce triples in the form ``(channel, payload, pid)``.
If timeout is not `None`, `None` *must* be emitted at the specified
timeout interval. If the timeout is zero, all the pending notifications
*must* be yielded by the iterator and then `StopIteration` *must* be
raised.
If the connection is closed for any reason, the iterator *must* silently
stop by raising `StopIteration`. Further error control is then the
responsibility of the user.
"""
class TypeIO(Element):
_e_label = 'TYPIO'
def _e_metas(self):
return ()
class SocketFactory(object):
@property
@abc.abstractmethod
def fatal_exception(self) -> Exception:
"""
The exception that is raised by sockets that indicate a fatal error.
The exception can be a base exception as the `fatal_error_message` will
indicate if that particular exception is actually fatal.
"""
@property
@abc.abstractmethod
def timeout_exception(self) -> Exception:
"""
The exception raised by the socket when an operation could not be
completed due to a configured time constraint.
"""
@property
@abc.abstractmethod
def tryagain_exception(self) -> Exception:
"""
The exception raised by the socket when an operation was interrupted, but
should be tried again.
"""
@property
@abc.abstractmethod
def tryagain(self, err : Exception) -> bool:
"""
Whether or not `err` suggests the operation should be tried again.
"""
@abc.abstractmethod
def fatal_exception_message(self, err : Exception) -> (str, None):
"""
A function returning a string describing the failure, this string will be
given to the `postgresql.exceptions.ConnectionFailure` instance that will
subsequently be raised by the `Connection` object.
Returns `None` when `err` is not actually fatal.
"""
@abc.abstractmethod
def socket_secure(self, socket : "socket object") -> "secured socket":
"""
Return a reference to the secured socket using the given parameters.
If securing the socket for the connector is impossible, the user should
never be able to instantiate the connector with parameters requesting
security.
"""
@abc.abstractmethod
def socket_factory_sequence(self) -> [collections.Callable]:
"""
Return a sequence of `SocketCreator`s that `Connection` objects will use to
create the socket object.
"""
class Category(Element):
"""
A category is an object that initializes the subject connection for a
specific purpose.
Arguably, a runtime class for use with connections.
"""
_e_label = 'CATEGORY'
_e_factors = ()
@abc.abstractmethod
def __call__(self, connection):
"""
Initialize the given connection in order to conform to the category.
"""
class Connector(Element):
"""
A connector is an object providing the necessary information to establish a
connection. This includes credentials, database settings, and many times
addressing information.
"""
_e_label = 'CONNECTOR'
_e_factors = ('driver', 'category')
def __call__(self, *args, **kw):
"""
Create and connect. Arguments will be given to the `Connection` instance's
`connect` method.
"""
return self.driver.connection(self, *args, **kw)
def __init__(self,
user : "required keyword specifying the user name(str)" = None,
password : str = None,
database : str = None,
settings : (dict, [(str,str)]) = None,
category : Category = None,
):
if user is None:
# sure, it's a "required" keyword, makes for better documentation
raise TypeError("'user' is a required keyword")
self.user = user
self.password = password
self.database = database
self.settings = settings
self.category = category
if category is not None and not isinstance(category, Category):
raise TypeError("'category' must a be `None` or `postgresql.api.Category`")
class Connection(Database):
"""
The interface to a connection to a PostgreSQL database. This is a
`Database` interface with the additional connection management tools that
are particular to using a remote database.
"""
_e_label = 'CONNECTION'
_e_factors = ('connector',)
@property
@abc.abstractmethod
def connector(self) -> Connector:
"""
The :py:class:`Connector` instance facilitating the `Connection` object's
communication and initialization.
"""
@property
@abc.abstractmethod
def query(self) -> Execution:
"""
The :py:class:`Execution` instance providing a one-shot query interface::
connection.query.<method>(sql, *parameters) == connection.prepare(sql).<method>(*parameters)
"""
@property
@abc.abstractmethod
def closed(self) -> bool:
"""
`True` if the `Connection` is closed, `False` if the `Connection` is
open.
>>> db.closed
True
"""
@abc.abstractmethod
def clone(self) -> "Connection":
"""
Create another connection using the same factors as `self`. The returned
object should be open and ready for use.
"""
@abc.abstractmethod
def connect(self) -> None:
"""
Establish the connection to the server and initialize the category.
Does nothing if the connection is already established.
"""
cat = self.connector.category
if cat is not None:
cat(self)
@abc.abstractmethod
def close(self) -> None:
"""
Close the connection.
Does nothing if the connection is already closed.
"""
@abc.abstractmethod
def __enter__(self):
"""
Establish the connection and return self.
"""
@abc.abstractmethod
def __exit__(self, typ, obj, tb):
"""
Closes the connection and returns `False` when an exception is passed in,
`True` when `None`.
"""
class Driver(Element):
"""
The `Driver` element provides the `Connector` and other information
pertaining to the implementation of the driver. Information about what the
driver supports is available in instances.
"""
_e_label = "DRIVER"
_e_factors = ()
@abc.abstractmethod
def connect(**kw):
"""
Create a connection using the given parameters for the Connector.
"""
class Installation(Element):
"""
Interface to a PostgreSQL installation. Instances would provide various
	information about an installation of PostgreSQL accessible by Python.
"""
_e_label = "INSTALLATION"
_e_factors = ()
@property
@abc.abstractmethod
def version(self):
"""
A version string consistent with what `SELECT version()` would output.
"""
@property
@abc.abstractmethod
def version_info(self):
"""
A tuple specifying the version in a form similar to Python's
sys.version_info. (8, 3, 3, 'final', 0)
See `postgresql.versionstring`.
"""
@property
@abc.abstractmethod
def type(self):
"""
The "type" of PostgreSQL. Normally, the first component of the string
returned by pg_config.
"""
@property
@abc.abstractmethod
def ssl(self) -> bool:
"""
Whether the installation supports SSL.
"""
class Cluster(Element):
"""
Interface to a PostgreSQL cluster--a data directory. An implementation of
this provides a means to control a server.
"""
_e_label = 'CLUSTER'
_e_factors = ('installation', 'data_directory')
@property
@abc.abstractmethod
def installation(self) -> Installation:
"""
The installation used by the cluster.
"""
@property
@abc.abstractmethod
def data_directory(self) -> str:
"""
The path to the data directory of the cluster.
"""
@abc.abstractmethod
def init(self,
initdb : "path to the initdb to use" = None,
user : "name of the cluster's superuser" = None,
password : "superuser's password" = None,
encoding : "the encoding to use for the cluster" = None,
locale : "the locale to use for the cluster" = None,
collate : "the collation to use for the cluster" = None,
ctype : "the ctype to use for the cluster" = None,
monetary : "the monetary to use for the cluster" = None,
numeric : "the numeric to use for the cluster" = None,
time : "the time to use for the cluster" = None,
text_search_config : "default text search configuration" = None,
xlogdir : "location for the transaction log directory" = None,
):
"""
Create the cluster at the `data_directory` associated with the Cluster
instance.
"""
@abc.abstractmethod
def drop(self):
"""
Kill the server and completely remove the data directory.
"""
@abc.abstractmethod
def start(self):
"""
Start the cluster.
"""
@abc.abstractmethod
def stop(self):
"""
Signal the server to shutdown.
"""
@abc.abstractmethod
def kill(self):
"""
Kill the server.
"""
@abc.abstractmethod
def restart(self):
"""
Restart the cluster.
"""
@abc.abstractmethod
def wait_until_started(self,
timeout : "maximum time to wait" = 10
):
"""
		After the start() method is run, the database may not be ready for use.
This method provides a mechanism to block until the cluster is ready for
use.
If the `timeout` is reached, the method *must* throw a
`postgresql.exceptions.ClusterTimeoutError`.
"""
@abc.abstractmethod
def wait_until_stopped(self,
timeout : "maximum time to wait" = 10
):
"""
		After the stop() method is run, the database may still be running.
This method provides a mechanism to block until the cluster is completely
shutdown.
If the `timeout` is reached, the method *must* throw a
`postgresql.exceptions.ClusterTimeoutError`.
"""
@property
@abc.abstractmethod
def settings(self):
"""
A `Settings` interface to the ``postgresql.conf`` file associated with the
cluster.
"""
@abc.abstractmethod
def __enter__(self):
"""
Start the cluster if it's not already running, and wait for it to be
readied.
"""
@abc.abstractmethod
def __exit__(self, exc, val, tb):
"""
Stop the cluster and wait for it to shutdown *iff* it was started by the
corresponding enter.
"""
__docformat__ = 'reStructuredText'
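
##
# Illustrative sketch (not part of the original module): how the interfaces declared
# above are typically exercised once a concrete driver supplies an open Connection.
# The table and column names are hypothetical; only behaviour documented in this module
# (prepare, first, rows, execute, xact, settings) is used.
def _example_usage(db: Connection):
	"""Demonstrate the Statement/Execution/Transaction interfaces on an open Connection."""
	get_emp = db.prepare("SELECT emp_name FROM employee WHERE emp_id = $1")
	name = get_emp.first(101)                     # single column, single row -> bare value
	with db.xact(isolation = 'SERIALIZABLE'):     # transaction block, or savepoint if nested
		db.execute("UPDATE employee SET emp_salary = emp_salary * 1.05")
	with db.settings(search_path = 'public'):     # settings restored on exit
		names = [row[0] for row in get_emp.rows(101)]
	return name, names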
if __name__ == '__main__':
help(__package__ + '.api')
##
# vim: ts=3:sw=3:noet:
| zappyk-github/zappyk-python | lib/lib_external/postgresql/api.py | Python | gpl-2.0 | 36,526 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-checkmempool"]]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
# get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises_rpc_error(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
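
# Worked example of the maturity rule exercised above (illustrative, not part of the
# original test). With a coinbase maturity of 100 blocks and the test chain at height 200:
#   coinbase mined at height 101 -> spendable in block 201 -> accepted into the mempool at height 200
#   coinbase mined at height 102 -> spendable in block 202 -> rejected as premature at height 200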
| cryptoprojects/ultimateonlinecash | test/functional/mempool_spendcoinbase.py | Python | mit | 2,228 |
from xbmcswift2 import Plugin, xbmcgui
from resources.lib.api import SiteApi
plugin = Plugin()
api = SiteApi()
@plugin.cached_route('/')
def index():
'''
Get movie categories
'''
plugin.log.debug('Get category menu')
c = api.get_menu_category()
items = [{
'label': item['label'],
'path': plugin.url_for(
'browse_category', menuid=item.get('pk', '0'),
page=1, url=item['url'])
} for item in c]
return items
@plugin.cached_route('/<menuid>/page/<page>')
def browse_category(menuid, page='1'):
'''
Get list of movies from category
'''
plugin.log.debug('Get movies menu')
url = plugin.request.args['url'][0]
movies = api.get_menu_movies(url)
items = [{
'label': item['label'],
'thumbnail': item['thumb'],
'icon': item['thumb'],
'info': {
'plot': item['info']
},
'path': plugin.url_for(
'browse_movie', menuid=menuid, page=page,
movieid=item.get('pk', '0'), url=item['url'])
} for item in movies]
# build next link
next_link = api.get_next_link(url)
if next_link:
items.append({
'label': next_link['label'],
'path': plugin.url_for(
'browse_category', menuid=item.get('pk', '0'),
page=next_link['pk'], url=next_link['url'])
})
return items
@plugin.route('/<menuid>/page/<page>/movie/<movieid>/')
def browse_movie(menuid, page, movieid):
'''
Get links for movie
'''
plugin.log.debug('Get movie links')
page_url = plugin.request.args['url'][0]
links = api.get_movie_links(page_url)
items = [{
'label': item['label'],
'is_playable': item['is_playable'],
'path': plugin.url_for(
'resolve_movie', menuid=menuid, page=page,
movieid=movieid, linkid=item.get('pk', '0'),
url=item['url'])
} for item in links]
return items
@plugin.route('/<menuid>/page/<page>/movie/<movieid>/<linkid>')
def resolve_movie(menuid, page, movieid, linkid):
'''
Play movie
'''
page_url = plugin.request.args['url'][0]
url = api.resolve_redirect(page_url)
print 'resolve video: {url}'.format(url=url)
plugin.log.debug('resolve video: {url}'.format(url=url))
if url:
media = __resolve_item(url, movieid)
print 'resolved to: {url}'.format(url=media)
if media:
plugin.set_resolved_url(media)
else:
msg = ['cannot play video stream']
plugin.log.error(msg[0])
dialog = xbmcgui.Dialog()
dialog.ok(api.LONG_NAME, *msg)
else:
msg = ['video url not found']
plugin.log.error(msg[0])
dialog = xbmcgui.Dialog()
dialog.ok(api.LONG_NAME, *msg)
def __resolve_item(url, title):
import urlresolver
media = urlresolver.HostedMediaFile(
url=url, title=title)
return media.resolve()
###############################################
if __name__ == '__main__':
try:
plugin.run()
except Exception, e:
print e
plugin.log.error(e)
plugin.notify(msg=e)
| dknlght/dkodi | src/download/plugin.video.india4movie/addon.py | Python | gpl-2.0 | 3,212 |
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import yaml
import logging
from vlan import VLAN
from port import Port
class DP:
"""Object to hold the configuration for a faucet controlled datapath."""
dp_id = None
vlans = None
ports = None
running = False
def __init__(self, dp_id, logname):
self.dp_id = dp_id
self.vlans = {}
self.ports = {}
self.logger = logging.getLogger(logname)
self.set_defaults()
@classmethod
def parser(cls, config_file, logname=__name__):
logger = logging.getLogger(logname)
try:
with open(config_file, 'r') as stream:
conf = yaml.load(stream)
except yaml.YAMLError as ex:
mark = ex.problem_mark
errormsg = "Error in file: {0} at ({1}, {2})".format(
config_file,
mark.line + 1,
mark.column + 1)
logger.error(errormsg)
return None
if 'dp_id' not in conf:
errormsg = "dp_id not configured in file: {0}".format(config_file)
logger.error(errormsg)
return None
dp = DP(conf['dp_id'], logname)
interfaces = conf.pop('interfaces', {})
vlans = conf.pop('vlans', {})
dp.__dict__.update(conf)
dp.set_defaults()
for k, v in vlans.iteritems():
dp.add_vlan(k, v)
for k, v in interfaces.iteritems():
dp.add_port(k, v)
return dp
def sanity_check(self):
assert 'dp_id' in self.__dict__
assert isinstance(self.dp_id, int)
assert self.hardware in ('Open vSwitch', 'Allied-Telesis')
for vid, vlan in self.vlans.iteritems():
assert isinstance(vid, int)
assert isinstance(vlan, VLAN)
assert all(isinstance(p, Port) for p in vlan.get_ports())
for portnum, port in self.ports.iteritems():
assert isinstance(portnum, int)
assert isinstance(port, Port)
assert isinstance(self.monitor_ports, bool)
assert isinstance(self.monitor_ports_file, basestring)
assert isinstance(self.monitor_ports_interval, int)
assert isinstance(self.monitor_flow_table, bool)
assert isinstance(self.monitor_flow_table_file, basestring)
assert isinstance(self.monitor_flow_table_interval, int)
def set_defaults(self):
# Offset for tables used by faucet
self.__dict__.setdefault('table_offset', 0)
# The table for internally associating vlans
self.__dict__.setdefault('vlan_table', self.table_offset)
# The table for checking eth src addresses are known
self.__dict__.setdefault('eth_src_table', self.table_offset + 1)
# The table for matching eth dst and applying unicast actions
self.__dict__.setdefault('eth_dst_table', self.table_offset + 2)
# The table for applying broadcast actions
self.__dict__.setdefault('flood_table', self.table_offset + 3)
# How much to offset default priority by
self.__dict__.setdefault('priority_offset', 0)
# Some priority values
self.__dict__.setdefault('lowest_priority', self.priority_offset)
self.__dict__.setdefault('low_priority', self.priority_offset + 9000)
self.__dict__.setdefault('high_priority', self.low_priority + 1)
self.__dict__.setdefault('highest_priority', self.high_priority + 98)
# Identification cookie value to allow for multiple controllers to
# control the same datapath
self.__dict__.setdefault('cookie', 1524372928)
# inactive MAC timeout
self.__dict__.setdefault('timeout', 300)
# enable port stats monitoring?
self.__dict__.setdefault('monitor_ports', False)
# File for port stats logging
self.__dict__.setdefault('monitor_ports_file', 'logfile.log')
# Stats reporting interval (in seconds)
self.__dict__.setdefault('monitor_ports_interval', 30)
# Enable flow table monitoring?
self.__dict__.setdefault('monitor_flow_table', False)
# File for flow table logging
self.__dict__.setdefault('monitor_flow_table_file', 'logfile.log')
# Stats reporting interval
self.__dict__.setdefault('monitor_flow_table_interval', 30)
# Name for this dp, used for stats reporting
self.__dict__.setdefault('name', str(self.dp_id))
# description, strictly informational
self.__dict__.setdefault('description', self.name)
        # The hardware maker (for choosing an openflow driver)
        self.__dict__.setdefault('hardware', 'Open vSwitch')
def add_port(self, port_num, port_conf=None):
# add port specific vlans or fall back to defaults
port_conf = copy.copy(port_conf) if port_conf else {}
port = self.ports.setdefault(port_num, Port(port_num, port_conf))
# add native vlan
port_conf.setdefault('native_vlan', None)
if port_conf['native_vlan'] is not None:
vid = port_conf['native_vlan']
if vid not in self.vlans:
self.vlans[vid] = VLAN(vid)
self.vlans[vid].untagged.append(self.ports[port_num])
port_conf.setdefault('tagged_vlans', [])
# add vlans & acls configured on a port
for vid in port_conf['tagged_vlans']:
if vid not in self.vlans:
self.vlans[vid] = VLAN(vid)
self.vlans[vid].tagged.append(port)
def add_vlan(self, vid, vlan_conf=None):
vlan_conf = copy.copy(vlan_conf) if vlan_conf else {}
self.vlans.setdefault(vid, VLAN(vid, vlan_conf))
def get_native_vlan(self, port_num):
if port_num not in self.ports:
return None
port = self.ports[port_num]
for vlan in self.vlans.values():
if port in vlan.untagged:
return vlan
return None
def __str__(self):
return self.name
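
# Illustrative sketch (not part of the original module): the shape of YAML that
# DP.parser() above expects. The keys mirror what parser(), add_port() and add_vlan()
# read; the specific dp_id, port numbers, VLAN ids and the vlan 'name' option are
# hypothetical.
#
#   dp_id: 1
#   hardware: "Open vSwitch"
#   interfaces:
#     1:
#       native_vlan: 100
#     2:
#       tagged_vlans: [100, 200]
#   vlans:
#     100:
#       name: "office"
#     200:
#       name: "guest"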
| buraglio-esnet/faucet | dp.py | Python | apache-2.0 | 6,669 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-03-29 22:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organizer', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='bio',
field=models.TextField(blank=True, max_length=500),
),
migrations.AddField(
model_name='profile',
name='birth_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='location',
field=models.CharField(blank=True, max_length=30),
),
]
| sSwiergosz/FinancialOrganiser | organizer_project/organizer/migrations/0002_auto_20170330_0041.py | Python | apache-2.0 | 781 |
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EC2api API Metadata Server
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from ec2api import config
from ec2api import service
CONF = cfg.CONF
def main():
config.parse_args(sys.argv)
logging.setup(CONF, "ec2api")
server = service.WSGIService('metadata')
service.serve(server, workers=server.workers)
service.wait()
if __name__ == '__main__':
main()
| stackforge/ec2-api | ec2api/cmd/api_metadata.py | Python | apache-2.0 | 1,007 |
from setuptools import setup, find_packages
setup(
name='keymaker',
version='0.1',
description='A management tool for SSL certificates and keys',
url='https://github.com/fooker/keymaker/',
author='Dustin Frisch',
author_email='fooker@lab.sh',
license='GPLv3',
packages=find_packages(),
install_requires=[
'pyOpenSSL >= 0.14',
'requests >= 2.0',
'beautifulsoup4 >= 4.0'
],
entry_points={
'console_scripts': [
'keymaker = keymaker.__main__:main'
],
'keymaker.authorities': [
'cacert = keymaker.authorities.cacert:CACert'
]
}
)
| fooker/keymaker | setup.py | Python | gpl-3.0 | 662 |
import os
import gtk
from EPLaunchLite.EPLaunchLiteWindow import Window
from EPLaunchLite.Settings import load_settings, save_settings
# once done doing any preliminary processing, actually run the application
this_settings_file_name = os.path.join(os.path.expanduser("~"), ".eplaunchlite.json")
# we will keep the form in a loop to handle requested restarts (language change, etc.)
running = True
while running:
this_settings = load_settings(this_settings_file_name)
main_window = Window(this_settings)
gtk.main()
save_settings(main_window.settings, this_settings_file_name)
running = main_window.doing_restart
main_window.destroy()
| Myoldmopar/EPLaunchLight | EP-Launch-Lite.py | Python | bsd-3-clause | 662 |
import itertools
class BST:
def __init__(self):
self.root = None
self.set_mode(BST.NODUP)
def push(self, value):
node = BST.__Node(value)
if self.root is None:
self.root = node
else:
self.__insert(self,self.root,node)
def set_mode(self,mode):
if isinstance(mode,BST.__Mode):
self.__insert = mode.run
@staticmethod
def binary_insert_wdup(tree, root, node):
if root.data > node.data:
if root.left is None:
root.left = node
else:
tree.binary_insert_wdup(tree, root.left, node)
else:
if root.right is None:
root.right = node
else:
tree.binary_insert_wdup(tree, root.right, node)
@staticmethod
def binary_insert_ndup(tree, root, node):
while True:
if root.data > node.data:
if root.left is None:
root.left = node
break
else:
root = root.left
elif root.data < node.data:
if root.right is None:
root.right = node
break
else:
root = root.right
else:
break
def print(self):
print(self.toList())
def toList(self):
out = list()
temp = list()
c = self.root
if c is None:
return out
while True:
if c.left is not None:
temp.append(c)
c = c.left
continue
elif c.right is not None:
out.append(c.data)
c = c.right
continue
            elif len(temp) == 0:
out.append(c.data)
break
else:
out.append(c.data)
c = temp.pop()
out.append(c.data)
                while c.right is None and len(temp) != 0:
c = temp.pop()
out.append(c.data)
if c.right is None:
break
c = c.right
continue
return out
@staticmethod
def search(tree, root, key):
        # iterative lookup: walk down from the root and return the matching node,
        # or None when the key is not present
        while root is not None:
            if root.data > key:
                root = root.left
            elif root.data < key:
                root = root.right
            else:
                return root
        return None
class __Node:
def __init__(self, val):
self.left = None
self.right = None
self.data = val
class __Mode:
def __init__(self, ru):
self.run = ru
WDUP = __Mode(binary_insert_wdup.__get__(object))
NODUP = __Mode(binary_insert_ndup.__get__(object))
class Algorithm:
@staticmethod
def binary_search(array, target):
lower = 0
upper = len(array)
while lower < upper: # use < instead of <=
x = lower + (upper - lower) // 2
val = array[x]
if target == val:
return x
elif target > val:
if lower == x: # this two are the actual lines
break # you're looking for
lower = x
elif target < val:
upper = x
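    # Example (hedged sketch): Algorithm.binary_search([1, 3, 5, 7], 5) returns 2,
    # while a target that is absent (e.g. 4) falls out of the loop and yields None.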
if __name__ == "__main__":
for n in itertools.permutations(range(0,11)):
tree = BST()
for j in n:
tree.push(j)
print(j) | Ercadio/generativeEssays | Containers.py | Python | unlicense | 3,788 |
# import the sampler classes used below
from active_site_kop import *
from confluence_cut_from_develop_and_release import *
from confluence_merge_queue import *
from jenkins_built_count import *
from jenkins_building_count import *
from jenkins_failed_count import *
from jenkins_failed_other_priority_list import *
from jira_live_incidents import *
from jira_iteration import *
# Samplers and refresh rates are defined here
def run(app, xyzzy):
samplers = [
ActiveSite(xyzzy, 20),
ConfluenceCutFromDevelopAndRelease(xyzzy, 60),
ConfluenceMergeQueue(xyzzy, 6),
JenkinsBuiltCount(xyzzy, 30),
JenkinsBuildingCount(xyzzy, 30),
JenkinsFailedCount(xyzzy, 30),
JenkinsFailedPriorityList(xyzzy, 30),
JiraLiveIncidents(xyzzy, 30),
JiraIteration(xyzzy, 30),
]
try:
app.run(host='0.0.0.0',
debug=True,
port=5000,
threaded=True,
use_reloader=False,
use_debugger=True
)
finally:
print "Disconnecting clients"
xyzzy.stopped = True
print "Stopping %d timers" % len(samplers)
for (i, sampler) in enumerate(samplers):
sampler.stop()
print "Done"
| edhiley/pydashie | pydashie/example_app.py | Python | mit | 1,239 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" The flask application """
import argparse
import sys
import os
parser = argparse.ArgumentParser(
description='Run the anitya app')
parser.add_argument(
'--config', '-c', dest='config',
help='Configuration file to use for anitya.')
parser.add_argument(
'--debug', dest='debug', action='store_true',
default=False,
help='Expand the level of data returned.')
parser.add_argument(
'--profile', dest='profile', action='store_true',
default=False,
help='Profile the anitya application.')
parser.add_argument(
'--port', '-p', default=5000,
help='Port for the flask application.')
parser.add_argument(
'--host', default='127.0.0.1',
help='IP address for the flask application to bind to.'
)
args = parser.parse_args()
# the configuration file must be exported before anitya.app is imported, and APP
# can only be used after that import
if args.config:
    config = args.config
    if not config.startswith('/'):
        here = os.path.join(os.path.dirname(os.path.abspath(__file__)))
        config = os.path.join(here, config)
    os.environ['ANITYA_WEB_CONFIG'] = config
from anitya.app import APP
if args.profile:
    from werkzeug.contrib.profiler import ProfilerMiddleware
    APP.config['PROFILE'] = True
    APP.wsgi_app = ProfilerMiddleware(APP.wsgi_app, restrictions=[30])
APP.debug = True
APP.run(port=int(args.port), host=args.host)
| pombredanne/anitya | runserver.py | Python | gpl-2.0 | 1,330 |
import os
import sys
import shutil
import yaml
from time import ctime
import cv2
print ctime()
datasetPath = '/home/huligang/data/myVoc/'
dataset_new = 'dataset/'
if os.path.exists(dataset_new):
shutil.rmtree(dataset_new)
os.system('mkdir ' + dataset_new)
Annotations = datasetPath + 'Annotations/'
JPEGImages = datasetPath + 'JPEGImages/'
count = 0
for anno_file in os.listdir(Annotations):
if anno_file[-4:] == '.yml':
with open(Annotations + anno_file) as f:
f.readline()
f.readline()
data = yaml.load(f.read())
img_name = data['annotation']['filename']
img = cv2.imread(JPEGImages + img_name)
if type(data['annotation']['object']) == list:
for obj in data['annotation']['object']:
xmin = int(obj['bndbox']['xmin'])
ymin = int(obj['bndbox']['ymin'])
xmax = int(obj['bndbox']['xmax'])
ymax = int(obj['bndbox']['ymax'])
assert(xmin < xmax)
assert(ymin < ymax)
if not os.path.exists(dataset_new + obj['name']):
os.system('mkdir ' + dataset_new + obj['name'])
cv2.imwrite(dataset_new + obj['name'] + '/' + str(count) + '.jpg',
img[ymin:ymax,xmin:xmax])
count = count + 1
elif type(data['annotation']['object']) == dict:
obj = data['annotation']['object']
xmin = int(obj['bndbox']['xmin'])
ymin = int(obj['bndbox']['ymin'])
xmax = int(obj['bndbox']['xmax'])
ymax = int(obj['bndbox']['ymax'])
assert(xmin < xmax)
assert(ymin < ymax)
if not os.path.exists(dataset_new + obj['name']):
os.system('mkdir ' + dataset_new + obj['name'])
cv2.imwrite(dataset_new + obj['name'] + '/' + str(count) + '.jpg',
img[ymin:ymax,xmin:xmax])
count = count + 1
else:
                print 'Unexpected annotation object type: %s' % type(data['annotation']['object'])
sys.exit()
print ctime()
print '%d images saved.'%count | huligagn/hello | generate_dataset.py | Python | apache-2.0 | 1,785 |
import os
count = 0
for dirname, dirs, files in os.walk('.'):
for filename in files:
if filename.endswith('.txt') :
count = count + 1
print('Files:', count)
| mkhuthir/learnPython | Book_pythonlearn_com/28_os/txtcount.py | Python | mit | 182 |
#!/usr/bin/env python
'''
Copyright (C) 2007 Terry Brown, terry_n_brown@yahoo.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import inkex
import sys
import layout_nup_pageframe
class Nup(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
opts = [('', '--unit', 'string', 'unit', 'px', ''),
('', '--rows', 'int', 'rows', '2', ''),
('', '--cols', 'int', 'cols', '2', ''),
('', '--paddingTop', 'string', 'paddingTop', '', ''),
('', '--paddingBottom', 'string', 'paddingBottom', '', ''),
('', '--paddingLeft', 'string', 'paddingLeft', '', ''),
('', '--paddingRight', 'string', 'paddingRight', '', ''),
('', '--marginTop', 'string', 'marginTop', '', ''),
('', '--marginBottom', 'string', 'marginBottom', '', ''),
('', '--marginLeft', 'string', 'marginLeft', '', ''),
('', '--marginRight', 'string', 'marginRight', '', ''),
('', '--pgSizeX', 'string', 'pgSizeX', '', ''),
('', '--pgSizeY', 'string', 'pgSizeY', '', ''),
('', '--sizeX', 'string', 'sizeX', '', ''),
('', '--sizeY', 'string', 'sizeY', '', ''),
('', '--calculateSize', 'inkbool', 'calculateSize', True, ''),
('', '--pgMarginTop', 'string', 'pgMarginTop', '', ''),
('', '--pgMarginBottom', 'string', 'pgMarginBottom', '', ''),
('', '--pgMarginLeft', 'string', 'pgMarginLeft', '', ''),
('', '--pgMarginRight', 'string', 'pgMarginRight', '', ''),
('', '--showHolder', 'inkbool', 'showHolder', True, ''),
('', '--showCrosses', 'inkbool', 'showCrosses', True, ''),
('', '--showInner', 'inkbool', 'showInner', True, ''),
('', '--showOuter', 'inkbool', 'showOuter', False, ''),
('', '--showInnerBox', 'inkbool', 'showInnerBox', False, ''),
('', '--showOuterBox', 'inkbool', 'showOuterBox', False, ''),
('', '--tab', 'string', 'tab', '', ''),
]
for o in opts:
self.OptionParser.add_option(o[0], o[1], action="store", type=o[2],
dest=o[3], default=o[4], help=o[5])
def effect(self):
showList = []
for i in ['showHolder','showCrosses','showInner','showOuter',
'showInnerBox','showOuterBox',]:
if getattr(self.options, i):
showList.append(i.lower().replace('show', ''))
o = self.options
self.pf = layout_nup_pageframe.GenerateNup(
unit=o.unit,
pgSize=(o.pgSizeX,o.pgSizeY),
pgMargin=(o.pgMarginTop,o.pgMarginRight,o.pgMarginBottom,o.pgMarginLeft),
num=(o.rows,o.cols),
calculateSize = o.calculateSize,
size=(o.sizeX,o.sizeY),
margin=(o.marginTop,o.marginRight,o.marginBottom,o.marginLeft),
padding=(o.paddingTop,o.paddingRight,o.paddingBottom,o.paddingLeft),
show=showList,
)
def setAttr(self, node, name, value):
attr = node.ownerDocument.createAttribute(name)
attr.value = value
node.attributes.setNamedItem(attr)
def output(self):
sys.stdout.write(self.pf)
e = Nup()
e.affect()
| piksels-and-lines-orchestra/inkscape | share/extensions/layout_nup.py | Python | gpl-2.0 | 4,056 |
import unittest
from circular_buffer import (
CircularBuffer,
BufferFullException,
BufferEmptyException
)
class CircularBufferTest(unittest.TestCase):
def test_read_empty_buffer(self):
buf = CircularBuffer(1)
with self.assertRaises(BufferEmptyException):
buf.read()
def test_write_and_read_back_one_item(self):
buf = CircularBuffer(1)
buf.write('1')
self.assertEqual('1', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
def test_write_and_read_back_multiple_items(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
self.assertEqual(buf.read(), '1')
self.assertEqual(buf.read(), '2')
with self.assertRaises(BufferEmptyException):
buf.read()
def test_clearing_buffer(self):
buf = CircularBuffer(3)
for c in '123':
buf.write(c)
buf.clear()
with self.assertRaises(BufferEmptyException):
buf.read()
buf.write('1')
buf.write('2')
self.assertEqual(buf.read(), '1')
buf.write('3')
self.assertEqual(buf.read(), '2')
def test_alternate_write_and_read(self):
buf = CircularBuffer(2)
buf.write('1')
self.assertEqual(buf.read(), '1')
buf.write('2')
self.assertEqual(buf.read(), '2')
def test_read_back_oldest_item(self):
buf = CircularBuffer(3)
buf.write('1')
buf.write('2')
buf.read()
buf.write('3')
buf.read()
self.assertEqual(buf.read(), '3')
def test_write_full_buffer(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
with self.assertRaises(BufferFullException):
buf.write('A')
def test_overwrite_full_buffer(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
buf.overwrite('A')
self.assertEqual(buf.read(), '2')
self.assertEqual(buf.read(), 'A')
with self.assertRaises(BufferEmptyException):
buf.read()
def test_overwrite_non_full_buffer(self):
buf = CircularBuffer(2)
buf.overwrite('1')
buf.overwrite('2')
self.assertEqual(buf.read(), '1')
self.assertEqual(buf.read(), '2')
with self.assertRaises(BufferEmptyException):
buf.read()
def test_alternate_read_and_overwrite(self):
buf = CircularBuffer(5)
for c in '123':
buf.write(c)
buf.read()
buf.read()
buf.write('4')
buf.read()
for c in '5678':
buf.write(c)
buf.overwrite('A')
buf.overwrite('B')
self.assertEqual(buf.read(), '6')
self.assertEqual(buf.read(), '7')
self.assertEqual(buf.read(), '8')
self.assertEqual(buf.read(), 'A')
self.assertEqual(buf.read(), 'B')
with self.assertRaises(BufferEmptyException):
buf.read()
if __name__ == '__main__':
unittest.main()
| mweb/python | exercises/circular-buffer/circular_buffer_test.py | Python | mit | 3,083 |
# .caffemodel file path:
MODEL_FILE = '/home/user/Desktop/cifar10/cifar10_quick_iter_5000.caffemodel'
# .prototxt file path:
MODEL_NET = '/home/user/Desktop/cifar10/cifar10_quick.prototxt'
# Saving path:
SAVE_TO = '/home/user/Desktop/cifar10/'
# Set True if you want to get parameters:
GET_PARAMS = True
# Set True if you want to get blobs:
GET_BLOBS = False
import sys
import os
import numpy as np
import msgpack
# Make sure that Caffe is on the python path:
# Caffe installation path:
caffe_root = '/home/user/caffe/'
sys.path.insert(0, caffe_root + 'python')
sys.path.append(os.path.abspath('.'))
import caffe
def get_blobs(net):
net_blobs = {'info': 'net.blobs data'}
for key in net.blobs.iterkeys():
print('Getting blob: ' + key)
net_blobs[key] = net.blobs[key].data
return net_blobs
def get_params(net):
net_params = {'info': 'net.params data'}
for key in net.params.iterkeys():
print('Getting parameters: ' + key)
if type(net.params[key]) is not caffe._caffe.BlobVec:
net_params[key] = net.params[key].data
else:
net_params[key] = [net.params[key][0].data, net.params[key][1].data]
return net_params
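# Hedged illustration of the structures built above (the layer/blob names are only
# examples): for a convolution layer, get_params(net) produces something like
#   params['conv1'] == [weights of shape (num_filters, channels, kh, kw), biases]
# while get_blobs(net) maps each blob name to its activation array, e.g.
#   blobs['data'].shape == (batch, channels, height, width)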
# Open a model:
caffe.set_mode_cpu()
net = caffe.Net(MODEL_NET, MODEL_FILE, caffe.TEST)
net.forward()
# Extract the model:
if GET_BLOBS:
blobs = get_blobs(net)
if GET_PARAMS:
params = get_params(net)
save_list = []
# Write blobs:
if GET_BLOBS:
save_list.append('***BLOBS***')
for b in blobs.iterkeys():
print('Saving blob: ' + b)
if type(blobs[b]) is np.ndarray:
save_list.append(b)
buf = msgpack.packb(blobs[b].tolist(), use_single_float = True)
with open(SAVE_TO + 'model_blob_'+ b + '.msg', 'wb') as f:
f.write(buf)
else:
print('Blob ' + b + ' not saved.')
# Write parameters:
if GET_PARAMS:
save_list.append('***PARAMS***')
for b in params.iterkeys():
print('Saving parameters: ' + b)
if type(params[b]) is np.ndarray:
save_list.append(b)
buf = msgpack.packb(params[b].tolist(), use_single_float = True)
with open(SAVE_TO + 'model_param_'+ b + '.msg', 'wb') as f:
f.write(buf)
elif type(params[b]) is list:
save_list.append(b)
if len(params[b][0].shape) == 4: # for the convolution layers
buf1 = msgpack.packb(params[b][0].tolist(), use_single_float = True)
elif len(params[b][0].shape) == 2: # for the fully-connected layers
buf1 = msgpack.packb(params[b][0].ravel().tolist(), use_single_float = True)
buf2 = msgpack.packb(params[b][1].tolist(), use_single_float = True)
with open(SAVE_TO + 'model_param_'+ b + '.msg', 'wb') as f:
f.write(buf1)
f.write(buf2)
else:
print('Parameters ' + b + ' not saved.')
print('Saved data:')
print(save_list)
| ShMCG/CNNdroid | Parameter Generation Scripts/SavePycaffeModelInMessagePack.py | Python | mit | 2,993 |
'''
ExperimentClient tests.
'''
import os
import unittest
import pandas as pd
import time
from mljar.client.project import ProjectClient
from mljar.client.dataset import DatasetClient
from mljar.client.experiment import ExperimentClient
from .project_based_test import ProjectBasedTest, get_postfix
class ExperimentClientTest(ProjectBasedTest):
def setUp(self):
proj_title = 'Test project-01'+get_postfix()
proj_task = 'bin_class'
self.expt_title = 'Test experiment-01'
self.validation_kfolds = 5
self.validation_shuffle = True
self.validation_stratify = True
self.validation_train_split = None
self.algorithms = ['xgb']
self.metric = 'logloss'
self.tuning_mode = 'Normal'
self.time_constraint = 1
self.create_enseble = False
# setup project
self.project_client = ProjectClient()
self.project = self.project_client.create_project(title = proj_title, task = proj_task)
# add training data
df = pd.read_csv('tests/data/test_1.csv')
cols = ['sepal length', 'sepal width', 'petal length', 'petal width']
target = 'class'
dc = DatasetClient(self.project.hid)
self.dataset = dc.add_dataset_if_not_exists(df[cols], df[target])
def tearDown(self):
# wait before clean, to have time to initialize models
time.sleep(60)
# clean
self.project_client.delete_project(self.project.hid)
def test_create_with_kfold_cv(self):
#Create experiment test with k-fold CV.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "5-fold CV, Shuffle, Stratify")
self.assertEqual(experiment.metric, self.metric)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# get experiment by hid, there should be the same
experiment_2 = ec.get_experiment(experiment.hid)
self.assertEqual(experiment_2.hid, experiment.hid)
self.assertEqual(experiment_2.title, experiment.title)
self.assertEqual(experiment_2.metric, experiment.metric)
self.assertEqual(experiment_2.validation_scheme, experiment.validation_scheme)
self.assertTrue(experiment.equal(experiment_2))
# test __str__ method
self.assertTrue('id' in str(experiment_2))
self.assertTrue('title' in str(experiment_2))
self.assertTrue('metric' in str(experiment_2))
self.assertTrue('validation' in str(experiment_2))
def test_create_with_train_split(self):
#Create experiment with validation by train split.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, 0.72,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "Split 72/28, Shuffle, Stratify")
def test_create_with_validation_dataset(self):
#Create experiment with validation with dataset.
# add vald dataset
cols = ['sepal length', 'sepal width', 'petal length', 'petal width']
target = 'class'
df = pd.read_csv('tests/data/test_1_vald.csv')
dc = DatasetClient(self.project.hid)
vald_dataset = dc.add_dataset_if_not_exists(df[cols], df[target])
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, vald_dataset, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, 0.72,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "With dataset")
def test_create_if_exists(self):
#Create experiment after experiment is already in project.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# try to create the same experiment
experiment_2 = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# both should be the same
self.assertEqual(experiment_2.hid, experiment.hid)
self.assertEqual(experiment_2.title, experiment.title)
self.assertEqual(experiment_2.metric, experiment.metric)
self.assertEqual(experiment_2.validation_scheme, experiment.validation_scheme)
self.assertTrue(experiment.equal(experiment_2))
if __name__ == "__main__":
unittest.main()
| mljar/mljar-api-python | tests/experiment_client_test.py | Python | apache-2.0 | 7,937 |
import unittest
from locust.util.timespan import parse_timespan
from locust.util.rounding import proper_round
class TestParseTimespan(unittest.TestCase):
def test_parse_timespan_invalid_values(self):
self.assertRaises(ValueError, parse_timespan, None)
self.assertRaises(ValueError, parse_timespan, "")
self.assertRaises(ValueError, parse_timespan, "q")
def test_parse_timespan(self):
self.assertEqual(7, parse_timespan("7"))
self.assertEqual(7, parse_timespan("7s"))
self.assertEqual(60, parse_timespan("1m"))
self.assertEqual(7200, parse_timespan("2h"))
self.assertEqual(3787, parse_timespan("1h3m7s"))
class TestRounding(unittest.TestCase):
def test_rounding_down(self):
self.assertEqual(1, proper_round(1.499999999))
self.assertEqual(5, proper_round(5.499999999))
self.assertEqual(2, proper_round(2.05))
self.assertEqual(3, proper_round(3.05))
def test_rounding_up(self):
self.assertEqual(2, proper_round(1.5))
self.assertEqual(3, proper_round(2.5))
self.assertEqual(4, proper_round(3.5))
self.assertEqual(5, proper_round(4.5))
self.assertEqual(6, proper_round(5.5))
| heyman/locust | locust/test/test_util.py | Python | mit | 1,232 |
# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
{
'name': 'MRP Wave Scheduler',
'version': '7.0.1.0.0',
'category': 'Stock',
'sequence': 19,
'summary': 'MRP Wave Scheduler',
'description': """
Stock Big Scheduler + AUTO/FORCE MO (For bundle products with BOM)
==================================================
* Calculate scheduler based on DTS, PTS.
* During the scheduler run, create and validate MOs based on the product definition.
* Create & update scheduler status messages with message group 'scheduler'.
* Launch scheduler in threads when a scheduler is not started.
* Option to stop / kill the scheduler threads.
""",
'author': 'Elico Corp',
'website': 'https://www.elico-corp.com',
'images' : [],
'depends': ['product','stock','delivery_plan'],
'data': [
'wizard/scheduler.xml',
],
'test': [],
'demo': [],
'installable': True,
'auto_install': False,
'application': False,
}
| Elico-Corp/openerp-7.0 | mrp_wave/__openerp__.py | Python | agpl-3.0 | 1,104 |
#!/usr/bin/env python
# encoding: utf-8
from cmath import exp, pi, sqrt
from random import random
class Psi:
def __init__(self, n_qubits):
"""
set up a quantum system with the given number of qubits
initialized to the "zero" qubit.
"""
self.n_qubits = n_qubits
# in this classical simulation, we use 2^n_qubits complex numbers
self.amplitudes = [0] * (1 << n_qubits)
self.amplitudes[0] = 1
def collapse(self):
"""
collapse the system (i.e. measure it) and return a tuple
of the bits.
"""
weights = [abs(amp) ** 2 for amp in self.amplitudes]
choice = random() * sum(weights)
for i, w in enumerate(weights):
choice -= w
if choice < 0:
self.amplitudes = [0] * (1 << self.n_qubits)
self.amplitudes[i] = 1
return tuple((i >> bit) % 2 for bit in range(self.n_qubits))
## gates
def pi_over_eight(self, qubit):
"""
applies a π/8 gate to the given qubit
"""
# has to be a valid qubit
if qubit > self.n_qubits:
raise ValueError()
# go through each amplitude
for i in range(1 << self.n_qubits):
# find out whether that amplitude corresponds to the qubit being
# zero or one
if (i >> qubit) % 2 == 0: # if zero
self.amplitudes[i] *= exp(-1j * pi / 8)
else: # if one
self.amplitudes[i] *= exp(1j * pi / 8)
def controlled_not(self, qubit1, qubit2):
"""
applies a controlled-not gate using the first given qubit as the
control of the permutation of the second.
"""
# the two quibits have to valid and different
if qubit1 > self.n_qubits or qubit2 > self.n_qubits or qubit1 == qubit2:
raise ValueError()
# make a copy of amplitudes as they update simultaneously
old_amplitudes = self.amplitudes[:]
# go through each amplitude
for i in range(1 << self.n_qubits):
# permutate qubit2 based on value of qubit1
self.amplitudes[i ^ (((i >> qubit1) % 2) << qubit2)] = old_amplitudes[i]
def hadamard(self, qubit):
"""
applies a Hadamard gate to the given qubit.
"""
# has to be a valid qubit
if qubit > self.n_qubits:
raise ValueError()
# make a copy of amplitudes as they update simultaneously
old_amplitudes = self.amplitudes[:]
# go through each amplitude
for i in range(1 << self.n_qubits):
# find out whether that amplitude corresponds to the qubit being
# zero or one
if (i >> qubit) % 2 == 0: # if zero
                self.amplitudes[i] = (old_amplitudes[i] + old_amplitudes[i + (1 << qubit)]) / sqrt(2)
else: # if one
self.amplitudes[i] = (old_amplitudes[i - (1 << qubit)] - old_amplitudes[i]) / sqrt(2)
if __name__ == "__main__":
# set up a system with 3 qubits
psi = Psi(3)
# apply Hadamard gate to qubit 1 (second qubit)
psi.hadamard(1)
# apply Hadamard gate to qubit 2 (third qubit)
psi.hadamard(2)
# apply π/8 gate to qubit 1
psi.pi_over_eight(1)
# apply controlled-not gate with qubit 1 controlling qubit 0 (first qubit)
psi.controlled_not(1, 0)
# collapse and print the result (a tuple of 3 classical bits)
print(psi.collapse())
| jtauber/quantumpy | quantum.py | Python | mit | 3,516 |
from PyQt5.QtCore import QAbstractListModel, QModelIndex, Qt
class QtListModel(QAbstractListModel):
def __init__(self, item_builder):
QAbstractListModel.__init__(self)
self._items = {}
self._itemlist = [] # For queries
self._item_builder = item_builder
def rowCount(self, parent):
if parent.isValid():
return 0
return len(self._itemlist)
def data(self, index, role):
if not index.isValid() or index.row() >= len(self._itemlist):
return None
if role != Qt.DisplayRole:
return None
return self._itemlist[index.row()]
# TODO - insertion and removal are O(n).
def _add_item(self, data, id_):
assert id_ not in self._items
next_index = len(self._itemlist)
self.beginInsertRows(QModelIndex(), next_index, next_index)
item = self._item_builder(data)
item.updated.connect(self._at_item_updated)
self._items[id_] = item
self._itemlist.append(item)
self.endInsertRows()
def _remove_item(self, id_):
assert id_ in self._items
item = self._items[id_]
item_index = self._itemlist.index(item)
self.beginRemoveRows(QModelIndex(), item_index, item_index)
item.updated.disconnect(self._at_item_updated)
del self._items[id_]
self._itemlist.pop(item_index)
self.endRemoveRows()
def _clear_items(self):
self.beginRemoveRows(QModelIndex(), 0, len(self._items) - 1)
for item in self._items.values():
item.updated.disconnect(self._at_item_updated)
self._items.clear()
self._itemlist.clear()
self.endRemoveRows()
def _at_item_updated(self, item):
item_index = self._itemlist.index(item)
index = self.index(item_index, 0)
self.dataChanged.emit(index, index)
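# Usage sketch (hedged; `ChatterItem` and the ids are assumptions, not part of this
# module). Items produced by `item_builder` must expose an `updated(item)` signal,
# which the model connects to so dataChanged is emitted automatically:
#   model = QtListModel(item_builder=ChatterItem)
#   model._add_item({'login': 'alice'}, id_='alice')
#   model._remove_item('alice')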
| FAForever/client | src/util/qt_list_model.py | Python | gpl-3.0 | 1,886 |
""" Convenience script to run all python test cases in one go.
"""
import sys
import unittest
try:
import DNS
except ImportError:
print("TESTS SKIPPED: the python3-dns library is not installed")
sys.exit(0)
from test_btrie import *
from test_ip6trie import *
from test_ip4trie import *
from test_acl import *
if __name__ == '__main__':
unittest.main()
| spamhaus/rbldnsd | tests.py | Python | gpl-2.0 | 371 |
from rest_framework import viewsets
from limbo import serializers
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from limbo.serializers import LimboSerializer
from limbo.lib import Dictionary
class LimboViewSet(viewsets.ViewSet):
def list(self, request):
"""
Lists all dictionaries currently available
"""
l = Dictionary.dictionary_list()
return Response(data={"words": l}, status=200)
def partial_update(self, request, pk=None):
pass
def create(self, request):
"""
Adds words to global dictionary
"""
dictionary = Dictionary.get_global_dictionary()
wordlist = LimboSerializer(data=request.data)
if wordlist.is_valid():
for w in wordlist.data["words"]:
dictionary.add_word(w["word"])
return Response(wordlist.data, status=201)
else:
return Response(status=400)
def retrieve(self, request, pk=None):
"""
List of words for specified dictionary
"""
dictionary = Dictionary(pk)
return Response(
status=200,
data={"words": dictionary.get_words()}
)
def destroy(self, request, pk=None):
pass
def update(self, request, pk=None):
"""
Add word to local dictionary
"""
dictionary = Dictionary(pk)
if dictionary.is_owner(request.user.email):
wordlist = LimboSerializer(data=request.data)
if wordlist.is_valid():
words = wordlist.validated_data["words"]
for w in words:
dictionary.add_word(w["word"])
return Response(status=201)
else:
return Response(status=400)
else:
return Response(status=403)
@detail_route(methods=['post'])
def check(self, request, pk=None):
wordlist = LimboSerializer(data=request.data)
dictionary = Dictionary(pk)
res = []
if wordlist.is_valid():
ww = wordlist.data["words"]
for w in ww:
if dictionary.check(w["word"]):
res.append({
"word": w["word"],
"ok": True,
"suggestions": []
})
else:
res.append({
"word": w["word"],
"ok": False,
"suggestions": dictionary.get_suggestions(w["word"])
})
return Response(data={"words": res})
else:
return Response(status=400)
@detail_route(methods=['post'])
def ignore(self, request, pk=None):
"""
Removes word from specified dictionary
(specify 'global' for global dictionary)
"""
dictionary = None
if pk == "global":
dictionary = Dictionary.get_global_dictionary()
else:
dictionary = Dictionary(pk)
wordlist = LimboSerializer(data=request.data)
if wordlist.is_valid():
for w in wordlist.data["words"]:
dictionary.ignore_word(w["word"])
return Response(status=202)
else:
return Response(status=400)
| fantastic001/liberator-api | limbo/views/limbo.py | Python | gpl-2.0 | 3,380 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
# if we want to give our script parameters, we need a special library
import sys, os, re, requests, json
from elasticsearch import Elasticsearch
reload(sys)
sys.setdefaultencoding("utf-8")
# FUNCTIONS
def add_data_to_index(es, index_name, document_type, data):
result = es.index(index=index_name, doc_type=document_type, body=data, request_timeout=30)
return result
def split_and_index_text(es, theIndex, theType, text, code, region, lid):
local_file = text
# open the file for reading
with open(local_file, 'r',) as infile:
content = infile.read()
sunitDict = dict()
lines = re.split("\n", content)
for line in lines:
line = line.strip()
# print(line)
myList = line.split("\t")
if len(myList) >= 2:
lid += 1
sunitDict['localId'] = lid
sunitDict['areaCode'] = code
sunitDict['region'] = region
sunitDict['rawText'] = myList[1]
sunitDict['origText'] = myList[1]
tempid = myList[0]
sunitDict['sunitId'] = tempid
myNewList = tempid.split(":")
if len(myNewList) > 1:
sunitDict['textId'] = myNewList[0] + ':' + myNewList[1]
tType = myNewList[1]
if re.match("S", tType):
sunitDict['textType'] = "spoken"
elif re.match("W", tType):
sunitDict['textType'] = "written"
else:
sunitDict['textType'] = "unknown"
else:
print("Cannot generate textId/textType -- Empty line?", end="")
print(tempid, end="\n")
sunitDict['textId'] = tempid
sunitJSON = json.dumps(sunitDict)
# print(sunitJSON)
indexed = add_data_to_index(es, theIndex, theType, sunitJSON)
elif len(myList) >= 1:
lid += 1
sunitDict['localId'] = lid
sunitDict['areaCode'] = code
sunitDict['region'] = region
sunitDict['rawText'] = ""
sunitDict['origText'] = ""
tempid = myList[0]
sunitDict['sunitId'] = tempid
myNewList = tempid.split(":")
if len(myNewList) > 1:
sunitDict['textId'] = myNewList[0] + ':' + myNewList[1]
tType = myNewList[1]
if re.match("s", tType):
sunitDict['textType'] = "spoken"
elif re.match("w", tType):
sunitDict['textType'] = "written"
else:
sunitDict['textType'] = "unknown"
else:
print("Cannot generate textId/textType -- Empty line?", end="")
print(tempid, end="\n")
sunitDict['textId'] = tempid
sunitJSON = json.dumps(sunitDict)
# print(sunitJSON)
indexed = add_data_to_index(es, theIndex, theType, sunitJSON)
else:
print("Cannot index:", end="")
print(line, end="\n")
return lid
#MAIN
#res = requests.get('http://localhost:9200')
#print(res.content)
#Should/Could be read from command line/URL
myIndex = 'ice'
myType = 'iceraw'
if len(sys.argv) < 5:
print("Need input file (e.g. 'ice-gb.txt'), country/area code (e.g. 'aus', 'can', 'gbr', 'sgp'), region (e.g. 'north', 'south' or 'other') and start id (a number).")
sys.exit()
mytext = sys.argv[1]
myarea = sys.argv[2]
myregion = sys.argv[3]
mylocalid = sys.argv[4]
print(mytext)
print(myarea)
print(myregion)
print(mylocalid)
localidint = int(mylocalid)
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
return_value = split_and_index_text(es, myIndex, myType, mytext, myarea, myregion, localidint)
print("Local id :", end="")
print(return_value)
| jarleEbe/elasticsearch-on-corpora | iceuntagged/iceSimpleIndex.py | Python | mit | 3,789 |
#-*- encoding: utf-8 -*-
class ParserHandler:
KEYWORDS = ['up','down','right','left',
'a','b','start','select']
def __init__(self):
pass
def parser(self, message):
ret = []
while len(message) > 0:
found = False
for key in self.KEYWORDS:
if message.find(key) == 0:
ret.append(key)
message = message[len(key):]
found = True
if not found:
message = message[1:]
return ret | tnlin/nctuNASA | NA-HW1-Twitch_Play_Pokemon/msgParser.py | Python | mit | 577 |
#!/usr/bin/env python
""" Test unicode support in tvnamer
"""
from functional_runner import run_tvnamer, verify_out_data
from nose.plugins.attrib import attr
import unicodedata
@attr("functional")
def test_unicode_in_inputname():
"""Tests parsing a file with unicode in the input filename
"""
input_files = [
u'The Big Bang Theory - S02E07 - The Panty Pin\u0303ata Polarization.avi']
expected_files = [
u'The Big Bang Theory - [02x07] - The Panty Pin\u0303ata Polarization.avi']
out_data = run_tvnamer(
with_files = input_files,
with_flags = ["--batch"])
verify_out_data(out_data, expected_files)
@attr("functional")
def test_unicode_in_search_results():
"""Show with unicode in search results
"""
input_files = [
'psych.s04e11.avi']
expected_files = [
'Psych - [04x11] - Thrill Seekers & Hell Raisers.avi']
out_data = run_tvnamer(
with_files = input_files,
with_input = '1\ny\n')
verify_out_data(out_data, expected_files)
@attr("functional")
def test_not_overwritting_unicode_filename():
"""Test no error occurs when warning about a unicode filename being overwritten
"""
input_files = [
u'The Big Bang Theory - S02E07.avi',
u'The Big Bang Theory - [02x07] - The Panty Pin\u0303ata Polarization.avi']
expected_files = [
u'The Big Bang Theory - S02E07.avi',
u'The Big Bang Theory - [02x07] - The Panty Pin\u0303ata Polarization.avi']
out_data = run_tvnamer(
with_files = input_files,
with_flags = ['--batch'])
verify_out_data(out_data, expected_files)
| lahwaacz/tvnamer | tests/test_unicode.py | Python | unlicense | 1,655 |
from django.db import models
from django.contrib.auth.models import User
# x and y values in meters
# general data (to be fetched via the petrol station API)
class Tankstellen(models.Model):
bezeichnung = models.CharField(max_length=256)
position_x = models.DecimalField(max_digits = 8, decimal_places = 6)
position_y = models.DecimalField(max_digits = 8, decimal_places = 6)
preis = models.DecimalField(max_digits = 5, decimal_places = 2)
def __unicode__(self):
return self.bezeichnung
# prices are also crawled (likewise via the petrol station API)
class BenzinPreis(models.Model):
tankstelle = models.ForeignKey(Tankstellen)
preis = models.DecimalField(max_digits = 5, decimal_places = 2)
start_zeit = models.DateTimeField()
def __unicode__(self):
return(str(self.start_zeit))
class Meta:
ordering = ('start_zeit',)
class FahrtDaten(models.Model):
nutzer = models.ForeignKey(User)
strecken_laengekm = models.DecimalField(max_digits = 5, decimal_places = 1)
spritverbrauch_in_l = models.DecimalField(max_digits = 4, decimal_places = 2)
start_zeit = models.DateTimeField()
end_zeit = models.DateTimeField()
class Meta:
ordering = ('end_zeit',)
# user positions for the mockup, updated every minute
class UserPositions(models.Model):
zeit = models.DateTimeField()
benzin_delta_in_l = models.DecimalField(max_digits = 4, decimal_places = 2)
position_x = models.DecimalField(max_digits = 8, decimal_places = 6)
position_y = models.DecimalField(max_digits = 8, decimal_places = 6)
class Meta:
ordering = ('zeit',)
| AlexImmer/VolkSwaggen | richtigTanken/models.py | Python | mit | 1,542 |
from tago import Tago
import os
TOKEN = os.environ.get('TAGO_TOKEN_DEVICE') or 'TOKEN'
def test_insert():
result = Tago(TOKEN).device.find({'query': 'last_value'})
print result
if result['status']:
assert True
else:
assert False
| madhavbhatt/tago-sdk-python | tests/device/test_get.py | Python | apache-2.0 | 263 |
import abc
import math
import cmath
import numpy as np
from ..base import Base
class Coupling(Base):
"""
Abstract base class for couplings.
"""
__metaclass__ = abc.ABCMeta
connection = None
"""
Connection this coupling is part of.
"""
component_from = None
"""
Component
"""
component_to = None
"""
Component
"""
subsystem_from = None
"""
Type of subsystem origin for coupling
"""
subsystem_to = None
"""
Type of subsystem destination for coupling
"""
object_sort = 'Coupling'
size = None
"""
Size of the coupling.
"""
@abc.abstractproperty
def impedance_from(self):
"""Impedance of :attr:`subsystem_from` corrected for the type of coupling.
:rtype: :class:`numpy.ndarray`
"""
return
@abc.abstractproperty
def impedance_to(self):
"""Impedance of :attr:`subsystem_to` corrected for the type of coupling.
:rtype: :class:`numpy.ndarray`
"""
return
@abc.abstractproperty
def clf(self):
"""Coupling loss factor `\\eta`.
:rtype: :class:`numpy.ndarray`
"""
return
@property
def mobility_from(self):
"""Mobility of :attr:`subsystem_from` corrected for the type of coupling.
:rtype: :class:`numpy.ndarray`
"""
return 1.0 / self.impedance_from
@property
def mobility_to(self):
"""Mobility of :attr:`subsystem_to` corrected for the type of coupling.
:rtype: :class:`numpy.ndarray`
"""
return 1.0 / self.impedance_to
@property
def resistance_from(self):
"""Resistance of :attr:`subsystem_from` corrected for the type of coupling.
:rtype: :class:`numpy.ndarray`
"""
return np.real(self.impedance_from)
@property
def resistance_to(self):
"""Resistance of :attr:`subsystem_to` corrected for the type of coupling.
:rtype: :class:`numpy.ndarray`
"""
return np.real(self.impedance_to)
@property
def modal_coupling_factor(self):
"""Modal coupling factor of the coupling.
:rtype: :class:`numpy.ndarray`
.. math:: \\beta_{ij} = \\frac{ f * \\eta_{ij} } { \\overline{\\delta f_i} }
See Lyon, above equation 12.1.4
"""
return self.frequency.center * self.clf / self.subsystem_from.average_frequency_spacing
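# Minimal concrete coupling, sketched only to illustrate the abstract API above
# (the subsystem attribute names and `compute_clf` are assumptions, not part of
# this module):
#   class MyCoupling(Coupling):
#       @property
#       def impedance_from(self):
#           return self.subsystem_from.impedance
#       @property
#       def impedance_to(self):
#           return self.subsystem_to.impedance
#       @property
#       def clf(self):
#           return compute_clf(self.subsystem_from, self.subsystem_to)
# With these three members defined, the mobility and resistance properties above
# follow automatically.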
| FRidh/Sea | Sea/model/couplings/Coupling.py | Python | bsd-3-clause | 2,622 |
"""
Kolibri Hooks API
-----------------
What are hooks
~~~~~~~~~~~~~~
Hooks are classes that define *something* that happens at one or more places
where the hook is looked for and applied. It means that you can
"hook into a component" in Kolibri and have it do a predefined and
parameterized *thing*. For instance, Kolibri could ask all its plugins who
wants to add something to the user settings panel, and its then up to the
plugins to inherit from that specific hook and feed back the parameters that
the hook definition expects.
The consequences of a hook being applied can happen anywhere in Kolibri. Each
hook is defined through a class inheriting from ``KolibriHook``. But how the
inheritor of that class deals with plugins using it, is entirely up to each
specific implementation and can be applied in templates, views, middleware -
basically everywhere!
That's why you should consult the class definition and documentation of the
hook you are adding plugin functionality with.
We have two different types of hooks:
Abstract hooks
Are definitions of hooks that are implemented by *implementing hooks*.
Registered hooks
Are concrete hooks that inherit from abstract hooks, thus embodying the
definitions of the abstract hook into a specific case.
So what's "a hook"?
Simply referring to "a hook" is okay, it can be ambiguous on purpose. For
instance, in the example, we talk about "a navigation hook". So we both
mean the abstract definition of the navigation hook and everything that
is registered for the navigation.
Where can I find hooks?
~~~~~~~~~~~~~~~~~~~~~~~
All Kolibri core applications and plugins alike should *by convention* define
their abstract hooks inside ``<myapp>/hooks.py``. Thus, to see which hooks
a Kolibri component exposes, you can refer to its ``hooks`` module.
.. note::
Defining abstract hooks in ``<myapp>/hooks.py`` isn't mandatory, but
*loading* a concrete hook in ``<myapp>/kolibri_plugin.py`` is.
.. warning::
Do not define abstract and registered hooks in the same module. Or to put it
in other words: Never put registered hooks in ``<myapp>/hooks.py``. The
non-abstract hooks should not be loaded unintentionally in case your
application is not loaded but only used to import an abstract definition
by an external component!
In which order are hooks used/applied?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is entirely up to the registering class. By default, hooks are applied in
the same order that the registered hook gets registered! This most likely means
the order in which ``kolibri_plugin`` is loaded ``=>`` the order in which the
app is listed in ``INSTALLED_APPS``
An example of a plugin using a hook
-----------------------------------
.. note::
The example shows a NavigationHook which is simplified for the sake of
    readability. The actual implementation in Kolibri will differ.
Example implementation
----------------------
Here is an example of how to use a hook in ``myplugin.kolibri_plugin.py``:
.. code-block:: python
from django.db.models
# This is where the actual abstract hook is defined
from kolibri.core.hooks import NavigationHook
# By inheriting NavigationHook, we tell that we are going to want our
# plugin to be part of the hook's activities with the specified attributes.
# We only define one navigation item, but defining another one is as simple
# as adding another class definition.
class MyPluginNavigationItem(NavigationHook):
label = _("My Plugin")
url = reverse_lazy("kolibri:my_plugin:index")
And here is the definition of that hook in kolibri.core.hooks:
.. code-block:: python
from kolibri.plugins.hooks import KolibriHook
class NavigationHook(KolibriHook):
\"\"\"
Extend this hook to define a new navigation item
\"\"\"
#: A string label for the menu item
label = "Untitled"
#: A string or lazy proxy for the url
url = "/"
@classmethod
def get_menu(cls):
menu = {}
            for hook in cls.registered_hooks:
                menu[hook.label] = hook.url
return menu
class Meta:
abstract = True
Usage of the hook
-----------------
Inside our templates, we load a template tag from navigation_tags, and this
template tag definition looks like this:
.. code-block:: python
from kolibri.core.hooks import NavigationHook
@register.assignment_tag()
def kolibri_main_navigation():
for item in NavigationHook().get_menu():
yield item
.. code-block:: html
{% load kolibri_tags %}
<ul>
{% for menu_item in kolibri_main_navigation %}
<li><a href="{{ menu_item.url }}">{{ menu_item.label }}</a></li>
{% endfor %}
</ul>
.. warning::
Do not load registered hook classes outside of a plugin's
``kolibri_plugin``. Either define them there directly or import the modules
that define them. Hook classes should all be seen at load time, and
placing that logic in ``kolibri_plugin`` guarantees that things are
registered correctly.
"""
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import six
logger = logging.getLogger(__name__)
# : Inspired by how Django's Model Meta option settings work, we define a simple
# : list of valid options for Meta classes.
DEFAULT_NAMES = ('abstract', 'replace_parent')
def abstract_method(func):
@functools.wraps(func)
def inner(instance, *args, **kwargs):
assert \
instance._meta.abstract, \
"Method call only valid for an abstract hook"
return func(instance, *args, **kwargs)
return inner
def registered_method(func):
@functools.wraps(func)
def inner(instance, *args, **kwargs):
assert \
not instance._meta.abstract, \
"Method call only valid for a registered, non-abstract hook"
return func(instance, *args, **kwargs)
return inner
class Options(object):
"""
Stores instance of options for Hook.Meta classes
"""
def __init__(self, meta):
self.abstract = False
self.replace_parent = False
self.meta = meta
self.registered_hooks = set()
if meta:
# Deep copy because options may be immutable, and so we shouldn't
# have one object manipulate an instance of an option and that
# propagates to other objects.
meta_attrs = self.meta.__dict__.copy()
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
assert not (self.abstract and self.replace_parent), "Cannot replace abstract hooks"
class KolibriHookMeta(type):
"""
This is the base meta class of all hooks in Kolibri. A lot of the code is
lifted from django.db.models.ModelBase.
We didn't end up like this because of bad luck, rather it fitted perfectly
to how we want plugin functionality to be /plugged into/ in an explicit
manner:
Metaclasses are deeper magic than 99% of users should ever worry about.
If you wonder whether you need them, you don't (the people who actually
need them know with certainty that they need them, and don't need an
explanation about why).
- Tim Peters
"""
def __new__(cls, name, bases, attrs):
"""
Inspired by Django's ModelBase, we create a dynamic type for each hook
definition type class and add it to the global registry of hooks.
"""
super_new = super(KolibriHookMeta, cls).__new__
# Parent classes of cls up until and including KolibriHookMeta
parents = [b for b in bases if isinstance(b, KolibriHookMeta)]
# If there isn't any parents, it's the main class of everything
# ...and we just set some empty options
if not parents:
base_class = super_new(cls, name, bases, attrs)
base_class.add_to_class('_meta', Options(None))
base_class.add_to_class('_parents', [])
return base_class
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
# Commented out because it sets the meta properties of the parent
# if not attr_meta:
# meta = getattr(new_class, 'Meta', None)
# else:
# meta = attr_meta
meta = attr_meta
# Meta of the base object can be retrieved by looking at the currently
# set _meta object... but we don't use it...
# base_meta = getattr(new_class, '_meta', None)
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
new_class.add_to_class('_meta', Options(meta))
new_class.add_to_class('_parents', parents)
if not abstract:
logger.debug("Registered hook class {}".format(new_class))
for parent in new_class._parents:
parent.register_hook(new_class)
if new_class._meta.replace_parent:
immediate_parent = parents[-1]
for parent in parents:
parent.unregister_hook(immediate_parent)
return new_class
def add_to_class(cls, name, value):
setattr(cls, name, value)
def unregister_hook(cls, child_hook):
if child_hook in cls._meta.registered_hooks:
cls._meta.registered_hooks.remove(child_hook)
for parent in cls._parents:
parent.unregister_hook(child_hook)
def register_hook(cls, child_hook):
cls._meta.registered_hooks.add(child_hook)
for parent in cls._parents:
parent.register_hook(child_hook)
class KolibriHook(six.with_metaclass(KolibriHookMeta)):
"""
WIP!!!
To use it, extend it. All hooks in kolibri extends from this.
Example is in the main description of ``kolibri.plugins.hooks``.
"""
def __init__(self):
# We have been initialized. A noop. But inheriting hooks might want to
# pay attention when they are initialized, since it's an event in itself
# signaling that whatever that hook was intended for is now being
# yielded or rendered.
pass
@property
def registered_hooks(self):
"""
Always go through this method. This should guarantee that every time a
hook is accessed, it's also instantiated, giving it a chance to re-do
things that it wants done on every event.
"""
for HookClass in self._meta.registered_hooks:
yield HookClass()
class Meta:
abstract = True
| ralphiee22/kolibri | kolibri/plugins/hooks.py | Python | mit | 10,971 |
from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import F
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from sentry import features, roles
from sentry.auth import manager
from sentry.auth.helper import AuthHelper
from sentry.models import AuditLogEntryEvent, AuthProvider, OrganizationMember, User
from sentry.plugins import Response
from sentry.tasks.auth import email_missing_links, email_unlink_notifications
from sentry.utils import db
from sentry.utils.http import absolute_uri
from sentry.web.frontend.base import OrganizationView
ERR_NO_SSO = _('The SSO feature is not enabled for this organization.')
OK_PROVIDER_DISABLED = _('SSO authentication has been disabled.')
OK_REMINDERS_SENT = _(
'A reminder email has been sent to members who have not yet linked their accounts.'
)
class AuthProviderSettingsForm(forms.Form):
require_link = forms.BooleanField(
label=_('Require SSO'),
help_text=_('Require members use a valid linked SSO account to access this organization'),
required=False,
)
default_role = forms.ChoiceField(
label=_('Default Role'),
choices=roles.get_choices(),
help_text=_(
'The default role new members will receive when logging in for the first time.'
),
)
class OrganizationAuthSettingsView(OrganizationView):
# We restrict auth settings to org:admin as it allows a non-owner to
# escalate members to own by disabling the default role.
required_scope = 'org:admin'
def _disable_provider(self, request, organization, auth_provider):
self.create_audit_entry(
request,
organization=organization,
target_object=auth_provider.id,
event=AuditLogEntryEvent.SSO_DISABLE,
data=auth_provider.get_audit_log_data(),
)
if db.is_sqlite():
for om in OrganizationMember.objects.filter(organization=organization):
setattr(om.flags, 'sso:linked', False)
setattr(om.flags, 'sso:invalid', False)
om.save()
else:
OrganizationMember.objects.filter(
organization=organization,
).update(
flags=F('flags').bitand(
~getattr(OrganizationMember.flags, 'sso:linked'),
).bitand(
~getattr(OrganizationMember.flags, 'sso:invalid'),
),
)
user_ids = OrganizationMember.objects.filter(organization=organization).values('user')
User.objects.filter(id__in=user_ids).update(is_managed=False)
email_unlink_notifications.delay(organization.id, request.user.id, auth_provider.provider)
auth_provider.delete()
def handle_existing_provider(self, request, organization, auth_provider):
provider = auth_provider.get_provider()
if request.method == 'POST':
op = request.POST.get('op')
if op == 'disable':
self._disable_provider(request, organization, auth_provider)
messages.add_message(
request,
messages.SUCCESS,
OK_PROVIDER_DISABLED,
)
next_uri = reverse('sentry-organization-auth-settings', args=[organization.slug])
return self.redirect(next_uri)
elif op == 'reinvite':
email_missing_links.delay(organization.id, request.user.id, provider.key)
messages.add_message(
request,
messages.SUCCESS,
OK_REMINDERS_SENT,
)
next_uri = reverse(
'sentry-organization-auth-provider-settings',
args=[
organization.slug])
return self.redirect(next_uri)
form = AuthProviderSettingsForm(
data=request.POST if request.POST.get('op') == 'settings' else None,
initial={
'require_link': not auth_provider.flags.allow_unlinked,
'default_role': organization.default_role,
},
)
if form.is_valid():
auth_provider.flags.allow_unlinked = not form.cleaned_data['require_link']
auth_provider.save()
organization.default_role = form.cleaned_data['default_role']
organization.save()
view = provider.get_configure_view()
response = view(request, organization, auth_provider)
if isinstance(response, HttpResponse):
return response
elif isinstance(response, Response):
response = response.render(
request, {
'auth_provider': auth_provider,
'organization': organization,
'provider': provider,
}
)
pending_links_count = OrganizationMember.objects.filter(
organization=organization,
flags=~getattr(OrganizationMember.flags, 'sso:linked'),
).count()
context = {
'form': form,
'pending_links_count': pending_links_count,
'login_url':
absolute_uri(reverse('sentry-organization-home', args=[organization.slug])),
'auth_provider': auth_provider,
'provider_name': provider.name,
'content': response,
}
return self.respond('sentry/organization-auth-provider-settings.html', context)
@transaction.atomic
def handle(self, request, organization):
if not features.has('organizations:sso', organization, actor=request.user):
messages.add_message(
request,
messages.ERROR,
ERR_NO_SSO,
)
return HttpResponseRedirect(
reverse('sentry-organization-home', args=[organization.slug])
)
try:
auth_provider = AuthProvider.objects.get(
organization=organization,
)
except AuthProvider.DoesNotExist:
pass
else:
return self.handle_existing_provider(
request=request,
organization=organization,
auth_provider=auth_provider,
)
if request.method == 'POST':
provider_key = request.POST.get('provider')
if not manager.exists(provider_key):
raise ValueError('Provider not found: {}'.format(provider_key))
helper = AuthHelper(
request=request,
organization=organization,
provider_key=provider_key,
flow=AuthHelper.FLOW_SETUP_PROVIDER,
)
feature = helper.provider.required_feature
if feature and not features.has(feature, organization, actor=request.user):
return HttpResponse('Provider is not enabled', status=401)
if request.POST.get('init'):
helper.init_pipeline()
if not helper.pipeline_is_valid():
return helper.error('Something unexpected happened during authentication.')
# render first time setup view
return helper.current_step()
# Otherwise user is in bad state since frontend/react should handle this case
return HttpResponseRedirect(
reverse('sentry-organization-home', args=[organization.slug])
)
| looker/sentry | src/sentry/web/frontend/organization_auth_settings.py | Python | bsd-3-clause | 7,726 |
"""
This file is part of DeepConvSep.
Copyright (c) 2014-2017 Marius Miron <miron.marius at gmail.com>
DeepConvSep is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepConvSep is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepConvSep. If not, see <http://www.gnu.org/licenses/>.
"""
import os,sys
import transform
from transform import transformFFT
import dataset
from dataset import LargeDataset
import util
import numpy as np
import re
from scipy.signal import blackmanharris as blackmanharris
import shutil
import time
import cPickle
import re
import climate
import ConfigParser
import theano
import theano.tensor as T
import theano.sandbox.rng_mrg
import lasagne
from lasagne.layers import ReshapeLayer,Layer
from lasagne.init import Normal
from lasagne.regularization import regularize_layer_params_weighted, l2, l1
from lasagne.regularization import regularize_layer_params
logging = climate.get_logger('trainer')
climate.enable_default_logging()
def load_model(filename):
f=file(filename,'rb')
params=cPickle.load(f)
f.close()
return params
def save_model(filename, model):
params=lasagne.layers.get_all_param_values(model)
f = file(filename, 'wb')
cPickle.dump(params,f,protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
return None
def build_ca(input_var=None, batch_size=32,time_context=30,feat_size=513):
"""
Builds a network with lasagne
Parameters
----------
input_var : Theano tensor
The input for the network
batch_size : int, optional
The number of examples in a batch
time_context : int, optional
The time context modeled by the network.
feat_size : int, optional
The feature size modeled by the network (last dimension of the feature vector)
    Returns
    -------
    l_out : lasagne layer
        The output layer of the network
"""
input_shape=(batch_size,1,time_context,feat_size)
#input layer
l_in_1 = lasagne.layers.InputLayer(shape=input_shape, input_var=input_var)
#vertical convolution layer
l_conv1 = lasagne.layers.Conv2DLayer(l_in_1, num_filters=30, filter_size=(1,30),stride=(1,3), pad='valid', nonlinearity=None)
l_conv1b= lasagne.layers.BiasLayer(l_conv1)
#horizontal convolution layer
l_conv2 = lasagne.layers.Conv2DLayer(l_conv1b, num_filters=30, filter_size=(10,20),stride=(1,1), pad='valid', nonlinearity=None)
l_conv2b= lasagne.layers.BiasLayer(l_conv2)
    #bottleneck layer
l_fc=lasagne.layers.DenseLayer(l_conv2b,256)
#build output for source1
l_fc11=lasagne.layers.DenseLayer(l_fc,l_conv2.output_shape[1]*l_conv2.output_shape[2]*l_conv2.output_shape[3])
l_reshape1 = lasagne.layers.ReshapeLayer(l_fc11,(batch_size,l_conv2.output_shape[1],l_conv2.output_shape[2], l_conv2.output_shape[3]))
l_inverse11=lasagne.layers.InverseLayer(l_reshape1, l_conv2)
l_inverse41=lasagne.layers.InverseLayer(l_inverse11, l_conv1)
    #build output for source2
l_fc12=lasagne.layers.DenseLayer(l_fc,l_conv2.output_shape[1]*l_conv2.output_shape[2]*l_conv2.output_shape[3])
l_reshape2 = lasagne.layers.ReshapeLayer(l_fc12,(batch_size,l_conv2.output_shape[1],l_conv2.output_shape[2], l_conv2.output_shape[3]))
l_inverse12=lasagne.layers.InverseLayer(l_reshape2, l_conv2)
l_inverse42=lasagne.layers.InverseLayer(l_inverse12, l_conv1)
#build final output
l_merge=lasagne.layers.ConcatLayer([l_inverse41,l_inverse42],axis=1)
l_out = lasagne.layers.NonlinearityLayer(lasagne.layers.BiasLayer(l_merge), nonlinearity=lasagne.nonlinearities.rectify)
return l_out
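# Illustrative sketch of using build_ca (hypothetical sizes; the real values
# come from the LargeDataset instance constructed in the __main__ block below):
#   input_var = T.tensor4('inputs')   # (batch, 1, time, freq)
#   net = build_ca(input_var, batch_size=32, time_context=30, feat_size=513)
#   out = lasagne.layers.get_output(net, deterministic=True)
#   # 'out' stacks the two estimated sources along the channel axis:
#   # shape (batch, 2, time_context, feat_size)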
def train_auto(train,fun,transform,testdir,outdir,num_epochs=30,model="1.pkl",scale_factor=0.3,load=False,skip_train=False,skip_sep=False):
"""
    Trains a network built with "fun" on the data generated by "train",
    and then separates the files in "testdir", writing the results to "outdir"
Parameters
----------
train : Callable, e.g. LargeDataset object
The callable which generates training data for the network: inputs, target = train()
fun : lasagne network object, Theano tensor
The network to be trained
transform : transformFFT object
The Transform object which was used to compute the features (see compute_features.py)
testdir : string, optional
The directory where the files to be separated are located
outdir : string, optional
The directory where to write the separated files
num_epochs : int, optional
The number the epochs to train for (one epoch is when all examples in the dataset are seen by the network)
model : string, optional
The path where to save the trained model (theano tensor containing the network)
scale_factor : float, optional
Scale the magnitude of the files to be separated with this factor
    Returns
    -------
    losser : list
        The losses for each epoch, stored in a list
"""
logging.info("Building Autoencoder")
input_var2 = T.tensor4('inputs')
target_var2 = T.tensor4('targets')
rand_num = T.tensor4('rand_num')
eps=1e-8
alpha=0.9
beta_acc=0.005
beta_voc=0.02
network2 = fun(input_var=input_var2,batch_size=train.batch_size,time_context=train.time_context,feat_size=train.input_size)
if load:
params=load_model(model)
lasagne.layers.set_all_param_values(network2,params)
prediction2 = lasagne.layers.get_output(network2, deterministic=True)
rand_num = np.random.uniform(size=(train.batch_size,1,train.time_context,train.input_size))
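    # The two network output channels are turned into soft (Wiener-like) masks:
    # each time-frequency bin of the input magnitude is split between the voice
    # and accompaniment estimates in proportion to the corresponding outputs.
    # The eps*rand_num term keeps the mask denominators away from zero.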
voc=prediction2[:,0:1,:,:]+eps*rand_num
acco=prediction2[:,1:2,:,:]+eps*rand_num
mask1=voc/(voc+acco)
mask2=acco/(voc+acco)
vocals=mask1*input_var2[:,0:1,:,:]
acc=mask2*input_var2[:,0:1,:,:]
train_loss_recon_vocals = lasagne.objectives.squared_error(vocals,target_var2[:,0:1,:,:])
train_loss_recon_acc = alpha * lasagne.objectives.squared_error(acc,target_var2[:,1:2,:,:])
train_loss_recon_neg_voc = beta_voc * lasagne.objectives.squared_error(vocals,target_var2[:,1:2,:,:])
train_loss_recon_neg_acc = beta_acc * lasagne.objectives.squared_error(acc,target_var2[:,0:1,:,:])
vocals_error=train_loss_recon_vocals.sum()
acc_error=train_loss_recon_acc.sum()
negative_error_voc=train_loss_recon_neg_voc.sum()
negative_error_acc=train_loss_recon_neg_acc.sum()
loss=abs(vocals_error+acc_error-negative_error_voc)
params1 = lasagne.layers.get_all_params(network2, trainable=True)
updates = lasagne.updates.adadelta(loss, params1)
train_fn = theano.function([input_var2,target_var2], loss, updates=updates,allow_input_downcast=True)
train_fn1 = theano.function([input_var2,target_var2], [vocals_error,acc_error,negative_error_voc,negative_error_acc], allow_input_downcast=True)
predict_function2=theano.function([input_var2],[vocals,acc],allow_input_downcast=True)
predict_function3=theano.function([input_var2],[prediction2[:,0:1,:,:],prediction2[:,1:2,:,:]],allow_input_downcast=True)
losser=[]
loss2=[]
if not skip_train:
logging.info("Training...")
for epoch in range(num_epochs):
train_err = 0
train_batches = 0
vocals_err=0
acc_err=0
beta_voc=0
beta_acc=0
start_time = time.time()
for batch in range(train.iteration_size):
inputs, target = train()
jump = inputs.shape[2]
targets=np.ndarray(shape=(inputs.shape[0],2,inputs.shape[1],inputs.shape[2]))
inputs=np.reshape(inputs,(inputs.shape[0],1,inputs.shape[1],inputs.shape[2]))
targets[:,0,:,:]=target[:,:,:jump]
targets[:,1,:,:]=target[:,:,jump:jump*2]
target=None
train_err+=train_fn(inputs,targets)
[vocals_erre,acc_erre,betae_voc,betae_acc]=train_fn1(inputs,targets)
vocals_err += vocals_erre
acc_err += acc_erre
beta_voc+= betae_voc
beta_acc+= betae_acc
train_batches += 1
logging.info("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
logging.info(" training loss:\t\t{:.6f}".format(train_err/train_batches))
logging.info(" training loss for vocals:\t\t{:.6f}".format(vocals_err/train_batches))
logging.info(" training loss for acc:\t\t{:.6f}".format(acc_err/train_batches))
logging.info(" Beta component for voice:\t\t{:.6f}".format(beta_voc/train_batches))
logging.info(" Beta component for acc:\t\t{:.6f}".format(beta_acc/train_batches))
losser.append(train_err / train_batches)
save_model(model,network2)
if not skip_sep:
logging.info("Separating")
for f in os.listdir(testdir):
if f.endswith(".wav"):
audioObj, sampleRate, bitrate = util.readAudioScipy(os.path.join(testdir,f))
assert sampleRate == 44100,"Sample rate needs to be 44100"
audio = audioObj[:,0] + audioObj[:,1]
audioObj = None
mag,ph=transform.compute_file(audio,phase=True)
mag=scale_factor*mag.astype(np.float32)
batches,nchunks = util.generate_overlapadd(mag,input_size=mag.shape[-1],time_context=train.time_context,overlap=train.overlap,batch_size=train.batch_size,sampleRate=sampleRate)
output=[]
batch_no=1
for batch in batches:
batch_no+=1
start_time=time.time()
output.append(predict_function2(batch))
output=np.array(output)
bmag,mm=util.overlapadd(output,batches,nchunks,overlap=train.overlap)
audio_out=transform.compute_inverse(bmag[:len(ph)]/scale_factor,ph)
if len(audio_out)>len(audio):
audio_out=audio_out[:len(audio)]
                audio_out=np.array(audio_out, dtype=np.float32)
audio_out2= transform.compute_inverse(mm[:len(ph)]/scale_factor,ph)
if len(audio_out2)>len(audio):
audio_out2=audio_out2[:len(audio)]
                audio_out2=np.array(audio_out2, dtype=np.float32)
#write audio files
util.writeAudioScipy(os.path.join(outdir,f.replace(".wav","-voice.wav")),audio_out,sampleRate,bitrate)
util.writeAudioScipy(os.path.join(outdir,f.replace(".wav","-music.wav")),audio_out2,sampleRate,bitrate)
audio_out=None
audio_out2=None
return losser
if __name__ == "__main__":
"""
Source separation for the iKala dataset.
2nd place MIREX Singing voice separation 2016
http://www.music-ir.org/mirex/wiki/2016:Singing_Voice_Separation_Results
More details in the following article:
P. Chandna, M. Miron, J. Janer, and E. Gomez,
    "Monoaural audio source separation using deep convolutional neural networks,"
    International Conference on Latent Variable Analysis and Signal Separation, 2017.
    Given the features computed previously with compute_features, train a network and perform the separation.
Parameters
----------
db : string
The path to the iKala dataset
nepochs : int, optional
The number the epochs to train for (one epoch is when all examples in the dataset are seen by the network)
model : string, optional
The name of the trained model
scale_factor : float, optional
Scale the magnitude of the files to be separated with this factor
batch_size : int, optional
The number of examples in a batch (see LargeDataset in dataset.py)
batch_memory : int, optional
The number of batches to load in memory at once (see LargeDataset in dataset.py)
time_context : int, optional
The time context modeled by the network
overlap : int, optional
The number of overlapping frames between adjacent segments (see LargeDataset in dataset.py)
nprocs : int, optional
The number of CPU to use when loading the data in parallel: the more, the faster (see LargeDataset in dataset.py)
"""
if len(sys.argv)>-1:
climate.add_arg('--db', help="the ikala dataset path")
climate.add_arg('--model', help="the name of the model to test/save")
climate.add_arg('--nepochs', help="number of epochs to train the net")
climate.add_arg('--time_context', help="number of frames for the recurrent/lstm/conv net")
climate.add_arg('--batch_size', help="batch size for training")
climate.add_arg('--batch_memory', help="number of big batches to load into memory")
climate.add_arg('--overlap', help="overlap time context for training")
climate.add_arg('--nprocs', help="number of processor to parallelize file reading")
climate.add_arg('--scale_factor', help="scale factor for the data")
climate.add_arg('--feature_path', help="the path where to load the features from")
db=None
kwargs = climate.parse_args()
if kwargs.__getattribute__('db'):
db = kwargs.__getattribute__('db')
# else:
# db='/home/marius/Documents/Database/iKala/'
if kwargs.__getattribute__('feature_path'):
feature_path = kwargs.__getattribute__('feature_path')
else:
feature_path=os.path.join(db,'transforms','t1')
assert os.path.isdir(db), "Please input the directory for the iKala dataset with --db path_to_iKala"
if kwargs.__getattribute__('model'):
model = kwargs.__getattribute__('model')
else:
model="fft_1024"
if kwargs.__getattribute__('batch_size'):
batch_size = int(kwargs.__getattribute__('batch_size'))
else:
batch_size = 32
if kwargs.__getattribute__('batch_memory'):
batch_memory = int(kwargs.__getattribute__('batch_memory'))
else:
batch_memory = 200
if kwargs.__getattribute__('time_context'):
time_context = int(kwargs.__getattribute__('time_context'))
else:
time_context = 30
if kwargs.__getattribute__('overlap'):
overlap = int(kwargs.__getattribute__('overlap'))
else:
overlap = 20
if kwargs.__getattribute__('nprocs'):
nprocs = int(kwargs.__getattribute__('nprocs'))
else:
nprocs = 7
if kwargs.__getattribute__('nepochs'):
nepochs = int(kwargs.__getattribute__('nepochs'))
else:
nepochs = 40
if kwargs.__getattribute__('scale_factor'):
        scale_factor = float(kwargs.__getattribute__('scale_factor'))
else:
scale_factor = 0.3
#tt object needs to be the same as the one in compute_features
tt = transformFFT(frameSize=1024, hopSize=512, sampleRate=44100, window=blackmanharris)
pitchhop=0.032*float(tt.sampleRate) #seconds to frames
ld1 = LargeDataset(path_transform_in=feature_path, batch_size=batch_size, batch_memory=batch_memory, time_context=time_context, overlap=overlap, nprocs=nprocs,mult_factor_in=scale_factor,mult_factor_out=scale_factor)
logging.info(" Maximum:\t\t{:.6f}".format(ld1.getMax()))
logging.info(" Mean:\t\t{:.6f}".format(ld1.getMean()))
logging.info(" Standard dev:\t\t{:.6f}".format(ld1.getStd()))
if not os.path.exists(os.path.join(db,'output',model)):
os.makedirs(os.path.join(db,'output',model))
if not os.path.exists(os.path.join(db,'models')):
os.makedirs(os.path.join(db,'models'))
train_errs=train_auto(train=ld1,fun=build_ca,transform=tt,outdir=os.path.join(db,'output',model),testdir=os.path.join(db,'Wavfile'),model=os.path.join(db,'models',"model_"+model+".pkl"),num_epochs=nepochs,scale_factor=scale_factor)
f = file(db+"models/"+"loss_"+model+".data", 'wb')
cPickle.dump(train_errs,f,protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
| gerruz/DeepConvSep | examples/ikala/trainCNN.py | Python | agpl-3.0 | 16,813 |
import numpy as np
import matplotlib.pyplot as plt
from analytical_solutions import *
z_surf = 0.0 # location of upper surface
z_bott = 150e3 # location of bottom boundary
nz = 11 # number of grid points
nt = 8 # number of timesteps to calculate
dt = 0.5*112500000000000.0 # timestep to use, in seconds
T_surf = 0.0 # temperature of the upper surface
T_bott = 1300.0 # temperature of the lower boundary
T_ini = 1300.0
k = 4.5 # heat conductivity
rho = 3200 # density
Cp = 1250 # heat capacity
plot_every = 1 # plot the temperature field every Nth timestep
plot_analytical = True # change this to 'False' to omit plotting the analytical solution
# Analytical solution is applicable to cooling/heating of the semi-infinite
# half-space, i.e. for case T_bott == T_ini != T_surf
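# Stability note for the explicit finite-difference update below: the timestep
# must satisfy dt <= dz**2 / (2*kappa), where kappa = k/(rho*Cp).
# With the values above, kappa = 4.5/(3200*1250) = 1.125e-6 m**2/s and
# dz = 15 km, giving a limit of about 1.0e14 s; the chosen dt (~5.6e13 s)
# satisfies it.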
# Generate the grid
dz = (z_bott - z_surf) / (nz - 1)
z = np.zeros(nz)
for i in range(0, nz):
z[i] = z_surf + i*dz
# Generate array of times at every timestep
# Used only for plotting
t = np.zeros(nt)
for j in range(0, nt):
t[j] = j*dt
print("Grid points are:")
print(z)
# Generate an empty array for temperature values
# for every grid point at every time step, i.e. an array
# of size nt times nz
T = np.zeros((nt, nz))
# Set initial condition, T=0 everywhere except at boundaries
j = 0 # = initial step
T[j, 0] = T_surf
T[j, nz-1] = T_bott
for i in range(1, nz-1):
T[j, i] = T_ini
time = 0
# Loop over time steps, skipping the first one (=initial condition)
for j in range(1,nt):
time = time + dt
# Set boundary condition
    T[j, 0] = T_surf
    T[j, nz-1] = T_bott
    # Calculate temperature at inner grid points
    # using an explicit forward-time, centered-space (FTCS) update
    for i in range(1, nz-1):
        T[j, i] = T[j-1, i] + dt * (k / (rho * Cp)) * (T[j-1, i+1] - 2.0*T[j-1, i] + T[j-1, i-1]) / dz**2
if j % plot_every == 0:
# Print and plot the depth vs temperature during this timestep
# Also calculate an analytical solution
T_analytical = analytical_tr(T_surf, T_ini, k/(rho*Cp), time, np.linspace(z[0], z[-1], 100))
print("Temperatures at timestep " + str(j) + ":")
print(T[j, 0:nz])
plt.plot(T[j, 0:nz], -z, "o-", label="FD") # minus sign is placed to make z axis point down
if plot_analytical:
plt.plot(T_analytical, -np.linspace(z[0], z[-1], 100), "-r", label="Analytical")
plt.title("Time=" + str(time/(60*60*24*365.25*1e3)) + "kyrs")
plt.xlabel("Temperature")
plt.ylabel("Depth")
plt.legend(loc=3)
plt.show()
# Plot the temperature field as a function of time and location
SECINYR = 60*60*24*365.25
TIME, LOC = np.meshgrid(t, z, indexing="ij")
plt.contourf(TIME/(1000.0*SECINYR), LOC, T)
plt.xlabel("kyrs")
plt.ylabel("z")
plt.show()
| HUGG/NGWM2016-modelling-course | Lessons/05-Finite-differences/scripts/heat_transient.py | Python | mit | 2,662 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import NeuralLayer
from deepy.utils import build_activation, FLOATX
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
OUTPUT_TYPES = ["sequence", "one"]
INPUT_TYPES = ["sequence", "one"]
class RNN(NeuralLayer):
"""
Recurrent neural network layer.
"""
def __init__(self, hidden_size, input_type="sequence", output_type="sequence", vector_core=None,
hidden_activation="tanh", hidden_init=None, input_init=None, steps=None,
persistent_state=False, reset_state_for_input=None, batch_size=None,
go_backwards=False, mask=None, second_input_size=None, second_input=None):
super(RNN, self).__init__("rnn")
self._hidden_size = hidden_size
self.output_dim = self._hidden_size
self._input_type = input_type
self._output_type = output_type
self._hidden_activation = hidden_activation
self._hidden_init = hidden_init
self._vector_core = vector_core
self._input_init = input_init
self.persistent_state = persistent_state
self.reset_state_for_input = reset_state_for_input
self.batch_size = batch_size
self._steps = steps
self._go_backwards = go_backwards
self._mask = mask.dimshuffle((1,0)) if mask else None
self._second_input_size = second_input_size
self._second_input = second_input
self._sequence_map = OrderedDict()
if input_type not in INPUT_TYPES:
raise Exception("Input type of RNN is wrong: %s" % input_type)
if output_type not in OUTPUT_TYPES:
raise Exception("Output type of RNN is wrong: %s" % output_type)
if self.persistent_state and not self.batch_size:
raise Exception("Batch size must be set for persistent state mode")
if mask and input_type == "one":
raise Exception("Mask only works with sequence input")
def _hidden_preact(self, h):
return T.dot(h, self.W_h) if not self._vector_core else h * self.W_h
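    # One scan step implements the plain RNN recurrence
    #   h_t = act(W_i . x_t + W_h . h_{t-1} + B_h)
    # where the W_i . x_t term is precomputed for the whole sequence in
    # produce_input_sequences, so step() only adds the recurrent part.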
def step(self, *vars):
# Parse sequence
sequence_map = dict(zip(self._sequence_map.keys(), vars[:len(self._sequence_map)]))
if self._input_type == "sequence":
x = sequence_map["x"]
h = vars[-1]
# Reset part of the state on condition
if self.reset_state_for_input != None:
h = h * T.neq(x[:, self.reset_state_for_input], 1).dimshuffle(0, 'x')
# RNN core step
z = x + self._hidden_preact(h) + self.B_h
else:
h = vars[-1]
z = self._hidden_preact(h) + self.B_h
# Second input
if "second_input" in sequence_map:
z += sequence_map["second_input"]
new_h = self._hidden_act(z)
# Apply mask
if "mask" in sequence_map:
mask = sequence_map["mask"].dimshuffle(0, 'x')
new_h = mask * new_h + (1 - mask) * h
return new_h
def produce_input_sequences(self, x, mask=None, second_input=None):
self._sequence_map.clear()
if self._input_type == "sequence":
self._sequence_map["x"] = T.dot(x, self.W_i)
# Mask
if mask:
# (batch)
self._sequence_map["mask"] = mask
elif self._mask:
# (time, batch)
self._sequence_map["mask"] = self._mask
# Second input
if second_input:
self._sequence_map["second_input"] = T.dot(second_input, self.W_i2)
elif self._second_input:
self._sequence_map["second_input"] = T.dot(self._second_input, self.W_i2)
return self._sequence_map.values()
def produce_initial_states(self, x):
h0 = T.alloc(np.cast[FLOATX](0.), x.shape[0], self._hidden_size)
if self._input_type == "sequence":
if self.persistent_state:
h0 = self.state
else:
h0 = x
return [h0]
def output(self, x):
if self._input_type == "sequence":
# Move middle dimension to left-most position
# (sequence, batch, value)
sequences = self.produce_input_sequences(x.dimshuffle((1,0,2)))
else:
sequences = self.produce_input_sequences(None)
step_outputs = self.produce_initial_states(x)
hiddens, _ = theano.scan(self.step, sequences=sequences, outputs_info=step_outputs,
n_steps=self._steps, go_backwards=self._go_backwards)
# Save persistent state
if self.persistent_state:
self.register_updates((self.state, hiddens[-1]))
if self._output_type == "one":
return hiddens[-1]
elif self._output_type == "sequence":
return hiddens.dimshuffle((1,0,2))
def setup(self):
if self._input_type == "one" and self.input_dim != self._hidden_size:
raise Exception("For RNN receives one vector as input, "
"the hidden size should be same as last output dimension.")
self._setup_params()
self._setup_functions()
def _setup_functions(self):
self._hidden_act = build_activation(self._hidden_activation)
def _setup_params(self):
if not self._vector_core:
self.W_h = self.create_weight(self._hidden_size, self._hidden_size, suffix="h", initializer=self._hidden_init)
else:
self.W_h = self.create_bias(self._hidden_size, suffix="h")
self.W_h.set_value(self.W_h.get_value() + self._vector_core)
self.B_h = self.create_bias(self._hidden_size, suffix="h")
self.register_parameters(self.W_h, self.B_h)
if self.persistent_state:
self.state = self.create_matrix(self.batch_size, self._hidden_size, "rnn_state")
self.register_free_parameters(self.state)
else:
self.state = None
if self._input_type == "sequence":
self.W_i = self.create_weight(self.input_dim, self._hidden_size, suffix="i", initializer=self._input_init)
self.register_parameters(self.W_i)
if self._second_input_size:
self.W_i2 = self.create_weight(self._second_input_size, self._hidden_size, suffix="i2", initializer=self._input_init)
self.register_parameters(self.W_i2)
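# Minimal usage sketch (hypothetical sizes; wiring the layer into a model
# depends on the surrounding deepy network definition):
#   rnn = RNN(hidden_size=100, input_type="sequence", output_type="sequence")
#   # after setup(), output(x) maps (batch, time, input_dim) to
#   # (batch, time, hidden_size); with output_type="one" only the final
#   # hidden state of shape (batch, hidden_size) is returned.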
| rldotai/deepy | deepy/layers/recurrent.py | Python | mit | 6,446 |
import pickle
from pymech.units.SI import *
from pymech.fluid.Core import fluidspeed, flowrate
class Component:
ID: int = 0 # ID number
_Q = 0. * ureg['m**3/s'] # Volumetric flow
_v = 0. * ureg['m/s'] # Speed of fluid
_A = 0. * ureg['m**2'] # Cross section of fluid plane
_P = [0. * ureg['Pa'], 0. * ureg['Pa']] # Pressure at point A and B
def __init__(self, ID=None):
if ID is not None:
self.ID = int(ID)
else:
self.ID = int(0)
def __repr__(self):
return repr([self.ID, self.Q, self.v, self.A])
def __hash__(self):
return hash(self.ID)
@property
def A(self):
return self._A
@A.setter
def A(self, value):
A_old = self.A
if A_old == 0. * ureg['m**2']:
A_old = value.to('m**2')
self._A = value.to('m**2')
self.Q = (self.A / A_old) * self.Q # update Q en v
@property
def v(self):
return self._v
@v.setter
def v(self, value):
self._v = value.to('m/s')
self._Q = flowrate(self.v, self.A) # update Q
@property
def Q(self):
return self._Q
@Q.setter
def Q(self, value):
self._Q = value.to('m**3/s')
self._v = fluidspeed(self.Q, self.A) # update v
@property
def P(self):
""" Get the pressure at both points np.array(2,1) [Pa]"""
return self._P
@P.setter
def P(self, value):
""" Set the pressure at both points np.array(2,1) [Pa]"""
self._P = value.to('Pa')
@property
def P0(self):
return self.P[0]
@P0.setter
def P0(self, value):
self._P[0] = value.to('Pa')
@property
def P1(self):
return self.P[1]
@P1.setter
def P1(self, value):
self._P[1] = value.to('Pa')
@property
def dp(self):
""" Pressure drop [Pa] """
return self._P[0] - self._P[1]
@property
def hl(self):
""" Headloss in the component [m]"""
return self.dp / g
    def save(self, filename):
        # persist the component with pickle (imported above)
        with open(filename, 'wb') as f:
            pickle.dump(self, f)
    def load(self, filename):
        with open(filename, 'rb') as f:
            comp = pickle.load(f)
self.ID = comp.ID
self._P = comp._P
self._A = comp._A
self._Q = comp._Q
self._v = comp._v
def Component_to_builtin(u):
return (u.ID, u._P, u._A, u._Q, u._v)
def Component_from_builtin(c):
comp = Component()
comp.ID = c[0]
comp._P = c[1]
comp._A = c[2]
comp._Q = c[3]
comp._v = c[4]
return comp
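# Illustrative usage sketch (numbers are placeholders; `ureg` is the pint
# registry imported from pymech.units.SI):
#   pipe = Component(ID=1)
#   pipe.A = 0.01 * ureg['m**2']     # setting the area rescales Q consistently
#   pipe.Q = 0.02 * ureg['m**3/s']   # setting Q updates v to Q/A = 2 m/s
#   pipe.P0 = 300e3 * ureg['Pa']
#   pipe.P1 = 250e3 * ureg['Pa']
#   pipe.dp                          # pressure drop of 50 kPa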
| peer23peer/pymech | pymech/fluid/Component.py | Python | mit | 2,609 |
#!/usr/bin/python
"""LDAP Directory Management, wrapper for python-ldap (http://www.python-ldap.org).
This module provides high level control over an LDAP Directory.
Some code was originally built on examples available here:
http://www.grotan.com/ldap/python-ldap-samples.html
Copyright (c) 2014 Derak Berreyesa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = "Derak Berreyesa (github.com/derak)"
__version__ = "1.0"
import sys, json
import ldap
import ldap.modlist as modlist
class Directory(object):
def __init__(self):
self.result = {}
self.l = None
def connect(self, url, username, password):
try:
# Create a new user in Active Directory
ldap.set_option(ldap.OPT_REFERRALS, 0)
# Allows us to have a secure connection
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, 0)
# Open a connection
self.l = ldap.initialize(url)
            # Bind/authenticate with a user with appropriate rights to add objects
self.l.simple_bind_s(username, password)
except ldap.LDAPError, e:
sys.stderr.write('Error connecting to LDAP server: ' + str(e) + '\n')
self.result['status'] = 'Error connecting to LDAP server: ' + str(e) + '\n'
print json.dumps(self.result)
sys.exit(1)
def add_user(self, dn, attrs):
try:
# Convert our dict to nice syntax for the add-function using modlist-module
ldif = modlist.addModlist(attrs)
# Add user
self.l.add_s(dn,ldif)
except ldap.LDAPError, e:
sys.stderr.write('Error with LDAP add_user: ' + str(e) + '\n')
self.result['status'] = 'Error with LDAP add_user: ' + str(e) + '\n'
print json.dumps(self.result)
sys.exit(1)
def add_user_to_groups(self, dn, group_dn_list):
try:
# Add user to groups as member
mod_attrs = [( ldap.MOD_ADD, 'member', dn )]
for g in group_dn_list:
self.l.modify_s(g, mod_attrs)
except ldap.LDAPError, e:
sys.stderr.write('Error: adding user to group(s): ' + str(e) + '\n')
self.result['status'] = 'Error: adding user to group(s): ' + str(e) + '\n'
print json.dumps(self.result)
sys.exit(1)
def set_password(self, dn, password):
# HERE YOU MAKE THE utf-16-le encode password
unicode_pass = unicode('\"' + password + '\"', 'iso-8859-1')
password_value = unicode_pass.encode('utf-16-le')
# change the atribute in the entry you just created
add_pass = [(ldap.MOD_REPLACE, 'unicodePwd', [password_value])]
try:
self.l.modify_s(dn, add_pass)
except ldap.LDAPError, error_message:
self.result['status'] = 'Error: could not change password: ' + str(error_message) + '\n'
print json.dumps(self.result)
sys.exit(1)
else:
self.result['status'] = 'Successfully changed password \n'
def modify_user(self, dn, flag):
"""Modify user, flag is userAccountControl property"""
# 512 will set user account to enabled
# change the user to enabled
mod_acct = [(ldap.MOD_REPLACE, 'userAccountControl', str(flag))]
try:
self.l.modify_s(dn, mod_acct)
except ldap.LDAPError, error_message:
self.result['status'] = 'Error: could not modify user: ' + str(error_message) + '\n'
print json.dumps(self.result)
sys.exit(1)
else:
self.result['status'] = 'Successfully modified user \n'
def print_users(self, base_dn, attrs):
filter = '(objectclass=person)'
users = self.l.search_s(base_dn, ldap.SCOPE_SUBTREE, filter, attrs)
for row in users:
print row
def disconnect(self):
self.l.unbind_s()
def get_result(self):
return self.result
if __name__ == '__main__':
print 'This is directory.py'
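# Illustrative usage sketch (server URL, DNs and credentials are placeholders):
#   d = Directory()
#   d.connect('ldaps://dc.example.com:636', 'CN=admin,DC=example,DC=com', 'secret')
#   dn = 'CN=Jane Doe,OU=Users,DC=example,DC=com'
#   attrs = {'objectclass': ['top', 'person', 'organizationalPerson', 'user'],
#            'sAMAccountName': 'jdoe', 'userPrincipalName': 'jdoe@example.com'}
#   d.add_user(dn, attrs)
#   d.set_password(dn, 'NewP@ssw0rd')   # AD requires an SSL/TLS bind for this
#   d.modify_user(dn, 512)              # userAccountControl 512 = enabled account
#   d.disconnect()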
| derak/directory.py | directory.py | Python | mit | 5,070 |
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
class DatabaseError(Exception):
pass
class DatabaseROError(DatabaseError):
"""cannot write to read-only database"""
pass
class DatabaseLockedError(DatabaseError):
"""cannot write to locked index"""
pass
class NonexistantObjectError(DatabaseError):
"""requested thread or message does not exist in the index"""
pass
| np/alot | alot/db/errors.py | Python | gpl-3.0 | 536 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(fname):
buf = open(os.path.join(os.path.dirname(__file__), fname), 'rb').read()
return buf.decode('utf8')
setup(name='mtgjson',
version='0.4.1.dev1',
description='A python library for working with data from mtgjson.com.',
long_description=read('README.rst'),
author='Marc Brinkmann',
author_email='git@marcbrinkmann.de',
url='http://github.com/mbr/pymtgjson',
license='MIT',
packages=find_packages(exclude=['tests']),
install_requires=['docutils', 'requests', 'six'],
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
])
| mbr/pymtgjson | setup.py | Python | mit | 772 |
from psrd.sections import cap_words
from psrd.stat_block.utils import colon_filter, default_closure, noop
from psrd.stat_block.utils import parse_stat_block, collapse_text, has_heading
from psrd.stat_block.utils import sections_pass
from psrd.universal import StatBlockSection, Heading, filter_name
from psrd.universal import title_collapse_pass
def is_npc(sb, book):
if is_creature(sb, book):
for detail in sb.details:
if detail.__class__ == StatBlockSection and detail.name.lower(
).strip() in ['boon']:
return True
return False
def is_creature(sb, book):
for detail in sb.details:
if detail.__class__ == StatBlockSection and detail.name.lower(
).strip() in ['ecology', 'statistics']:
return True
return False
def parse_npc(sb, book):
npc = parse_creature(sb, book)
npc['subtype'] = 'npc'
return npc
def parse_creature(sb, book):
name = sb.name
cr = None
if name.find('CR') > -1:
name, cr = name.split('CR')
creature = {'type': 'creature', 'source': book, 'name': filter_name(name)}
if cr:
creature['cr'] = cr.strip()
sections = []
text = []
descriptors = []
for tup in sb.keys:
if tup[0] == 'descriptor':
descriptors.append(tup)
for tup in descriptors:
sb.keys.remove(tup)
if len(descriptors) > 0:
parse_creature_descriptors(creature, descriptors)
for key, value in sb.keys:
creature_parse_function(key)(creature, value)
for detail in sb.details:
if detail.name.lower() == 'base statistics':
detail.name = 'Statistics'
if detail.name.lower() == 'environment':
detail.name = 'Ecology'
if detail.__class__ == StatBlockSection and detail.name.lower() in [
'defense', 'offense', 'statistics', 'ecology']:
for key, value in detail.keys:
creature_parse_function(key)(creature, value)
for subd in detail.details:
if isinstance(subd, dict) or isinstance(subd, Heading):
sections.append(subd)
else:
newsec = {
'type': 'section',
'source': book,
'text': unicode(subd)}
sections.append(newsec)
elif detail.__class__ == StatBlockSection and detail.name.lower() in [
'special abilities']:
special_abilities = {
'type': 'section',
'subtype': 'special_abilities',
'source': book,
'name': 'Special Abilities',
'sections': []}
for key in detail.keys:
newsec = {
'type': 'section',
'source': book,
'name': key[0],
'text': key[1]}
special_abilities['sections'].append(newsec)
sections.append(special_abilities)
for subd in detail.details:
if isinstance(subd, dict) or isinstance(subd, Heading):
sections.append(subd)
else:
newsec = {
'type': 'section',
'source': book,
'text': unicode(subd)}
sections.append(newsec)
elif detail.__class__ == StatBlockSection and detail.name.lower() in [
'tactics']:
sections.append(parse_stat_block(detail, book, no_sb=True))
else:
if isinstance(detail, dict) or isinstance(detail, Heading):
text.append(detail)
else:
text.append(unicode(detail))
if len(text) > 0:
collapse_text(creature, text)
if len(sections) > 0:
level = has_heading(sections)
while level:
sections = title_collapse_pass(sections, level)
level = level - 1
if level == 0:
sections = sections_pass(sections, creature['source'])
creature['sections'] = sections
return creature
def creature_parse_function(field):
functions = {
'cr': parse_cr,
'size': default_closure('size'),
'hit dice': default_closure('hit_dice'),
'natural armor': default_closure('natural_armor'),
'breath weapon': default_closure('breath_weapon'),
'init': default_closure('init'),
'senses': default_closure('senses'),
'perception': perception_fix,
'aura': default_closure('aura'),
'ac': default_closure('ac'),
'hp': default_closure('hp'),
'fort': default_closure('fortitude'),
'ref': default_closure('reflex'),
'will': default_closure('will'),
'defensive abilities': default_closure('defensive_abilities'),
'defensive ability': default_closure('defensive_abilities'),
'dr': default_closure('dr'),
'immune': default_closure('immune'),
'resist': default_closure('resist'),
'sr': default_closure('sr'),
'weaknesses': default_closure('weaknesses'),
'vulnerability': default_closure('weaknesses'),
'weakness': default_closure('weaknesses'),
'sq': default_closure('special_qualities'),
'special qualities': default_closure('special_qualities'),
'speed': default_closure('speed'),
'melee': default_closure('melee'),
'ranged': default_closure('ranged'),
'special attacks': default_closure('special_attacks'),
'special attack': default_closure('special_attacks'),
'attacks': default_closure('special_attacks'),
'spell-like abilities': creature_spell_closure('spell-like abilities'),
'spell-lilke abilities': creature_spell_closure('spell-like abilities'),
'spell-like ability': creature_spell_closure('spell-like abilities'),
'bloodline spell-like ability': creature_spell_closure(
'bloodline spell-like ability'),
'bloodline spell-like abilities': creature_spell_closure(
'bloodline spell-like abilities'),
'arcane spell-like abilities': creature_spell_closure(
'arcane spell-like abilities'),
'arcane school spell-like abilities': creature_spell_closure(
'arcane school spell-like abilities'),
'domain spell-like abilities': creature_spell_closure(
'domain spell-like abilities'),
'ifrit spell-like abilities': creature_spell_closure(
'ifrit spell-like abilities'),
'gnome spell-like abilities': creature_spell_closure(
'gnome spell-like abilities'),
'sorcerer spell-like abilities': creature_spell_closure(
'sorcerer spell-like abilities'),
'antipaladin spell-like abilities': creature_spell_closure(
'antipaladin spell-like abilities'),
'paladin spell-like abilities': creature_spell_closure(
'paladin spell-like abilities'),
'rogue spell-like abilities': creature_spell_closure(
'rogue spell-like abilities'),
'conjurer spell-like abilities': creature_spell_closure(
'conjurer spell-like abilities'),
'transmuter spell-like abilities': creature_spell_closure(
'transmuter spell-like abilities'),
'enchanter spell-like abilities': creature_spell_closure(
'enchanter spell-like abilities'),
'evoker spell-like abilities': creature_spell_closure(
'evoker spell-like abilities'),
'dragon disciple spell-like abilities': creature_spell_closure(
'dragon disciple spell-like abilities'),
'shadowdancer spell-like abilities': creature_spell_closure(
'shadowdancer spell-like abilities'),
'devilbound spell-like abilities': creature_spell_closure(
'devilbound spell-like abilities'),
'gathlain spell-like abilities': creature_spell_closure(
'gathlain spell-like abilities'),
'kitsune spell-like abilities': creature_spell_closure(
'kitsune spell-like abilities'),
'wayang spell-like abilities': creature_spell_closure(
'wayang spell-like abilities'),
'utility spell-like abilities': creature_spell_closure(
'utility spell-like abilities'),
'defensive spell-like abilities': creature_spell_closure(
'defensive spell-like abilities'),
'attack spell-like abilities': creature_spell_closure(
'attack spell-like abilities'),
'spells prepared': creature_spell_closure('spells prepared'),
'alchemist extracts prepared': creature_spell_closure(
'alchemist extracts prepared'),
'adept spells prepared': creature_spell_closure(
'adept spells prepared'),
'bard spells prepared': creature_spell_closure(
'bard spells prepared'),
'cleric spells prepared': creature_spell_closure(
'cleric spells prepared'),
'conjurer spells prepared': creature_spell_closure(
'conjurer spells prepared'),
'druid spells prepared': creature_spell_closure(
'druid spells prepared'),
'magus spells prepared': creature_spell_closure(
'magus spells prepared'),
'antipaladin spells prepared': creature_spell_closure(
'antipaladin spells prepared'),
'paladin spells prepared': creature_spell_closure(
'paladin spells prepared'),
'ranger spells prepared': creature_spell_closure(
'ranger spells prepared'),
'witch spells prepared': creature_spell_closure(
'witch spells prepared'),
'wizard spells prepared': creature_spell_closure(
'wizard spells prepared'),
'necromancer spells prepared': creature_spell_closure(
'necromancer spells prepared'),
'enchanter spells prepared': creature_spell_closure(
'enchanter spells prepared'),
'diviner spells prepared': creature_spell_closure(
'diviner spells prepared'),
'transmuter spells prepared': creature_spell_closure(
'transmuter spells prepared'),
'evoker spells prepared': creature_spell_closure(
'evoker spells prepared'),
'illusionist spells prepared': creature_spell_closure(
'illusionist spells prepared'),
'abjurer spells prepared': creature_spell_closure(
'abjurer spells prepared'),
'utility spells': creature_spell_closure(
'utility spells'),
'utility options': creature_spell_closure(
'utility options'),
'defensive spells': creature_spell_closure(
'defensive spells'),
'attack spells': creature_spell_closure(
'attack spells'),
'spells known': creature_spell_closure('spells known'),
'bard spells known': creature_spell_closure('bard spells known'),
'inquisitor spells known': creature_spell_closure(
'inquisitor spells known'),
'oracle spells known': creature_spell_closure('oracle spells known'),
'sorcerer spells known': creature_spell_closure(
'sorcerer spells known'),
'str': default_closure('strength'),
'dex': default_closure('dexterity'),
'con': default_closure('constitution'),
'int': default_closure('intelligence'),
'wis': default_closure('wisdom'),
'cha': default_closure('charisma'),
'base atk': default_closure('base_attack'),
'atk': default_closure('base_attack'),
'cmb': default_closure('cmb'),
'cmd': default_closure('cmd'),
'concentration': default_closure('concentration'),
'feats': default_closure('feats'),
'skills': default_closure('skills'),
'racial modifiers': default_closure('racial_modifiers'),
'racial modifier': default_closure('racial_modifiers'),
'languages': default_closure('languages'),
'language': default_closure('languages'),
'gear': default_closure('gear'),
'combat gear': default_closure('combat_gear'),
'other gear': default_closure('other_gear'),
'boon': default_closure('boon'),
'space': default_closure('space'),
'reach': default_closure('reach'),
'environment': default_closure('environment'),
'environment any': parse_broken_environment,
'organization': default_closure('organization'),
'treasure': default_closure('treasure'),
'base': noop,
'special': noop,
'descriptor': parse_creature_descriptor
}
if field.lower().startswith('xp'):
return xp_closure('field')
return functions[field.lower()]
def parse_cr(sb, value):
try:
v = int(value)
sb['cr'] = value
return
except:
pass
if value.startswith('CR '):
mr = None
cr = value
if cr.find('/MR') > -1:
cr, mr = cr.split('/MR')
if mr:
sb['mr'] = mr.strip()
sb['cr'] = cr.replace('CR ', '')
else:
raise Exception("Unknown CR line: %s " % value)
def creature_spell_closure(field):
def fxn(sb, value):
value = colon_filter(value)
value = value.replace('–', '-')
value = value.replace('—', '-')
spells = sb.setdefault('spells', {})
spells[field] = value
return fxn
def parse_broken_environment(sb, value):
sb['environment'] = 'any'
def xp_closure(field):
def fxn(sb, value):
sb['xp'] = value.replace('XP', '').strip()
return fxn
def perception_fix(sb, value):
sb['senses'] = sb['senses'] + "; Perception " + value
def parse_creature_classes(creature, value):
name = creature['name'].split("(")[0].strip().lower()
values = value.lower().split(name)
if len(values) == 2:
first = values[0].strip()
second = values[1].strip()
if len(first) > 0:
parse_super_race(creature, first)
if len(second) > 0:
second = second.split("(")[0].strip()
parts = second.split(" ")
try:
int(parts[-1])
creature['level'] = second
except ValueError:
if second == "animal companion":
creature['super_race'] = second
else:
raise Exception("Not a level, not sure what to do: %s" % second)
elif len(values) == 1:
parse_super_race(creature, values[0])
else:
raise Exception("Not sure what to do here: %s" % value)
def parse_super_race(creature, snippet):
fields = snippet.split(' ')
sr = []
for field in fields:
if field in ['male', 'female']:
creature['sex'] = field
else:
sr.append(field)
if len(sr) > 0:
creature['super_race'] = ' '.join(sr)
def parse_creature_descriptors(creature, value):
real = []
for tup in value:
key, val = tup
if val.startswith('AC'):
default_closure('ac')(creature, val[2:])
else:
real.append(val)
parse_creature_descriptor(creature, real.pop())
if len(real) > 0:
parse_creature_classes(creature, real.pop(0))
if len(real) > 0:
raise Exception("Too many descriptors: %s" % value)
def parse_creature_descriptor(creature, value):
print "%s: %s" %(creature, value)
if value.startswith('AC'):
default_closure('ac')(creature, value[2:])
return
bsb = None
if value.find('(but see below) ') > -1:
value = value.replace('(but see below) ', '')
bsb = " (but see below)"
any_al = None
if value.find('Any alignment (same as creator)') > -1:
any_al = 'Any alignment (same as creator)'
value = value.replace(any_al, 'Any')
descsplit = value.split("(")
if len(descsplit) > 1:
value = descsplit.pop(0)
subtype = ''.join(descsplit)
subtype = subtype.replace(')', '')
creature['creature_subtype'] = subtype
values = value.split()
if len(values) == 2:
creature['alignment'] = values.pop(0)
creature['creature_type'] = cap_words(values.pop(0))
elif len(values) >= 3:
creature['alignment'] = values.pop(0)
if any_al:
creature['alignment'] = any_al
if values[0] == 'alignment':
creature['alignment'] = creature['alignment'] + " " + values.pop(0)
if bsb:
creature['alignment'] = creature['alignment'] + bsb
if values[0] == 'or':
alignment = creature['alignment']
alignment = alignment + " " + values.pop(0)
alignment = alignment + " " + values.pop(0)
creature['alignment'] = alignment
creature['size'] = values.pop(0)
creature['creature_type'] = cap_words(values.pop(0))
if len(values) > 0:
if values[0] in ['beast', 'humanoid']:
creature['creature_type'] = creature['creature_type'] + \
" " + cap_words(values.pop(0))
if len(values) > 0:
raise Exception('well fuck: %s' %(values))
| devonjones/PSRD-Parser | src/psrd/stat_block/creature.py | Python | gpl-3.0 | 14,691 |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
'''Python usage, esp. virtualenv.
'''
import os
import subprocess
import sys
import time
import json
import traceback
import mozharness
from mozharness.base.script import (
PostScriptAction,
PostScriptRun,
PreScriptAction,
PreScriptRun,
)
from mozharness.base.errors import VirtualenvErrorList
from mozharness.base.log import WARNING, FATAL
from mozharness.mozilla.proxxy import Proxxy
external_tools_path = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
'external_tools',
)
def get_tlsv1_post():
# Monkeypatch to work around SSL errors in non-bleeding-edge Python.
# Taken from https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
import requests
from requests.packages.urllib3.poolmanager import PoolManager
import ssl
class TLSV1Adapter(requests.adapters.HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1)
s = requests.Session()
s.mount('https://', TLSV1Adapter())
return s.post
# Virtualenv {{{1
virtualenv_config_options = [
[["--venv-path", "--virtualenv-path"], {
"action": "store",
"dest": "virtualenv_path",
"default": "venv",
"help": "Specify the path to the virtualenv top level directory"
}],
[["--virtualenv"], {
"action": "store",
"dest": "virtualenv",
"help": "Specify the virtualenv executable to use"
}],
[["--find-links"], {
"action": "extend",
"dest": "find_links",
"help": "URL to look for packages at"
}],
[["--pip-index"], {
"action": "store_true",
"default": True,
"dest": "pip_index",
"help": "Use pip indexes (default)"
}],
[["--no-pip-index"], {
"action": "store_false",
"dest": "pip_index",
"help": "Don't use pip indexes"
}],
]
class VirtualenvMixin(object):
'''BaseScript mixin, designed to create and use virtualenvs.
Config items:
* virtualenv_path points to the virtualenv location on disk.
* virtualenv_modules lists the module names.
* MODULE_url list points to the module URLs (optional)
Requires virtualenv to be in PATH.
Depends on ScriptMixin
'''
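    # Illustrative config snippet (values are placeholders):
    #   config = {
    #       'virtualenv_path': 'venv',
    #       'virtualenv_modules': ['requests', 'psutil'],
    #       'virtualenv_requirements': ['/path/to/requirements.txt'],
    #   }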
python_paths = {}
site_packages_path = None
def __init__(self, *args, **kwargs):
self._virtualenv_modules = []
super(VirtualenvMixin, self).__init__(*args, **kwargs)
def register_virtualenv_module(self, name=None, url=None, method=None,
requirements=None, optional=False,
two_pass=False, editable=False):
"""Register a module to be installed with the virtualenv.
This method can be called up until create_virtualenv() to register
modules that should be installed in the virtualenv.
See the documentation for install_module for how the arguments are
applied.
"""
self._virtualenv_modules.append((name, url, method, requirements,
optional, two_pass, editable))
def query_virtualenv_path(self):
c = self.config
dirs = self.query_abs_dirs()
virtualenv = None
if 'abs_virtualenv_dir' in dirs:
virtualenv = dirs['abs_virtualenv_dir']
elif c.get('virtualenv_path'):
if os.path.isabs(c['virtualenv_path']):
virtualenv = c['virtualenv_path']
else:
virtualenv = os.path.join(dirs['abs_work_dir'],
c['virtualenv_path'])
return virtualenv
def query_python_path(self, binary="python"):
"""Return the path of a binary inside the virtualenv, if
c['virtualenv_path'] is set; otherwise return the binary name.
Otherwise return None
"""
if binary not in self.python_paths:
bin_dir = 'bin'
if self._is_windows():
bin_dir = 'Scripts'
virtualenv_path = self.query_virtualenv_path()
if virtualenv_path:
self.python_paths[binary] = os.path.abspath(os.path.join(virtualenv_path, bin_dir, binary))
else:
self.python_paths[binary] = self.query_exe(binary)
return self.python_paths[binary]
def query_python_site_packages_path(self):
if self.site_packages_path:
return self.site_packages_path
python = self.query_python_path()
self.site_packages_path = self.get_output_from_command(
[python, '-c',
'from distutils.sysconfig import get_python_lib; ' +
'print(get_python_lib())'])
return self.site_packages_path
def package_versions(self, pip_freeze_output=None, error_level=WARNING, log_output=False):
"""
reads packages from `pip freeze` output and returns a dict of
{package_name: 'version'}
"""
packages = {}
if pip_freeze_output is None:
# get the output from `pip freeze`
pip = self.query_python_path("pip")
if not pip:
self.log("package_versions: Program pip not in path", level=error_level)
return {}
pip_freeze_output = self.get_output_from_command([pip, "freeze"], silent=True, ignore_errors=True)
if not isinstance(pip_freeze_output, basestring):
self.fatal("package_versions: Error encountered running `pip freeze`: %s" % pip_freeze_output)
for line in pip_freeze_output.splitlines():
# parse the output into package, version
line = line.strip()
if not line:
# whitespace
continue
if line.startswith('-'):
# not a package, probably like '-e http://example.com/path#egg=package-dev'
continue
if '==' not in line:
self.fatal("pip_freeze_packages: Unrecognized output line: %s" % line)
package, version = line.split('==', 1)
packages[package] = version
if log_output:
self.info("Current package versions:")
for package in sorted(packages):
self.info(" %s == %s" % (package, packages[package]))
return packages
def is_python_package_installed(self, package_name, error_level=WARNING):
"""
Return whether the package is installed
"""
packages = self.package_versions(error_level=error_level).keys()
return package_name.lower() in [package.lower() for package in packages]
def install_module(self, module=None, module_url=None, install_method=None,
requirements=(), optional=False, global_options=[],
no_deps=False, editable=False):
"""
Install module via pip.
module_url can be a url to a python package tarball, a path to
a directory containing a setup.py (absolute or relative to work_dir)
or None, in which case it will default to the module name.
requirements is a list of pip requirements files. If specified, these
will be combined with the module_url (if any), like so:
pip install -r requirements1.txt -r requirements2.txt module_url
"""
c = self.config
dirs = self.query_abs_dirs()
env = self.query_env()
venv_path = self.query_virtualenv_path()
self.info("Installing %s into virtualenv %s" % (module, venv_path))
if not module_url:
module_url = module
if install_method in (None, 'pip'):
if not module_url and not requirements:
self.fatal("Must specify module and/or requirements")
pip = self.query_python_path("pip")
if c.get("verbose_pip"):
command = [pip, "-v", "install"]
else:
command = [pip, "install"]
if no_deps:
command += ["--no-deps"]
# To avoid timeouts with our pypi server, increase default timeout:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1007230#c802
command += ['--timeout', str(c.get('pip_timeout', 120))]
for requirement in requirements:
command += ["-r", requirement]
if c.get('find_links') and not c["pip_index"]:
command += ['--no-index']
for opt in global_options:
command += ["--global-option", opt]
elif install_method == 'easy_install':
if not module:
self.fatal("module parameter required with install_method='easy_install'")
if requirements:
# Install pip requirements files separately, since they're
# not understood by easy_install.
self.install_module(requirements=requirements,
install_method='pip')
# Allow easy_install to be overridden by
# self.config['exes']['easy_install']
default = 'easy_install'
if self._is_windows():
# Don't invoke `easy_install` directly on windows since
# the 'install' in the executable name hits UAC
# - http://answers.microsoft.com/en-us/windows/forum/windows_7-security/uac-message-do-you-want-to-allow-the-following/bea30ad8-9ef8-4897-aab4-841a65f7af71
# - https://bugzilla.mozilla.org/show_bug.cgi?id=791840
default = [self.query_python_path(), self.query_python_path('easy_install-script.py')]
command = self.query_exe('easy_install', default=default, return_type="list")
else:
self.fatal("install_module() doesn't understand an install_method of %s!" % install_method)
# Add --find-links pages to look at
proxxy = Proxxy(self.config, self.log_obj)
for link in proxxy.get_proxies_and_urls(c.get('find_links', [])):
command.extend(["--find-links", link])
# module_url can be None if only specifying requirements files
if module_url:
if editable:
if install_method in (None, 'pip'):
command += ['-e']
else:
self.fatal("editable installs not supported for install_method %s" % install_method)
command += [module_url]
# If we're only installing a single requirements file, use
# the file's directory as cwd, so relative paths work correctly.
cwd = dirs['abs_work_dir']
if not module and len(requirements) == 1:
cwd = os.path.dirname(requirements[0])
quoted_command = subprocess.list2cmdline(command)
# Allow for errors while building modules, but require a
# return status of 0.
self.retry(
self.run_command,
# None will cause default value to be used
attempts=1 if optional else None,
good_statuses=(0,),
error_level=WARNING if optional else FATAL,
error_message='Could not install python package: ' + quoted_command + ' failed after %(attempts)d tries!',
args=[command, ],
kwargs={
'error_list': VirtualenvErrorList,
'cwd': cwd,
'env': env,
# WARNING only since retry will raise final FATAL if all
# retry attempts are unsuccessful - and we only want
# an ERROR of FATAL if *no* retry attempt works
'error_level': WARNING,
}
)
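    # Illustrative calls (module names and paths are placeholders):
    #   self.install_module(module='psutil', install_method='pip')
    #   self.install_module(requirements=['/path/to/requirements.txt'], install_method='pip')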
def create_virtualenv(self, modules=(), requirements=()):
"""
Create a python virtualenv.
The virtualenv exe can be defined in c['virtualenv'] or
c['exes']['virtualenv'], as a string (path) or list (path +
arguments).
c['virtualenv_python_dll'] is an optional config item that works
around an old windows virtualenv bug.
virtualenv_modules can be a list of module names to install, e.g.
virtualenv_modules = ['module1', 'module2']
or it can be a heterogeneous list of modules names and dicts that
define a module by its name, url-or-path, and a list of its global
options.
virtualenv_modules = [
{
'name': 'module1',
'url': None,
'global_options': ['--opt', '--without-gcc']
},
{
'name': 'module2',
'url': 'http://url/to/package',
'global_options': ['--use-clang']
},
{
'name': 'module3',
'url': os.path.join('path', 'to', 'setup_py', 'dir')
'global_options': []
},
'module4'
]
virtualenv_requirements is an optional list of pip requirements files to
use when invoking pip, e.g.,
virtualenv_requirements = [
'/path/to/requirements1.txt',
'/path/to/requirements2.txt'
]
"""
c = self.config
dirs = self.query_abs_dirs()
venv_path = self.query_virtualenv_path()
self.info("Creating virtualenv %s" % venv_path)
virtualenv = c.get('virtualenv', self.query_exe('virtualenv'))
if isinstance(virtualenv, str):
# allow for [python, virtualenv] in config
virtualenv = [virtualenv]
if not os.path.exists(virtualenv[0]) and not self.which(virtualenv[0]):
self.add_summary("The executable '%s' is not found; not creating "
"virtualenv!" % virtualenv[0], level=FATAL)
return -1
# https://bugs.launchpad.net/virtualenv/+bug/352844/comments/3
# https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c50
if c.get('virtualenv_python_dll'):
# We may someday want to copy a differently-named dll, but
# let's not think about that right now =\
dll_name = os.path.basename(c['virtualenv_python_dll'])
target = self.query_python_path(dll_name)
scripts_dir = os.path.dirname(target)
self.mkdir_p(scripts_dir)
self.copyfile(c['virtualenv_python_dll'], target, error_level=WARNING)
else:
self.mkdir_p(dirs['abs_work_dir'])
# make this list configurable?
for module in ('distribute', 'pip'):
if c.get('%s_url' % module):
self.download_file(c['%s_url' % module],
parent_dir=dirs['abs_work_dir'])
virtualenv_options = c.get('virtualenv_options',
['--no-site-packages', '--distribute'])
if os.path.exists(self.query_python_path()):
self.info("Virtualenv %s appears to already exist; skipping virtualenv creation." % self.query_python_path())
else:
self.run_command(virtualenv + virtualenv_options + [venv_path],
cwd=dirs['abs_work_dir'],
error_list=VirtualenvErrorList,
halt_on_failure=True)
if not modules:
modules = c.get('virtualenv_modules', [])
if not requirements:
requirements = c.get('virtualenv_requirements', [])
if not modules and requirements:
self.install_module(requirements=requirements,
install_method='pip')
for module in modules:
module_url = module
global_options = []
if isinstance(module, dict):
if module.get('name', None):
module_name = module['name']
else:
self.fatal("Can't install module without module name: %s" %
str(module))
module_url = module.get('url', None)
global_options = module.get('global_options', [])
else:
module_url = self.config.get('%s_url' % module, module_url)
module_name = module
install_method = 'pip'
if module_name in ('pywin32',):
install_method = 'easy_install'
self.install_module(module=module_name,
module_url=module_url,
install_method=install_method,
requirements=requirements,
global_options=global_options)
for module, url, method, requirements, optional, two_pass, editable in \
self._virtualenv_modules:
if two_pass:
self.install_module(
module=module, module_url=url,
install_method=method, requirements=requirements or (),
optional=optional, no_deps=True, editable=editable
)
self.install_module(
module=module, module_url=url,
install_method=method, requirements=requirements or (),
optional=optional, editable=editable
)
self.info("Done creating virtualenv %s." % venv_path)
self.package_versions(log_output=True)
def activate_virtualenv(self):
"""Import the virtualenv's packages into this Python interpreter."""
bin_dir = os.path.dirname(self.query_python_path())
activate = os.path.join(bin_dir, 'activate_this.py')
execfile(activate, dict(__file__=activate))
class ResourceMonitoringMixin(object):
"""Provides resource monitoring capabilities to scripts.
When this class is in the inheritance chain, resource usage stats of the
executing script will be recorded.
This class requires the VirtualenvMixin in order to install a package used
for recording resource usage.
While we would like to record resource usage for the entirety of a script,
since we require an external package, we can only record resource usage
after that package is installed (as part of creating the virtualenv).
That's just the way things have to be.
"""
def __init__(self, *args, **kwargs):
super(ResourceMonitoringMixin, self).__init__(*args, **kwargs)
self.register_virtualenv_module('psutil>=3.1.1', method='pip',
optional=True)
self.register_virtualenv_module('mozsystemmonitor==0.3',
method='pip', optional=True)
self.register_virtualenv_module('jsonschema==2.5.1',
method='pip')
self._resource_monitor = None
# 2-tuple of (name, options) to assign Perfherder resource monitor
# metrics to. This needs to be assigned by a script in order for
# Perfherder metrics to be reported.
self.resource_monitor_perfherder_id = None
@PostScriptAction('create-virtualenv')
def _start_resource_monitoring(self, action, success=None):
self.activate_virtualenv()
# Resource Monitor requires Python 2.7, however it's currently optional.
# Remove when all machines have had their Python version updated (bug 711299).
if sys.version_info[:2] < (2, 7):
self.warning('Resource monitoring will not be enabled! Python 2.7+ required.')
return
try:
from mozsystemmonitor.resourcemonitor import SystemResourceMonitor
self.info("Starting resource monitoring.")
self._resource_monitor = SystemResourceMonitor(poll_interval=1.0)
self._resource_monitor.start()
except Exception:
self.warning("Unable to start resource monitor: %s" %
traceback.format_exc())
@PreScriptAction
def _resource_record_pre_action(self, action):
# Resource monitor isn't available until after create-virtualenv.
if not self._resource_monitor:
return
self._resource_monitor.begin_phase(action)
@PostScriptAction
def _resource_record_post_action(self, action, success=None):
# Resource monitor isn't available until after create-virtualenv.
if not self._resource_monitor:
return
self._resource_monitor.finish_phase(action)
@PostScriptRun
def _resource_record_post_run(self):
if not self._resource_monitor:
return
# This should never raise an exception. This is a workaround until
# mozsystemmonitor is fixed. See bug 895388.
try:
self._resource_monitor.stop()
self._log_resource_usage()
# Upload a JSON file containing the raw resource data.
try:
upload_dir = self.query_abs_dirs()['abs_blob_upload_dir']
with open(os.path.join(upload_dir, 'resource-usage.json'), 'wb') as fh:
json.dump(self._resource_monitor.as_dict(), fh,
sort_keys=True, indent=4)
except (AttributeError, KeyError):
self.exception('could not upload resource usage JSON',
level=WARNING)
except Exception:
self.warning("Exception when reporting resource usage: %s" %
traceback.format_exc())
def _log_resource_usage(self):
# Delay import because not available until virtualenv is populated.
import jsonschema
rm = self._resource_monitor
if rm.start_time is None:
return
def resources(phase):
cpu_percent = rm.aggregate_cpu_percent(phase=phase, per_cpu=False)
cpu_times = rm.aggregate_cpu_times(phase=phase, per_cpu=False)
io = rm.aggregate_io(phase=phase)
swap_in = sum(m.swap.sin for m in rm.measurements)
swap_out = sum(m.swap.sout for m in rm.measurements)
return cpu_percent, cpu_times, io, (swap_in, swap_out)
def log_usage(prefix, duration, cpu_percent, cpu_times, io):
message = '{prefix} - Wall time: {duration:.0f}s; ' \
'CPU: {cpu_percent}; ' \
'Read bytes: {io_read_bytes}; Write bytes: {io_write_bytes}; ' \
'Read time: {io_read_time}; Write time: {io_write_time}'
            # XXX Some test harnesses are complaining about a string being
            # fed into a 'f' formatter. This will help diagnose the issue.
cpu_percent_str = str(round(cpu_percent)) + '%' if cpu_percent else "Can't collect data"
try:
self.info(
message.format(
prefix=prefix, duration=duration,
cpu_percent=cpu_percent_str, io_read_bytes=io.read_bytes,
io_write_bytes=io.write_bytes, io_read_time=io.read_time,
io_write_time=io.write_time
)
)
except ValueError:
self.warning("Exception when formatting: %s" %
traceback.format_exc())
cpu_percent, cpu_times, io, (swap_in, swap_out) = resources(None)
duration = rm.end_time - rm.start_time
# Write out Perfherder data if configured.
if self.resource_monitor_perfherder_id:
perfherder_name, perfherder_options = self.resource_monitor_perfherder_id
suites = []
overall = []
if cpu_percent:
overall.append({
'name': 'cpu_percent',
'value': cpu_percent,
})
overall.extend([
{'name': 'io_write_bytes', 'value': io.write_bytes},
                {'name': 'io_read_bytes', 'value': io.read_bytes},
{'name': 'io_write_time', 'value': io.write_time},
{'name': 'io_read_time', 'value': io.read_time},
])
suites.append({
'name': '%s.overall' % perfherder_name,
'extraOptions': perfherder_options,
'subtests': overall,
})
for phase in rm.phases.keys():
phase_duration = rm.phases[phase][1] - rm.phases[phase][0]
subtests = [
{
'name': 'time',
'value': phase_duration,
},
{
'name': 'cpu_percent',
'value': rm.aggregate_cpu_percent(phase=phase,
per_cpu=False),
}
]
# We don't report I/O during each step because measured I/O
# is system I/O and that I/O can be delayed (e.g. writes will
# buffer before being flushed and recorded in our metrics).
suites.append({
'name': '%s.%s' % (perfherder_name, phase),
'subtests': subtests,
})
data = {
'framework': {'name': 'job_resource_usage'},
'suites': suites,
}
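            # Rough shape of the payload validated/logged below (values are
            # illustrative only):
            #   {"framework": {"name": "job_resource_usage"},
            #    "suites": [
            #        {"name": "<perfherder_name>.overall", "extraOptions": [...],
            #         "subtests": [{"name": "cpu_percent", "value": ...}, ...]},
            #        {"name": "<perfherder_name>.<phase>", "subtests": [...]},
            #    ]}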
try:
schema_path = os.path.join(external_tools_path,
'performance-artifact-schema.json')
with open(schema_path, 'rb') as fh:
schema = json.load(fh)
self.info('Validating Perfherder data against %s' % schema_path)
jsonschema.validate(data, schema)
except Exception:
self.exception('error while validating Perfherder data; ignoring')
else:
self.info('PERFHERDER_DATA: %s' % json.dumps(data))
log_usage('Total resource usage', duration, cpu_percent, cpu_times, io)
# Print special messages so usage shows up in Treeherder.
if cpu_percent:
self._tinderbox_print('CPU usage<br/>{:,.1f}%'.format(
cpu_percent))
self._tinderbox_print('I/O read bytes / time<br/>{:,} / {:,}'.format(
io.read_bytes, io.read_time))
self._tinderbox_print('I/O write bytes / time<br/>{:,} / {:,}'.format(
io.write_bytes, io.write_time))
# Print CPU components having >1%. "cpu_times" is a data structure
# whose attributes are measurements. Ideally we'd have an API that
# returned just the measurements as a dict or something.
cpu_attrs = []
for attr in sorted(dir(cpu_times)):
if attr.startswith('_'):
continue
if attr in ('count', 'index'):
continue
cpu_attrs.append(attr)
cpu_total = sum(getattr(cpu_times, attr) for attr in cpu_attrs)
for attr in cpu_attrs:
value = getattr(cpu_times, attr)
percent = value / cpu_total * 100.0
if percent > 1.00:
self._tinderbox_print('CPU {}<br/>{:,.1f} ({:,.1f}%)'.format(
attr, value, percent))
# Swap on Windows isn't reported by psutil.
if not self._is_windows():
self._tinderbox_print('Swap in / out<br/>{:,} / {:,}'.format(
swap_in, swap_out))
for phase in rm.phases.keys():
start_time, end_time = rm.phases[phase]
cpu_percent, cpu_times, io, swap = resources(phase)
log_usage(phase, end_time - start_time, cpu_percent, cpu_times, io)
def _tinderbox_print(self, message):
self.info('TinderboxPrint: %s' % message)
class InfluxRecordingMixin(object):
"""Provides InfluxDB stat recording to scripts.
This class records stats to an InfluxDB server, if enabled. Stat recording
is enabled in a script by inheriting from this class, and adding an
influxdb_credentials line to the influx_credentials_file (usually oauth.txt
in automation). This line should look something like:
influxdb_credentials = 'http://goldiewilson-onepointtwentyone-1.c.influxdb.com:8086/db/DBNAME/series?u=DBUSERNAME&p=DBPASSWORD'
Where DBNAME, DBUSERNAME, and DBPASSWORD correspond to the database name,
and user/pw credentials for recording to the database. The stats from
mozharness are recorded in the 'mozharness' table.
"""
@PreScriptRun
def influxdb_recording_init(self):
self.recording = False
self.post = None
self.posturl = None
self.build_metrics_summary = None
self.res_props = self.config.get('build_resources_path') % self.query_abs_dirs()
self.info("build_resources.json path: %s" % self.res_props)
if self.res_props:
self.rmtree(self.res_props)
try:
site_packages_path = self.query_python_site_packages_path()
if site_packages_path not in sys.path:
sys.path.append(site_packages_path)
self.post = get_tlsv1_post()
auth = os.path.join(os.getcwd(), self.config['influx_credentials_file'])
if not os.path.exists(auth):
self.warning("Unable to start influxdb recording: %s not found" % (auth,))
return
credentials = {}
execfile(auth, credentials)
if 'influxdb_credentials' in credentials:
self.posturl = credentials['influxdb_credentials']
self.recording = True
else:
self.warning("Unable to start influxdb recording: no credentials")
return
except Exception:
# The exact reason for failing to start stats doesn't really matter.
# If anything fails, we just won't record stats for this job.
self.warning("Unable to start influxdb recording: %s" %
traceback.format_exc())
return
@PreScriptAction
def influxdb_recording_pre_action(self, action):
if not self.recording:
return
self.start_time = time.time()
@PostScriptAction
def influxdb_recording_post_action(self, action, success=None):
if not self.recording:
return
elapsed_time = time.time() - self.start_time
c = {}
p = {}
if self.buildbot_config:
c = self.buildbot_config.get('properties', {})
if self.buildbot_properties:
p = self.buildbot_properties
self.record_influx_stat([{
"points": [[
action,
elapsed_time,
c.get('buildername'),
c.get('product'),
c.get('platform'),
c.get('branch'),
c.get('slavename'),
c.get('revision'),
p.get('gaia_revision'),
c.get('buildid'),
]],
"name": "mozharness",
"columns": [
"action",
"runtime",
"buildername",
"product",
"platform",
"branch",
"slavename",
"gecko_revision",
"gaia_revision",
"buildid",
],
}])
def _get_resource_usage(self, res, name, iolen, cpulen):
c = {}
p = {}
if self.buildbot_config:
c = self.buildbot_config.get('properties', {})
if self.buildbot_properties:
p = self.buildbot_properties
data = [
# Build properties
c.get('buildername'),
c.get('product'),
c.get('platform'),
c.get('branch'),
c.get('slavename'),
c.get('revision'),
p.get('gaia_revision'),
c.get('buildid'),
# Mach step properties
name,
res.get('start'),
res.get('end'),
res.get('duration'),
res.get('cpu_percent'),
]
# The io and cpu_times fields are arrays, though they aren't always
# present if a step completes before resource utilization is measured.
# We add the arrays if they exist, otherwise we just do an array of None
# to fill up the stat point.
data.extend(res.get('io', [None] * iolen))
data.extend(res.get('cpu_times', [None] * cpulen))
return data
@PostScriptAction('build')
def record_mach_stats(self, action, success=None):
if not os.path.exists(self.res_props):
self.info('No build_resources.json found, not logging stats')
return
with open(self.res_props) as fh:
resources = json.load(fh)
data = {
"points": [
],
"name": "mach",
"columns": [
# Build properties
"buildername",
"product",
"platform",
"branch",
"slavename",
"gecko_revision",
"gaia_revision",
"buildid",
# Mach step properties
"name",
"start",
"end",
"duration",
"cpu_percent",
],
}
# The io and cpu_times fields aren't static - they may vary based
# on the specific platform being measured. Mach records the field
# names, which we use as the column names here.
data['columns'].extend(resources['io_fields'])
data['columns'].extend(resources['cpu_times_fields'])
iolen = len(resources['io_fields'])
cpulen = len(resources['cpu_times_fields'])
if 'duration' in resources:
self.build_metrics_summary = {
'name': 'build times',
'value': resources['duration'],
'subtests': [],
}
# The top-level data has the overall resource usage, which we record
# under the name 'TOTAL' to separate it from the individual phases.
data['points'].append(self._get_resource_usage(resources, 'TOTAL', iolen, cpulen))
        # Each phase also has the same resource stats as the top-level.
for phase in resources['phases']:
data['points'].append(self._get_resource_usage(phase, phase['name'], iolen, cpulen))
if 'duration' not in phase:
self.build_metrics_summary = None
elif self.build_metrics_summary:
self.build_metrics_summary['subtests'].append({
'name': phase['name'],
'value': phase['duration'],
})
self.record_influx_stat([data])
def record_influx_stat(self, json_data):
if not self.recording:
return
try:
r = self.post(self.posturl, data=json.dumps(json_data), timeout=5)
if r.status_code != 200:
self.warning("Failed to log stats. Return code = %i, stats = %s" % (r.status_code, json_data))
# Disable recording for the rest of this job. Even if it's just
# intermittent, we don't want to keep the build from progressing.
self.recording = False
except Exception, e:
self.warning('Failed to log stats. Exception = %s' % str(e))
self.recording = False
# __main__ {{{1
if __name__ == '__main__':
'''TODO: unit tests.
'''
pass
| cstipkovic/spidermonkey-research | testing/mozharness/mozharness/base/python.py | Python | mpl-2.0 | 36,576 |
#!/usr/bin/env python
import itertools
import logging
import os
import random
import stat
import subprocess
import sys
import tempfile
import unittest
from unittest import TestCase
import lief
from utils import get_sample
lief.logging.set_level(lief.logging.LOGGING_LEVEL.INFO)
class TestCore(TestCase):
LOGGER = logging.getLogger(__name__)
def setUp(self):
self.logger = logging.getLogger(__name__)
def test_core_arm(self):
core = lief.parse(get_sample('ELF/ELF32_ARM_core_hello.core'))
notes = core.notes
self.assertEqual(len(notes), 6)
# Check NT_PRPSINFO
# =================
prpsinfo = notes[0]
self.assertTrue(prpsinfo.is_core)
self.assertEqual(prpsinfo.type_core, lief.ELF.NOTE_TYPES_CORE.PRPSINFO)
# Check details
details = prpsinfo.details
self.assertIsInstance(details, lief.ELF.CorePrPsInfo)
self.assertEqual(details.file_name, "hello-exe")
self.assertEqual(details.uid, 2000)
self.assertEqual(details.gid, 2000)
self.assertEqual(details.pid, 8166)
self.assertEqual(details.ppid, 8163)
self.assertEqual(details.pgrp, 8166)
self.assertEqual(details.sid, 7997)
# Check NT_PRSTATUS
# =================
prstatus = notes[1]
self.assertTrue(prstatus.is_core)
self.assertEqual(prstatus.type_core, lief.ELF.NOTE_TYPES_CORE.PRSTATUS)
# Check details
details = prstatus.details
self.assertEqual(details.current_sig, 7)
self.assertEqual(details.sigpend, 0)
self.assertEqual(details.sighold, 0)
self.assertEqual(details.pid, 8166)
self.assertEqual(details.ppid, 0)
self.assertEqual(details.pgrp, 0)
self.assertEqual(details.sid, 0)
self.assertEqual(details.utime.sec, 0)
self.assertEqual(details.utime.usec, 0)
self.assertEqual(details.stime.sec, 0)
self.assertEqual(details.stime.usec, 0)
self.assertEqual(details.cutime.sec, 0)
self.assertEqual(details.cutime.usec, 0)
self.assertEqual(details.cstime.sec, 0)
self.assertEqual(details.cstime.usec, 0)
reg_ctx = details.register_context
self.assertEqual(len(reg_ctx), 17)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R0], 0xaad75074)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R1], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R2], 0xb)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R3], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R4], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R5], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R6], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R7], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R8], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R9], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R10], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R11], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R12], 0xA)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R13], 1)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R14], 0xf7728841)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.ARM_R15], 0xaad7507c)
self.assertEqual(details.get(lief.ELF.CorePrStatus.REGISTERS.ARM_CPSR), 0x60010010)
arm_vfp = notes[2]
# Check NT_NOTE
# =================
siginfo = notes[3]
self.assertTrue(siginfo.is_core)
self.assertEqual(siginfo.type_core, lief.ELF.NOTE_TYPES_CORE.SIGINFO)
# Check details
details = siginfo.details
self.assertEqual(details.signo, 7)
self.assertEqual(details.sigcode, 0)
self.assertEqual(details.sigerrno, 1)
# Check NT_AUXV
# =================
auxv = notes[4]
self.assertTrue(auxv.is_core)
self.assertEqual(auxv.type_core, lief.ELF.NOTE_TYPES_CORE.AUXV)
# Check details
details = auxv.details
self.assertEqual(len(details.values), 18)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PHDR], 0xaad74034)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PHENT], 0x20)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PHNUM], 0x9)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PAGESZ], 4096)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.BASE], 0xf7716000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.FLAGS], 0)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.ENTRY], 0xaad75074)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.UID], 2000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.EUID], 2000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.GID], 2000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.EGID], 2000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PLATFORM], 0xfffefb5c)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.HWCAP], 0x27b0d6)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.CKLTCK], 0x64)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.SECURE], 0)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.RANDOM], 0xfffefb4c)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.HWCAP2], 0x1f)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.EXECFN], 0xfffeffec)
# Check NT_FILE
# =================
note = notes[5]
self.assertTrue(note.is_core)
self.assertEqual(note.type_core, lief.ELF.NOTE_TYPES_CORE.FILE)
# Check details
details = note.details
files = details.files
self.assertEqual(len(files), len(details))
self.assertEqual(21, len(details))
self.assertEqual(files[0].start, 0xaad74000)
self.assertEqual(files[0].end, 0xaad78000)
self.assertEqual(files[0].file_ofs, 0)
self.assertEqual(files[0].path, "/data/local/tmp/hello-exe")
last = files.pop()
self.assertEqual(last.start, 0xf77a1000)
self.assertEqual(last.end, 0xf77a2000)
self.assertEqual(last.file_ofs, 0x8a000)
self.assertEqual(last.path, "/system/bin/linker")
self.assertTrue(all(len(c.path) > 0 for c in details))
def test_core_arm64(self):
core = lief.parse(get_sample('ELF/ELF64_AArch64_core_hello.core'))
notes = core.notes
self.assertEqual(len(notes), 6)
# Check NT_PRPSINFO
# =================
prpsinfo = notes[0]
self.assertTrue(prpsinfo.is_core)
self.assertEqual(prpsinfo.type_core, lief.ELF.NOTE_TYPES_CORE.PRPSINFO)
# Check details
details = prpsinfo.details
self.assertIsInstance(details, lief.ELF.CorePrPsInfo)
self.assertEqual(details.file_name, "hello-exe")
self.assertEqual(details.uid, 2000)
self.assertEqual(details.gid, 2000)
self.assertEqual(details.pid, 8104)
self.assertEqual(details.ppid, 8101)
self.assertEqual(details.pgrp, 8104)
self.assertEqual(details.sid, 7997)
# Check NT_PRSTATUS
# =================
prstatus = notes[1]
self.assertTrue(prstatus.is_core)
self.assertEqual(prstatus.type_core, lief.ELF.NOTE_TYPES_CORE.PRSTATUS)
# Check details
details = prstatus.details
self.assertEqual(details.current_sig, 5)
self.assertEqual(details.sigpend, 0)
self.assertEqual(details.sighold, 0)
self.assertEqual(details.pid, 8104)
self.assertEqual(details.ppid, 0)
self.assertEqual(details.pgrp, 0)
self.assertEqual(details.sid, 0)
self.assertEqual(details.utime.sec, 0)
self.assertEqual(details.utime.usec, 0)
self.assertEqual(details.stime.sec, 0)
self.assertEqual(details.stime.usec, 0)
self.assertEqual(details.cutime.sec, 0)
self.assertEqual(details.cutime.usec, 0)
self.assertEqual(details.cstime.sec, 0)
self.assertEqual(details.cstime.usec, 0)
reg_ctx = details.register_context
self.assertEqual(len(reg_ctx), 34)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X0], 0x5580b86f50)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X1], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X2], 0x1)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X3], 0x7fb7e2e160)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X4], 0x7fb7e83030)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X5], 0x4)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X6], 0x6f6c2f617461642f)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X7], 0x2f706d742f6c6163)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X8], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X9], 0xa)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X10], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X11], 0xA)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X12], 0x0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X13], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X14], 0x878ca62ae01a9a5)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X15], 0x7fb7e7a000)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X16], 0x7fb7c132c8)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X17], 0x7fb7bb0adc)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X18], 0x7fb7c1e000)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X19], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X20], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X21], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X22], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X23], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X24], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X25], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X26], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X27], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X28], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X29], 0)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X30], 0x7fb7eb6068)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_X31], 0x7ffffff950)
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.AARCH64_PC], 0x5580b86f50)
arm_vfp = notes[2]
# Check NT_NOTE
# =================
siginfo = notes[3]
self.assertTrue(siginfo.is_core)
self.assertEqual(siginfo.type_core, lief.ELF.NOTE_TYPES_CORE.SIGINFO)
# Check details
details = siginfo.details
self.assertEqual(details.signo, 5)
self.assertEqual(details.sigcode, 0)
self.assertEqual(details.sigerrno, 1)
# Check NT_AUXV
# =================
auxv = notes[4]
self.assertTrue(auxv.is_core)
self.assertEqual(auxv.type_core, lief.ELF.NOTE_TYPES_CORE.AUXV)
# Check details
details = auxv.details
self.assertEqual(len(details.values), 18)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PHDR], 0x5580b86040)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PHENT], 0x38)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PHNUM], 0x9)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PAGESZ], 4096)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.BASE], 0x7fb7e93000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.FLAGS], 0)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.ENTRY], 0x5580b86f50)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.UID], 2000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.EUID], 2000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.GID], 2000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.EGID], 2000)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.PLATFORM], 0x7ffffffb58)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.HWCAP], 0xff)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.CKLTCK], 0x64)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.SECURE], 0)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.RANDOM], 0x7ffffffb48)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.EXECFN], 0x7fffffffec)
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.SYSINFO_EHDR], 0x7fb7e91000)
# Check NT_FILE
# =================
note = notes[5]
self.assertTrue(note.is_core)
self.assertEqual(note.type_core, lief.ELF.NOTE_TYPES_CORE.FILE)
# Check details
details = note.details
files = details.files
self.assertEqual(len(files), len(details))
self.assertEqual(22, len(details))
self.assertEqual(files[0].start, 0x5580b86000)
self.assertEqual(files[0].end, 0x5580b88000)
self.assertEqual(files[0].file_ofs, 0)
self.assertEqual(files[0].path, "/data/local/tmp/hello-exe")
last = files.pop()
self.assertEqual(last.start, 0x7fb7f8c000)
self.assertEqual(last.end, 0x7fb7f8d000)
self.assertEqual(last.file_ofs, 0xf8000)
self.assertEqual(last.path, "/system/bin/linker64")
def test_core_write(self):
core = lief.parse(get_sample('ELF/ELF64_x86-64_core_hello.core'))
note = core.notes[1]
self.assertEqual(note.type_core, lief.ELF.NOTE_TYPES_CORE.PRSTATUS)
details = note.details
details[lief.ELF.CorePrStatus.REGISTERS.X86_64_RIP] = 0xBADC0DE
note = core.notes[5]
self.assertEqual(note.type_core, lief.ELF.NOTE_TYPES_CORE.AUXV)
details = note.details
details[lief.ELF.CoreAuxv.TYPES.ENTRY] = 0xBADC0DE
note = core.notes[4]
self.assertEqual(note.type_core, lief.ELF.NOTE_TYPES_CORE.SIGINFO)
orig_siginfo_len = len(note.description)
details = note.details
details.sigerrno = 0xCC
# Cannot re-open a file on Windows, so handle it by hand
with tempfile.NamedTemporaryFile(prefix="", suffix=".core", delete=False) as f:
tmpfilename = f.name
core.write(tmpfilename)
try:
with open(tmpfilename, 'rb') as f:
core_new = lief.parse(f.name)
self.assertIsNotNone(core_new)
note = core_new.notes[1]
self.assertEqual(note.type_core, lief.ELF.NOTE_TYPES_CORE.PRSTATUS)
details = note.details
self.assertEqual(details[lief.ELF.CorePrStatus.REGISTERS.X86_64_RIP], 0xBADC0DE)
note = core_new.notes[5]
self.assertEqual(note.type_core, lief.ELF.NOTE_TYPES_CORE.AUXV)
details = note.details
self.assertEqual(details[lief.ELF.CoreAuxv.TYPES.ENTRY], 0xBADC0DE)
note = core_new.notes[4]
self.assertEqual(note.type_core, lief.ELF.NOTE_TYPES_CORE.SIGINFO)
self.assertEqual(len(note.description), orig_siginfo_len)
details = note.details
self.assertEqual(details.sigerrno, 0xCC)
finally:
try:
os.remove(tmpfilename)
except OSError:
pass
if __name__ == '__main__':
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
unittest.main(verbosity=2)
| lief-project/LIEF | tests/elf/test_core.py | Python | apache-2.0 | 16,506 |
#!/usr/local/bin/python
from ouimeaux.environment import Environment
import pdb
def on_switch(switch):
print "Switch found!", switch.name
def on_motion(motion):
print "Motion found!", motion.name
def list_switches():
env = Environment(on_switch, on_motion)
env.start()
env.discover(seconds=1)
return env.list_switches()
def get_switch_state(switch_name):
env = Environment(on_switch, on_motion)
env.start()
env.discover(seconds=1)
#time.sleep(2)
switch = env.get_switch(switch_name)
return switch.basicevent.GetBinaryState()['BinaryState']
def toggle_switch(switch_name):
env = Environment(on_switch, on_motion)
env.start()
env.discover(seconds=1)
#time.sleep(2)
switch = env.get_switch(switch_name)
switch.blink()
#toggle_switch(switch)
def toggle_switch_dumb(switch):
current_state = switch.basicevent.GetBinaryState()['BinaryState']
    new_state = '1' if current_state == '0' else '0'
switch.basicevent.SetBinaryState(BinaryState=new_state)
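# Illustrative usage (the switch name 'Lamp' is hypothetical):
#   print list_switches()
#   print get_switch_state('Lamp')   # '0' (off) or '1' (on)
#   toggle_switch('Lamp')            # discovers the switch and blinks it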
if __name__ == "__main__":
print 'starting wemo debugging console'
pdb.set_trace()
| ramrom/haus | wemo.py | Python | mit | 1,079 |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Test suite for zdaemon.zdoptions."""
import os
import sys
import tempfile
import shutil
import unittest
import doctest
import ZConfig
import zdaemon
from zdaemon.zdoptions import (
ZDOptions, RunnerOptions, list_of_ints,
existing_parent_directory, existing_parent_dirpath)
try:
from StringIO import StringIO
except ImportError:
# Python 3 support.
from io import StringIO
class ZDOptionsTestBase(unittest.TestCase):
OptionsClass = ZDOptions
def save_streams(self):
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
sys.stdout = self.stdout = StringIO()
sys.stderr = self.stderr = StringIO()
def restore_streams(self):
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
def check_exit_code(self, options, args, exit_code=2):
save_sys_stderr = sys.stderr
try:
sys.stderr = StringIO()
try:
options.realize(args)
except SystemExit as err:
self.assertEqual(err.code, exit_code)
else:
self.fail("SystemExit expected")
finally:
sys.stderr = save_sys_stderr
class TestZDOptions(ZDOptionsTestBase):
input_args = ["arg1", "arg2"]
output_opts = []
output_args = ["arg1", "arg2"]
def test_basic(self):
progname = "progname"
doc = "doc"
options = self.OptionsClass()
options.positional_args_allowed = 1
options.schemadir = os.path.dirname(zdaemon.__file__)
options.realize(self.input_args, progname, doc)
self.assertEqual(options.progname, "progname")
self.assertEqual(options.doc, "doc")
self.assertEqual(options.options, self.output_opts)
self.assertEqual(options.args, self.output_args)
def test_configure(self):
configfile = os.path.join(os.path.dirname(zdaemon.__file__),
"sample.conf")
for arg in "-C", "--c", "--configure":
options = self.OptionsClass()
options.realize([arg, configfile])
self.assertEqual(options.configfile, configfile)
# The original intent was that the docstring of whatever module is
# __main__ would be used as help documentation.
# Because of the way buildout generates scripts, this will always
# be an empty string.
# So, we now use the __doc__ of the options class being used.
def help_test_helper(self, optionsclass, kw, expected):
for arg in "-h", "--h", "--help":
options = optionsclass()
try:
self.save_streams()
try:
options.realize([arg], **kw)
finally:
self.restore_streams()
except SystemExit as err:
self.assertEqual(err.code, 0)
else:
self.fail("%s didn't call sys.exit()" % repr(arg))
helptext = self.stdout.getvalue()
self.assertEqual(helptext, expected)
def test_default_help(self):
# test what happens if OptionsClass is used directly.
# Not sure this ever happens :-S
self.help_test_helper(
self.OptionsClass, {},
self.OptionsClass.__doc__ or 'No help available.')
def test_default_subclass_help(self):
# test what happens when the subclass doesn't do anything
# with __doc__
class SubClass(self.OptionsClass):
pass
# __doc__ isn't inherited :-(
self.help_test_helper(SubClass, {}, 'No help available.')
def test_default_help_with_doc_kw(self):
# test what happens when the subclass doesn't do anything
# with __doc__, but doc is supplied to realize
self.help_test_helper(self.OptionsClass,
{'doc': 'Example help'},
'Example help')
def test_no_help(self):
# test what happens when the subclass has None for __doc__
class NoHelp(self.OptionsClass):
__doc__ = None
self.help_test_helper(NoHelp, {}, 'No help available.')
def test_no_help_with_doc_kw(self):
# test what happens when the subclass has None for __doc__,
# but doc is supplied to realize
class NoHelp(self.OptionsClass):
__doc__ = None
self.help_test_helper(NoHelp, {'doc': 'Example help'}, 'Example help')
def test_help(self):
# test what happens when the subclass has None for __doc__
class HasHelp(self.OptionsClass):
__doc__ = 'Some help for %s'
self.help_test_helper(HasHelp, {'progname': 'me'}, 'Some help for me')
def test_has_help_with_doc_kw(self):
# test what happens when the subclass has something for __doc__,
# and doc is also supplied to realize
class HasHelp(self.OptionsClass):
__doc__ = 'Some help'
self.help_test_helper(HasHelp, {'doc': 'Example help'}, 'Example help')
def test_version(self):
options = self.OptionsClass()
options.version = '2.4.frog-knows'
self.save_streams()
try:
self.check_exit_code(options, ['--version'], exit_code=0)
finally:
self.restore_streams()
        self.assertEqual(self.stdout.getvalue(), "2.4.frog-knows\n")
def test_unrecognized(self):
# Check that we get an error for an unrecognized option
self.check_exit_code(self.OptionsClass(), ["-/"])
class TestBasicFunctionality(ZDOptionsTestBase):
def test_no_positional_args(self):
# Check that we get an error for positional args when they
# haven't been enabled.
self.check_exit_code(self.OptionsClass(), ["A"])
def test_positional_args(self):
options = self.OptionsClass()
options.positional_args_allowed = 1
options.realize(["A", "B"])
self.assertEqual(options.args, ["A", "B"])
def test_positional_args_empty(self):
options = self.OptionsClass()
options.positional_args_allowed = 1
options.realize([])
self.assertEqual(options.args, [])
def test_positional_args_unknown_option(self):
# Make sure an unknown option doesn't become a positional arg.
options = self.OptionsClass()
options.positional_args_allowed = 1
self.check_exit_code(options, ["-o", "A", "B"])
def test_conflicting_flags(self):
# Check that we get an error for flags which compete over the
# same option setting.
options = self.OptionsClass()
options.add("setting", None, "a", flag=1)
options.add("setting", None, "b", flag=2)
self.check_exit_code(options, ["-a", "-b"])
def test_duplicate_flags(self):
# Check that we don't get an error for flags which reinforce the
# same option setting.
options = self.OptionsClass()
options.add("setting", None, "a", flag=1)
options.realize(["-a", "-a"])
def test_handler_simple(self):
# Test that a handler is called; use one that doesn't return None.
options = self.OptionsClass()
options.add("setting", None, "a:", handler=int)
options.realize(["-a2"])
self.assertEqual(options.setting, 2)
def test_handler_side_effect(self):
# Test that a handler is called and conflicts are not
# signalled when it returns None.
options = self.OptionsClass()
L = []
options.add("setting", None, "a:", "append=", handler=L.append)
options.realize(["-a2", "--append", "3"])
self.assertTrue(options.setting is None)
self.assertEqual(L, ["2", "3"])
def test_handler_with_bad_value(self):
options = self.OptionsClass()
options.add("setting", None, "a:", handler=int)
self.check_exit_code(options, ["-afoo"])
def test_required_options(self):
# Check that we get an error if a required option is not specified
options = self.OptionsClass()
options.add("setting", None, "a:", handler=int, required=True)
self.check_exit_code(options, [])
def test_overrides_without_config_file(self):
# Check that we get an error if we use -X without -C
options = self.OptionsClass()
self.check_exit_code(options, ["-Xfoo"])
def test_raise_getopt_errors(self):
options = self.OptionsClass()
# note that we do not add "a" to the list of options;
        # if raise_getopt_errs was true, this test would error
options.realize(["-afoo"], raise_getopt_errs=False)
# check_exit_code realizes the options with raise_getopt_errs=True
self.check_exit_code(options, ['-afoo'])
def test_list_of_ints(self):
self.assertEqual(list_of_ints(''), [])
self.assertEqual(list_of_ints('42'), [42])
self.assertEqual(list_of_ints('42,43'), [42, 43])
self.assertEqual(list_of_ints('42, 43'), [42, 43])
class TestOptionConfiguration(ZDOptionsTestBase):
def test_add_flag_or_handler_not_both(self):
options = self.OptionsClass()
self.assertRaises(ValueError, options.add, short="a", flag=1,
handler=lambda x: None)
def test_flag_requires_command_line_flag(self):
options = self.OptionsClass()
self.assertRaises(ValueError, options.add, flag=1)
def test_flag_cannot_accept_arguments(self):
options = self.OptionsClass()
self.assertRaises(ValueError, options.add, short='a:', flag=1)
self.assertRaises(ValueError, options.add, long='an-option=', flag=1)
def test_arguments_must_be_consistent(self):
options = self.OptionsClass()
self.assertRaises(ValueError, options.add, short='a:', long='an-option')
self.assertRaises(ValueError, options.add, short='a', long='an-option=')
def test_short_cmdline_syntax(self):
options = self.OptionsClass()
self.assertRaises(ValueError, options.add, short='-a')
self.assertRaises(ValueError, options.add, short='ab')
self.assertRaises(ValueError, options.add, short='abc')
def test_long_cmdline_syntax(self):
options = self.OptionsClass()
self.assertRaises(ValueError, options.add, long='--an-option')
self.assertRaises(ValueError, options.add, long='-an-option')
def test_duplicate_short_flags(self):
options = self.OptionsClass()
options.add(short='a')
options.add(short='b')
self.assertRaises(ValueError, options.add, short='a')
def test_duplicate_long_flags(self):
options = self.OptionsClass()
options.add(long='an-option')
options.add(long='be-still-my-beating-heart')
self.assertRaises(ValueError, options.add, long='an-option')
class EnvironmentOptions(ZDOptionsTestBase):
saved_schema = None
class OptionsClass(ZDOptions):
def __init__(self):
ZDOptions.__init__(self)
self.add("opt", "opt", "o:", "opt=",
default=42, handler=int, env="OPT")
def load_schema(self):
# Doing this here avoids needing a separate file for the schema:
if self.schema is None:
if EnvironmentOptions.saved_schema is None:
schema = ZConfig.loadSchemaFile(StringIO("""\
<schema>
<key name='opt' datatype='integer' default='12'/>
</schema>
"""))
EnvironmentOptions.saved_schema = schema
self.schema = EnvironmentOptions.saved_schema
def load_configfile(self):
if getattr(self, "configtext", None):
self.configfile = tempfile.mktemp()
f = open(self.configfile, 'w')
f.write(self.configtext)
f.close()
try:
ZDOptions.load_configfile(self)
finally:
os.unlink(self.configfile)
else:
ZDOptions.load_configfile(self)
# Save and restore the environment around each test:
def setUp(self):
self._oldenv = os.environ
env = {}
for k, v in os.environ.items():
env[k] = v
os.environ = env
def tearDown(self):
os.environ = self._oldenv
def create_with_config(self, text):
options = self.OptionsClass()
zdpkgdir = os.path.dirname(os.path.abspath(zdaemon.__file__))
options.schemadir = os.path.join(zdpkgdir, 'tests')
options.schemafile = "envtest.xml"
# configfile must be set for ZDOptions to use ZConfig:
if text:
options.configfile = "not used"
options.configtext = text
return options
class TestZDOptionsEnvironment(EnvironmentOptions):
def test_with_environment(self):
os.environ["OPT"] = "2"
self.check_from_command_line()
options = self.OptionsClass()
options.realize([])
self.assertEqual(options.opt, 2)
def test_without_environment(self):
self.check_from_command_line()
options = self.OptionsClass()
options.realize([])
self.assertEqual(options.opt, 42)
def check_from_command_line(self):
for args in (["-o1"], ["--opt", "1"]):
options = self.OptionsClass()
options.realize(args)
self.assertEqual(options.opt, 1)
def test_with_bad_environment(self):
os.environ["OPT"] = "Spooge!"
# make sure the bad value is ignored if the command-line is used:
self.check_from_command_line()
options = self.OptionsClass()
try:
self.save_streams()
try:
options.realize([])
finally:
self.restore_streams()
except SystemExit as e:
self.assertEqual(e.code, 2)
else:
self.fail("expected SystemExit")
def test_environment_overrides_configfile(self):
options = self.create_with_config("opt 3")
options.realize([])
self.assertEqual(options.opt, 3)
os.environ["OPT"] = "2"
options = self.create_with_config("opt 3")
options.realize([])
self.assertEqual(options.opt, 2)
class TestCommandLineOverrides(EnvironmentOptions):
def test_simple_override(self):
options = self.create_with_config("# empty config")
options.realize(["-X", "opt=-2"])
self.assertEqual(options.opt, -2)
def test_error_propogation(self):
self.check_exit_code(self.create_with_config("# empty"),
["-Xopt=1", "-Xopt=2"])
self.check_exit_code(self.create_with_config("# empty"),
["-Xunknown=foo"])
class TestRunnerDirectory(ZDOptionsTestBase):
OptionsClass = RunnerOptions
def setUp(self):
super(TestRunnerDirectory, self).setUp()
# Create temporary directory to work in
self.root = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.root)
super(TestRunnerDirectory, self).tearDown()
def test_not_existing_directory(self):
options = self.OptionsClass()
path = os.path.join(self.root, 'does-not-exist', 'really-not')
self.check_exit_code(options, ["-z", path])
socket = os.path.join(path, 'socket')
self.check_exit_code(options, ["-s", socket])
def test_existing_directory(self):
options = self.OptionsClass()
options.realize(["-z", self.root])
socket = os.path.join(self.root, 'socket')
self.check_exit_code(options, ["-s", socket])
def test_parent_is_created(self):
options = self.OptionsClass()
path = os.path.join(self.root, 'will-be-created')
options.realize(["-z", path])
self.assertEqual(path, options.directory)
socket = os.path.join(path, 'socket')
options = self.OptionsClass()
options.realize(["-s", socket])
# Directory will be created when zdaemon runs, not when the
# configuration is read
self.assertFalse(os.path.exists(path))
def test_existing_parent_directory(self):
self.assertTrue(existing_parent_directory(self.root))
self.assertTrue(existing_parent_directory(
os.path.join(self.root, 'not-there')))
self.assertRaises(
ValueError, existing_parent_directory,
os.path.join(self.root, 'not-there', 'this-also-not'))
def test_existing_parent_dirpath(self):
self.assertTrue(existing_parent_dirpath(
os.path.join(self.root, 'sock')))
self.assertTrue(existing_parent_dirpath(
os.path.join(self.root, 'not-there', 'sock')))
self.assertTrue(existing_parent_dirpath(
os.path.join('not-there', 'sock')))
self.assertRaises(
ValueError, existing_parent_dirpath,
os.path.join(self.root, 'not-there', 'this-also-not', 'sock'))
def test_suite():
return unittest.TestSuite([
doctest.DocTestSuite('zdaemon.zdoptions'),
unittest.defaultTestLoader.loadTestsFromName(__name__),
])
if __name__ == "__main__":
unittest.main(defaultTest='test_suite')
| wunderlins/learning | python/zodb/lib/osx/zdaemon/tests/testzdoptions.py | Python | gpl-2.0 | 17,993 |
# -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Geography for one family
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
import sys
import operator
from gi.repository import Gdk
KEY_TAB = Gdk.KEY_Tab
import socket
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("GeoGraphy.geofamily")
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import EventRoleType, EventType
from gramps.gen.config import config
from gramps.gen.datehandler import displayer
from gramps.gen.display.name import displayer as _nd
from gramps.gen.utils.place import conv_lat_lon
from gramps.gui.views.pageview import PageView
from gramps.gui.editors import EditPlace
from gramps.gui.selectors.selectplace import SelectPlace
from gramps.gui.filters.sidebar import FamilySidebarFilter
from gramps.gui.views.navigationview import NavigationView
from gramps.gui.views.bookmarks import FamilyBookmarks
from gramps.plugins.lib.maps.geography import GeoGraphyView
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_UI_DEF = '''\
<ui>
<menubar name="MenuBar">
<menu action="GoMenu">
<placeholder name="CommonGo">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
</placeholder>
</menu>
<menu action="EditMenu">
<placeholder name="CommonEdit">
<menuitem action="PrintView"/>
</placeholder>
</menu>
<menu action="BookMenu">
<placeholder name="AddEditBook">
<menuitem action="AddBook"/>
<menuitem action="EditBook"/>
</placeholder>
</menu>
</menubar>
<toolbar name="ToolBar">
<placeholder name="CommonNavigation">
<toolitem action="Back"/>
<toolitem action="Forward"/>
</placeholder>
<placeholder name="CommonEdit">
<toolitem action="PrintView"/>
</placeholder>
</toolbar>
</ui>
'''
#-------------------------------------------------------------------------
#
# GeoView
#
#-------------------------------------------------------------------------
class GeoFamily(GeoGraphyView):
"""
The view used to render person map.
"""
def __init__(self, pdata, dbstate, uistate, nav_group=0):
GeoGraphyView.__init__(self, _('Family places map'),
pdata, dbstate, uistate,
FamilyBookmarks,
nav_group)
self.dbstate = dbstate
self.uistate = uistate
self.place_list = []
self.place_without_coordinates = []
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.minyear = 9999
self.maxyear = 0
self.nbplaces = 0
self.nbmarkers = 0
self.sort = []
self.additional_uis.append(self.additional_ui())
self.no_show_places_in_status_bar = False
def get_title(self):
"""
Used to set the titlebar in the configuration window.
"""
return _('GeoFamily')
def get_stock(self):
"""
Returns the name of the stock icon to use for the display.
This assumes that this icon has already been registered
as a stock icon.
"""
return 'geo-show-family'
def get_viewtype_stock(self):
"""Type of view in category
"""
return 'geo-show-family'
def additional_ui(self):
"""
Specifies the UIManager XML code that defines the menus and buttons
associated with the interface.
"""
return _UI_DEF
def navigation_type(self):
"""
Indicates the navigation type. Navigation type can be the string
name of any of the primary objects.
"""
return 'Family'
def goto_handle(self, handle=None):
"""
Rebuild the tree with the given person handle as the root.
"""
self.places_found = []
self.build_tree()
def build_tree(self):
"""
This is called by the parent class when the view becomes visible. Since
all handling of visibility is now in rebuild_trees, see that for more
information.
"""
if self.uistate.get_active('Family'):
self._createmap(self.uistate.get_active('Family'))
else:
self._createmap(self.uistate.get_active('Person'))
def _createpersonmarkers(self, dbstate, person, comment, fam_id):
"""
Create all markers for the specified person.
"""
self.cal = config.get('preferences.calendar-format-report')
latitude = longitude = ""
if person:
# For each event, if we have a place, set a marker.
for event_ref in person.get_event_ref_list():
if not event_ref:
continue
role = event_ref.get_role()
event = dbstate.db.get_event_from_handle(event_ref.ref)
eyear = event.get_date_object().to_calendar(self.cal).get_year()
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
latitude, longitude = conv_lat_lon(latitude,
longitude, "D.D8")
descr = place.get_title()
evt = EventType(event.get_type())
descr1 = _("%(eventtype)s : %(name)s") % {
'eventtype': evt,
'name': _nd.display(person)}
                        # place.get_longitude and place.get_latitude each return
                        # a string. We have coordinates when both values contain
                        # a non-empty string.
if ( longitude and latitude ):
if not self._present_in_places_list(2, str(descr1 + descr + str(evt))):
self._append_to_places_list(descr,
str(descr1 + descr + str(evt)),
_nd.display(person),
latitude, longitude,
role, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
fam_id
)
else:
self._append_to_places_without_coord(
place.gramps_id, descr)
family_list = person.get_family_handle_list()
for family_hdl in family_list:
family = self.dbstate.db.get_family_from_handle(family_hdl)
if family is not None:
for event_ref in family.get_event_ref_list():
if event_ref:
event = dbstate.db.get_event_from_handle(event_ref.ref)
role = event_ref.get_role()
if event.get_place_handle():
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
latitude, longitude = conv_lat_lon(latitude,
longitude, "D.D8")
descr = place.get_title()
evt = EventType(event.get_type())
(father_name, mother_name) = self._get_father_and_mother_name(event)
descr1 = "%s : %s - " % ( evt, father_name )
descr1 = "%s%s" % ( descr1, mother_name )
eyear = event.get_date_object().to_calendar(self.cal).get_year()
if ( longitude and latitude ):
if not self._present_in_places_list(2, str(descr1 + descr + str(evt))):
self._append_to_places_list(descr,
str(descr1 + descr + str(evt)),
_nd.display(person),
latitude, longitude,
role, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
family.gramps_id
)
else:
self._append_to_places_without_coord( place.gramps_id, descr)
def family_label(self,family):
if family is None:
return "Unknown"
f = self.dbstate.db.get_person_from_handle(
family.get_father_handle())
m = self.dbstate.db.get_person_from_handle(
family.get_mother_handle())
if f and m:
label = _("%(gramps_id)s : %(father)s and %(mother)s") % {
'father' : _nd.display(f),
'mother' : _nd.display(m),
'gramps_id' : family.gramps_id,
}
elif f:
label = "%(gramps_id)s : %(father)s" % {
'father' : _nd.display(f),
'gramps_id' : family.gramps_id,
}
elif m:
label = "%(gramps_id)s : %(mother)s" % {
'mother' : _nd.display(m),
'gramps_id' : family.gramps_id,
}
else:
# No translation for bare gramps_id
label = "%(gramps_id)s :" % {
'gramps_id' : family.gramps_id,
}
return label
def _createmap_for_one_family(self, family):
"""
Create all markers for one family : all event's places with a lat/lon.
"""
dbstate = self.dbstate
self.message_layer.add_message(_("Family places for %s") % self.family_label(family))
try:
person = dbstate.db.get_person_from_handle(family.get_father_handle())
except:
return
family_id = family.gramps_id
if person is None: # family without father ?
person = dbstate.db.get_person_from_handle(family.get_mother_handle())
if person is None:
person = dbstate.db.get_person_from_handle(self.uistate.get_active('Person'))
if person is not None:
family_list = person.get_family_handle_list()
if len(family_list) > 0:
fhandle = family_list[0] # first is primary
fam = dbstate.db.get_family_from_handle(fhandle)
handle = fam.get_father_handle()
father = dbstate.db.get_person_from_handle(handle)
if father:
comment = _("Father : %(id)s : %(name)s") % {'id': father.gramps_id,
'name': _nd.display(father) }
self._createpersonmarkers(dbstate, father,
comment, family_id)
handle = fam.get_mother_handle()
mother = dbstate.db.get_person_from_handle(handle)
if mother:
comment = _("Mother : %(id)s : %(name)s") % {'id': mother.gramps_id,
'name': _nd.display(mother) }
self._createpersonmarkers(dbstate, mother,
comment, family_id)
index = 0
child_ref_list = fam.get_child_ref_list()
if child_ref_list:
for child_ref in child_ref_list:
child = dbstate.db.get_person_from_handle(child_ref.ref)
if child:
index += 1
comment = _("Child : %(id)s - %(index)d "
": %(name)s") % {
'id' : child.gramps_id,
'index' : index,
'name' : _nd.display(child)
}
self._createpersonmarkers(dbstate, child,
comment, family_id)
else:
comment = _("Person : %(id)s %(name)s has no family.") % {
'id' : person.gramps_id ,
'name' : _nd.display(person)
}
self._createpersonmarkers(dbstate, person, comment, family_id)
def _createmap(self, family_x):
"""
Create all markers for each people's event in the database which has
a lat/lon.
"""
self.place_list = []
self.place_without_coordinates = []
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.minyear = 9999
self.maxyear = 0
self.message_layer.clear_messages()
family = self.dbstate.db.get_family_from_handle(family_x)
if family is None:
person = self.dbstate.db.get_person_from_handle(self.uistate.get_active('Person'))
if not person:
return
family_list = person.get_family_handle_list()
for family_hdl in family_list:
family = self.dbstate.db.get_family_from_handle(family_hdl)
if family is not None:
self._createmap_for_one_family(family)
else:
self._createmap_for_one_family(family)
self.sort = sorted(self.place_list,
key=operator.itemgetter(3, 4, 6)
)
self._create_markers()
def add_event_bubble_message(self, event, lat, lon, mark, menu):
self.itemoption = Gtk.Menu()
itemoption = self.itemoption
itemoption.show()
menu.set_submenu(itemoption)
modify = Gtk.MenuItem(label=_("Edit Family"))
modify.show()
modify.connect("activate", self.edit_family, event, lat, lon, mark)
itemoption.append(modify)
modify = Gtk.MenuItem(label=_("Edit Person"))
modify.show()
modify.connect("activate", self.edit_person, event, lat, lon, mark)
itemoption.append(modify)
modify = Gtk.MenuItem(label=_("Edit Event"))
modify.show()
modify.connect("activate", self.edit_event, event, lat, lon, mark)
itemoption.append(modify)
center = Gtk.MenuItem(label=_("Center on this place"))
center.show()
center.connect("activate", self.center_here, event, lat, lon, mark)
itemoption.append(center)
def bubble_message(self, event, lat, lon, marks):
self.menu = Gtk.Menu()
menu = self.menu
menu.set_title("family")
message = ""
oldplace = ""
prevmark = None
for mark in marks:
if message != "":
add_item = Gtk.MenuItem(label=message)
add_item.show()
menu.append(add_item)
self.add_event_bubble_message(event, lat, lon,
prevmark, add_item)
if mark[0] != oldplace:
message = "%s :" % mark[0]
self.add_place_bubble_message(event, lat, lon,
marks, menu, message, mark)
oldplace = mark[0]
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
# format the date as described in preferences.
date = displayer.display(evt.get_date_object())
if date == "":
date = _("Unknown")
if ( mark[5] == EventRoleType.PRIMARY ):
message = "(%s) %s : %s" % ( date, mark[7], mark[1] )
elif ( mark[5] == EventRoleType.FAMILY ):
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
(father_name, mother_name) = self._get_father_and_mother_name(evt)
message = "(%s) %s : %s - %s" % ( date, mark[7], father_name, mother_name )
else:
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
descr = evt.get_description()
if descr == "":
descr = _('No description')
message = "(%s) %s => %s" % ( date, mark[5], descr)
prevmark = mark
add_item = Gtk.MenuItem(label=message)
add_item.show()
menu.append(add_item)
self.add_event_bubble_message(event, lat, lon, prevmark, add_item)
menu.popup(None, None,
lambda menu, data: (event.get_root_coords()[0],
event.get_root_coords()[1], True),
None, event.button, event.time)
return 1
def add_specific_menu(self, menu, event, lat, lon):
"""
Add specific entry to the navigation menu.
"""
return
def get_default_gramplets(self):
"""
Define the default gramplets for the sidebar and bottombar.
"""
return (("Family Filter",),
())
| pmghalvorsen/gramps_branch | gramps/plugins/view/geofamily.py | Python | gpl-2.0 | 19,626 |
"""
.. module: drivers/picam
:platform: Windows
.. moduleauthor:: Daniel R. Dietze <daniel.dietze@berkeley.edu>
Basic interface to PrincetonInstrument's PICam library. It supports most of the standard features
that are provided by PICam. I have decided not to implement a non-blocking version of the image
acquisition in order to keep things clear and simple.
Here is some example code showing the necessary parameters to get 1 kHz readout rates on a PIXIS100::
from picam import *
# initialize camera class and connect to library, look for available camera and connect to first one
cam = picam()
cam.loadLibrary()
cam.getAvailableCameras()
cam.connect()
# this will cool down CCD
cam.setParameter("SensorTemperatureSetPoint", -75)
    # shortest exposure
cam.setParameter("ExposureTime", 0)
# readout mode
cam.setParameter("ReadoutControlMode", PicamReadoutControlMode["FullFrame"])
# custom chip settings
cam.setROI(0, 1340, 1, 0, 100, 100)
cam.setParameter("ActiveWidth", 1340)
cam.setParameter("ActiveHeight", 100)
cam.setParameter("ActiveLeftMargin", 0)
cam.setParameter("ActiveRightMargin", 0)
cam.setParameter("ActiveTopMargin", 8)
cam.setParameter("ActiveBottomMargin", 8)
cam.setParameter("VerticalShiftRate", 3.2) # select fastest
# set logic out to not ready
cam.setParameter("OutputSignal", PicamOutputSignal["Busy"])
# shutter delays; open before trigger corresponds to shutter opening pre delay
cam.setParameter("ShutterTimingMode", PicamShutterTimingMode["Normal"])
cam.setParameter("ShutterClosingDelay", 0)
# sensor cleaning
cam.setParameter("CleanSectionFinalHeightCount", 1)
cam.setParameter("CleanSectionFinalHeight", 100)
cam.setParameter("CleanSerialRegister", False)
cam.setParameter("CleanCycleCount", 1)
cam.setParameter("CleanCycleHeight", 100)
cam.setParameter("CleanUntilTrigger", True)
# sensor gain settings
# according to manual, Pixis supports 100kHz and 2MHz; select fastest
cam.setParameter("AdcSpeed", 2.0)
cam.setParameter("AdcAnalogGain", PicamAdcAnalogGain["Low"])
cam.setParameter("AdcQuality", PicamAdcQuality["HighCapacity"])
# trigger and timing settings
cam.setParameter("TriggerDetermination", PicamTriggerDetermination["PositivePolarity"])
cam.setParameter("TriggerResponse", PicamTriggerResponse["ReadoutPerTrigger"])
# send configuration
cam.sendConfiguration()
# get readout speed
print "Estimated readout time = %f ms" % cam.getParameter("ReadoutTimeCalculation")
cam.disconnect()
cam.unloadLibrary()
..
This file is part of the pyFSRS app.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014-2016 Daniel Dietze <daniel.dietze@berkeley.edu>.
"""
import os
import ctypes
import numpy as np
from picam_types import *
# ##########################################################################################################
# helper functions
def ptr(x):
"""Shortcut to return a ctypes.pointer to object x.
"""
return ctypes.pointer(x)
# ##########################################################################################################
# Camera Class
class picam():
"""Main class that handles all connectivity with library and cameras.
"""
# +++++++++++ CONSTRUCTION / DESTRUCTION ++++++++++++++++++++++++++++++++++++++++++++
def __init__(self):
# empty handle
self.cam = None
self.camIDs = None
self.roisPtr = []
self.pulsePtr = []
self.modPtr = []
self.acqThread = None
self.totalFrameSize = 0
# load picam.dll and initialize library
def loadLibrary(self, pathToLib=""):
"""Loads the picam library ('Picam.dll') and initializes it.
        :param str pathToLib: Path to the dynamic link library (optional). If empty, the library is loaded using the path given by the environment variable *PicamRoot*, which is normally created by the PICam SDK installer.
:returns: Prints the library version to stdout.
"""
if pathToLib == "":
pathToLib = os.path.join(os.environ["PicamRoot"], "Runtime")
pathToLib = os.path.join(pathToLib, "Picam.dll")
self.lib = ctypes.cdll.LoadLibrary(pathToLib)
isconnected = pibln()
self.status(self.lib.Picam_IsLibraryInitialized(ptr(isconnected)))
if not isconnected.value:
self.status(self.lib.Picam_InitializeLibrary())
print self.getLibraryVersion()
# call this function to release any resources and free the library
def unloadLibrary(self):
"""Call this function to release any resources and free the library.
"""
# clean up all reserved memory that may be around
for i in range(len(self.roisPtr)):
self.status(self.lib.Picam_DestroyRois(self.roisPtr[i]))
for i in range(len(self.pulsePtr)):
self.status(self.lib.Picam_DestroyPulses(self.pulsePtr[i]))
for i in range(len(self.modPtr)):
self.status(self.lib.Picam_DestroyModulations(self.modPtr[i]))
# disconnect from camera
self.disconnect()
if isinstance(self.camIDs, list):
for c in self.camIDs:
self.status(self.lib.Picam_DisconnectDemoCamera(ptr(c)))
# free camID resources
if self.camIDs is not None and not isinstance(self.camIDs, list):
self.status(self.lib.Picam_DestroyCameraIDs(self.camIDs))
self.camIDs = None
# unload the library
self.status(self.lib.Picam_UninitializeLibrary())
print "Unloaded PICamSDK"
# +++++++++++ CLASS FUNCTIONS ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# get version information
def getLibraryVersion(self):
"""Returns the PICam library version string.
"""
major = piint()
minor = piint()
distr = piint()
released = piint()
self.status(self.lib.Picam_GetVersion(ptr(major), ptr(minor), ptr(distr), ptr(released)))
return "PICam Library Version %d.%d.%d.%d" % (major.value, minor.value, distr.value, released.value)
# returns a list of camera IDs that are connected to the computer
# if no physical camera is found, a demo camera is initialized - for debug only
def getAvailableCameras(self):
"""Queries a list of IDs of cameras that are connected to the computer and prints some sensor information for each camera to stdout.
If no physical camera is found, a demo camera is initialized - *for debug only*.
"""
if self.camIDs is not None and not isinstance(self.camIDs, list):
self.status(self.lib.Picam_DestroyCameraIDs(self.camIDs))
self.camIDs = None
# get connected cameras
self.camIDs = ptr(PicamCameraID())
id_count = piint()
self.status(self.lib.Picam_GetAvailableCameraIDs(ptr(self.camIDs), ptr(id_count)))
# if none are found, create a demo camera
print "Available Cameras:"
if id_count.value < 1:
self.status(self.lib.Picam_DestroyCameraIDs(self.camIDs))
model_array = ptr(piint())
model_count = piint()
self.status(self.lib.Picam_GetAvailableDemoCameraModels(ptr(model_array), ptr(model_count)))
model_ID = PicamCameraID()
serial = ctypes.c_char_p("Demo Cam 1")
self.status(self.lib.Picam_ConnectDemoCamera(model_array[0], serial, ptr(model_ID)))
self.camIDs = [model_ID]
self.status(self.lib.Picam_DestroyModels(model_array))
print ' Model is ', PicamModelLookup[model_ID.model]
print ' Computer interface is ', PicamComputerInterfaceLookup[model_ID.computer_interface]
print ' Sensor_name is ', model_ID.sensor_name
print ' Serial number is', model_ID.serial_number
print '\n'
else:
for i in range(id_count.value):
print ' Model is ', PicamModelLookup[self.camIDs[i].model]
print ' Computer interface is ', PicamComputerInterfaceLookup[self.camIDs[i].computer_interface]
print ' Sensor_name is ', self.camIDs[i].sensor_name
print ' Serial number is', self.camIDs[i].serial_number
print '\n'
# returns string associated with last error
def getLastError(self):
"""Returns the identifier associated with the last error (*str*).
"""
return PicamErrorLookup[self.err]
def status(self, err):
"""Checks the return value of a picam function for any error code. If an error occurred, it prints the error message to stdout.
:param int err: Error code returned by any picam function call.
:returns: Error code (int) and if an error occurred, prints error message.
"""
errstr = PicamErrorLookup[err]
if errstr != "None":
print "ERROR: ", errstr
# raise AssertionError(errstr)
self.err = err
return err
# connect / disconnect camera
# if no camera ID is given, connect to the first available camera
# otherwise camID is an integer index into a list of valid camera IDs that
# has been retrieved by getAvailableCameras()
def connect(self, camID=None):
""" Connect to camera.
        :param int camID: Number / index of camera to connect to (optional). It is an integer index into a list of valid camera IDs that has been retrieved by :py:func:`getAvailableCameras`. If camID is None, this function connects to the first available camera (default).
"""
if self.cam is not None:
self.disconnect()
if camID is None:
self.cam = pivoid()
self.status(self.lib.Picam_OpenFirstCamera(ptr(self.cam)))
else:
self.cam = pivoid()
self.status(self.lib.Picam_OpenCamera(ptr(self.camIDs[camID]), ctypes.addressof(self.cam)))
# invoke commit parameters to validate all parameters for acquisition
self.sendConfiguration()
def disconnect(self):
"""Disconnect current camera.
"""
if self.cam is not None:
self.status(self.lib.Picam_CloseCamera(self.cam))
self.cam = None
def getCurrentCameraID(self):
"""Returns the current camera ID (:py:class:`PicamCameraID`).
"""
id = PicamCameraID()
self.status(self.lib.Picam_GetCameraID(self.cam, ptr(id)))
return id
# prints a list of parameters that are available
def printAvailableParameters(self):
"""Prints an overview over the parameters to stdout that are available for the current camera and their limits.
"""
parameter_array = ptr(piint())
parameter_count = piint()
self.lib.Picam_GetParameters(self.cam, ptr(parameter_array), ptr(parameter_count))
for i in range(parameter_count.value):
# read / write access
access = piint()
self.lib.Picam_GetParameterValueAccess(self.cam, parameter_array[i], ptr(access))
readable = PicamValueAccessLookup[access.value]
# constraints
contype = piint()
self.lib.Picam_GetParameterConstraintType(self.cam, parameter_array[i], ptr(contype))
if PicamConstraintTypeLookup[contype.value] == "None":
constraint = "ALL"
elif PicamConstraintTypeLookup[contype.value] == "Range":
c = ptr(PicamRangeConstraint())
self.lib.Picam_GetParameterRangeConstraint(self.cam, parameter_array[i], PicamConstraintCategory['Capable'], ptr(c))
constraint = "from %f to %f in steps of %f" % (c[0].minimum, c[0].maximum, c[0].increment)
self.lib.Picam_DestroyRangeConstraints(c)
elif PicamConstraintTypeLookup[contype.value] == "Collection":
c = ptr(PicamCollectionConstraint())
self.lib.Picam_GetParameterCollectionConstraint(self.cam, parameter_array[i], PicamConstraintCategory['Capable'], ptr(c))
constraint = ""
for j in range(c[0].values_count):
if constraint != "":
constraint += ", "
constraint += str(c[0].values_array[j])
self.lib.Picam_DestroyCollectionConstraints(c)
elif PicamConstraintTypeLookup[contype.value] == "Rois":
constraint = "N.A."
elif PicamConstraintTypeLookup[contype.value] == "Pulse":
constraint = "N.A."
elif PicamConstraintTypeLookup[contype.value] == "Modulations":
constraint = "N.A."
# print infos
print PicamParameterLookup[parameter_array[i]]
print " value access:", readable
print " allowed values:", constraint
print "\n"
self.lib.Picam_DestroyParameters(parameter_array)
# get / set parameters
# name is a string specifying the parameter
def getParameter(self, name):
"""Reads and returns the value of the parameter with given name. If there is no parameter of this name, the function returns None and prints a warning.
:param str name: Name of the parameter exactly as stated in the PICam SDK manual.
:returns: Value of this parameter with data type corresponding to the type of parameter.
"""
prm = PicamParameter[name]
exists = pibln()
self.lib.Picam_DoesParameterExist(self.cam, prm, ptr(exists))
if not exists.value:
print "Ignoring parameter", name
print " Parameter does not exist for current camera!"
return
# get type of parameter
type = piint()
self.lib.Picam_GetParameterValueType(self.cam, prm, ptr(type))
if type.value not in PicamValueTypeLookup:
print "Not a valid parameter type enumeration:", type.value
print "Ignoring parameter", name
return 0
if PicamValueTypeLookup[type.value] in ["Integer", "Boolean", "Enumeration"]:
val = piint()
# test whether we can read the value directly from hardware
cr = pibln()
self.lib.Picam_CanReadParameter(self.cam, prm, ptr(cr))
if cr.value:
if self.lib.Picam_ReadParameterIntegerValue(self.cam, prm, ptr(val)) == 0:
return val.value
else:
if self.lib.Picam_GetParameterIntegerValue(self.cam, prm, ptr(val)) == 0:
return val.value
if PicamValueTypeLookup[type.value] == "LargeInteger":
val = pi64s()
if self.lib.Picam_GetParameterLargeIntegerValue(self.cam, prm, ptr(val)) == 0:
return val.value
if PicamValueTypeLookup[type.value] == "FloatingPoint":
val = piflt()
# NEW
# test whether we can read the value directly from hardware
cr = pibln()
self.lib.Picam_CanReadParameter(self.cam, prm, ptr(cr))
if cr.value:
if self.lib.Picam_ReadParameterFloatingPointValue(self.cam, prm, ptr(val)) == 0:
return val.value
else:
if self.lib.Picam_GetParameterFloatingPointValue(self.cam, prm, ptr(val)) == 0:
return val.value
if PicamValueTypeLookup[type.value] == "Rois":
val = ptr(PicamRois())
if self.lib.Picam_GetParameterRoisValue(self.cam, prm, ptr(val)) == 0:
self.roisPtr.append(val)
return val.contents
if PicamValueTypeLookup[type.value] == "Pulse":
val = ptr(PicamPulse())
if self.lib.Picam_GetParameterPulseValue(self.cam, prm, ptr(val)) == 0:
self.pulsePtr.append(val)
return val.contents
if PicamValueTypeLookup[type.value] == "Modulations":
val = ptr(PicamModulations())
if self.lib.Picam_GetParameterModulationsValue(self.cam, prm, ptr(val)) == 0:
self.modPtr.append(val)
return val.contents
return None
def setParameter(self, name, value):
"""Set parameter. The value is automatically typecast to the correct data type corresponding to the type of parameter.
.. note:: Setting a parameter with this function does not automatically change the configuration in the camera. In order to apply all changes, :py:func:`sendConfiguration` has to be called.
:param str name: Name of the parameter exactly as stated in the PICam SDK manual.
:param mixed value: New parameter value. If the parameter value cannot be changed, a warning is printed to stdout.
"""
prm = PicamParameter[name]
exists = pibln()
self.lib.Picam_DoesParameterExist(self.cam, prm, ptr(exists))
        if not exists.value:
print "Ignoring parameter", name
print " Parameter does not exist for current camera!"
return
access = piint()
self.lib.Picam_GetParameterValueAccess(self.cam, prm, ptr(access))
if PicamValueAccessLookup[access.value] not in ["ReadWrite", "ReadWriteTrivial"]:
print "Ignoring parameter", name
print " Not allowed to overwrite parameter!"
return
if PicamValueAccessLookup[access.value] == "ReadWriteTrivial":
print "WARNING: Parameter", name, " allows only one value!"
# get type of parameter
type = piint()
self.lib.Picam_GetParameterValueType(self.cam, prm, ptr(type))
if type.value not in PicamValueTypeLookup:
print "Ignoring parameter", name
print " Not a valid parameter type:", type.value
return
if PicamValueTypeLookup[type.value] in ["Integer", "Boolean", "Enumeration"]:
val = piint(value)
self.status(self.lib.Picam_SetParameterIntegerValue(self.cam, prm, val))
if PicamValueTypeLookup[type.value] == "LargeInteger":
val = pi64s(value)
self.status(self.lib.Picam_SetParameterLargeIntegerValue(self.cam, prm, val))
if PicamValueTypeLookup[type.value] == "FloatingPoint":
val = piflt(value)
self.status(self.lib.Picam_SetParameterFloatingPointValue(self.cam, prm, val))
if PicamValueTypeLookup[type.value] == "Rois":
self.status(self.lib.Picam_SetParameterRoisValue(self.cam, prm, ptr(value)))
if PicamValueTypeLookup[type.value] == "Pulse":
self.status(self.lib.Picam_SetParameterPulseValue(self.cam, prm, ptr(value)))
if PicamValueTypeLookup[type.value] == "Modulations":
self.status(self.lib.Picam_SetParameterModulationsValue(self.cam, prm, ptr(value)))
if self.err != PicamError["None"]:
print "Ignoring parameter", name
print " Could not change parameter. Keeping previous value:", self.getParameter(name)
# this function has to be called once all configurations
# are done to apply settings to the camera
def sendConfiguration(self):
"""This function has to be called once all configurations are done to apply settings to the camera.
"""
failed = ptr(piint())
failedCount = piint()
self.status(self.lib.Picam_CommitParameters(self.cam, ptr(failed), ptr(failedCount)))
if failedCount.value > 0:
for i in range(failedCount.value):
print "Could not set parameter", PicamParameterLookup[failed[i]]
self.status(self.lib.Picam_DestroyParameters(failed))
self.updateROIS()
# utility function that extracts the number of pixel sizes of all ROIs
def updateROIS(self):
"""Internally used utility function to extract a list of pixel sizes of ROIs.
"""
self.ROIS = []
rois = self.getParameter("Rois")
self.totalFrameSize = 0
offs = 0
for i in range(rois.roi_count):
w = int(np.ceil(float(rois.roi_array[i].width) / float(rois.roi_array[i].x_binning)))
h = int(np.ceil(float(rois.roi_array[i].height) / float(rois.roi_array[i].y_binning)))
self.ROIS.append((w, h, offs))
offs = offs + w * h
self.totalFrameSize = offs
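    # Illustration of the layout produced above (hypothetical values, not from
    # the library): two unbinned ROIs of 100x50 and 200x10 pixels would give
    # self.ROIS = [(100, 50, 0), (200, 10, 5000)] and self.totalFrameSize = 7000.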
# set a single ROI
def setROI(self, x0, w, xbin, y0, h, ybin):
"""Create a single region of interest (ROI).
:param int x0: X-coordinate of upper left corner of ROI.
:param int w: Width of ROI.
:param int xbin: X-Binning, i.e. number of columns that are combined into one larger column (1 to w).
:param int y0: Y-coordinate of upper left corner of ROI.
:param int h: Height of ROI.
:param int ybin: Y-Binning, i.e. number of rows that are combined into one larger row (1 to h).
"""
r = PicamRoi(x0, w, xbin, y0, h, ybin)
R = PicamRois(ptr(r), 1)
self.setParameter("Rois", R)
self.updateROIS()
# add a ROI
def addROI(self, x0, w, xbin, y0, h, ybin):
"""Add a region-of-interest to the existing list of ROIs.
.. important:: The ROIs should not overlap! However, this function does not check for overlapping ROIs!
:param int x0: X-coordinate of upper left corner of ROI.
:param int w: Width of ROI.
:param int xbin: X-Binning, i.e. number of columns that are combined into one larger column (1 to w).
:param int y0: Y-coordinate of upper left corner of ROI.
:param int h: Height of ROI.
:param int ybin: Y-Binning, i.e. number of rows that are combined into one larger row (1 to h).
"""
# read existing rois
R = self.getParameter("Rois")
r0 = (PicamRoi * (R.roi_count + 1))()
for i in range(R.roi_count):
r0[i] = R.roi_array[i]
# add new roi
r0[-1] = PicamRoi(x0, w, xbin, y0, h, ybin)
# write back to camera
R1 = PicamRois(ptr(r0[0]), len(r0))
self.setParameter("Rois", R1)
self.updateROIS()
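    # Minimal ROI usage sketch (illustrative values only, assuming the sensor is
    # large enough for both stripes; not part of the original driver):
    #   cam.setROI(0, 1340, 1, 0, 100, 1)     # first stripe, no binning
    #   cam.addROI(0, 1340, 1, 200, 100, 1)   # second, non-overlapping stripe
    #   cam.sendConfiguration()               # apply both ROIs to the camera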
# acquisition functions
# readNFrames waits till all frames have been collected (using Picam_Acquire)
# N = number of frames
# timeout = max wait time between frames in ms
def readNFrames(self, N=1, timeout=100):
"""This function acquires N frames using Picam_Acquire. It waits till all frames have been collected before it returns.
:param int N: Number of frames to collect (>= 1, default=1). This number is essentially limited by the available memory.
:param float timeout: Maximum wait time between frames in milliseconds (default=100). This parameter is important when using external triggering.
:returns: List of acquired frames.
"""
available = PicamAvailableData()
errors = piint()
running = pibln()
self.lib.Picam_IsAcquisitionRunning(self.cam, ptr(running))
if running.value:
print "ERROR: acquisition still running"
return []
# start acquisition
self.status(self.lib.Picam_Acquire(self.cam, pi64s(N), piint(timeout), ptr(available), ptr(errors)))
# return data as numpy array
if available.readout_count >= N:
return self.getBuffer(available.initial_readout, available.readout_count)[0:N]
return []
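    # Sketch of a blocking acquisition (assumes a connected, configured camera;
    # the exact array shapes depend on the ROI setup, see getBuffer below):
    #   data = cam.readNFrames(N=1, timeout=1000)
    #   if data:
    #       print data[0].shape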
# this is a helper function that converts a readout buffer into a sequence of numpy arrays
# it reads all available data at once into a numpy buffer and reformats data to fit to the output mask
# size is number of readouts to read
# returns data as floating point
def getBuffer(self, address, size):
"""This is an internally used function to convert the readout buffer into a sequence of numpy arrays.
It reads all available data at once into a numpy buffer and reformats data to a usable format.
:param long address: Memory address where the readout buffer is stored.
:param int size: Number of readouts available in the readout buffer.
        :returns: List of ROIS; for each ROI, array of readouts; each readout is an NxM array.
"""
# get number of pixels contained in a single readout and a single frame
# parameters are bytes, a pixel in resulting array is 2 bytes
readoutstride = self.getParameter("ReadoutStride") / 2
framestride = self.getParameter("FrameStride") / 2
frames = self.getParameter("FramesPerReadout")
# create a pointer to data
dataArrayType = pi16u * readoutstride * size
dataArrayPointerType = ctypes.POINTER(dataArrayType)
dataPointer = ctypes.cast(address, dataArrayPointerType)
# create a numpy array from the buffer
data = np.frombuffer(dataPointer.contents, dtype='uint16')
# cast it into a usable format - [frames][data]
data = ((data.reshape(size, readoutstride)[:, :frames * framestride]).reshape(size, frames, framestride)[:, :, :self.totalFrameSize]).reshape(size * frames, self.totalFrameSize).astype(float)
# if there is just a single ROI, we are done
if len(self.ROIS) == 1:
return [data.reshape(size * frames, self.ROIS[0][0], self.ROIS[0][1])]
# otherwise, iterate through rois and add to output list (has to be list due to possibly different sizes)
out = []
        for r in self.ROIS:
out.append(data[:, r[2]:r[0] * r[1] + r[2]])
return out
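    # Worked example of the index math in getBuffer (illustrative numbers only):
    # with ReadoutStride = 2000 bytes (1000 pixels), FrameStride = 1000 bytes
    # (500 pixels), FramesPerReadout = 2 and a single 25x20 ROI
    # (totalFrameSize = 500), a call with size = 3 readouts yields an array of
    # shape (6, 25, 20): 3 readouts x 2 frames each, cropped to the ROI pixels.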
if __name__ == '__main__':
cam = picam()
cam.loadLibrary()
cam.getAvailableCameras()
cam.connect()
# cool down CCD
cam.setParameter("SensorTemperatureSetPoint", -75)
    # shortest exposure
cam.setParameter("ExposureTime", 0)
# readout mode
cam.setParameter("ReadoutControlMode", PicamReadoutControlMode["FullFrame"])
# custom chip settings
cam.setROI(0, 1340, 1, 0, 100, 100)
cam.setParameter("ActiveWidth", 1340)
cam.setParameter("ActiveHeight", 100)
cam.setParameter("ActiveLeftMargin", 0)
cam.setParameter("ActiveRightMargin", 0)
cam.setParameter("ActiveTopMargin", 8)
cam.setParameter("ActiveBottomMargin", 8)
cam.setParameter("VerticalShiftRate", 3.2) # select fastest
# set logic out to not ready
cam.setParameter("OutputSignal", PicamOutputSignal["Busy"])
# shutter delays; open before trigger corresponds to shutter opening pre delay
cam.setParameter("ShutterTimingMode", PicamShutterTimingMode["Normal"])
cam.setParameter("ShutterClosingDelay", 0)
# sensor cleaning
cam.setParameter("CleanSectionFinalHeightCount", 1)
cam.setParameter("CleanSectionFinalHeight", 100)
cam.setParameter("CleanSerialRegister", False)
cam.setParameter("CleanCycleCount", 1)
cam.setParameter("CleanCycleHeight", 100)
cam.setParameter("CleanUntilTrigger", True)
# sensor gain settings
# according to manual, Pixis supports 100kHz and 2MHz; select fastest
cam.setParameter("AdcSpeed", 2.0)
cam.setParameter("AdcAnalogGain", PicamAdcAnalogGain["Low"])
cam.setParameter("AdcQuality", PicamAdcQuality["HighCapacity"])
# trigger and timing settings
cam.setParameter("TriggerDetermination", PicamTriggerDetermination["PositivePolarity"])
cam.setParameter("TriggerResponse", PicamTriggerResponse["ReadoutPerTrigger"])
# send configuration
cam.sendConfiguration()
# get readout speed
print "Estimated readout time = %f ms" % cam.getParameter("ReadoutTimeCalculation")
cam.disconnect()
cam.unloadLibrary()
| ddietze/pyFSRS | drivers/picam/__init__.py | Python | gpl-3.0 | 29,058 |
import unittest
import Tkinter as tkinter
from Tkinter import TclError
import os
import sys
from test.test_support import requires, run_unittest
from test_ttk.support import (tcl_version, requires_tcl, get_tk_patchlevel,
widget_eq)
from widget_tests import (
add_standard_options, noconv, noconv_meth, int_round, pixels_round,
AbstractWidgetTest, StandardOptionsTests,
IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
_conv_pad_pixels = noconv_meth
def test_class(self):
widget = self.create()
self.assertEqual(widget['class'],
widget.__class__.__name__.title())
self.checkInvalidParam(widget, 'class', 'Foo',
errmsg="can't modify -class option after widget is created")
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
def test_colormap(self):
widget = self.create()
self.assertEqual(widget['colormap'], '')
self.checkInvalidParam(widget, 'colormap', 'new',
errmsg="can't modify -colormap option after widget is created")
widget2 = self.create(colormap='new')
self.assertEqual(widget2['colormap'], 'new')
def test_container(self):
widget = self.create()
self.assertEqual(widget['container'], 0 if self.wantobjects else '0')
self.checkInvalidParam(widget, 'container', 1,
errmsg="can't modify -container option after widget is created")
widget2 = self.create(container=True)
self.assertEqual(widget2['container'], 1 if self.wantobjects else '1')
def test_visual(self):
widget = self.create()
self.assertEqual(widget['visual'], '')
self.checkInvalidParam(widget, 'visual', 'default',
errmsg="can't modify -visual option after widget is created")
widget2 = self.create(visual='default')
self.assertEqual(widget2['visual'], 'default')
@add_standard_options(StandardOptionsTests)
class ToplevelTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'background', 'borderwidth',
'class', 'colormap', 'container', 'cursor', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'menu', 'padx', 'pady', 'relief', 'screen',
'takefocus', 'use', 'visual', 'width',
)
def create(self, **kwargs):
return tkinter.Toplevel(self.root, **kwargs)
def test_menu(self):
widget = self.create()
menu = tkinter.Menu(self.root)
self.checkParam(widget, 'menu', menu, eq=widget_eq)
self.checkParam(widget, 'menu', '')
def test_screen(self):
widget = self.create()
self.assertEqual(widget['screen'], '')
try:
display = os.environ['DISPLAY']
except KeyError:
self.skipTest('No $DISPLAY set.')
self.checkInvalidParam(widget, 'screen', display,
errmsg="can't modify -screen option after widget is created")
widget2 = self.create(screen=display)
self.assertEqual(widget2['screen'], display)
def test_use(self):
widget = self.create()
self.assertEqual(widget['use'], '')
parent = self.create(container=True)
wid = parent.winfo_id()
widget2 = self.create(use=wid)
self.assertEqual(int(widget2['use']), wid)
@add_standard_options(StandardOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'background', 'borderwidth',
'class', 'colormap', 'container', 'cursor', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'relief', 'takefocus', 'visual', 'width',
)
def create(self, **kwargs):
return tkinter.Frame(self.root, **kwargs)
@add_standard_options(StandardOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'background', 'borderwidth',
'class', 'colormap', 'container', 'cursor',
'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'labelanchor', 'labelwidget', 'padx', 'pady', 'relief',
'takefocus', 'text', 'visual', 'width',
)
def create(self, **kwargs):
return tkinter.LabelFrame(self.root, **kwargs)
def test_labelanchor(self):
widget = self.create()
self.checkEnumParam(widget, 'labelanchor',
'e', 'en', 'es', 'n', 'ne', 'nw',
's', 'se', 'sw', 'w', 'wn', 'ws')
self.checkInvalidParam(widget, 'labelanchor', 'center')
def test_labelwidget(self):
widget = self.create()
label = tkinter.Label(self.root, text='Mupp', name='foo')
self.checkParam(widget, 'labelwidget', label, expected='.foo')
label.destroy()
class AbstractLabelTest(AbstractWidgetTest, IntegerSizeTests):
_conv_pixels = noconv_meth
def test_highlightthickness(self):
widget = self.create()
self.checkPixelsParam(widget, 'highlightthickness',
0, 1.3, 2.6, 6, -2, '10p')
@add_standard_options(StandardOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'activeforeground', 'anchor',
'background', 'bitmap', 'borderwidth', 'compound', 'cursor',
'disabledforeground', 'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'image', 'justify', 'padx', 'pady', 'relief', 'state',
'takefocus', 'text', 'textvariable',
'underline', 'width', 'wraplength',
)
def create(self, **kwargs):
return tkinter.Label(self.root, **kwargs)
@add_standard_options(StandardOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'activeforeground', 'anchor',
'background', 'bitmap', 'borderwidth',
'command', 'compound', 'cursor', 'default',
'disabledforeground', 'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'image', 'justify', 'overrelief', 'padx', 'pady', 'relief',
'repeatdelay', 'repeatinterval',
'state', 'takefocus', 'text', 'textvariable',
'underline', 'width', 'wraplength')
def create(self, **kwargs):
return tkinter.Button(self.root, **kwargs)
def test_default(self):
widget = self.create()
self.checkEnumParam(widget, 'default', 'active', 'disabled', 'normal')
@add_standard_options(StandardOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'activeforeground', 'anchor',
'background', 'bitmap', 'borderwidth',
'command', 'compound', 'cursor',
'disabledforeground', 'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'image', 'indicatoron', 'justify',
'offrelief', 'offvalue', 'onvalue', 'overrelief',
'padx', 'pady', 'relief', 'selectcolor', 'selectimage', 'state',
'takefocus', 'text', 'textvariable',
'tristateimage', 'tristatevalue',
'underline', 'variable', 'width', 'wraplength',
)
def create(self, **kwargs):
return tkinter.Checkbutton(self.root, **kwargs)
def test_offvalue(self):
widget = self.create()
self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
def test_onvalue(self):
widget = self.create()
self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
@add_standard_options(StandardOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'activeforeground', 'anchor',
'background', 'bitmap', 'borderwidth',
'command', 'compound', 'cursor',
'disabledforeground', 'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'image', 'indicatoron', 'justify', 'offrelief', 'overrelief',
'padx', 'pady', 'relief', 'selectcolor', 'selectimage', 'state',
'takefocus', 'text', 'textvariable',
'tristateimage', 'tristatevalue',
'underline', 'value', 'variable', 'width', 'wraplength',
)
def create(self, **kwargs):
return tkinter.Radiobutton(self.root, **kwargs)
def test_value(self):
widget = self.create()
self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
@add_standard_options(StandardOptionsTests)
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'activeforeground', 'anchor',
'background', 'bitmap', 'borderwidth',
'compound', 'cursor', 'direction',
'disabledforeground', 'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'image', 'indicatoron', 'justify', 'menu',
'padx', 'pady', 'relief', 'state',
'takefocus', 'text', 'textvariable',
'underline', 'width', 'wraplength',
)
_conv_pixels = staticmethod(pixels_round)
def create(self, **kwargs):
return tkinter.Menubutton(self.root, **kwargs)
def test_direction(self):
widget = self.create()
self.checkEnumParam(widget, 'direction',
'above', 'below', 'flush', 'left', 'right')
def test_height(self):
widget = self.create()
self.checkIntegerParam(widget, 'height', 100, -100, 0, conv=str)
test_highlightthickness = StandardOptionsTests.test_highlightthickness.im_func
@unittest.skipIf(sys.platform == 'darwin',
'crashes with Cocoa Tk (issue19733)')
def test_image(self):
widget = self.create()
image = tkinter.PhotoImage(master=self.root, name='image1')
self.checkParam(widget, 'image', image, conv=str)
errmsg = 'image "spam" doesn\'t exist'
with self.assertRaises(tkinter.TclError) as cm:
widget['image'] = 'spam'
if errmsg is not None:
self.assertEqual(str(cm.exception), errmsg)
with self.assertRaises(tkinter.TclError) as cm:
widget.configure({'image': 'spam'})
if errmsg is not None:
self.assertEqual(str(cm.exception), errmsg)
def test_menu(self):
widget = self.create()
menu = tkinter.Menu(widget, name='menu')
self.checkParam(widget, 'menu', menu, eq=widget_eq)
menu.destroy()
def test_padx(self):
widget = self.create()
self.checkPixelsParam(widget, 'padx', 3, 4.4, 5.6, '12m')
self.checkParam(widget, 'padx', -2, expected=0)
def test_pady(self):
widget = self.create()
self.checkPixelsParam(widget, 'pady', 3, 4.4, 5.6, '12m')
self.checkParam(widget, 'pady', -2, expected=0)
def test_width(self):
widget = self.create()
self.checkIntegerParam(widget, 'width', 402, -402, 0, conv=str)
class OptionMenuTest(MenubuttonTest, unittest.TestCase):
def create(self, default='b', values=('a', 'b', 'c'), **kwargs):
return tkinter.OptionMenu(self.root, None, default, *values, **kwargs)
@add_standard_options(IntegerSizeTests, StandardOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'borderwidth', 'cursor',
'disabledbackground', 'disabledforeground',
'exportselection', 'font', 'foreground',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'insertbackground', 'insertborderwidth',
'insertofftime', 'insertontime', 'insertwidth',
'invalidcommand', 'justify', 'readonlybackground', 'relief',
'selectbackground', 'selectborderwidth', 'selectforeground',
'show', 'state', 'takefocus', 'textvariable',
'validate', 'validatecommand', 'width', 'xscrollcommand',
)
def create(self, **kwargs):
return tkinter.Entry(self.root, **kwargs)
def test_disabledbackground(self):
widget = self.create()
self.checkColorParam(widget, 'disabledbackground')
def test_insertborderwidth(self):
widget = self.create(insertwidth=100)
self.checkPixelsParam(widget, 'insertborderwidth',
0, 1.3, 2.6, 6, -2, '10p')
# insertborderwidth is bounded above by a half of insertwidth.
self.checkParam(widget, 'insertborderwidth', 60, expected=100//2)
def test_insertwidth(self):
widget = self.create()
self.checkPixelsParam(widget, 'insertwidth', 1.3, 3.6, '10p')
self.checkParam(widget, 'insertwidth', 0.1, expected=2)
self.checkParam(widget, 'insertwidth', -2, expected=2)
if pixels_round(0.9) <= 0:
self.checkParam(widget, 'insertwidth', 0.9, expected=2)
else:
self.checkParam(widget, 'insertwidth', 0.9, expected=1)
def test_invalidcommand(self):
widget = self.create()
self.checkCommandParam(widget, 'invalidcommand')
self.checkCommandParam(widget, 'invcmd')
def test_readonlybackground(self):
widget = self.create()
self.checkColorParam(widget, 'readonlybackground')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', '*')
self.checkParam(widget, 'show', '')
self.checkParam(widget, 'show', ' ')
def test_state(self):
widget = self.create()
self.checkEnumParam(widget, 'state',
'disabled', 'normal', 'readonly')
def test_validate(self):
widget = self.create()
self.checkEnumParam(widget, 'validate',
'all', 'key', 'focus', 'focusin', 'focusout', 'none')
def test_validatecommand(self):
widget = self.create()
self.checkCommandParam(widget, 'validatecommand')
self.checkCommandParam(widget, 'vcmd')
@add_standard_options(StandardOptionsTests)
class SpinboxTest(EntryTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'background', 'borderwidth',
'buttonbackground', 'buttoncursor', 'buttondownrelief', 'buttonuprelief',
'command', 'cursor', 'disabledbackground', 'disabledforeground',
'exportselection', 'font', 'foreground', 'format', 'from',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'increment',
'insertbackground', 'insertborderwidth',
'insertofftime', 'insertontime', 'insertwidth',
'invalidcommand', 'justify', 'relief', 'readonlybackground',
'repeatdelay', 'repeatinterval',
'selectbackground', 'selectborderwidth', 'selectforeground',
'state', 'takefocus', 'textvariable', 'to',
'validate', 'validatecommand', 'values',
'width', 'wrap', 'xscrollcommand',
)
def create(self, **kwargs):
return tkinter.Spinbox(self.root, **kwargs)
test_show = None
def test_buttonbackground(self):
widget = self.create()
self.checkColorParam(widget, 'buttonbackground')
def test_buttoncursor(self):
widget = self.create()
self.checkCursorParam(widget, 'buttoncursor')
def test_buttondownrelief(self):
widget = self.create()
self.checkReliefParam(widget, 'buttondownrelief')
def test_buttonuprelief(self):
widget = self.create()
self.checkReliefParam(widget, 'buttonuprelief')
def test_format(self):
widget = self.create()
self.checkParam(widget, 'format', '%2f')
self.checkParam(widget, 'format', '%2.2f')
self.checkParam(widget, 'format', '%.2f')
self.checkParam(widget, 'format', '%2.f')
self.checkInvalidParam(widget, 'format', '%2e-1f')
self.checkInvalidParam(widget, 'format', '2.2')
self.checkInvalidParam(widget, 'format', '%2.-2f')
self.checkParam(widget, 'format', '%-2.02f')
self.checkParam(widget, 'format', '% 2.02f')
self.checkParam(widget, 'format', '% -2.200f')
self.checkParam(widget, 'format', '%09.200f')
self.checkInvalidParam(widget, 'format', '%d')
def test_from(self):
widget = self.create()
self.checkParam(widget, 'to', 100.0)
self.checkFloatParam(widget, 'from', -10, 10.2, 11.7)
self.checkInvalidParam(widget, 'from', 200,
errmsg='-to value must be greater than -from value')
def test_increment(self):
widget = self.create()
self.checkFloatParam(widget, 'increment', -1, 1, 10.2, 12.8, 0)
def test_to(self):
widget = self.create()
self.checkParam(widget, 'from', -100.0)
self.checkFloatParam(widget, 'to', -10, 10.2, 11.7)
self.checkInvalidParam(widget, 'to', -200,
errmsg='-to value must be greater than -from value')
def test_values(self):
# XXX
widget = self.create()
self.assertEqual(widget['values'], '')
self.checkParam(widget, 'values', 'mon tue wed thur')
self.checkParam(widget, 'values', ('mon', 'tue', 'wed', 'thur'),
expected='mon tue wed thur')
self.checkParam(widget, 'values', (42, 3.14, '', 'any string'),
expected='42 3.14 {} {any string}')
self.checkParam(widget, 'values', '')
def test_wrap(self):
widget = self.create()
self.checkBooleanParam(widget, 'wrap')
def test_bbox(self):
widget = self.create()
self.assertIsBoundingBox(widget.bbox(0))
self.assertRaises(tkinter.TclError, widget.bbox, 'noindex')
self.assertRaises(tkinter.TclError, widget.bbox, None)
self.assertRaises(TypeError, widget.bbox)
self.assertRaises(TypeError, widget.bbox, 0, 1)
@add_standard_options(StandardOptionsTests)
class TextTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'autoseparators', 'background', 'blockcursor', 'borderwidth',
'cursor', 'endline', 'exportselection',
'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'inactiveselectbackground', 'insertbackground', 'insertborderwidth',
'insertofftime', 'insertontime', 'insertunfocussed', 'insertwidth',
'maxundo', 'padx', 'pady', 'relief',
'selectbackground', 'selectborderwidth', 'selectforeground',
'setgrid', 'spacing1', 'spacing2', 'spacing3', 'startline', 'state',
'tabs', 'tabstyle', 'takefocus', 'undo', 'width', 'wrap',
'xscrollcommand', 'yscrollcommand',
)
if tcl_version < (8, 5):
_stringify = True
def create(self, **kwargs):
return tkinter.Text(self.root, **kwargs)
def test_autoseparators(self):
widget = self.create()
self.checkBooleanParam(widget, 'autoseparators')
@requires_tcl(8, 5)
def test_blockcursor(self):
widget = self.create()
self.checkBooleanParam(widget, 'blockcursor')
@requires_tcl(8, 5)
def test_endline(self):
widget = self.create()
        text = '\n'.join('Line %d' % i for i in range(100))
widget.insert('end', text)
self.checkParam(widget, 'endline', 200, expected='')
self.checkParam(widget, 'endline', -10, expected='')
self.checkInvalidParam(widget, 'endline', 'spam',
errmsg='expected integer but got "spam"')
self.checkParam(widget, 'endline', 50)
self.checkParam(widget, 'startline', 15)
self.checkInvalidParam(widget, 'endline', 10,
errmsg='-startline must be less than or equal to -endline')
def test_height(self):
widget = self.create()
self.checkPixelsParam(widget, 'height', 100, 101.2, 102.6, '3c')
self.checkParam(widget, 'height', -100, expected=1)
self.checkParam(widget, 'height', 0, expected=1)
def test_maxundo(self):
widget = self.create()
self.checkIntegerParam(widget, 'maxundo', 0, 5, -1)
@requires_tcl(8, 5)
def test_inactiveselectbackground(self):
widget = self.create()
self.checkColorParam(widget, 'inactiveselectbackground')
@requires_tcl(8, 6)
def test_insertunfocussed(self):
widget = self.create()
self.checkEnumParam(widget, 'insertunfocussed',
'hollow', 'none', 'solid')
def test_selectborderwidth(self):
widget = self.create()
self.checkPixelsParam(widget, 'selectborderwidth',
1.3, 2.6, -2, '10p', conv=noconv,
keep_orig=tcl_version >= (8, 5))
def test_spacing1(self):
widget = self.create()
self.checkPixelsParam(widget, 'spacing1', 20, 21.4, 22.6, '0.5c')
self.checkParam(widget, 'spacing1', -5, expected=0)
def test_spacing2(self):
widget = self.create()
self.checkPixelsParam(widget, 'spacing2', 5, 6.4, 7.6, '0.1c')
self.checkParam(widget, 'spacing2', -1, expected=0)
def test_spacing3(self):
widget = self.create()
self.checkPixelsParam(widget, 'spacing3', 20, 21.4, 22.6, '0.5c')
self.checkParam(widget, 'spacing3', -10, expected=0)
@requires_tcl(8, 5)
def test_startline(self):
widget = self.create()
        text = '\n'.join('Line %d' % i for i in range(100))
widget.insert('end', text)
self.checkParam(widget, 'startline', 200, expected='')
self.checkParam(widget, 'startline', -10, expected='')
self.checkInvalidParam(widget, 'startline', 'spam',
errmsg='expected integer but got "spam"')
self.checkParam(widget, 'startline', 10)
self.checkParam(widget, 'endline', 50)
self.checkInvalidParam(widget, 'startline', 70,
errmsg='-startline must be less than or equal to -endline')
def test_state(self):
widget = self.create()
if tcl_version < (8, 5):
self.checkParams(widget, 'state', 'disabled', 'normal')
else:
self.checkEnumParam(widget, 'state', 'disabled', 'normal')
def test_tabs(self):
widget = self.create()
if get_tk_patchlevel() < (8, 5, 11):
self.checkParam(widget, 'tabs', (10.2, 20.7, '1i', '2i'),
expected=('10.2', '20.7', '1i', '2i'))
else:
self.checkParam(widget, 'tabs', (10.2, 20.7, '1i', '2i'))
self.checkParam(widget, 'tabs', '10.2 20.7 1i 2i',
expected=('10.2', '20.7', '1i', '2i'))
self.checkParam(widget, 'tabs', '2c left 4c 6c center',
expected=('2c', 'left', '4c', '6c', 'center'))
self.checkInvalidParam(widget, 'tabs', 'spam',
errmsg='bad screen distance "spam"',
keep_orig=tcl_version >= (8, 5))
@requires_tcl(8, 5)
def test_tabstyle(self):
widget = self.create()
self.checkEnumParam(widget, 'tabstyle', 'tabular', 'wordprocessor')
def test_undo(self):
widget = self.create()
self.checkBooleanParam(widget, 'undo')
def test_width(self):
widget = self.create()
self.checkIntegerParam(widget, 'width', 402)
self.checkParam(widget, 'width', -402, expected=1)
self.checkParam(widget, 'width', 0, expected=1)
def test_wrap(self):
widget = self.create()
if tcl_version < (8, 5):
self.checkParams(widget, 'wrap', 'char', 'none', 'word')
else:
self.checkEnumParam(widget, 'wrap', 'char', 'none', 'word')
def test_bbox(self):
widget = self.create()
self.assertIsBoundingBox(widget.bbox('1.1'))
self.assertIsNone(widget.bbox('end'))
self.assertRaises(tkinter.TclError, widget.bbox, 'noindex')
self.assertRaises(tkinter.TclError, widget.bbox, None)
self.assertRaises(tkinter.TclError, widget.bbox)
self.assertRaises(tkinter.TclError, widget.bbox, '1.1', 'end')
@add_standard_options(PixelSizeTests, StandardOptionsTests)
class CanvasTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'borderwidth',
'closeenough', 'confine', 'cursor', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'insertbackground', 'insertborderwidth',
'insertofftime', 'insertontime', 'insertwidth',
'relief', 'scrollregion',
'selectbackground', 'selectborderwidth', 'selectforeground',
'state', 'takefocus',
'xscrollcommand', 'xscrollincrement',
'yscrollcommand', 'yscrollincrement', 'width',
)
_conv_pixels = staticmethod(int_round)
_stringify = True
def create(self, **kwargs):
return tkinter.Canvas(self.root, **kwargs)
def test_closeenough(self):
widget = self.create()
self.checkFloatParam(widget, 'closeenough', 24, 2.4, 3.6, -3,
conv=float)
def test_confine(self):
widget = self.create()
self.checkBooleanParam(widget, 'confine')
def test_scrollregion(self):
widget = self.create()
self.checkParam(widget, 'scrollregion', '0 0 200 150')
self.checkParam(widget, 'scrollregion', (0, 0, 200, 150),
expected='0 0 200 150')
self.checkParam(widget, 'scrollregion', '')
self.checkInvalidParam(widget, 'scrollregion', 'spam',
errmsg='bad scrollRegion "spam"')
self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200, 'spam'))
self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200))
self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200, 150, 0))
def test_state(self):
widget = self.create()
self.checkEnumParam(widget, 'state', 'disabled', 'normal',
errmsg='bad state value "{}": must be normal or disabled')
def test_xscrollincrement(self):
widget = self.create()
self.checkPixelsParam(widget, 'xscrollincrement',
40, 0, 41.2, 43.6, -40, '0.5i')
def test_yscrollincrement(self):
widget = self.create()
self.checkPixelsParam(widget, 'yscrollincrement',
10, 0, 11.2, 13.6, -10, '0.1i')
@add_standard_options(IntegerSizeTests, StandardOptionsTests)
class ListboxTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'activestyle', 'background', 'borderwidth', 'cursor',
'disabledforeground', 'exportselection',
'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'listvariable', 'relief',
'selectbackground', 'selectborderwidth', 'selectforeground',
'selectmode', 'setgrid', 'state',
'takefocus', 'width', 'xscrollcommand', 'yscrollcommand',
)
def create(self, **kwargs):
return tkinter.Listbox(self.root, **kwargs)
def test_activestyle(self):
widget = self.create()
self.checkEnumParam(widget, 'activestyle',
'dotbox', 'none', 'underline')
def test_listvariable(self):
widget = self.create()
var = tkinter.DoubleVar(self.root)
self.checkVariableParam(widget, 'listvariable', var)
def test_selectmode(self):
widget = self.create()
self.checkParam(widget, 'selectmode', 'single')
self.checkParam(widget, 'selectmode', 'browse')
self.checkParam(widget, 'selectmode', 'multiple')
self.checkParam(widget, 'selectmode', 'extended')
def test_state(self):
widget = self.create()
self.checkEnumParam(widget, 'state', 'disabled', 'normal')
def test_itemconfigure(self):
widget = self.create()
with self.assertRaisesRegexp(TclError, 'item number "0" out of range'):
widget.itemconfigure(0)
colors = 'red orange yellow green blue white violet'.split()
widget.insert('end', *colors)
for i, color in enumerate(colors):
widget.itemconfigure(i, background=color)
with self.assertRaises(TypeError):
widget.itemconfigure()
with self.assertRaisesRegexp(TclError, 'bad listbox index "red"'):
widget.itemconfigure('red')
self.assertEqual(widget.itemconfigure(0, 'background'),
('background', 'background', 'Background', '', 'red'))
self.assertEqual(widget.itemconfigure('end', 'background'),
('background', 'background', 'Background', '', 'violet'))
self.assertEqual(widget.itemconfigure('@0,0', 'background'),
('background', 'background', 'Background', '', 'red'))
d = widget.itemconfigure(0)
self.assertIsInstance(d, dict)
for k, v in d.items():
self.assertIn(len(v), (2, 5))
if len(v) == 5:
self.assertEqual(v, widget.itemconfigure(0, k))
self.assertEqual(v[4], widget.itemcget(0, k))
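        # Note (added): itemconfigure() with no option name returns a dict of
        # 5-tuples (name, dbName, dbClass, default, current value), as the
        # assertions above show; 2-element entries are option aliases and
        # carry no current value to check.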
def check_itemconfigure(self, name, value):
widget = self.create()
widget.insert('end', 'a', 'b', 'c', 'd')
widget.itemconfigure(0, **{name: value})
self.assertEqual(widget.itemconfigure(0, name)[4], value)
self.assertEqual(widget.itemcget(0, name), value)
with self.assertRaisesRegexp(TclError, 'unknown color name "spam"'):
widget.itemconfigure(0, **{name: 'spam'})
def test_itemconfigure_background(self):
self.check_itemconfigure('background', '#ff0000')
def test_itemconfigure_bg(self):
self.check_itemconfigure('bg', '#ff0000')
def test_itemconfigure_fg(self):
self.check_itemconfigure('fg', '#110022')
def test_itemconfigure_foreground(self):
self.check_itemconfigure('foreground', '#110022')
def test_itemconfigure_selectbackground(self):
self.check_itemconfigure('selectbackground', '#110022')
def test_itemconfigure_selectforeground(self):
self.check_itemconfigure('selectforeground', '#654321')
def test_box(self):
lb = self.create()
lb.insert(0, *('el%d' % i for i in range(8)))
lb.pack()
self.assertIsBoundingBox(lb.bbox(0))
self.assertIsNone(lb.bbox(-1))
self.assertIsNone(lb.bbox(10))
self.assertRaises(TclError, lb.bbox, 'noindex')
self.assertRaises(TclError, lb.bbox, None)
self.assertRaises(TypeError, lb.bbox)
self.assertRaises(TypeError, lb.bbox, 0, 1)
def test_curselection(self):
lb = self.create()
lb.insert(0, *('el%d' % i for i in range(8)))
lb.selection_clear(0, tkinter.END)
lb.selection_set(2, 4)
lb.selection_set(6)
self.assertEqual(lb.curselection(), (2, 3, 4, 6))
self.assertRaises(TypeError, lb.curselection, 0)
def test_get(self):
lb = self.create()
lb.insert(0, *('el%d' % i for i in range(8)))
self.assertEqual(lb.get(0), 'el0')
self.assertEqual(lb.get(3), 'el3')
self.assertEqual(lb.get('end'), 'el7')
self.assertEqual(lb.get(8), '')
self.assertEqual(lb.get(-1), '')
self.assertEqual(lb.get(3, 5), ('el3', 'el4', 'el5'))
self.assertEqual(lb.get(5, 'end'), ('el5', 'el6', 'el7'))
self.assertEqual(lb.get(5, 0), ())
self.assertEqual(lb.get(0, 0), ('el0',))
self.assertRaises(TclError, lb.get, 'noindex')
self.assertRaises(TclError, lb.get, None)
self.assertRaises(TypeError, lb.get)
self.assertRaises(TclError, lb.get, 'end', 'noindex')
self.assertRaises(TypeError, lb.get, 1, 2, 3)
self.assertRaises(TclError, lb.get, 2.4)
@add_standard_options(PixelSizeTests, StandardOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'background', 'bigincrement', 'borderwidth',
'command', 'cursor', 'digits', 'font', 'foreground', 'from',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'label', 'length', 'orient', 'relief',
'repeatdelay', 'repeatinterval',
'resolution', 'showvalue', 'sliderlength', 'sliderrelief', 'state',
'takefocus', 'tickinterval', 'to', 'troughcolor', 'variable', 'width',
)
default_orient = 'vertical'
def create(self, **kwargs):
return tkinter.Scale(self.root, **kwargs)
def test_bigincrement(self):
widget = self.create()
self.checkFloatParam(widget, 'bigincrement', 12.4, 23.6, -5)
def test_digits(self):
widget = self.create()
self.checkIntegerParam(widget, 'digits', 5, 0)
def test_from(self):
widget = self.create()
self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=round)
def test_label(self):
widget = self.create()
self.checkParam(widget, 'label', 'any string')
self.checkParam(widget, 'label', '')
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')
def test_resolution(self):
widget = self.create()
self.checkFloatParam(widget, 'resolution', 4.2, 0, 6.7, -2)
def test_showvalue(self):
widget = self.create()
self.checkBooleanParam(widget, 'showvalue')
def test_sliderlength(self):
widget = self.create()
self.checkPixelsParam(widget, 'sliderlength',
10, 11.2, 15.6, -3, '3m')
def test_sliderrelief(self):
widget = self.create()
self.checkReliefParam(widget, 'sliderrelief')
def test_tickinterval(self):
widget = self.create()
self.checkFloatParam(widget, 'tickinterval', 1, 4.3, 7.6, 0,
conv=round)
self.checkParam(widget, 'tickinterval', -2, expected=2,
conv=round)
def test_to(self):
widget = self.create()
self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10,
conv=round)
@add_standard_options(PixelSizeTests, StandardOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'activerelief',
'background', 'borderwidth',
'command', 'cursor', 'elementborderwidth',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'jump', 'orient', 'relief',
'repeatdelay', 'repeatinterval',
'takefocus', 'troughcolor', 'width',
)
_conv_pixels = staticmethod(int_round)
_stringify = True
default_orient = 'vertical'
def create(self, **kwargs):
return tkinter.Scrollbar(self.root, **kwargs)
def test_activerelief(self):
widget = self.create()
self.checkReliefParam(widget, 'activerelief')
def test_elementborderwidth(self):
widget = self.create()
self.checkPixelsParam(widget, 'elementborderwidth', 4.3, 5.6, -2, '1m')
def test_orient(self):
widget = self.create()
self.checkEnumParam(widget, 'orient', 'vertical', 'horizontal',
errmsg='bad orientation "{}": must be vertical or horizontal')
def test_activate(self):
sb = self.create()
for e in ('arrow1', 'slider', 'arrow2'):
sb.activate(e)
sb.activate('')
self.assertRaises(TypeError, sb.activate)
self.assertRaises(TypeError, sb.activate, 'arrow1', 'arrow2')
def test_set(self):
sb = self.create()
sb.set(0.2, 0.4)
self.assertEqual(sb.get(), (0.2, 0.4))
self.assertRaises(TclError, sb.set, 'abc', 'def')
self.assertRaises(TclError, sb.set, 0.6, 'def')
self.assertRaises(TclError, sb.set, 0.6, None)
self.assertRaises(TclError, sb.set, 0.6)
self.assertRaises(TclError, sb.set, 0.6, 0.7, 0.8)
@add_standard_options(StandardOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'borderwidth', 'cursor',
'handlepad', 'handlesize', 'height',
'opaqueresize', 'orient', 'relief',
'sashcursor', 'sashpad', 'sashrelief', 'sashwidth',
'showhandle', 'width',
)
default_orient = 'horizontal'
def create(self, **kwargs):
return tkinter.PanedWindow(self.root, **kwargs)
def test_handlepad(self):
widget = self.create()
self.checkPixelsParam(widget, 'handlepad', 5, 6.4, 7.6, -3, '1m')
def test_handlesize(self):
widget = self.create()
self.checkPixelsParam(widget, 'handlesize', 8, 9.4, 10.6, -3, '2m',
conv=noconv)
def test_height(self):
widget = self.create()
self.checkPixelsParam(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i',
conv=noconv)
def test_opaqueresize(self):
widget = self.create()
self.checkBooleanParam(widget, 'opaqueresize')
def test_sashcursor(self):
widget = self.create()
self.checkCursorParam(widget, 'sashcursor')
def test_sashpad(self):
widget = self.create()
self.checkPixelsParam(widget, 'sashpad', 8, 1.3, 2.6, -2, '2m')
def test_sashrelief(self):
widget = self.create()
self.checkReliefParam(widget, 'sashrelief')
def test_sashwidth(self):
widget = self.create()
self.checkPixelsParam(widget, 'sashwidth', 10, 11.1, 15.6, -3, '1m',
conv=noconv)
def test_showhandle(self):
widget = self.create()
self.checkBooleanParam(widget, 'showhandle')
def test_width(self):
widget = self.create()
self.checkPixelsParam(widget, 'width', 402, 403.4, 404.6, -402, 0, '5i',
conv=noconv)
def create2(self):
p = self.create()
b = tkinter.Button(p)
c = tkinter.Button(p)
p.add(b)
p.add(c)
return p, b, c
def test_paneconfigure(self):
p, b, c = self.create2()
self.assertRaises(TypeError, p.paneconfigure)
d = p.paneconfigure(b)
self.assertIsInstance(d, dict)
for k, v in d.items():
self.assertEqual(len(v), 5)
self.assertEqual(v, p.paneconfigure(b, k))
self.assertEqual(v[4], p.panecget(b, k))
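    # Helper below: set a pane option via paneconfigure(), then read it back
    # through both paneconfigure(b, name) and panecget(); when running against
    # older Tcl or without wantobjects, values round-trip as strings, so the
    # comparison falls back to str() forms (the stringify flag).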
def check_paneconfigure(self, p, b, name, value, expected, stringify=False):
conv = lambda x: x
if not self.wantobjects or stringify:
expected = str(expected)
if self.wantobjects and stringify:
conv = str
p.paneconfigure(b, **{name: value})
self.assertEqual(conv(p.paneconfigure(b, name)[4]), expected)
self.assertEqual(conv(p.panecget(b, name)), expected)
def check_paneconfigure_bad(self, p, b, name, msg):
with self.assertRaisesRegexp(TclError, msg):
p.paneconfigure(b, **{name: 'badValue'})
def test_paneconfigure_after(self):
p, b, c = self.create2()
self.check_paneconfigure(p, b, 'after', c, str(c))
self.check_paneconfigure_bad(p, b, 'after',
'bad window path name "badValue"')
def test_paneconfigure_before(self):
p, b, c = self.create2()
self.check_paneconfigure(p, b, 'before', c, str(c))
self.check_paneconfigure_bad(p, b, 'before',
'bad window path name "badValue"')
def test_paneconfigure_height(self):
p, b, c = self.create2()
self.check_paneconfigure(p, b, 'height', 10, 10,
stringify=tcl_version < (8, 5))
self.check_paneconfigure_bad(p, b, 'height',
'bad screen distance "badValue"')
@requires_tcl(8, 5)
def test_paneconfigure_hide(self):
p, b, c = self.create2()
self.check_paneconfigure(p, b, 'hide', False, 0)
self.check_paneconfigure_bad(p, b, 'hide',
'expected boolean value but got "badValue"')
def test_paneconfigure_minsize(self):
p, b, c = self.create2()
self.check_paneconfigure(p, b, 'minsize', 10, 10)
self.check_paneconfigure_bad(p, b, 'minsize',
'bad screen distance "badValue"')
def test_paneconfigure_padx(self):
p, b, c = self.create2()
self.check_paneconfigure(p, b, 'padx', 1.3, 1)
self.check_paneconfigure_bad(p, b, 'padx',
'bad screen distance "badValue"')
def test_paneconfigure_pady(self):
p, b, c = self.create2()
self.check_paneconfigure(p, b, 'pady', 1.3, 1)
self.check_paneconfigure_bad(p, b, 'pady',
'bad screen distance "badValue"')
def test_paneconfigure_sticky(self):
p, b, c = self.create2()
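# Tk normalizes the sticky string to its own canonical order, so setting
# 'nsew' reads back as 'nesw'.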
self.check_paneconfigure(p, b, 'sticky', 'nsew', 'nesw')
self.check_paneconfigure_bad(p, b, 'sticky',
'bad stickyness value "badValue": must '
'be a string containing zero or more of '
'n, e, s, and w')
@requires_tcl(8, 5)
def test_paneconfigure_stretch(self):
p, b, c = self.create2()
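# Tk accepts the abbreviated value 'alw' and stores the full form 'always'.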
self.check_paneconfigure(p, b, 'stretch', 'alw', 'always')
self.check_paneconfigure_bad(p, b, 'stretch',
'bad stretch "badValue": must be '
'always, first, last, middle, or never')
def test_paneconfigure_width(self):
p, b, c = self.create2()
self.check_paneconfigure(p, b, 'width', 10, 10,
stringify=tcl_version < (8, 5))
self.check_paneconfigure_bad(p, b, 'width',
'bad screen distance "badValue"')
@add_standard_options(StandardOptionsTests)
class MenuTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'activebackground', 'activeborderwidth', 'activeforeground',
'background', 'borderwidth', 'cursor',
'disabledforeground', 'font', 'foreground',
'postcommand', 'relief', 'selectcolor', 'takefocus',
'tearoff', 'tearoffcommand', 'title', 'type',
)
_conv_pixels = noconv_meth
def create(self, **kwargs):
return tkinter.Menu(self.root, **kwargs)
def test_postcommand(self):
widget = self.create()
self.checkCommandParam(widget, 'postcommand')
def test_tearoff(self):
widget = self.create()
self.checkBooleanParam(widget, 'tearoff')
def test_tearoffcommand(self):
widget = self.create()
self.checkCommandParam(widget, 'tearoffcommand')
def test_title(self):
widget = self.create()
self.checkParam(widget, 'title', 'any string')
def test_type(self):
widget = self.create()
self.checkEnumParam(widget, 'type',
'normal', 'tearoff', 'menubar')
def test_entryconfigure(self):
m1 = self.create()
m1.add_command(label='test')
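# The added command is configured as entry 1; entry 0 is the tearoff entry.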
self.assertRaises(TypeError, m1.entryconfigure)
with self.assertRaisesRegexp(TclError, 'bad menu entry index "foo"'):
m1.entryconfigure('foo')
d = m1.entryconfigure(1)
self.assertIsInstance(d, dict)
for k, v in d.items():
self.assertIsInstance(k, str)
self.assertIsInstance(v, tuple)
self.assertEqual(len(v), 5)
self.assertEqual(v[0], k)
self.assertEqual(m1.entrycget(1, k), v[4])
m1.destroy()
def test_entryconfigure_label(self):
m1 = self.create()
m1.add_command(label='test')
self.assertEqual(m1.entrycget(1, 'label'), 'test')
m1.entryconfigure(1, label='changed')
self.assertEqual(m1.entrycget(1, 'label'), 'changed')
def test_entryconfigure_variable(self):
m1 = self.create()
v1 = tkinter.BooleanVar(self.root)
v2 = tkinter.BooleanVar(self.root)
m1.add_checkbutton(variable=v1, onvalue=True, offvalue=False,
label='Nonsense')
self.assertEqual(str(m1.entrycget(1, 'variable')), str(v1))
m1.entryconfigure(1, variable=v2)
self.assertEqual(str(m1.entrycget(1, 'variable')), str(v2))
@add_standard_options(PixelSizeTests, StandardOptionsTests)
class MessageTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'anchor', 'aspect', 'background', 'borderwidth',
'cursor', 'font', 'foreground',
'highlightbackground', 'highlightcolor', 'highlightthickness',
'justify', 'padx', 'pady', 'relief',
'takefocus', 'text', 'textvariable', 'width',
)
_conv_pad_pixels = noconv_meth
def create(self, **kwargs):
return tkinter.Message(self.root, **kwargs)
def test_aspect(self):
widget = self.create()
self.checkIntegerParam(widget, 'aspect', 250, 0, -300)
tests_gui = [
ButtonTest, CanvasTest, CheckbuttonTest, EntryTest,
FrameTest, LabelFrameTest, LabelTest, ListboxTest,
MenubuttonTest, MenuTest, MessageTest, OptionMenuTest,
PanedWindowTest, RadiobuttonTest, ScaleTest, ScrollbarTest,
SpinboxTest, TextTest, ToplevelTest,
]
if __name__ == '__main__':
run_unittest(*tests_gui)
| sdlBasic/sdlbrt | win32/mingw/opt/lib/python2.7/lib-tk/test/test_tkinter/test_widgets.py | Python | lgpl-2.1 | 45,690 |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='base.html')),
url(r'^api/', include('todos.urls')),
# Examples:
# url(r'^$', 'todolists.views.home', name='home'),
# url(r'^todolists/', include('todolists.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| danjac/django-angular-tasks | todolists/todolists/urls.py | Python | mit | 719 |