code (stringlengths 3 to 1.05M) | repo_name (stringlengths 5 to 104) | path (stringlengths 4 to 251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3 to 1.05M)
---|---|---|---|---|---
class Arco():
def __init__(self, origem, destino, valor):
self.valor = valor
self.vertices = (origem, destino)
def __hash__(self):
return hash(self.vertices + (self.valor,))
def __eq__(self, arco):
return (self.valor,) + self.vertices == (arco.valor,) + arco.vertices
def __repr__(self):
return 'Arco({!r}, {!r}, {!r})'.format(self.vertices[0], self.vertices[1], self.valor)
def oposto(self,oposto):
if oposto==self.vertices[0]:
return self.vertices[1]
else:
return self.vertices[0]
class Grafo():
def __init__(self):
self.vertice=tuple()
self.arco=tuple()
def vertices(self):
return self.vertice
def adicionar_vertice(self,ver):
self.vertice=self.vertice+(ver,)
def adicionar_arco(self,arc):
self.arco=self.arco+(arc,)
def arcos(self,ver):
resposta=tuple()
for x in self.arco:
if ver in x.vertices:
resposta=resposta+(x,)
return resposta
def adjacencias(self,ver):
resposta=tuple()
for x in self.arco:
if ver in x.vertices:
resposta=resposta+(x.oposto(ver),)
return resposta
def caminho(self,ver1,ver2):
resposta=[]
if ver1 is ver2:
resposta.append(ver1)
return resposta
elif self.arcos(ver1)==tuple():
return resposta
else:
vez=ver1
while(vez is not ver2):
if self.arcos(vez)==tuple():
break
for i in self.arcos(vez):
if i.oposto(vez) not in resposta:
resposta.append(vez)
vez=i.oposto(vez)
break
resposta.append(vez)
return resposta
def calcular_melhores_caminhos_partindo_de(self,ver):
resposta={}
visitados=[ver]
distancia={ver:0}
caminho={ver:[ver]}
vez=ver
while(len(visitados)<len(self.vertice)):
for d in self.arcos(vez):
if d.oposto(vez) not in visitados:
if d.oposto(vez) not in distancia.keys():
distancia[d.oposto(vez)]=distancia[vez]+d.valor
else:
if distancia[d.oposto(vez)]>distancia[vez]+d.valor:
distancia[d.oposto(vez)]=distancia[vez]+d.valor
menor=0
ponto=None
for a,b in distancia.items():
if a not in visitados:
menor=b
ponto=a
break
for a,b in distancia.items():
if a not in visitados:
if b<menor:
menor=b
ponto=a
if ponto not in visitados:
visitados.append(ponto)
vez=ponto
for c in self.arcos(vez):
if c.oposto(ponto) in visitados:
menor=distancia[c.oposto(vez)]+c.valor
break
val=0
for c in self.arcos(vez):
if c.oposto(vez) in visitados:
if distancia[c.oposto(vez)]+c.valor<=menor:
menor=distancia[c.oposto(vez)]+c.valor
ponto=c.oposto(vez)
val=c.valor
caminho[vez]=caminho[ponto]+[val,vez]
# merge distances and paths into the result dict
for k,l in distancia.items():
resposta[k]=tuple()
resposta[k]=resposta[k]+(l,caminho[k])
return resposta
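# Illustrative usage sketch (not part of the original module): the vertices and
# arc values below are made up, and the expected dict mirrors the structure
# returned by calcular_melhores_caminhos_partindo_de, i.e. each vertex maps to
# (total distance, [v0, cost, v1, cost, v2, ...]).
def _exemplo_melhores_caminhos():
    g = Grafo()
    for v in ('A', 'B', 'C'):
        g.adicionar_vertice(v)
    g.adicionar_arco(Arco('A', 'B', 1))
    g.adicionar_arco(Arco('B', 'C', 2))
    # Expected result: {'A': (0, ['A']), 'B': (1, ['A', 1, 'B']), 'C': (3, ['A', 1, 'B', 2, 'C'])}
    return g.calcular_melhores_caminhos_partindo_de('A')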
import unittest
class ArcoTestes(unittest.TestCase):
def teste_init(self):
arco = Arco('origem', 'destino', 1)
self.assertTupleEqual(('origem', 'destino'), arco.vertices)
self.assertEqual(1, arco.valor)
def teste_oposto(self):
arco = Arco('origem', 'destino', 1)
self.assertEqual('origem', arco.oposto('destino'))
self.assertEqual('destino', arco.oposto('origem'))
# Data used in the tests below
# Vertex data: city names
bertioga = 'Bertioga'
caragua = 'Caragua'
jacarei = 'Jacareí'
mogi = 'Mogi da Cruzes'
santos = 'Santos'
sjc = 'São José dos Campos'
sao_paulo = 'São Paulo'
taubate = 'Taubaté'
vertices_cidades = (taubate,
caragua,
jacarei,
mogi,
sjc,
sao_paulo,
bertioga,
santos)
# Arc data: road distances between the cities
arco_tauba_sjc = Arco(taubate, sjc, 43900)
arco_scj_jaca = Arco(sjc, jacarei, 13200)
arco_scj_caragua = Arco(sjc, caragua, 86900)
arco_caragua_bertioga = Arco(caragua, bertioga, 114000)
arco_bertioga_mogi = Arco(bertioga, mogi, 48700)
arco_mogi_jaca = Arco(mogi, jacarei, 54300)
arco_mogi_sp = Arco(mogi, sao_paulo, 61900)
arco_jaca_sp = Arco(jacarei, sao_paulo, 81800)
arco_santos_sp = Arco(santos, sao_paulo, 72800)
arco_santos_bertioga = Arco(santos, bertioga, 74400)
arcos_distancias = (arco_tauba_sjc,
arco_scj_jaca,
arco_scj_caragua,
arco_caragua_bertioga,
arco_bertioga_mogi,
arco_mogi_jaca,
arco_mogi_sp,
arco_jaca_sp,
arco_santos_sp,
arco_santos_bertioga)
class GrafoTestes(unittest.TestCase):
def teste_adicionar_vertice(self):
grafo = Grafo()
self.assert_mesmo_elementos(tuple(), grafo.vertices())
grafo.adicionar_vertice(santos)
self.assert_mesmo_elementos((santos,), grafo.vertices())
grafo.adicionar_vertice(jacarei)
self.assert_mesmo_elementos((santos, jacarei), grafo.vertices())
grafo.adicionar_vertice(mogi)
self.assert_mesmo_elementos((santos, jacarei, mogi), grafo.vertices())
grafo.adicionar_vertice(caragua)
self.assert_mesmo_elementos((santos, jacarei, mogi, caragua), grafo.vertices())
def teste_adicionar_arco(self):
grafo = Grafo()
grafo.adicionar_vertice(sjc)
self.assert_mesmo_elementos(tuple(), grafo.arcos(sjc))
self.assert_mesmo_elementos(tuple(), grafo.adjacencias(sjc))
grafo.adicionar_vertice(jacarei)
self.assert_mesmo_elementos(tuple(), grafo.arcos(jacarei))
self.assert_mesmo_elementos(tuple(), grafo.adjacencias(sjc))
self.assert_mesmo_elementos(tuple(), grafo.adjacencias(jacarei))
grafo.adicionar_arco(arco_scj_jaca)
self.assert_mesmo_elementos((arco_scj_jaca,), grafo.arcos(jacarei))
self.assert_mesmo_elementos((arco_scj_jaca,), grafo.arcos(sjc))
self.assert_mesmo_elementos((jacarei,), grafo.adjacencias(sjc))
self.assert_mesmo_elementos((sjc,), grafo.adjacencias(jacarei))
grafo.adicionar_vertice(taubate)
grafo.adicionar_arco(arco_tauba_sjc)
self.assert_mesmo_elementos((arco_scj_jaca, arco_tauba_sjc), grafo.arcos(sjc))
self.assert_mesmo_elementos((arco_tauba_sjc,), grafo.arcos(taubate))
self.assert_mesmo_elementos((sjc,), grafo.adjacencias(jacarei))
self.assert_mesmo_elementos((sjc,), grafo.adjacencias(taubate))
self.assert_mesmo_elementos((taubate, jacarei), grafo.adjacencias(sjc))
def teste_caminho_para_proprio_vertice(self):
grafo = Grafo()
grafo.adicionar_vertice(sjc)
self.assertListEqual([sjc], grafo.caminho(sjc, sjc))
def teste_caminho_vertices_desconexos(self):
grafo = Grafo()
grafo.adicionar_vertice(sjc)
grafo.adicionar_vertice(jacarei)
self.assertListEqual([], grafo.caminho(sjc, jacarei))
def teste_caminho_dois_vertices_conexos(self):
grafo = Grafo()
grafo.adicionar_vertice(sjc)
grafo.adicionar_vertice(jacarei)
grafo.adicionar_arco(arco_scj_jaca)
self.assertListEqual([sjc, jacarei], grafo.caminho(sjc, jacarei))
def teste_caminho_tres_vertices_conexos(self):
grafo = Grafo()
grafo.adicionar_vertice(sjc)
grafo.adicionar_vertice(jacarei)
grafo.adicionar_vertice(taubate)
grafo.adicionar_arco(arco_scj_jaca)
grafo.adicionar_arco(arco_tauba_sjc)
self.assertListEqual([taubate, sjc, jacarei], grafo.caminho(taubate, jacarei))
self.assertListEqual([taubate, sjc], grafo.caminho(taubate, sjc))
def teste_caminho_4_vertices_conexos_nao_lineares(self):
grafo = Grafo()
grafo.adicionar_vertice(sjc)
grafo.adicionar_vertice(jacarei)
grafo.adicionar_vertice(mogi)
grafo.adicionar_vertice(sao_paulo)
grafo.adicionar_arco(arco_scj_jaca)
grafo.adicionar_arco(arco_jaca_sp)
grafo.adicionar_arco(arco_mogi_jaca)
grafo.adicionar_arco(arco_mogi_sp)
caminho = grafo.caminho(sjc, sao_paulo)
self.assertTrue([sjc, jacarei, sao_paulo] == caminho or [sjc, jacarei, mogi, sao_paulo] == caminho)
def teste_melhor_caminho_partindo_de_taubate_considerando_distancias(self):
grafo = Grafo()
for v in vertices_cidades:
grafo.adicionar_vertice(v)
for a in arcos_distancias:
grafo.adicionar_arco(a)
dct = grafo.calcular_melhores_caminhos_partindo_de(taubate)
self.assert_mesmo_elementos(vertices_cidades, dct.keys())
distancia, caminho = dct[taubate]
self.assertEqual(0, distancia)
self.assertListEqual([taubate], caminho)
distancia, caminho = dct[sjc]
self.assertEqual(43900, distancia)
self.assertListEqual([taubate, 43900, sjc], caminho)
distancia, caminho = dct[jacarei]
self.assertEqual(57100, distancia)
self.assertListEqual([taubate, 43900, sjc, 13200, jacarei], caminho)
distancia, caminho = dct[mogi]
self.assertEqual(111400, distancia)
self.assertListEqual([taubate, 43900, sjc, 13200, jacarei, 54300, mogi], caminho)
distancia, caminho = dct[caragua]
self.assertEqual(130800, distancia)
self.assertListEqual([taubate, 43900, sjc, 86900, caragua], caminho)
distancia, caminho = dct[sao_paulo]
self.assertEqual(138900, distancia)
self.assertListEqual([taubate, 43900, sjc, 13200, jacarei, 81800, sao_paulo], caminho)
distancia, caminho = dct[bertioga]
self.assertEqual(160100, distancia)
self.assertListEqual([taubate, 43900, sjc, 13200, jacarei, 54300, mogi, 48700, bertioga], caminho)
distancia, caminho = dct[santos]
self.assertEqual(211700, distancia)
self.assertListEqual([taubate, 43900, sjc, 13200, jacarei, 81800, sao_paulo, 72800, santos], caminho)
def teste_melhor_caminho_partindo_de_taubate_considerando_custo(self):
grafo = Grafo()
for v in vertices_cidades:
grafo.adicionar_vertice(v)
preco_gasolina = 3.65 # R$/litro
rendimento_carro_popular = 15000 # metros/litro
preco_por_distancia = preco_gasolina / rendimento_carro_popular # R$/metro
arcos_custo = [Arco(a.vertices[0], a.vertices[1], a.valor * preco_por_distancia)
for a in arcos_distancias]
pedagios = {(jacarei, sao_paulo): 11.8, (jacarei, mogi): 6.1, (sao_paulo, santos): 23, (sao_paulo, mogi): 3.2,
(bertioga, santos): 10.8}
for a in arcos_custo:
vertices_contrarios = (a.vertices[1], a.vertices[0])
pedagio = pedagios.get(a.vertices, pedagios.get(vertices_contrarios, 0))
a.valor = round(pedagio + a.valor)
grafo.adicionar_arco(a)
dct = grafo.calcular_melhores_caminhos_partindo_de(taubate)
self.assert_mesmo_elementos(vertices_cidades, dct.keys())
distancia, caminho = dct[taubate]
self.assertEqual(0, distancia)
self.assertListEqual([taubate], caminho)
distancia, caminho = dct[sjc]
self.assertEqual(11, distancia)
self.assertListEqual([taubate, 11, sjc], caminho)
distancia, caminho = dct[jacarei]
self.assertEqual(14, distancia)
self.assertListEqual([taubate, 11, sjc, 3, jacarei], caminho)
distancia, caminho = dct[mogi]
self.assertEqual(33, distancia)
self.assertListEqual([taubate, 11, sjc, 3, jacarei, 19, mogi], caminho)
distancia, caminho = dct[caragua]
self.assertEqual(32, distancia)
self.assertListEqual([taubate, 11, sjc, 21, caragua], caminho)
distancia, caminho = dct[sao_paulo]
self.assertEqual(46, distancia)
self.assertListEqual([taubate, 11, sjc, 3, jacarei, 32, sao_paulo], caminho)
distancia, caminho = dct[bertioga]
self.assertEqual(45, distancia)
self.assertListEqual([taubate, 11, sjc, 3, jacarei, 19, mogi, 12, bertioga], caminho)
distancia, caminho = dct[santos]
self.assertEqual(74, distancia)
self.assertListEqual([taubate, 11, sjc, 3, jacarei, 19, mogi, 12, bertioga, 29, santos], caminho)
def assert_mesmo_elementos(self, iteravel, outro_iteravel):
"Método auxiliar para asserção de elementos"
self.assertSetEqual(set(iteravel), set(outro_iteravel))
if __name__ == '__main__':
unittest.main()
| walelite/ES | grafo.py | Python | mit | 13,713 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
import six
# -----------------------------------------------------------------------------
# List API versions
# -----------------------------------------------------------------------------
url = "http://127.0.0.1:8000/"
headers = {
'cache-control': "no-cache"
}
six.print_("Make GET request to %s" % url)
response = requests.request("GET", url, headers=headers)
six.print_("Response is %s. Headers is %s" % (
response.text,
response.headers))
# -----------------------------------------------------------------------------
# Create Foo resource
# -----------------------------------------------------------------------------
url = "http://127.0.0.1:8000/foos/"
payload = {
"foo-field1": 999,
"foo-field2": "foo obj"
}
headers = {
'content-type': "application/json",
'cache-control': "no-cache"
}
six.print_("Make POST request to %s with payload %s" % (url, payload))
response = requests.request("POST", url, json=payload, headers=headers)
six.print_("Response is %s. Headers is %s" % (
response.text,
response.headers))
foo_uuid = response.json()['uuid']
# -----------------------------------------------------------------------------
# Get list of Foo resources
# -----------------------------------------------------------------------------
url = "http://127.0.0.1:8000/foos/"
headers = {
'cache-control': "no-cache"
}
six.print_("Make GET (list on collection) request to %s" % url)
response = requests.request("GET", url, headers=headers)
six.print_("Response is %s. Headers is %s" % (
response.text,
response.headers))
# -----------------------------------------------------------------------------
# Get Foo resource by uuid
# -----------------------------------------------------------------------------
url = "http://127.0.0.1:8000/foos/%s" % foo_uuid
headers = {
'cache-control': "no-cache"
}
six.print_("Make GET request to foo resource %s" % url)
response = requests.request("GET", url, headers=headers)
six.print_("Response is %s. Headers is %s" % (
response.text,
response.headers))
# -----------------------------------------------------------------------------
# Create Bar resource
# -----------------------------------------------------------------------------
url = "http://127.0.0.1:8000/foos/%s/bars/" % foo_uuid
payload = {
"bar-field1": "test bar"
}
headers = {
'content-type': "application/json",
'cache-control': "no-cache",
}
six.print_("Make POST request to %s with payload %s" % (url, payload))
response = requests.request("POST", url, json=payload, headers=headers)
six.print_("Response is %s. Headers is %s" % (
response.text,
response.headers))
bar_uuid = response.json()['uuid']
# -----------------------------------------------------------------------------
# Delete Bar resource
# -----------------------------------------------------------------------------
url = "http://127.0.0.1:8000/bars/%s" % bar_uuid
headers = {
'cache-control': "no-cache"
}
six.print_("Make DELETE request to %s" % url)
response = requests.request("DELETE", url, headers=headers)
six.print_("Done!")
| phantomii/restalchemy | examples/restapi_foo_bar_client.py | Python | apache-2.0 | 3,833 |
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSAccount import XFSAccount
class UploadcCom(XFSAccount):
__name__ = "UploadcCom"
__type__ = "account"
__version__ = "0.05"
__status__ = "testing"
__description__ = """Uploadc.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
PLUGIN_DOMAIN = "uploadc.com"
| fzimmermann89/pyload | module/plugins/accounts/UploadcCom.py | Python | gpl-3.0 | 413 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-06 21:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('base', '0086_auto_20170303_1233'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='info_brief',
new_name='info',
),
migrations.AddField(
model_name='locale',
name='style_guide',
field=models.URLField(blank=True, help_text=b'\n URL to style guide for this locale.\n '),
),
migrations.AddField(
model_name='project',
name='can_be_requested',
field=models.BooleanField(default=True, help_text=b'\n Allow localizers to request the project for their team.\n '),
),
migrations.AddField(
model_name='project',
name='l10n_contact',
field=models.ForeignKey(blank=True, help_text=b'\n L10n driver in charge of the project.\n ', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='l10n_contact_for', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='project',
name='preview_url',
field=models.URLField(blank=True, help_text=b'\n URL to translation preview environment, e.g. staging website,\n screenshots, development build, etc.\n ', verbose_name=b'L10n Preview URL'),
),
migrations.AddField(
model_name='project',
name='project_contact',
field=models.ForeignKey(blank=True, help_text=b'\n Project manager or developer contact.\n ', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='project_contact_for', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='project',
name='project_url',
field=models.URLField(blank=True, help_text=b'\n URL to released project, e.g. production website or product download.\n ', verbose_name=b'Project URL'),
),
migrations.AlterField(
model_name='project',
name='disabled',
field=models.BooleanField(default=False, help_text=b'\n Hide project from the UI and only keep it accessible from the admin.\n Disable the project instead of deleting it to keep translation memory\n and attributions.\n '),
),
]
| mastizada/pontoon | pontoon/base/migrations/0087_auto_20170306_2113.py | Python | bsd-3-clause | 2,698 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from flask_appbuilder import Model
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship
from superset import security_manager
from superset.models.helpers import AuditMixinNullable
class UserAttribute(Model, AuditMixinNullable):
"""
Custom attributes attached to the user.
Extending the user attribute is tricky due to its dependency on the
authentication type and circular dependencies in Superset. Instead, we use
a custom model for adding attributes.
"""
__tablename__ = "user_attribute"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("ab_user.id"))
user = relationship(
security_manager.user_model, backref="extra_attributes", foreign_keys=[user_id]
)
welcome_dashboard_id = Column(Integer, ForeignKey("dashboards.id"))
welcome_dashboard = relationship("Dashboard")
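# Hedged usage sketch, not part of the original Superset module: it only shows
# that the model above is written with a plain SQLAlchemy session. The `session`
# argument and the id values a caller would pass are assumptions for illustration.
def set_welcome_dashboard(session, user_id, dashboard_id):
    """Attach a welcome dashboard to a user through the custom attribute model."""
    attr = UserAttribute(user_id=user_id, welcome_dashboard_id=dashboard_id)
    session.add(attr)   # stage the new row
    session.commit()    # persist it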
| airbnb/superset | superset/models/user_attributes.py | Python | apache-2.0 | 1,690 |
"""Invoice details"""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers.account import AccountManager as AccountManager
from SoftLayer import utils
@click.command()
@click.argument('identifier')
@click.option('--details', is_flag=True, default=False, show_default=True,
help="Shows a very detailed list of charges")
@environment.pass_env
def cli(env, identifier, details):
"""Invoice details"""
manager = AccountManager(env.client)
top_items = manager.get_billing_items(identifier)
title = "Invoice %s" % identifier
table = formatting.Table(["Item Id", "Category", "Description", "Single",
"Monthly", "Create Date", "Location"], title=title)
table.align['category'] = 'l'
table.align['description'] = 'l'
for item in top_items:
fqdn = "%s.%s" % (item.get('hostName', ''), item.get('domainName', ''))
# category id=2046, ram_usage doesn't have a name...
category = utils.lookup(item, 'category', 'name') or item.get('categoryCode')
description = nice_string(item.get('description'))
if fqdn != '.':
description = "%s (%s)" % (item.get('description'), fqdn)
table.add_row([
item.get('id'),
category,
nice_string(description),
"$%.2f" % float(item.get('oneTimeAfterTaxAmount')),
"$%.2f" % float(item.get('recurringAfterTaxAmount')),
utils.clean_time(item.get('createDate'), out_format="%Y-%m-%d"),
utils.lookup(item, 'location', 'name')
])
if details:
for child in item.get('children', []):
table.add_row([
'>>>',
utils.lookup(child, 'category', 'name'),
nice_string(child.get('description')),
"$%.2f" % float(child.get('oneTimeAfterTaxAmount')),
"$%.2f" % float(child.get('recurringAfterTaxAmount')),
'---',
'---'
])
env.fout(table)
def nice_string(ugly_string, limit=100):
"""Format and trims strings"""
return (ugly_string[:limit] + '..') if len(ugly_string) > limit else ugly_string
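# Hedged example (not from the original CLI module) of the trimming behaviour,
# with a made-up input string:
# >>> nice_string('a' * 120, limit=100) == 'a' * 100 + '..'
# True
# >>> nice_string('short')
# 'short'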
| kyubifire/softlayer-python | SoftLayer/CLI/account/invoice_detail.py | Python | mit | 2,334 |
#!/usr/bin/env python3
""" 2018 AOC Day 07 """
import argparse
from collections import defaultdict
import heapq
import re
import typing
import unittest
def parse(puzzle: str) -> typing.List[typing.Tuple[str, str]]:
''' Parse the input into a list of (step, requirement) tuples '''
line_re = re.compile(r'Step (?P<req>\w) must be finished before step (?P<step>\w) can begin.')
parsed = []
for line in puzzle.splitlines():
match = line_re.match(line.strip())
if not match:
raise ValueError('Bad line: ' + line)
parsed.append((match['step'], match['req']))
return parsed
def part1(puzzle: str) -> str:
""" Solve part 1 """
requirements: typing.Mapping[str, typing.List[str]] = defaultdict(list)
reverse_reqs: typing.Mapping[str, typing.List[str]] = defaultdict(list)
for (step, requirement) in parse(puzzle):
requirements[step].append(requirement)
reverse_reqs[requirement].append(step)
steps = set(requirements.keys()) | set(reverse_reqs.keys())
heap = [step for step in steps if not requirements[step]]
heapq.heapify(heap)
complete: typing.Set[str] = set()
final_order = ''
while heap:
next_step = heapq.heappop(heap)
complete.add(next_step)
final_order += next_step
for rev_req in reverse_reqs[next_step]:
if all(r in complete for r in requirements[rev_req]):
heapq.heappush(heap, rev_req)
if complete != steps:
raise AssertionError('Never completed ' + str(complete - steps))
return final_order
def part2(puzzle: str, nworkers=5, base_time: int = 60) -> int:
""" Solve part 2 """
def step_time(step: str) -> int:
return base_time + ord(step[0]) - ord('A') + 1
assert nworkers > 0
requirements: typing.Mapping[str, typing.List[str]] = defaultdict(list)
reverse_reqs: typing.Mapping[str, typing.List[str]] = defaultdict(list)
for (step, requirement) in parse(puzzle):
requirements[step].append(requirement)
reverse_reqs[requirement].append(step)
all_tasks = set(requirements.keys()) | set(reverse_reqs.keys())
available_tasks = [task for task in all_tasks if not requirements[task]]
heapq.heapify(available_tasks)
queued_tasks: typing.List[typing.Tuple[int, str]] = [] # heap of (finish time, task) pairs
completed_tasks: typing.Set[str] = set()
current_time = 0
while available_tasks or queued_tasks:
# queue up as many available tasks as possible
# postcondition: have at least one queued task, since we started with some available
while available_tasks and len(queued_tasks) < nworkers:
todo = heapq.heappop(available_tasks)
heapq.heappush(queued_tasks, (current_time + step_time(todo), todo))
# pop off one of the queued tasks
assert queued_tasks
current_time, completed = heapq.heappop(queued_tasks)
completed_tasks.add(completed)
# add newly available tasks
for rev_req in reverse_reqs[completed]:
if all(r in completed_tasks for r in requirements[rev_req]):
heapq.heappush(available_tasks, rev_req)
if completed_tasks != all_tasks:
raise AssertionError('Never completed ' + str(all_tasks - completed_tasks))
return current_time
def main():
""" Run 2018 Day 07 """
parser = argparse.ArgumentParser(description='Advent of Code 2018 Day 07')
parser.add_argument('input', help='input file')
opts = parser.parse_args()
with open(opts.input) as handle:
puzzle = handle.read().strip()
print('Part 1:', part1(puzzle))
print('Part 2:', part2(puzzle))
if __name__ == '__main__':
main()
class ExampleTest(unittest.TestCase):
example = (
'Step C must be finished before step A can begin.\n'
'Step C must be finished before step F can begin.\n'
'Step A must be finished before step B can begin.\n'
'Step A must be finished before step D can begin.\n'
'Step B must be finished before step E can begin.\n'
'Step D must be finished before step E can begin.\n'
'Step F must be finished before step E can begin.\n'
)
def test_part1(self):
self.assertEqual(part1(self.example), 'CABDFE')
def test_part2(self):
self.assertEqual(part2(self.example, nworkers=2, base_time=0), 15)
| devonhollowood/adventofcode | 2018/day07.py | Python | mit | 4,416 |
"""
WSGI config for smarthumidor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "smarthumidor.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
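# Hedged sketch, not part of the original project: for a quick local smoke test,
# the `application` callable exposed above can be served with the standard-library
# wsgiref server. The host and port values are arbitrary example choices.
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    make_server("127.0.0.1", 8000, application).serve_forever()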
| bkillenit/smarthumidor | django/smarthumidor/smarthumidor/wsgi.py | Python | mit | 399 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005, 2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Sale return implementation """
from stoqlib.lib.translation import stoqlib_gettext as _
from stoqlib.reporting.report import HTMLReport
class SaleReturnReport(HTMLReport):
"""Return sales report. Show client information, returned sale and returned
items informations
"""
template_filename = 'return_sale/return_sale.html'
title = _('Sale Return')
complete_header = False
client = None
returned_sale = None
returned_items = None
def __init__(self, filename, store, client, returned_sale, returned_sale_items):
self.client = client
self.returned_sale = returned_sale
self.returned_items = returned_sale_items
HTMLReport.__init__(self, filename)
def get_subtitle(self):
return _(u'Sale %s') % self.returned_sale.identifier
class PendingReturnReceipt(HTMLReport):
template_filename = 'return_sale/pending_receipt.html'
title = _("Pending Returned Sale Report")
complete_header = False
def __init__(self, filename, pending_return):
self.pending_return = pending_return
HTMLReport.__init__(self, filename)
def get_namespace(self):
return dict(subtitle="Return number: %s" % (self.pending_return.identifier, ),
pending_return=self.pending_return)
def get_subtitle(self):
return _(u'Returned Sale %s') % self.pending_return.identifier
| andrebellafronte/stoq | stoqlib/reporting/salereturn.py | Python | gpl-2.0 | 2,327 |
# -*- coding: utf-8 -*-
import sys
import os
# Import the common config file
# Note that paths in the common config are interpreted as if they were
# in the location of this file
# Setup the config
sys.path.insert(0, os.path.abspath('../../_common'))
from common_conf import *
html_short_title_toc, html_short_title, html_context = set_conf_for_manual()
| mpouttuclarke/cdap | cdap-docs/reference-manual/source/conf.py | Python | apache-2.0 | 360 |
# -*- coding: utf-8 -*-
"""
sphinx.roles
~~~~~~~~~~~~
Handlers for additional ReST roles.
:copyright: 2007-2008 by Georg Brandl.
:license: BSD.
"""
import re
from docutils import nodes, utils
from docutils.parsers.rst import roles
from sphinx import addnodes
ws_re = re.compile(r'\s+')
caption_ref_re = re.compile(r'^([^<]+?)\s*<(.+)>$')
generic_docroles = {
'command' : nodes.strong,
'dfn' : nodes.emphasis,
'guilabel' : nodes.strong,
'kbd' : nodes.literal,
'mailheader' : addnodes.literal_emphasis,
'makevar' : nodes.Text,
'manpage' : addnodes.literal_emphasis,
'mimetype' : addnodes.literal_emphasis,
'newsgroup' : addnodes.literal_emphasis,
'program' : nodes.strong,
'regexp' : nodes.literal,
}
for rolename, nodeclass in generic_docroles.iteritems():
roles.register_generic_role(rolename, nodeclass)
def indexmarkup_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
env = inliner.document.settings.env
if not typ:
typ = env.config.default_role
text = utils.unescape(etext)
targetid = 'index-%s' % env.index_num
env.index_num += 1
indexnode = addnodes.index()
targetnode = nodes.target('', '', ids=[targetid])
inliner.document.note_explicit_target(targetnode)
if typ == 'envvar':
env.note_index_entry('single', text, targetid, text)
env.note_index_entry('single', 'environment variable; %s' % text,
targetid, text)
indexnode['entries'] = [('single', text, targetid, text),
('single', 'environment variable; %s' % text,
targetid, text)]
xref_nodes = xfileref_role(typ, rawtext, etext, lineno, inliner,
options, content)[0]
return [indexnode, targetnode] + xref_nodes, []
elif typ == 'pep':
env.note_index_entry('single', 'Python Enhancement Proposals!PEP %s' % text,
targetid, 'PEP %s' % text)
indexnode['entries'] = [('single', 'Python Enhancement Proposals!PEP %s' % text,
targetid, 'PEP %s' % text)]
try:
pepnum = int(text)
except ValueError:
msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum
sn = nodes.strong('PEP '+text, 'PEP '+text)
rn = nodes.reference('', '', refuri=ref)
rn += sn
return [indexnode, targetnode, rn], []
elif typ == 'rfc':
env.note_index_entry('single', 'RFC; RFC %s' % text,
targetid, 'RFC %s' % text)
indexnode['entries'] = [('single', 'RFC; RFC %s' % text,
targetid, 'RFC %s' % text)]
try:
rfcnum = int(text)
except ValueError:
msg = inliner.reporter.error('invalid RFC number %s' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
sn = nodes.strong('RFC '+text, 'RFC '+text)
rn = nodes.reference('', '', refuri=ref)
rn += sn
return [indexnode, targetnode, rn], []
roles.register_canonical_role('envvar', indexmarkup_role)
roles.register_local_role('pep', indexmarkup_role)
roles.register_local_role('rfc', indexmarkup_role)
# default is `literal`
innernodetypes = {
'ref': nodes.emphasis,
'term': nodes.emphasis,
'token': nodes.strong,
'envvar': nodes.strong,
'option': addnodes.literal_emphasis,
}
def _fix_parens(typ, text, env):
if typ in ('func', 'meth', 'cfunc'):
if text.endswith('()'):
# remove parentheses
text = text[:-2]
if env.config.add_function_parentheses:
# add them back to all occurrences if configured
text += '()'
return text
def xfileref_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
env = inliner.document.settings.env
if not typ:
typ = env.config.default_role
text = utils.unescape(text)
# if the first character is a bang, don't cross-reference at all
if text[0:1] == '!':
text = _fix_parens(typ, text[1:], env)
return [innernodetypes.get(typ, nodes.literal)(
rawtext, text, classes=['xref'])], []
# we want a cross-reference, create the reference node
pnode = addnodes.pending_xref(rawtext, reftype=typ, refcaption=False,
modname=env.currmodule, classname=env.currclass)
# we may need the line number for warnings
pnode.line = lineno
# the link title may differ from the target, but by default they are the same
title = target = text
titleistarget = True
# look if explicit title and target are given with `foo <bar>` syntax
brace = text.find('<')
if brace != -1:
titleistarget = False
pnode['refcaption'] = True
m = caption_ref_re.match(text)
if m:
target = m.group(2)
title = m.group(1)
else:
# fallback: everything after '<' is the target
target = text[brace+1:]
title = text[:brace]
# special target for Python object cross-references
if typ in ('data', 'exc', 'func', 'class', 'const', 'attr', 'meth', 'mod', 'obj'):
# fix-up parentheses in link title
if titleistarget:
title = title.lstrip('.') # only has a meaning for the target
target = target.lstrip('~') # only has a meaning for the title
title = _fix_parens(typ, title, env)
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot+1:]
# remove parentheses from the target too
if target.endswith('()'):
target = target[:-2]
# if the first character is a dot, search more specific namespaces first
# else search builtins first
if target[0:1] == '.':
target = target[1:]
pnode['refspecific'] = True
# some other special cases for the target
elif typ == 'option' and target[0] in '-/':
# strip option marker from target
target = target[1:]
elif typ == 'term':
# normalize whitespace in definition terms (if the term reference is
# broken over a line, a newline will be in target)
target = ws_re.sub(' ', target).lower()
elif typ == 'ref':
# reST label names are always lowercased
target = ws_re.sub('', target).lower()
else:
# remove all whitespace to avoid referencing problems
target = ws_re.sub('', target)
pnode['reftarget'] = target
pnode += innernodetypes.get(typ, nodes.literal)(rawtext, title, classes=['xref'])
return [pnode], []
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
return [nodes.emphasis(
rawtext, utils.unescape(text).replace('-->', u'\N{TRIANGULAR BULLET}'))], []
_litvar_re = re.compile('{([^}]+)}')
def emph_literal_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
pos = 0
retnode = nodes.literal(role=typ)
for m in _litvar_re.finditer(text):
if m.start() > pos:
txt = text[pos:m.start()]
retnode += nodes.Text(txt, txt)
retnode += nodes.emphasis(m.group(1), m.group(1))
pos = m.end()
if pos < len(text):
retnode += nodes.Text(text[pos:], text[pos:])
return [retnode], []
specific_docroles = {
'data': xfileref_role,
'exc': xfileref_role,
'func': xfileref_role,
'class': xfileref_role,
'const': xfileref_role,
'attr': xfileref_role,
'meth': xfileref_role,
'obj': xfileref_role,
'cfunc' : xfileref_role,
'cdata' : xfileref_role,
'ctype' : xfileref_role,
'cmacro' : xfileref_role,
'mod' : xfileref_role,
'keyword': xfileref_role,
'ref': xfileref_role,
'token' : xfileref_role,
'term': xfileref_role,
'option': xfileref_role,
'menuselection' : menusel_role,
'file' : emph_literal_role,
'samp' : emph_literal_role,
}
for rolename, func in specific_docroles.iteritems():
roles.register_canonical_role(rolename, func)
| creasyw/IMTAphy | documentation/doctools/tags/0.4.3/sphinx/roles.py | Python | gpl-2.0 | 8,698 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PRIMO2 -- Probabilistic Inference Modules.
# Copyright (C) 2013-2017 Social Cognitive Systems Group,
# Faculty of Technology, Bielefeld University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import os
import unittest
import numpy as np
from primo2.networks import BayesianNetwork
from primo2.io import XMLBIFParser, DBNSpec
from primo2.nodes import DiscreteNode
class XMLBIFTest(unittest.TestCase):
def test_readXMLBIF(self):
bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
nodes = bn.get_all_nodes()
self.assertTrue("slippery_road" in nodes)
self.assertTrue("sprinkler" in nodes)
self.assertTrue("rain" in nodes)
self.assertTrue("wet_grass" in nodes)
self.assertTrue("winter" in nodes)
self.assertEqual(len(nodes), 5)
slipperyNode = bn.get_node("slippery_road")
self.assertTrue("rain" in slipperyNode.parents)
sprinklerNode = bn.get_node("sprinkler")
self.assertTrue("winter" in sprinklerNode.parents)
rainNode = bn.get_node("rain")
self.assertTrue("winter" in rainNode.parents)
cpt = np.array([[0.8,0.1],[0.2,0.9]])
np.testing.assert_array_almost_equal(rainNode.cpd, cpt)
wetNode = bn.get_node("wet_grass")
self.assertTrue("sprinkler" in wetNode.parents)
self.assertTrue("rain" in wetNode.parents)
self.assertTrue("true" in wetNode.values)
cpt = np.array([[[0.95, 0.8],[0.1,0.0]], [[0.05, 0.2],[0.9, 1.0]]])
self.assertEqual(wetNode.get_probability("false", {"rain":["true"], "sprinkler":["false"]}),0.2)
self.assertEqual(wetNode.get_probability("true", {"rain":["false"], "sprinkler":["true"]}),0.1)
# np.testing.assert_array_almost_equal(wetNode.cpd, cpt)
def test_readXMLBIF_different_parent_sizes(self):
bn = XMLBIFParser.parse("primo2/tests/testfile.xbif")
# nodes = bn.get_all_nodes()
johnNode = bn.get_node("John_calls")
cpt = np.array([[[0.8,0.5,0.7],[0.6,0.2,0.1]],[[0.2,0.5,0.3],[0.4,0.8,0.9]]])
np.testing.assert_array_almost_equal(johnNode.cpd, cpt)
def test_readXMLBIF_with_variable_properties(self):
bn = XMLBIFParser.parse("primo2/tests/testfile.xbif", ignoreProperties=False)
johnNode = bn.get_node("John_calls")
self.assertEqual(len(johnNode.meta), 1)
self.assertTrue("position" in johnNode.meta[0])
alarmNode = bn.get_node("Alarm")
self.assertEqual(len(alarmNode.meta), 2)
self.assertTrue("position" in alarmNode.meta[0])
self.assertEqual("Random meta test", alarmNode.meta[1])
def test_readXMLBIF_with_variable_properties_ignored(self):
bn = XMLBIFParser.parse("primo2/tests/testfile.xbif", ignoreProperties=True)
johnNode = bn.get_node("John_calls")
self.assertEqual(len(johnNode.meta), 0)
alarmNode = bn.get_node("Alarm")
self.assertEqual(len(alarmNode.meta), 0)
def test_readXMLBIF_with_network_properties(self):
bn = XMLBIFParser.parse("primo2/tests/testfile.xbif", ignoreProperties=False)
self.assertEqual(len(bn.meta), 2)
self.assertEqual("Random network property", bn.meta[0])
self.assertEqual("Author jpoeppel", bn.meta[1])
bn = XMLBIFParser.parse("primo2/tests/slippery.xbif", ignoreProperties=False)
self.assertEqual(len(bn.meta), 0)
def test_readXMLBIF_with_network_properties_ignored(self):
bn = XMLBIFParser.parse("primo2/tests/testfile.xbif", ignoreProperties=True)
self.assertEqual(len(bn.meta), 0)
def test_writeXMLBIF_simple(self):
path= "primo2/tests/test.xbif"
bn = BayesianNetwork()
n1 = DiscreteNode("a")
n2 = DiscreteNode("b", ["happy", "sad"])
bn.add_node(n1)
bn.add_node(n2)
bn.add_edge(n1,n2)
XMLBIFParser.write(bn, path)
bn2 = XMLBIFParser.parse(path)
for n in bn2.get_all_nodes():
tmpn = bn.get_node(n)
for value in tmpn.values:
self.assertTrue(value in n.values)
for p in tmpn.parents.keys():
self.assertTrue(p in n.parents)
np.testing.assert_almost_equal(tmpn.cpd, n.cpd)
# remove testfile
import os
os.remove(path)
def test_writeXMLBIF(self):
testPath = "primo2/tests/testSlippery.xbif"
bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
XMLBIFParser.write(bn, testPath)
bn2 = XMLBIFParser.parse(testPath)
for n in bn2.get_all_nodes():
tmpn = bn.get_node(n)
for value in tmpn.values:
self.assertTrue(value in n.values)
for i,p in enumerate(tmpn.parentOrder):
self.assertEqual(p, n.parentOrder[i])
np.testing.assert_almost_equal(tmpn.cpd, n.cpd)
# remove testfile
import os
os.remove(testPath)
def test_writeXMLBIF_with_network_properties_ignored(self):
testPath = "primo2/tests/testSlippery.xbif"
bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
bn.meta = ["Dummy property"]
XMLBIFParser.write(bn, testPath, ignoreProperties=True)
bn2 = XMLBIFParser.parse(testPath, ignoreProperties=False)
self.assertEqual(len(bn2.meta),0)
self.assertEqual("Dummy property", bn.meta[0])
os.remove(testPath)
def test_writeXMLBIF_with_network_properties(self):
testPath = "primo2/tests/testSlippery.xbif"
bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
bn.meta = ["Dummy property"]
XMLBIFParser.write(bn, testPath, ignoreProperties=False)
bn2 = XMLBIFParser.parse(testPath, ignoreProperties=False)
self.assertEqual(len(bn2.meta),1)
self.assertEqual("Dummy property", bn.meta[0])
os.remove(testPath)
def test_writeXMLBIF_with_variable_properties_ignored(self):
testPath = "primo2/tests/test_testfile.xbif"
readPath = "primo2/tests/testfile.xbif"
bn = XMLBIFParser.parse(readPath, ignoreProperties=False)
XMLBIFParser.write(bn, testPath, ignoreProperties=True)
bn2 = XMLBIFParser.parse(testPath, ignoreProperties=False)
johnNode = bn2.get_node("John_calls")
self.assertEqual(len(johnNode.meta), 0)
alarmNode = bn2.get_node("Alarm")
self.assertEqual(len(alarmNode.meta), 0)
os.remove(testPath)
def test_writeXMLBIF_with_variable_properties(self):
testPath = "primo2/tests/test_testfile.xbif"
bn = XMLBIFParser.parse("primo2/tests/testfile.xbif", ignoreProperties=False)
XMLBIFParser.write(bn, testPath, ignoreProperties=False)
bn2 = XMLBIFParser.parse(testPath, ignoreProperties=False)
johnNode = bn2.get_node("John_calls")
self.assertEqual(len(johnNode.meta), 1)
self.assertTrue("position" in johnNode.meta[0])
alarmNode = bn2.get_node("Alarm")
self.assertEqual(len(alarmNode.meta), 2)
self.assertTrue("position" in alarmNode.meta[0])
self.assertEqual("Random meta test", alarmNode.meta[1])
os.remove(testPath)
class DBNSpecTest(unittest.TestCase):
def test_parseDBN(self):
dbn = DBNSpec.parse("primo2/tests/dbn-test.conf")
self.assertEqual(dbn._b0.name, "Test_DBN_B0")
self.assertEqual(dbn._two_tbn.name, "Test_DBN_2TBN")
def test_parseDBN_local_dir(self):
os.chdir("primo2/tests")
dbn = DBNSpec.parse("dbn-test.conf")
self.assertEqual(dbn._b0.name, "Test_DBN_B0")
self.assertEqual(dbn._two_tbn.name, "Test_DBN_2TBN")
os.chdir("../..")
def test_parseDBN_relative(self):
import shutil
shutil.copyfile("primo2/tests/dbn-test-b0.xbif", "primo2/dbn-test-b0.xbif")
shutil.copyfile("primo2/tests/dbn-test-2tbn.xbif", "primo2/dbn-test-2tbn.xbif")
dbn = DBNSpec.parse("primo2/tests/dbn-test-relative.conf")
self.assertEqual(dbn._b0.name, "Test_DBN_B0")
self.assertEqual(dbn._two_tbn.name, "Test_DBN_2TBN")
#Clean up
os.remove("primo2/dbn-test-b0.xbif")
os.remove("primo2/dbn-test-2tbn.xbif")
def test_parseDBN_absolute_path(self):
if os.path.exists("/tmp"):
import shutil
shutil.copyfile("primo2/tests/dbn-test-b0.xbif", "/tmp/dbn-test-b0.xbif")
shutil.copyfile("primo2/tests/dbn-test-2tbn.xbif", "/tmp/dbn-test-2tbn.xbif")
dbn = DBNSpec.parse("primo2/tests/dbn-test-abs.conf")
self.assertEqual(dbn._b0.name, "Test_DBN_B0")
self.assertEqual(dbn._two_tbn.name, "Test_DBN_2TBN")
#Clean up
os.remove("/tmp/dbn-test-b0.xbif")
os.remove("/tmp/dbn-test-2tbn.xbif")
def test_parseDBN_mixed_path(self):
if os.path.exists("/tmp"):
import shutil
shutil.copyfile("primo2/tests/dbn-test-b0.xbif", "/tmp/dbn-test-b0.xbif")
dbn = DBNSpec.parse("primo2/tests/dbn-test-mixed.conf")
self.assertEqual(dbn._b0.name, "Test_DBN_B0")
self.assertEqual(dbn._two_tbn.name, "Test_DBN_2TBN")
#Clean up
os.remove("/tmp/dbn-test-b0.xbif")
def test_parseDBN_load_properties(self):
dbn = DBNSpec.parse("primo2/tests/dbn-test.conf", ignoreProperties=False)
aNode = dbn.two_tbn.get_node("A")
self.assertEqual(len(aNode.meta), 1)
self.assertTrue("position" in aNode.meta[0])
bNode = dbn.b0.get_node("B")
self.assertEqual(len(bNode.meta), 1)
self.assertTrue("position" in bNode.meta[0])
def test_writeDBN(self):
testPath = "primo2/tests/"
testName = "test-dbn"
dbn = DBNSpec.parse("primo2/tests/dbn-test.conf",
ignoreProperties=False)
DBNSpec.write(dbn, testPath, testName) # implicit ignoreProperties=True
writtenDBN = DBNSpec.parse(testPath+testName+".conf"
,ignoreProperties=False)
aNode = writtenDBN.two_tbn.get_node("A")
self.assertEqual(len(aNode.meta), 0)
self.assertTrue(len(dbn.b0.get_all_nodes()) ==
len(writtenDBN.b0.get_all_nodes()))
self.assertTrue(len(dbn.two_tbn.get_all_nodes()) ==
len(writtenDBN.two_tbn.get_all_nodes()))
for node in dbn.b0.get_all_nodes():
self.assertTrue(node in writtenDBN.b0.get_all_nodes())
np.testing.assert_array_almost_equal(node.cpd,
writtenDBN.b0.get_node(node.name).cpd)
for node in dbn.two_tbn.get_all_nodes():
self.assertTrue(node in writtenDBN.two_tbn.get_all_nodes())
np.testing.assert_array_almost_equal(node.cpd,
writtenDBN.two_tbn.get_node(node.name).cpd)
for suf in [".conf", "-b0.xbif", "-2tbn.xbif"]:
os.remove(testPath + testName + suf)
def test_writeDBN_with_properties(self):
testPath = "primo2/tests/"
testName = "test-dbn"
dbn = DBNSpec.parse("primo2/tests/dbn-test.conf",
ignoreProperties=False)
DBNSpec.write(dbn, testPath, testName, ignoreProperties=False)
writtenDBN = DBNSpec.parse(testPath+testName+".conf",
ignoreProperties=False)
aNode = writtenDBN.two_tbn.get_node("A")
self.assertEqual(len(aNode.meta), 1)
self.assertTrue(len(dbn.b0.get_all_nodes()) ==
len(writtenDBN.b0.get_all_nodes()))
self.assertTrue(len(dbn.two_tbn.get_all_nodes()) ==
len(writtenDBN.two_tbn.get_all_nodes()))
for node in dbn.b0.get_all_nodes():
self.assertTrue(node in writtenDBN.b0.get_all_nodes())
np.testing.assert_array_almost_equal(node.cpd,
writtenDBN.b0.get_node(node.name).cpd)
for node in dbn.two_tbn.get_all_nodes():
self.assertTrue(node in writtenDBN.two_tbn.get_all_nodes())
np.testing.assert_array_almost_equal(node.cpd,
writtenDBN.two_tbn.get_node(node.name).cpd)
for suf in [".conf", "-b0.xbif", "-2tbn.xbif"]:
os.remove(testPath + testName + suf)
if __name__ == "__main__":
#Workaround so that this script also finds the resource files when run directly
# from within the tests folder
os.chdir("../..")
unittest.main()
| SocialCognitiveSystems/PRIMO | primo2/tests/IO_test.py | Python | lgpl-3.0 | 13,732 |
# -*- coding: utf-8 -*-
"""
Framework for code to synthesise a library of spectra.
"""
import argparse
import hashlib
import json
import logging
import os
import re
import sqlite3
import time
from os import path as os_path
from fourgp_speclib import SpectrumLibrarySqlite, Spectrum
from fourgp_specsynth import TurboSpectrum
from fourgp_telescope_data import FourMost
class Synthesizer:
# Convenience function to provide dictionary access to rows of an astropy table
@staticmethod
def astropy_row_to_dict(x):
return dict([(i, x[i]) for i in x.columns])
# Read input parameters
def __init__(self, library_name, logger, docstring, root_path="../../../..", spectral_resolution=50000):
self.logger = logger
self.our_path = os_path.split(os_path.abspath(__file__))[0]
self.root_path = os_path.abspath(os_path.join(self.our_path, root_path, ".."))
self.pid = os.getpid()
self.spectral_resolution = spectral_resolution
parser = argparse.ArgumentParser(description=docstring)
parser.add_argument('--output-library',
required=False,
default="turbospec_{}".format(library_name),
dest="library",
help="Specify the name of the SpectrumLibrary we are to feed synthesized spectra into.")
parser.add_argument('--workspace', dest='workspace', default="",
help="Directory where we expect to find spectrum libraries.")
parser.add_argument('--create',
required=False,
action='store_true',
dest="create",
help="Create a clean SpectrumLibrary to feed synthesized spectra into")
parser.add_argument('--no-create',
required=False,
action='store_false',
dest="create",
help="Do not create a clean SpectrumLibrary to feed synthesized spectra into")
parser.set_defaults(create=True)
parser.add_argument('--log-dir',
required=False,
default="/tmp/turbospec_{}_{}".format(library_name, self.pid),
dest="log_to",
help="Specify a log directory where we log our progress and configuration files.")
parser.add_argument('--dump-to-sqlite-file',
required=False,
default="",
dest="sqlite_out",
help="Specify an sqlite3 filename where we dump the stellar parameters of the stars.")
parser.add_argument('--line-lists-dir',
required=False,
default=self.root_path,
dest="lines_dir",
help="Specify a directory where line lists for TurboSpectrum can be found.")
parser.add_argument('--elements',
required=False,
default="",
dest="elements",
help="Only read the abundances of a comma-separated list of elements, and use scaled-solar "
"abundances for everything else.")
parser.add_argument('--binary-path',
required=False,
default=self.root_path,
dest="binary_path",
help="Specify a directory where Turbospectrum and Interpol packages are installed.")
parser.add_argument('--every',
required=False,
default=1,
type=int,
dest="every",
help="Only process every nth spectrum. "
"This is useful when parallelising this script across multiple processes.")
parser.add_argument('--skip',
required=False,
default=0,
type=int,
dest="skip",
help="Skip n spectra before starting to process every nth. "
"This is useful when parallelising this script across multiple processes.")
parser.add_argument('--limit',
required=False,
default=0,
type=int,
dest="limit",
help="Only process a maximum of n spectra.")
self.args = parser.parse_args()
logging.info("Synthesizing {} to <{}>".format(library_name, self.args.library))
# Set path to workspace where we create libraries of spectra
self.workspace = (self.args.workspace if self.args.workspace else
os_path.abspath(os_path.join(self.our_path, root_path, "workspace")))
os.system("mkdir -p {}".format(self.workspace))
def set_star_list(self, star_list):
self.star_list = star_list
# Ensure that every star has a name; number the stars if not
for i, item in enumerate(self.star_list):
if 'name' not in item:
item['name'] = "star_{:08d}".format(i)
# Ensure that every star has free_abundances and extra metadata
for i, item in enumerate(self.star_list):
if 'free_abundances' not in item:
item['free_abundances'] = {}
if 'extra_metadata' not in item:
item['extra_metadata'] = {}
if 'microturbulence' not in item:
item['microturbulence'] = 1
# Ensure that we have a table of input data to dump to SQLite, if requested
for item in self.star_list:
if 'input_data' not in item:
item['input_data'] = {'name': item['name'],
'Teff': item['Teff'],
'[Fe/H]': item['[Fe/H]'],
'logg': item['logg']}
item['input_data'].update(item['free_abundances'])
item['input_data'].update(item['extra_metadata'])
if 'name' not in item['input_data']:
item['input_data']['name'] = item['name']
def dump_stellar_parameters_to_sqlite(self):
# Output data into sqlite3 db
if self.args.sqlite_out:
os.system("rm -f {}".format(self.args.sqlite_out))
conn = sqlite3.connect(self.args.sqlite_out)
c = conn.cursor()
columns = []
for col_name, col_value in list(self.star_list[0]['input_data'].items()):
col_type_str = isinstance(col_value, str)
columns.append("{} {}".format(col_name, "TEXT" if col_type_str else "REAL"))
c.execute("CREATE TABLE stars (uid INTEGER PRIMARY KEY, {});".format(",".join(columns)))
for i, item in enumerate(self.star_list):
print(("Writing sqlite parameter dump: %5d / %5d" % (i, len(self.star_list))))
c.execute("INSERT INTO stars (name) VALUES (?);", (item['input_data']['name'],))
uid = c.lastrowid
for col_name in item['input_data']:
if col_name == "name":
continue
arguments = (
str(item['input_data'][col_name]) if isinstance(item['input_data'][col_name], str)
else float(item['input_data'][col_name]),
uid
)
c.execute("UPDATE stars SET %s=? WHERE uid=?;" % col_name, arguments)
conn.commit()
conn.close()
def create_spectrum_library(self):
# Create new SpectrumLibrary
self.library_name = re.sub("/", "_", self.args.library)
self.library_path = os_path.join(self.workspace, self.library_name)
self.library = SpectrumLibrarySqlite(path=self.library_path, create=self.args.create)
# Invoke FourMost data class. Ensure that the spectra we produce are much higher resolution than 4MOST.
# We down-sample them later to whatever resolution we actually want.
self.FourMostData = FourMost()
self.lambda_min = self.FourMostData.bands["LRS"]["lambda_min"]
self.lambda_max = self.FourMostData.bands["LRS"]["lambda_max"]
self.line_lists_path = self.FourMostData.bands["LRS"]["line_lists_edvardsson"]
# Invoke a TurboSpectrum synthesizer instance
self.synthesizer = TurboSpectrum(
turbospec_path=os_path.join(self.args.binary_path, "turbospectrum-15.1/exec-gf-v15.1"),
interpol_path=os_path.join(self.args.binary_path, "interpol_marcs"),
line_list_paths=[os_path.join(self.args.lines_dir, self.line_lists_path)],
marcs_grid_path=os_path.join(self.args.binary_path, "fromBengt/marcs_grid"))
self.synthesizer.configure(lambda_min=self.lambda_min,
lambda_max=self.lambda_max,
lambda_delta=float(self.lambda_min) / self.spectral_resolution,
line_list_paths=[os_path.join(self.args.lines_dir, self.line_lists_path)],
stellar_mass=1)
self.counter_output = 0
# Start making log output
os.system("mkdir -p {}".format(self.args.log_to))
self.logfile = os.path.join(self.args.log_to, "synthesis.log")
def do_synthesis(self):
# Iterate over the spectra we're supposed to be synthesizing
with open(self.logfile, "w") as result_log:
for star in self.star_list:
star_name = star['name']
unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]
metadata = {
"Starname": str(star_name),
"uid": str(unique_id),
"Teff": float(star['Teff']),
"[Fe/H]": float(star['[Fe/H]']),
"logg": float(star['logg']),
"microturbulence": float(star["microturbulence"])
}
# User can specify that we should only do every nth spectrum, if we're running in parallel
self.counter_output += 1
if (self.args.limit > 0) and (self.counter_output > self.args.limit):
break
if (self.counter_output - self.args.skip) % self.args.every != 0:
continue
# Pass list of the abundances of individual elements to TurboSpectrum
free_abundances = dict(star['free_abundances'])
for element, abundance in list(free_abundances.items()):
metadata["[{}/H]".format(element)] = float(abundance)
# Propagate all ionisation states into metadata
metadata.update(star['extra_metadata'])
# Configure Turbospectrum with the stellar parameters of the next star
self.synthesizer.configure(
t_eff=float(star['Teff']),
metallicity=float(star['[Fe/H]']),
log_g=float(star['logg']),
stellar_mass=1 if "stellar_mass" not in star else star["stellar_mass"],
turbulent_velocity=1 if "microturbulence" not in star else star["microturbulence"],
free_abundances=free_abundances
)
# Make spectrum
time_start = time.time()
turbospectrum_out = self.synthesizer.synthesise()
time_end = time.time()
# Log synthesizer status
logfile_this = os.path.join(self.args.log_to, "{}.log".format(star_name))
open(logfile_this, "w").write(json.dumps(turbospectrum_out))
# Check for errors
errors = turbospectrum_out['errors']
if errors:
result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(),
time_end - time_start,
star_name,
errors))
logging.warn("Star <{}> could not be synthesised. Errors were: {}".
format(star_name, errors))
result_log.flush()
continue
else:
logging.info("Synthesis completed without error.")
# Fetch filename of the spectrum we just generated
filepath = os_path.join(turbospectrum_out["output_file"])
# Insert spectrum into SpectrumLibrary
try:
filename = "spectrum_{:08d}".format(self.counter_output)
# First import continuum-normalised spectrum, which is in columns 1 and 2
metadata['continuum_normalised'] = 1
spectrum = Spectrum.from_file(filename=filepath, metadata=metadata, columns=(0, 1), binary=False)
self.library.insert(spectra=spectrum, filenames=filename)
# Then import version with continuum, which is in columns 1 and 3
metadata['continuum_normalised'] = 0
spectrum = Spectrum.from_file(filename=filepath, metadata=metadata, columns=(0, 2), binary=False)
self.library.insert(spectra=spectrum, filenames=filename)
except (ValueError, IndexError):
result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(), time_end - time_start,
star_name, "Could not read bsyn output"))
result_log.flush()
continue
# Update log file to show our progress
result_log.write("[{}] {:6.0f} sec {}: {}\n".format(time.asctime(), time_end - time_start,
star_name, "OK"))
result_log.flush()
def clean_up(self):
logging.info("Synthesized {:d} spectra.".format(self.counter_output))
# Close TurboSpectrum synthesizer instance
self.synthesizer.close()
| dcf21/4most-4gp-scripts | src/helper_code/base_synthesizer.py | Python | mit | 14,736 |
# -*- coding:utf-8 -*-
#
# This file is part of OpenFisca.
# OpenFisca is a socio-fiscal microsimulation software
# Copyright © 2011 Clément Schaff, Mahdi Ben Jelloul
# Licensed under the terms of the GPLv3 or later license
# (see openfisca/__init__.py for details)
from __future__ import division
import codecs
import cStringIO
import csv
from datetime import datetime
import locale
import os
import numpy as np
from pandas import DataFrame, ExcelWriter
#from openfisca_core import model
from ...gui.config import get_icon
from ...gui.baseconfig import get_translation
from ...gui.qt.compat import to_qvariant
from ...gui.qt.QtCore import QAbstractItemModel, QModelIndex, Qt
from ...gui.qt.QtGui import QFileDialog, QMessageBox, QWidget, QAbstractItemView, QVBoxLayout
from ...gui.qthelpers import OfTreeView
from ...gui.utils.qthelpers import create_action
from .. import OpenfiscaPluginWidget
from ..utils import OutNode
_ = get_translation('openfisca_qt')
locale.setlocale(locale.LC_ALL, '')
class ScenarioTableWidget(OpenfiscaPluginWidget):
"""
Scenario Table Widget
"""
CONF_SECTION = 'composition'
def __init__(self, parent = None):
super(ScenarioTableWidget, self).__init__(parent)
self.setObjectName(_("Table"))
self.setWindowTitle(_("Table"))
self.dockWidgetContents = QWidget(self)
self.verticalLayout = QVBoxLayout(self.dockWidgetContents)
self.treeView = OfTreeView(self.dockWidgetContents)
self.treeView.setAlternatingRowColors(True)
self.treeView.setIndentation(10)
selection_behavior = QAbstractItemView.SelectRows
        # we should enable contiguous selection, but the copy method does not yet handle this.
# selection_mode = QAbstractItemView.ContiguousSelection
selection_mode = QAbstractItemView.SingleSelection
self.treeView.setSelectionBehavior(selection_behavior)
self.treeView.setSelectionMode(selection_mode)
self.verticalLayout.addWidget(self.treeView)
self.setLayout(self.verticalLayout)
self.table_format = self.get_option('table/format')
self.initialize_plugin()
#------ Public API ---------------------------------------------
def clearModel(self):
self.treeView.setModel(None)
def updateTable(self, scenario):
'''
Updates table
'''
data = OutNode.create_from_scenario_decomposition_json(scenario, decomposiiton_json = None)
dataDefault = data # TODO: fix this
if dataDefault is None:
dataDefault = data
mode = scenario.mode
x_axis = scenario.x_axis
print scenario.x_axis
for axe in self.main.composition.XAXIS_PROPERTIES.itervalues():
print axe
if axe['name'] == x_axis:
x_axis_typ_tot = axe['typ_tot_default']
break
headers = dataDefault[x_axis_typ_tot]
n = len(headers.vals)
self.data = data
self.outputModel = OutputModel(data, headers, n , self)
self.treeView.setModel(self.outputModel)
self.treeView.expandAll()
self.treeView.setColumnWidth(0, 200)
if mode == 'bareme':
for i in range(n):
self.treeView.resizeColumnToContents(i+1)
else:
self.treeView.setColumnWidth(1,100)
def create_dataframe(self):
'''
Formats data into a dataframe
'''
data_dict = dict()
index = []
for row in self.data:
            if not row.desc in ('root',):
index.append(row.desc)
data_dict[row.desc] = row.vals
df = DataFrame(data_dict).T
df = df.reindex(index)
return df
def create_description(self):
'''
Creates a description dataframe
'''
now = datetime.now()
descr = [u'OpenFisca',
u'Calculé le %s à %s' % (now.strftime('%d-%m-%Y'), now.strftime('%H:%M')),
u'Système socio-fiscal au %s' % str(self.simulation.datesim)]
return DataFrame(descr)
def save_table(self):
table_format = self.table_format
filename = _("Untitled.") + table_format
output_dir = self.get_option('table/export_dir')
user_path = os.path.join(output_dir, filename)
extension = table_format.upper() + " (*." + table_format + ")"
fname = QFileDialog.getSaveFileName(self,
_("Save table"), user_path, extension)
if fname:
output_dir = os.path.dirname(str(fname))
self.set_option('table/export_dir', output_dir)
try:
if table_format == "xls":
writer = ExcelWriter(str(fname))
df = self.create_dataframe()
descr = self.create_description()
df.to_excel(writer, "table", index=True, header= False)
descr.to_excel(writer, "description", index = False, header=False)
writer.save()
elif table_format =="csv":
# TODO: use DataFrame's ?
now = datetime.now()
csvfile = open(fname, 'wb')
writer = UnicodeWriter(csvfile, dialect= csv.excel, delimiter=';')
for row in self.data:
                        if not row.desc in ('root',):
outlist = [row.desc]
for val in row.vals:
outlist.append(locale.str(val))
writer.writerow(outlist)
writer.writerow(['OpenFisca'])
writer.writerow([_('Computed on %s at %s') % (now.strftime('%d-%m-%Y'), now.strftime('%H:%M'))])
writer.writerow([_('Socio-fiscal legislation of date %s') % str(self.simulation.datesim)])
writer.writerow([])
csvfile.close()
except Exception, e:
QMessageBox.critical(
self, "Error saving file", str(e),
QMessageBox.Ok, QMessageBox.NoButton)
#------ OpenfiscaPluginMixin API ---------------------------------------------
#------ OpenfiscaPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""
Return plugin title
Note: after some thinking, it appears that using a method
is more flexible here than using a class attribute
"""
return "Table"
def get_plugin_icon(self):
"""
Return plugin icon (QIcon instance)
Note: this is required for plugins creating a main window
(see OpenfiscaPluginMixin.create_mainwindow)
and for configuration dialog widgets creation
"""
return get_icon('OpenFisca22.png')
def get_plugin_actions(self):
"""
Return a list of actions related to plugin
Note: these actions will be enabled when plugin's dockwidget is visible
and they will be disabled when it's hidden
"""
self.save_action = create_action(self, _("Save &table"),
icon='filesave.png', tip=_("Save test-case table"),
triggered=self.save_table)
self.register_shortcut(self.save_action, context="Table",
name=_("Save test-case table"), default="Ctrl+T")
self.file_menu_actions = [self.save_action,]
self.main.file_menu_actions += self.file_menu_actions
# self.main.test_case_toolbar_actions += self.file_menu_actions
return self.file_menu_actions
def register_plugin(self):
"""
Register plugin in OpenFisca's main window
"""
self.main.add_dockwidget(self)
def refresh_plugin(self):
'''
Update Scenario Table
'''
# set the table model to None before changing data
self.clearModel()
self.updateTable(self.main.scenario)
def closing_plugin(self, cancelable=False):
"""
Perform actions before parent main window is closed
Return True or False whether the plugin may be closed immediately or not
Note: returned value is ignored if *cancelable* is False
"""
return True
class OutputModel(QAbstractItemModel):
def __init__(self, root, headers, ncol, parent=None):
super(OutputModel, self).__init__(parent)
self._rootNode = root
self._ncolumn = ncol
self._headers = headers
def rowCount(self, parent):
if not parent.isValid():
parentNode = self._rootNode
else:
parentNode = self.getNode(parent)
return parentNode.childCount()
def columnCount(self, parent):
return self._ncolumn +1
def data(self, index, role = Qt.DisplayRole):
if not index.isValid():
return None
node = self.getNode(index)
col = index.column()
if role == Qt.DisplayRole:
if col == 0:
return to_qvariant(node.desc)
else:
return to_qvariant(int(np.round(node.vals[col-1])))
if role == Qt.TextAlignmentRole:
if col == 0:
return Qt.AlignLeft
return Qt.AlignRight
def headerData(self, section, orientation, role):
if role == Qt.DisplayRole:
if section == 0: return to_qvariant(self._headers.desc)
else:
return to_qvariant(int(self._headers.vals[section-1]))
def flags(self, index):
node = self.getNode(index)
if np.any(node.vals != 0):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
else:
return Qt.ItemIsSelectable
"""Should return the parent of the node with the given QModelIndex"""
def parent(self, index):
node = self.getNode(index)
parentNode = node.parent
if parentNode == self._rootNode:
return QModelIndex()
return self.createIndex(parentNode.row(), 0, parentNode)
"""Should return a QModelIndex that corresponds to the given row, column and parent node"""
def index(self, row, column, parent):
parentNode = self.getNode(parent)
childItem = parentNode.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QModelIndex()
def getNode(self, index):
if index.isValid():
node = index.internalPointer()
if node:
return node
return self._rootNode
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
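# Illustrative usage sketch (mirrors save_table above; the file path is made up):
#
# with open('/tmp/table.csv', 'wb') as csvfile:
#     writer = UnicodeWriter(csvfile, dialect=csv.excel, delimiter=';')
#     writer.writerow([u'Revenu disponible', u'42'])
#     writer.writerows([[u'a', u'1'], [u'b', u'2']])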
| openfisca/openfisca-qt | openfisca_qt/plugins/scenario/table.py | Python | agpl-3.0 | 12,500 |
from gettext import gettext as _
import glob
import gzip
import logging
import os
import shutil
import traceback
from xml.sax.saxutils import XMLGenerator
from pulp.common import error_codes
from pulp.server.exceptions import PulpCodedValidationException, PulpCodedException
from verification import CHECKSUM_FUNCTIONS
_LOG = logging.getLogger(__name__)
BUFFER_SIZE = 1024
class MetadataFileContext(object):
"""
Context manager class for metadata file generation.
"""
def __init__(self, metadata_file_path, checksum_type=None):
"""
:param metadata_file_path: full path to metadata file to be generated
:type metadata_file_path: str
        :param checksum_type: checksum type to be used to generate and prepend a checksum
                              to the metadata file name. If checksum_type is None,
                              no checksum is added to the filename
:type checksum_type: str or None
"""
self.metadata_file_path = metadata_file_path
self.metadata_file_handle = None
self.checksum_type = checksum_type
self.checksum = None
if self.checksum_type is not None:
checksum_function = CHECKSUM_FUNCTIONS.get(checksum_type)
if not checksum_function:
raise PulpCodedValidationException(
[PulpCodedException(error_codes.PLP1005, checksum_type=checksum_type)])
self.checksum_constructor = checksum_function
def __enter__(self):
self.initialize()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if None not in (exc_type, exc_val, exc_tb):
err_msg = '\n'.join(traceback.format_exception(exc_type, exc_val, exc_tb))
log_msg = _('Exception occurred while writing [%(m)s]\n%(e)s')
# any errors here should have already been caught and logged
_LOG.debug(log_msg % {'m': self.metadata_file_path, 'e': err_msg})
self.finalize()
return True
def initialize(self):
"""
Create the new metadata file and write the header.
"""
if self.metadata_file_handle is not None:
# initialize has already, at least partially, been run
return
self._open_metadata_file_handle()
self._write_file_header()
def finalize(self):
"""
Write the footer into the metadata file and close it.
"""
if self._is_closed(self.metadata_file_handle):
# finalize has already been run or initialize has not been run
return
try:
self._write_file_footer()
except Exception, e:
_LOG.exception(e)
try:
self._close_metadata_file_handle()
except Exception, e:
_LOG.exception(e)
# Add calculated checksum to the filename
file_name = os.path.basename(self.metadata_file_path)
if self.checksum_type is not None:
with open(self.metadata_file_path, 'rb') as file_handle:
content = file_handle.read()
checksum = self.checksum_constructor(content).hexdigest()
self.checksum = checksum
file_name_with_checksum = checksum + '-' + file_name
new_file_path = os.path.join(os.path.dirname(self.metadata_file_path),
file_name_with_checksum)
os.rename(self.metadata_file_path, new_file_path)
self.metadata_file_path = new_file_path
# Set the metadata_file_handle to None so we don't double call finalize
self.metadata_file_handle = None
def _open_metadata_file_handle(self):
"""
Open the metadata file handle, creating any missing parent directories.
If the file already exists, this will overwrite it.
"""
assert self.metadata_file_handle is None
_LOG.debug('Opening metadata file: %s' % self.metadata_file_path)
if not os.path.exists(self.metadata_file_path):
parent_dir = os.path.dirname(self.metadata_file_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir, mode=0770)
elif not os.access(parent_dir, os.R_OK | os.W_OK | os.X_OK):
msg = _('Insufficient permissions to write metadata file in directory [%(d)s]')
raise RuntimeError(msg % {'d': parent_dir})
else:
msg = _('Overwriting existing metadata file [%(p)s]')
_LOG.warn(msg % {'p': self.metadata_file_path})
if not os.access(self.metadata_file_path, os.R_OK | os.W_OK):
msg = _('Insufficient permissions to overwrite [%(p)s]')
raise RuntimeError(msg % {'p': self.metadata_file_path})
msg = _('Opening metadata file handle for [%(p)s]')
_LOG.debug(msg % {'p': self.metadata_file_path})
if self.metadata_file_path.endswith('.gz'):
self.metadata_file_handle = gzip.open(self.metadata_file_path, 'w')
else:
self.metadata_file_handle = open(self.metadata_file_path, 'w')
def _write_file_header(self):
"""
Write any headers for the metadata file
"""
pass
def _write_file_footer(self):
"""
Write any file footers for the metadata file.
"""
pass
def _close_metadata_file_handle(self):
"""
Flush any cached writes to the metadata file handle and close it.
"""
_LOG.debug('Closing metadata file: %s' % self.metadata_file_path)
if not self._is_closed(self.metadata_file_handle):
self.metadata_file_handle.flush()
self.metadata_file_handle.close()
@staticmethod
def _is_closed(file_object):
"""
Determine if the file object has been closed. If it is None, it is assumed to be closed.
:param file_object: a file object
:type file_object: file
:return: True if the file object is closed or is None, otherwise False
:rtype: bool
"""
if file_object is None:
# finalize has already been run or initialize has not been run
return True
try:
return file_object.closed
except AttributeError:
# python 2.6 doesn't have a "closed" attribute on a GzipFile,
# so we must look deeper.
if isinstance(file_object, gzip.GzipFile):
return file_object.myfileobj is None or file_object.myfileobj.closed
else:
raise
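# Illustrative usage sketch (the path, the payload and the availability of 'sha256'
# in CHECKSUM_FUNCTIONS are assumptions):
#
# with MetadataFileContext('/tmp/repodata/metadata.json.gz', checksum_type='sha256') as context:
#     context.metadata_file_handle.write('{"units": []}')
# # on exit the file is renamed to '<sha256>-metadata.json.gz' and
# # context.metadata_file_path points at the renamed file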
class JSONArrayFileContext(MetadataFileContext):
"""
Context manager for writing out units as a json array.
"""
def __init__(self, *args, **kwargs):
"""
:param args: any positional arguments to be passed to the superclass
:type args: list
:param kwargs: any keyword arguments to be passed to the superclass
:type kwargs: dict
"""
super(JSONArrayFileContext, self).__init__(*args, **kwargs)
self.units_added = False
def _write_file_header(self):
"""
Write out the beginning of the json file
"""
self.metadata_file_handle.write('[')
def _write_file_footer(self):
"""
Write out the end of the json file
"""
self.metadata_file_handle.write(']')
def add_unit_metadata(self, unit):
"""
Add the specific metadata for this unit
"""
if self.units_added:
self.metadata_file_handle.write(',')
else:
self.units_added = True
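# Illustrative sketch (hypothetical subclass): concrete contexts call the parent
# add_unit_metadata() for comma separation and then append the serialized unit.
#
# class UnitJsonFileContext(JSONArrayFileContext):
#     def add_unit_metadata(self, unit):
#         super(UnitJsonFileContext, self).add_unit_metadata(unit)
#         self.metadata_file_handle.write('{"name": "%s"}' % unit)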
class XmlFileContext(MetadataFileContext):
"""
Context manager for writing out units as xml
"""
def __init__(self, metadata_file_path, root_tag, root_attributes=None, *args, **kwargs):
"""
:param metadata_file_path: The file path for the file to write
:type metadata_file_path: str
:param root_tag: The root tag for the xml tree
:type root_tag: str
:param root_attributes: Any attributes to populate on the root xml tag
:type root_attributes: dict of str
:param args: any positional arguments to be passed to the superclass
:type args: list
:param kwargs: any keyword arguments to be passed to the superclass
:type kwargs: dict
"""
super(XmlFileContext, self).__init__(metadata_file_path, *args, **kwargs)
self.root_tag = root_tag
if not root_attributes:
root_attributes = {}
self.root_attributes = root_attributes
def _open_metadata_file_handle(self):
"""
Open the metadata file handle, creating any missing parent directories.
If the file already exists, this will overwrite it.
"""
super(XmlFileContext, self)._open_metadata_file_handle()
self.xml_generator = XMLGenerator(self.metadata_file_handle, 'UTF-8')
def _write_file_header(self):
"""
        Write out the beginning of the xml file
"""
self.xml_generator.startDocument()
self.xml_generator.startElement(self.root_tag, self.root_attributes)
def _write_file_footer(self):
"""
        Write out the end of the xml file
"""
self.xml_generator.endElement(self.root_tag)
self.xml_generator.endDocument()
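# Illustrative usage sketch (file path and tag names are made up):
#
# with XmlFileContext('/tmp/repodata/other.xml', root_tag='otherdata',
#                     root_attributes={'packages': '0'}) as context:
#     context.xml_generator.startElement('package', {'name': 'example'})
#     context.xml_generator.endElement('package')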
class FastForwardXmlFileContext(XmlFileContext):
"""
Context manager for reopening an existing XML file context to insert more data.
"""
def __init__(self, metadata_file_path, root_tag, search_tag, root_attributes=None,
*args, **kwargs):
"""
:param metadata_file_path: The file path for the file to write
:type metadata_file_path: str
:param root_tag: The root tag for the xml tree
:type root_tag: str
        :param search_tag: The tag that denotes the beginning of content to copy
        :type search_tag: str
:param root_attributes: Any attributes to populate on the root xml tag
:type root_attributes: dict of str, str
:param args: any positional arguments to be passed to the superclass
:type args: list
:param kwargs: any keyword arguments to be passed to the superclass
:type kwargs: dict
"""
super(FastForwardXmlFileContext, self).__init__(metadata_file_path, root_tag,
root_attributes, *args, **kwargs)
self.fast_forward = False
self.search_tag = search_tag
self.existing_file = None
self.xml_generator = None
def _open_metadata_file_handle(self):
"""
Open the metadata file handle, creating any missing parent directories.
If the file already exists, this will copy it to a new name and open it as an input
for filtering/modification.
"""
# Figure out if we are fast forwarding a file
# find the primary file
working_dir, file_name = os.path.split(self.metadata_file_path)
if self.checksum_type:
# Look for a file matching the checksum-filename pattern
expression = '[0-9a-zA-Z]*-%s' % file_name
expression = os.path.join(working_dir, expression)
file_list = glob.glob(expression)
if file_list:
# We only want to work on the latest one
stat_files = ((os.stat(path).st_mtime, path) for path in file_list)
sorted_files = sorted(stat_files, reverse=True)
working_dir, existing_file_name = os.path.split(sorted_files[0][1])
self.existing_file = existing_file_name
self.fast_forward = True
elif not self.checksum_type and os.path.exists(self.metadata_file_path):
self.existing_file = file_name
self.fast_forward = True
if self.fast_forward:
# move the file so that we can still process it if the name is the same
if self.existing_file:
new_file_name = 'original.%s' % self.existing_file
shutil.move(os.path.join(working_dir, self.existing_file),
os.path.join(working_dir, new_file_name))
self.existing_file = new_file_name
self.existing_file = os.path.join(working_dir, self.existing_file)
# Open the file, unzip if necessary so that seek operations can be performed
self.original_file_handle = None
if self.existing_file.endswith('.gz'):
non_compressed_file = self.existing_file[:self.existing_file.rfind('.gz')]
with open(os.path.join(working_dir, non_compressed_file), 'wb') as plain_handle:
gzip_handle = gzip.open(os.path.join(working_dir, self.existing_file), 'rb')
try:
content = gzip_handle.read(BUFFER_SIZE)
while content:
plain_handle.write(content)
content = gzip_handle.read(BUFFER_SIZE)
finally:
if gzip_handle:
gzip_handle.close()
# clean up the zipped file
os.unlink(self.existing_file)
self.existing_file = non_compressed_file
self.original_file_handle = open(os.path.join(working_dir, self.existing_file), 'r')
super(FastForwardXmlFileContext, self)._open_metadata_file_handle()
def _write_file_header(self):
"""
Write out the beginning of the file only if we are not in fast forward mode
"""
super(FastForwardXmlFileContext, self)._write_file_header()
if self.fast_forward:
start_tag = '<%s' % self.search_tag
end_tag = '</%s' % self.root_tag
# Find the start offset
content = ''
index = -1
while index < 0:
content_buffer = self.original_file_handle.read(BUFFER_SIZE)
if not content_buffer:
                    # The search tag was never found; this is an empty file where no fast forward is necessary
msg = _('When attempting to fast forward the file %(file)s, the search tag '
'%(tag)s was not found so the assumption is that no fast forward is to '
'take place.')
_LOG.debug(msg, {'file': self.metadata_file_path, 'tag': start_tag})
return
content += content_buffer
index = content.find(start_tag)
start_offset = index
# Find the end offset
content = ''
index = -1
self.original_file_handle.seek(0, os.SEEK_END)
while index < 0:
amount_to_read = min(BUFFER_SIZE, self.original_file_handle.tell())
self.original_file_handle.seek(-amount_to_read, os.SEEK_CUR)
content_buffer = self.original_file_handle.read(amount_to_read)
if not content_buffer:
raise Exception(_('Error: %(tag)s not found in the xml file.')
% {'tag': end_tag})
bytes_read = len(content_buffer)
self.original_file_handle.seek(-bytes_read, os.SEEK_CUR)
content = content_buffer + content
index = content.rfind(end_tag)
end_offset = self.original_file_handle.tell() + index
# stream out the content
self.original_file_handle.seek(start_offset)
bytes_to_read = end_offset - start_offset
content_buffer = self.original_file_handle.read(BUFFER_SIZE)
while bytes_to_read > 0:
buffer_size = len(content_buffer)
if buffer_size > bytes_to_read:
content_buffer = content_buffer[:bytes_to_read]
self.metadata_file_handle.write(content_buffer)
bytes_to_read -= buffer_size
content_buffer = self.original_file_handle.read(BUFFER_SIZE)
def _close_metadata_file_handle(self):
"""
Close any open file handles and remove the original file if a new one
was generated
"""
super(FastForwardXmlFileContext, self)._close_metadata_file_handle()
# Close & remove the existing file that was copied
if self.fast_forward:
if not self._is_closed(self.original_file_handle):
self.original_file_handle.close()
# We will always have renamed the original file so remove it
os.unlink(self.existing_file)
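# Illustrative usage sketch of appending to a previously generated file
# (file path and tag names are made up):
#
# with FastForwardXmlFileContext('/tmp/repodata/other.xml', root_tag='otherdata',
#                                search_tag='package') as context:
#     context.xml_generator.startElement('package', {'name': 'another'})
#     context.xml_generator.endElement('package')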
| credativ/pulp | server/pulp/plugins/util/metadata_writer.py | Python | gpl-2.0 | 16,777 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from spikeval.module import ModMetricFranke, ModuleExecutionError
from .models.result import ResultFranke
__author__ = "pmeier82"
__all__ = ["ModuleFranke"]
def toint(val):
# if type(val) == type(""):
res = int(float(val))
return res
class ModuleFranke(ModMetricFranke):
"""spikeval module for the franke metric"""
# RESULT_TYPES
# MRTable, # res_table - this is what we will save!
# MRTable, # similarity_matrix
# MRTable, # shift_matrix
# MRTable, # sp.atleast_2d(delta_shift)
# MRDict, # alignment
# MRDict, # O
# MRTable, # spike_no_assignment_matrix
# MRDict, # EL
# MRDict, # GL
# MRTable, # sp.atleast_2d(TP)
# MRTable, # sp.atleast_2d(TPO)
# MRTable, # sp.atleast_2d(FPA)
# MRTable, # sp.atleast_2d(FPAO)
# MRTable, # sp.atleast_2d(FN)
# MRTable, # sp.atleast_2d(FNO)
# MRTable, # sp.atleast_2d(FP)
# MRTable, # sp.atleast_2d(u_k2f)
# MRTable, # sp.atleast_2d(u_f2k)
def save(self, mod, ana):
"""save django result entities"""
# check for results
if self._stage != 3:
raise ModuleExecutionError("save initiated when module was not finalised!")
# result saving
for row in self.result[0].value:
res_entity = ResultFranke(analysis=ana, module=mod)
res_entity.unit_gt = row[0]
res_entity.unit_an = row[1]
res_entity.KS = toint(row[2])
res_entity.KSO = toint(row[3])
res_entity.FS = toint(row[4])
res_entity.TP = toint(row[5])
res_entity.TPO = toint(row[6])
res_entity.FPA = toint(row[7])
res_entity.FPAE = toint(row[8])
res_entity.FPAO = toint(row[9])
res_entity.FPAOE = toint(row[10])
res_entity.FN = toint(row[11])
res_entity.FNO = toint(row[12])
res_entity.FP = toint(row[13])
res_entity.save()
if __name__ == "__main__":
pass
| pmeier82/django-spikeval-franke | djspikeval_franke/module.py | Python | bsd-3-clause | 2,076 |
# coding=utf-8
"""
This is an example of attaching a file with an explicit mimetype
"""
import logging
# https://pypi.org/project/python-magic/
import magic
from atlassian import Confluence
logging.basicConfig(level=logging.DEBUG)
confluence = Confluence(
url="http://localhost:8090",
username="admin",
password="admin",
)
def attach_file(page_title, file_location, file_name, mime_type, space):
    page = confluence.get_page_by_title(space=space, title=page_title)
    page_id = page.get("id") if page else None
if page_id is None:
return 1
try:
confluence.attach_file(
filename=file_location, name=file_name, content_type=mime_type, page_id=page_id, space=space
)
except Exception:
return 1
return 0
mime_type = magic.Magic(mime=True)
file_location_with_page = "~/test/test_file.pdf"
file_name = "So excited overview of report.pdf"
title = "The page with attachments"
space = "TST"
content_type = magic.from_file(file_location_with_page, mime=True)
attach_file(
file_location=file_location_with_page, file_name=file_name, mime_type=content_type, page_title=title, space=space
)
| MattAgile/atlassian-python-api | examples/confluence/confluence_attach_file.py | Python | apache-2.0 | 1,107 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
import six
from sentry.models import (
Commit,
GroupLink,
GroupResolution,
ReleaseCommit,
Repository,
)
from sentry.testutils import APITestCase
class IssuesResolvedInReleaseEndpointTest(APITestCase):
def setUp(self):
super(IssuesResolvedInReleaseEndpointTest, self).setUp()
self.user = self.create_user()
self.org = self.create_organization()
self.team = self.create_team(organization=self.org)
self.create_member(organization=self.org, user=self.user, teams=[self.team])
self.project = self.create_project(
team=self.team,
)
self.release = self.create_release(
project=self.project,
)
self.group = self.create_group(project=self.project)
self.login_as(self.user)
self.path = reverse(
'sentry-api-0-release-resolved',
kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
'version': self.release.version,
}
)
def test_shows_issues_from_groupresolution(self):
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupResolution model
"""
GroupResolution.objects.create(
group=self.group,
release=self.release,
type=GroupResolution.Type.in_release,
)
response = self.client.get(self.path)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['id'] == six.text_type(self.group.id)
def test_shows_issues_from_grouplink(self):
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupLink model
"""
repo = Repository.objects.create(
organization_id=self.org.id,
name=self.project.name,
)
commit = Commit.objects.create(
organization_id=self.org.id,
repository_id=repo.id,
key='a' * 40,
)
commit2 = Commit.objects.create(
organization_id=self.org.id,
repository_id=repo.id,
key='b' * 40,
)
ReleaseCommit.objects.create(
organization_id=self.org.id,
release=self.release,
commit=commit,
order=1,
)
ReleaseCommit.objects.create(
organization_id=self.org.id,
release=self.release,
commit=commit2,
order=0,
)
GroupLink.objects.create(
group_id=self.group.id,
project_id=self.group.project_id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
linked_id=commit.id,
)
response = self.client.get(self.path)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['id'] == six.text_type(self.group.id)
def test_does_not_return_duplicate_groups(self):
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupLink and GroupResolution model
but will not return the groups twice if they appear in both
"""
repo = Repository.objects.create(
organization_id=self.org.id,
name=self.project.name,
)
commit = Commit.objects.create(
organization_id=self.org.id,
repository_id=repo.id,
key='a' * 40,
)
commit2 = Commit.objects.create(
organization_id=self.org.id,
repository_id=repo.id,
key='b' * 40,
)
ReleaseCommit.objects.create(
organization_id=self.org.id,
release=self.release,
commit=commit,
order=1,
)
ReleaseCommit.objects.create(
organization_id=self.org.id,
release=self.release,
commit=commit2,
order=0,
)
GroupLink.objects.create(
group_id=self.group.id,
project_id=self.group.project_id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
linked_id=commit.id,
)
GroupResolution.objects.create(
group=self.group,
release=self.release,
type=GroupResolution.Type.in_release,
)
response = self.client.get(self.path)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['id'] == six.text_type(self.group.id)
def test_return_groups_from_both_types(self):
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from both the GroupLink and GroupResolution model
"""
group2 = self.create_group(project=self.project)
repo = Repository.objects.create(
organization_id=self.org.id,
name=self.project.name,
)
commit = Commit.objects.create(
organization_id=self.org.id,
repository_id=repo.id,
key='a' * 40,
)
commit2 = Commit.objects.create(
organization_id=self.org.id,
repository_id=repo.id,
key='b' * 40,
)
ReleaseCommit.objects.create(
organization_id=self.org.id,
release=self.release,
commit=commit,
order=1,
)
ReleaseCommit.objects.create(
organization_id=self.org.id,
release=self.release,
commit=commit2,
order=0,
)
GroupLink.objects.create(
group_id=self.group.id,
project_id=self.group.project_id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
linked_id=commit.id,
)
GroupResolution.objects.create(
group=group2,
release=self.release,
type=GroupResolution.Type.in_release,
)
response = self.client.get(self.path)
assert response.status_code == 200, response.content
assert len(response.data) == 2
| gencer/sentry | tests/sentry/api/endpoints/test_issues_resolved_in_release.py | Python | bsd-3-clause | 6,561 |
from django.contrib import admin
from .models import Applications
class ApplicationsModelAdmin(admin.ModelAdmin):
list_display = ['name', 'tags', 'url_name', 'visible']
list_editable = ['url_name']
admin.site.register(Applications, ApplicationsModelAdmin)
| JacekKarnasiewicz/HomePage | apps/search_app/admin.py | Python | mit | 261 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import pathlib
import random
import socket
import sys
import threading
import unittest
from datetime import datetime, timedelta
from logging.config import dictConfig
from tempfile import TemporaryDirectory
from textwrap import dedent
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
import pytest
from freezegun import freeze_time
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.configuration import conf
from airflow.dag_processing.manager import (
DagFileProcessorAgent,
DagFileProcessorManager,
DagFileStat,
DagParsingSignal,
DagParsingStat,
)
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.models import DagBag, DagModel, errors
from airflow.models.dagcode import DagCode
from airflow.models.serialized_dag import SerializedDagModel
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from tests.core.test_logging_config import SETTINGS_FILE_VALID, settings_context
from tests.models import TEST_DAGS_FOLDER
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
TEST_DAG_FOLDER = pathlib.Path(__file__).parent.parent / 'dags'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class FakeDagFileProcessorRunner(DagFileProcessorProcess):
# This fake processor will return the zombies it received in constructor
# as its processing result w/o actually parsing anything.
def __init__(self, file_path, pickle_dags, dag_ids, callbacks):
super().__init__(file_path, pickle_dags, dag_ids, callbacks)
# We need a "real" selectable handle for waitable_handle to work
readable, writable = multiprocessing.Pipe(duplex=False)
writable.send('abc')
writable.close()
self._waitable_handle = readable
self._result = 0, 0
def start(self):
pass
@property
def start_time(self):
return DEFAULT_DATE
@property
def pid(self):
return 1234
@property
def done(self):
return True
@property
def result(self):
return self._result
@staticmethod
def _create_process(file_path, callback_requests, dag_ids, pickle_dags):
return FakeDagFileProcessorRunner(
file_path,
pickle_dags,
dag_ids,
callback_requests,
)
@property
def waitable_handle(self):
return self._waitable_handle
class TestDagFileProcessorManager:
def setup_method(self):
dictConfig(DEFAULT_LOGGING_CONFIG)
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
def teardown_class(self):
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
def run_processor_manager_one_loop(self, manager, parent_pipe):
if not manager._async_mode:
parent_pipe.send(DagParsingSignal.AGENT_RUN_ONCE)
results = []
while True:
manager._run_parsing_loop()
while parent_pipe.poll(timeout=0.01):
obj = parent_pipe.recv()
if not isinstance(obj, DagParsingStat):
results.append(obj)
elif obj.done:
return results
raise RuntimeError("Shouldn't get here - nothing to read, but manager not finished!")
@conf_vars({('core', 'load_examples'): 'False'})
def test_remove_file_clears_import_error(self, tmpdir):
filename_to_parse = tmpdir / 'temp_dag.py'
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines('an invalid airflow DAG')
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=tmpdir,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
with create_session() as session:
self.run_processor_manager_one_loop(manager, parent_pipe)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
filename_to_parse.remove()
# Rerun the scheduler once the dag file has been removed
self.run_processor_manager_one_loop(manager, parent_pipe)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 0
session.rollback()
child_pipe.close()
parent_pipe.close()
@conf_vars({('core', 'load_examples'): 'False'})
def test_max_runs_when_no_files(self):
child_pipe, parent_pipe = multiprocessing.Pipe()
with TemporaryDirectory(prefix="empty-airflow-dags-") as dags_folder:
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=dags_folder,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
child_pipe.close()
parent_pipe.close()
@pytest.mark.backend("mysql", "postgres")
def test_start_new_processes_with_same_filepath(self):
"""
        Test that when a processor already exists with a filepath, a new processor won't be created
with that filepath. The filepath will just be removed from the list.
"""
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
file_1 = 'file_1.py'
file_2 = 'file_2.py'
file_3 = 'file_3.py'
manager._file_path_queue = [file_1, file_2, file_3]
# Mock that only one processor exists. This processor runs with 'file_1'
manager._processors[file_1] = MagicMock()
# Start New Processes
manager.start_new_processes()
# Because of the config: '[scheduler] parsing_processes = 2'
# verify that only one extra process is created
# and since a processor with 'file_1' already exists,
# even though it is first in '_file_path_queue'
# a new processor is created with 'file_2' and not 'file_1'.
assert file_1 in manager._processors.keys()
assert file_2 in manager._processors.keys()
assert [file_3] == manager._file_path_queue
def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['missing_file.txt'] = mock_processor
manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0)
manager.set_file_paths(['abc.txt'])
assert manager._processors == {}
def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['abc.txt'] = mock_processor
manager.set_file_paths(['abc.txt'])
assert manager._processors == {'abc.txt': mock_processor}
@conf_vars({("scheduler", "file_parsing_sort_mode"): "alphabetical"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_alphabetically(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test dag files are sorted alphabetically"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_1.py', 'file_2.py', 'file_3.py', 'file_4.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "random_seeded_by_host"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_random_seeded_by_host(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are randomly sorted and seeded by host name"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
expected_order = dag_files
random.Random(get_hostname()).shuffle(expected_order)
assert manager._file_path_queue == expected_order
# Verify running it again produces same order
manager._file_paths = []
manager.prepare_file_path_queue()
assert manager._file_path_queue == expected_order
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_sorted_by_modified_time(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are sorted by modified time"""
paths_with_mtime = {"file_3.py": 3.0, "file_2.py": 2.0, "file_4.py": 5.0, "file_1.py": 4.0}
dag_files = list(paths_with_mtime.keys())
mock_getmtime.side_effect = list(paths_with_mtime.values())
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_4.py', 'file_1.py', 'file_3.py', 'file_2.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_excludes_missing_file(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Check that a file is not enqueued for processing if it has been deleted"""
dag_files = ["file_3.py", "file_2.py", "file_4.py"]
mock_getmtime.side_effect = [1.0, 2.0, FileNotFoundError()]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_2.py', 'file_3.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_recently_modified_file_is_parsed_with_mtime_mode(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""
Test recently updated files are processed even if min_file_process_interval is not reached
"""
freezed_base_time = timezone.datetime(2020, 1, 5, 0, 0, 0)
initial_file_1_mtime = (freezed_base_time - timedelta(minutes=5)).timestamp()
dag_files = ["file_1.py"]
mock_getmtime.side_effect = [initial_file_1_mtime]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=3,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
        # let's say the DAG was just parsed 10 seconds before the frozen base time
last_finish_time = freezed_base_time - timedelta(seconds=10)
manager._file_stats = {
"file_1.py": DagFileStat(1, 0, last_finish_time, 1.0, 1),
}
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
manager.prepare_file_path_queue()
assert manager._file_path_queue == []
# Simulate the DAG modification by using modified_time which is greater
# than the last_parse_time but still less than now - min_file_process_interval
file_1_new_mtime = freezed_base_time - timedelta(seconds=5)
file_1_new_mtime_ts = file_1_new_mtime.timestamp()
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
mock_getmtime.side_effect = [file_1_new_mtime_ts]
manager.prepare_file_path_queue()
            # Check that the file is added to the queue even though it was only recently parsed
assert manager._file_path_queue == ["file_1.py"]
assert last_finish_time < file_1_new_mtime
assert (
manager._file_process_interval
> (freezed_base_time - manager.get_last_finish_time("file_1.py")).total_seconds()
)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.kill")
def test_kill_timed_out_processors_kill(self, mock_kill, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.min)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_kill.assert_called_once_with()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess")
def test_kill_timed_out_processors_no_kill(self, mock_dag_file_processor, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.max)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_dag_file_processor.kill.assert_not_called()
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.execution_timeout(10)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a system.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_directory = TEST_DAG_FOLDER.parent / 'dags_with_system_exit'
# Delete the one valid DAG/SerializedDAG, and check that it gets re-created
clear_db_dags()
clear_db_serialized_dags()
child_pipe, parent_pipe = multiprocessing.Pipe()
manager = DagFileProcessorManager(
dag_directory=dag_directory,
dag_ids=[],
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
manager._run_parsing_loop()
result = None
while parent_pipe.poll(timeout=None):
result = parent_pipe.recv()
if isinstance(result, DagParsingStat) and result.done:
break
# Three files in folder should be processed
assert sum(stat.run_count for stat in manager._file_stats.values()) == 3
with create_session() as session:
assert session.query(DagModel).get(dag_id) is not None
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.execution_timeout(30)
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_pipe_full_deadlock(self, mock_processor):
dag_filepath = TEST_DAG_FOLDER / "test_scheduler_dags.py"
child_pipe, parent_pipe = multiprocessing.Pipe()
# Shrink the buffers to exacerbate the problem!
for fd in (parent_pipe.fileno(),):
sock = socket.socket(fileno=fd)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
sock.detach()
exit_event = threading.Event()
# To test this behaviour we need something that continually fills the
# parent pipe's buffer (and keeps it full).
def keep_pipe_full(pipe, exit_event):
n = 0
while True:
if exit_event.is_set():
break
req = CallbackRequest(str(dag_filepath))
try:
logging.debug("Sending CallbackRequests %d", n + 1)
pipe.send(req)
except TypeError:
# This is actually the error you get when the parent pipe
# is closed! Nicely handled, eh?
break
except OSError:
break
n += 1
logging.debug(" Sent %d CallbackRequests", n)
thread = threading.Thread(target=keep_pipe_full, args=(parent_pipe, exit_event))
fake_processors = []
def fake_processor_(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
fake_processors.append(processor)
return processor
mock_processor.side_effect = fake_processor_
manager = DagFileProcessorManager(
dag_directory=dag_filepath,
dag_ids=[],
# A reasonable large number to ensure that we trigger the deadlock
max_runs=100,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
try:
thread.start()
# If this completes without hanging, then the test is good!
manager._run_parsing_loop()
exit_event.set()
finally:
logging.info("Closing pipes")
parent_pipe.close()
child_pipe.close()
thread.join(timeout=1.0)
@conf_vars({('core', 'load_examples'): 'False'})
@mock.patch('airflow.dag_processing.manager.Stats.timing')
def test_send_file_processing_statsd_timing(self, statsd_timing_mock, tmpdir):
filename_to_parse = tmpdir / 'temp_dag.py'
dag_code = dedent(
"""
from airflow import DAG
dag = DAG(dag_id='temp_dag', schedule_interval='0 0 * * *')
"""
)
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(dag_code)
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=tmpdir,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
last_runtime = manager.get_last_runtime(manager.file_paths[0])
child_pipe.close()
parent_pipe.close()
statsd_timing_mock.assert_called_with('dag_processing.last_duration.temp_dag', last_runtime)
def test_refresh_dags_dir_doesnt_delete_zipped_dags(self, tmpdir):
"""Test DagFileProcessorManager._refresh_dag_dir method"""
manager = DagFileProcessorManager(
dag_directory=TEST_DAG_FOLDER,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
dagbag = DagBag(dag_folder=tmpdir, include_examples=False)
zipped_dag_path = os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")
dagbag.process_file(zipped_dag_path)
dag = dagbag.get_dag("test_zip_dag")
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
manager.last_dag_dir_refresh_time = timezone.utcnow() - timedelta(minutes=10)
manager._refresh_dag_dir()
# Assert dag not deleted in SDM
assert SerializedDagModel.has_dag('test_zip_dag')
# assert code not deleted
assert DagCode.has_dag(dag.fileloc)
class TestDagFileProcessorAgent(unittest.TestCase):
def setUp(self):
# Make sure that the configure_logging is not cached
self.old_modules = dict(sys.modules)
def tearDown(self):
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
remove_list = []
for mod in sys.modules:
if mod not in self.old_modules:
remove_list.append(mod)
for mod in remove_list:
del sys.modules[mod]
@staticmethod
def _processor_factory(file_path, zombies, dag_ids, pickle_dags):
return DagFileProcessorProcess(file_path, pickle_dags, dag_ids, zombies)
def test_reload_module(self):
"""
Configure the context to have logging.logging_config_class set to a fake logging
class path, thus when reloading logging module the airflow.processor_manager
logger should not be configured.
"""
with settings_context(SETTINGS_FILE_VALID):
# Launch a process through DagFileProcessorAgent, which will try
# reload the logging module.
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
# Since we are reloading logging config not creating this file,
# we should expect it to be nonexistent.
assert not os.path.isfile(log_file_loc)
@conf_vars({('core', 'load_examples'): 'False'})
def test_parse_once(self):
clear_db_serialized_dags()
clear_db_dags()
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
processor_agent = DagFileProcessorAgent(test_dag_path, 1, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
while not processor_agent.done:
if not async_mode:
processor_agent.wait_until_finished()
processor_agent.heartbeat()
assert processor_agent.all_files_processed
assert processor_agent.done
with create_session() as session:
dag_ids = session.query(DagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
dag_ids = session.query(SerializedDagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
def test_launch_process(self):
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
assert os.path.isfile(log_file_loc)
| bolkedebruin/airflow | tests/dag_processing/test_manager.py | Python | apache-2.0 | 29,613 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "cryptography"
__summary__ = ("cryptography is a package which provides cryptographic recipes"
" and primitives to Python developers.")
__uri__ = "https://github.com/pyca/cryptography"
__version__ = "1.1.dev1"
__author__ = "The cryptography developers"
__email__ = "cryptography-dev@python.org"
__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2013-2015 {0}".format(__author__)
| kimvais/cryptography | src/cryptography/__about__.py | Python | bsd-3-clause | 820 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
  path_list = gc.get_paths("/tmp", parser)  # contains all ten Paths
  every_fifth = gc.mod_export_version(5)
  print(every_fifth(path_list))    # shows ["/tmp/0", "/tmp/5"]
  largest_three = gc.largest_export_versions(3)
  print(largest_three(path_list))  # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
  both = gc.union(every_fifth, largest_three)
  print(both(path_list))           # shows ["/tmp/0", "/tmp/5",
                                   #        "/tmp/7", "/tmp/8", "/tmp/9"]
  # delete everything not in 'both'
  to_delete = gc.negation(both)
  for p in to_delete(path_list):
    gfile.DeleteRecursively(p.path)  # deletes: "/tmp/1", "/tmp/2",
                                     #          "/tmp/3", "/tmp/4", "/tmp/6"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
Path = collections.namedtuple('Path', 'path export_version')
def largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
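    # Push (export_version, index) pairs onto a heap and keep the paths
    # whose export_version is among the n largest.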
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
def one_of_every_n_export_versions(n):
"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
    [0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
    interval, the largest is kept.
"""
def keep(paths):
"""A filter function that keeps exactly one out of every n paths."""
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
      # Find the interval (with a special case to map export_version = 0 to
      # interval 0).
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
def mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
def union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
lf: first filter
rf: second filter
Returns:
    A filter function that keeps the paths kept by either lf or rf.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
def negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
    A filter function that keeps only the paths not kept by f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
def get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
p = parser(Path(os.path.join(base_dir, r), None))
if p:
paths.append(p)
return sorted(paths)
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/utils/gc.py | Python | bsd-2-clause | 5,986 |
# -*- coding: utf-8 -*-
"""
Display NVIDIA properties currently exhibiting in the NVIDIA GPUs.
nvidia-smi, short for NVIDIA System Management Interface program, is a cross
platform tool that supports all standard NVIDIA driver-supported Linux distros.
Configuration parameters:
cache_timeout: refresh interval for this module (default 10)
format: display format for this module (default '{format_gpu}')
format_gpu: display format for NVIDIA GPUs
*(default '{gpu_name} [\?color=temperature.gpu {temperature.gpu}°C] '
'[\?color=memory.used_percent {memory.used_percent}%]')*
format_gpu_separator: show separator if more than one (default ' ')
memory_unit: specify memory unit, eg 'KiB', 'MiB', 'GiB', otherwise auto
(default None)
thresholds: specify color thresholds to use
(default [(0, 'good'), (65, 'degraded'), (75, 'orange'), (85, 'bad')])
Format placeholders:
{format_gpu} format for NVIDIA GPUs
format_gpu placeholders:
{index} Zero based index of the GPU.
{count} The number of NVIDIA GPUs in the system
{driver_version} The version of the installed NVIDIA display driver
{gpu_name} The official product name of the GPU
{gpu_uuid} Globally unique immutable identifier of the GPU
{memory.free} Total free memory
{memory.free_unit} Total free memory unit
{memory.total} Total installed GPU memory
{memory.total_unit} Total installed GPU memory unit
{memory.used} Total memory allocated by active contexts
{memory.used_percent} Total memory allocated by active contexts percentage
{memory.used_unit} Total memory unit
{temperature.gpu} Core GPU temperature in degrees C
Use `python /path/to/nvidia_smi.py --list-properties` for a full list of
supported NVIDIA properties to use. Not all supported NVIDIA properties
will be usable. See `nvidia-smi --help-query-gpu` for more information.
Color thresholds:
format_gpu:
`xxx`: print a color based on the value of NVIDIA `xxx` property
Requires:
nvidia-smi: command line interface to query NVIDIA devices
Examples:
```
# display nvidia properties
nvidia_smi {
format_gpu = '{gpu_name} [\?color=temperature.gpu {temperature.gpu}°C] '
format_gpu += '[\?color=memory.used_percent {memory.used} {memory.used_unit}'
format_gpu += '[\?color=darkgray&show \|]{memory.used_percent:.1f}%]'
}
```
@author lasers
SAMPLE OUTPUT
[
{'full_text': 'Quadro NVS 295 '},
{'color': '#00ff00', 'full_text': '51°C '},
{'color': '#00ff00', 'full_text': '60.8%'},
]
percent
[
{'full_text': 'GPU '},
{'full_text': '73°C ', 'color': '#ffff00'},
{'full_text': '192 MiB', 'color': '#ffa500'},
{'full_text': '|', 'color': '#a9a9a9'},
{'full_text': '75.3%', 'color': '#ffa500'}
]
"""
STRING_NOT_INSTALLED = "not installed"
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
format = "{format_gpu}"
format_gpu = (
u"{gpu_name} [\?color=temperature.gpu {temperature.gpu}°C] "
"[\?color=memory.used_percent {memory.used_percent}%]"
)
format_gpu_separator = " "
memory_unit = None
thresholds = [(0, "good"), (65, "degraded"), (75, "orange"), (85, "bad")]
def post_config_hook(self):
command = "nvidia-smi --format=csv,noheader,nounits --query-gpu="
if not self.py3.check_commands(command.split()[0]):
raise Exception(STRING_NOT_INSTALLED)
properties = self.py3.get_placeholders_list(self.format_gpu)
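        # Force one decimal place on any *used_percent placeholder the user
        # put in format_gpu.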
format_gpu = {x: ":.1f" for x in properties if "used_percent" in x}
self.format_gpu = self.py3.update_placeholder_formats(
self.format_gpu, format_gpu
)
new_memory_properties = set()
new_properties = set(["memory.used", "memory.total"])
for name in properties:
if "used_percent" in name:
continue
if name.startswith("memory"):
if name.endswith("_unit"):
name = name[:-5]
new_memory_properties.add(name)
new_properties.add(name)
self.properties = list(new_properties)
self.memory_properties = list(new_memory_properties)
self.memory_unit = self.memory_unit or "B"
self.nvidia_command = command + ",".join(self.properties)
self.thresholds_init = self.py3.get_color_names_list(self.format_gpu)
def _get_nvidia_data(self):
return self.py3.command_output(self.nvidia_command)
def nvidia_smi(self):
nvidia_data = self._get_nvidia_data()
new_gpu = []
for line in nvidia_data.splitlines():
gpu = dict(zip(self.properties, line.split(", ")))
gpu["memory.used_percent"] = (
float(gpu["memory.used"]) / float(gpu["memory.total"]) * 100.0
)
for key in self.memory_properties:
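                # nvidia-smi (csv, nounits) reports memory in MiB; convert to
                # bytes so py3.format_units can rescale to memory_unit.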
value, unit_key = float(gpu[key]) * 1024 ** 2, key + "_unit"
value, unit_value = self.py3.format_units(value, self.memory_unit)
gpu.update({key: value, unit_key: unit_value})
for x in self.thresholds_init:
if x in gpu:
self.py3.threshold_get_color(gpu[x], x)
new_gpu.append(self.py3.safe_format(self.format_gpu, gpu))
format_gpu_separator = self.py3.safe_format(self.format_gpu_separator)
format_gpu = self.py3.composite_join(format_gpu_separator, new_gpu)
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, {"format_gpu": format_gpu}),
}
if __name__ == "__main__":
from sys import argv
if "--list-properties" in argv:
from sys import exit
from json import dumps
from subprocess import check_output
help_cmd = "nvidia-smi --help-query-gpu"
help_data = check_output(help_cmd.split()).decode()
new_properties = []
e = ["Default", "Exclusive_Thread", "Exclusive_Process", "Prohibited"]
for line in help_data.splitlines():
if line.startswith('"'):
properties = line.split('"')[1::2]
for name in properties:
if name not in e:
new_properties.append(name)
properties = ",".join(new_properties)
gpu_cmd = "nvidia-smi --format=csv,noheader,nounits --query-gpu="
gpu_data = check_output((gpu_cmd + properties).split()).decode()
new_gpus = []
msg = "This GPU contains {} supported properties."
for line in gpu_data.splitlines():
gpu = dict(zip(new_properties, line.split(", ")))
gpu = {k: v for k, v in gpu.items() if "[Not Supported]" not in v}
gpu["= " + msg.format(len(gpu))] = ""
gpu["=" * (len(msg) + 2)] = ""
new_gpus.append(gpu)
print(dumps(new_gpus, sort_keys=True, indent=4))
exit()
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| Andrwe/py3status | py3status/modules/nvidia_smi.py | Python | bsd-3-clause | 7,243 |
from mycli.packages.completion_engine import suggest_type
import pytest
def sorted_dicts(dicts):
"""input is a list of dicts"""
return sorted(tuple(x.items()) for x in dicts)
def test_select_suggests_cols_with_visible_table_scope():
suggestions = suggest_type('SELECT FROM tabl', 'SELECT ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tabl', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
def test_select_suggests_cols_with_qualified_table_scope():
suggestions = suggest_type('SELECT FROM sch.tabl', 'SELECT ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [('sch', 'tabl', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
@pytest.mark.parametrize('expression', [
'SELECT * FROM tabl WHERE ',
'SELECT * FROM tabl WHERE (',
'SELECT * FROM tabl WHERE foo = ',
'SELECT * FROM tabl WHERE bar OR ',
'SELECT * FROM tabl WHERE foo = 1 AND ',
'SELECT * FROM tabl WHERE (bar > 10 AND ',
'SELECT * FROM tabl WHERE (bar AND (baz OR (qux AND (',
'SELECT * FROM tabl WHERE 10 < ',
'SELECT * FROM tabl WHERE foo BETWEEN ',
'SELECT * FROM tabl WHERE foo BETWEEN foo AND ',
])
def test_where_suggests_columns_functions(expression):
suggestions = suggest_type(expression, expression)
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tabl', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
@pytest.mark.parametrize('expression', [
'SELECT * FROM tabl WHERE foo IN (',
'SELECT * FROM tabl WHERE foo IN (bar, ',
])
def test_where_in_suggests_columns(expression):
suggestions = suggest_type(expression, expression)
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tabl', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
def test_where_equals_any_suggests_columns_or_keywords():
text = 'SELECT * FROM tabl WHERE foo = ANY('
suggestions = suggest_type(text, text)
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tabl', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'}])
def test_lparen_suggests_cols():
suggestion = suggest_type('SELECT MAX( FROM tbl', 'SELECT MAX(')
assert suggestion == [
{'type': 'column', 'tables': [(None, 'tbl', None)]}]
def test_select_suggests_cols_and_funcs():
suggestions = suggest_type('SELECT ', 'SELECT ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': []},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
@pytest.mark.parametrize('expression', [
'SELECT * FROM ',
'INSERT INTO ',
'COPY ',
'UPDATE ',
'DESCRIBE ',
'DESC ',
'EXPLAIN ',
'SELECT * FROM foo JOIN ',
])
def test_expression_suggests_tables_views_and_schemas(expression):
suggestions = suggest_type(expression, expression)
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
@pytest.mark.parametrize('expression', [
'SELECT * FROM sch.',
'INSERT INTO sch.',
'COPY sch.',
'UPDATE sch.',
'DESCRIBE sch.',
'DESC sch.',
'EXPLAIN sch.',
'SELECT * FROM foo JOIN sch.',
])
def test_expression_suggests_qualified_tables_views_and_schemas(expression):
suggestions = suggest_type(expression, expression)
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': 'sch'},
{'type': 'view', 'schema': 'sch'}])
def test_truncate_suggests_tables_and_schemas():
suggestions = suggest_type('TRUNCATE ', 'TRUNCATE ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'schema'}])
def test_truncate_suggests_qualified_tables():
suggestions = suggest_type('TRUNCATE sch.', 'TRUNCATE sch.')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': 'sch'}])
def test_distinct_suggests_cols():
suggestions = suggest_type('SELECT DISTINCT ', 'SELECT DISTINCT ')
assert suggestions == [{'type': 'column', 'tables': []}]
def test_col_comma_suggests_cols():
suggestions = suggest_type('SELECT a, b, FROM tbl', 'SELECT a, b,')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tbl', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
def test_table_comma_suggests_tables_and_schemas():
suggestions = suggest_type('SELECT a, b FROM tbl1, ',
'SELECT a, b FROM tbl1, ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
def test_into_suggests_tables_and_schemas():
suggestion = suggest_type('INSERT INTO ', 'INSERT INTO ')
assert sorted_dicts(suggestion) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
def test_insert_into_lparen_suggests_cols():
suggestions = suggest_type('INSERT INTO abc (', 'INSERT INTO abc (')
assert suggestions == [{'type': 'column', 'tables': [(None, 'abc', None)]}]
def test_insert_into_lparen_partial_text_suggests_cols():
suggestions = suggest_type('INSERT INTO abc (i', 'INSERT INTO abc (i')
assert suggestions == [{'type': 'column', 'tables': [(None, 'abc', None)]}]
def test_insert_into_lparen_comma_suggests_cols():
suggestions = suggest_type('INSERT INTO abc (id,', 'INSERT INTO abc (id,')
assert suggestions == [{'type': 'column', 'tables': [(None, 'abc', None)]}]
def test_partially_typed_col_name_suggests_col_names():
suggestions = suggest_type('SELECT * FROM tabl WHERE col_n',
'SELECT * FROM tabl WHERE col_n')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tabl', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
def test_dot_suggests_cols_of_a_table_or_schema_qualified_table():
suggestions = suggest_type('SELECT tabl. FROM tabl', 'SELECT tabl.')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tabl', None)]},
{'type': 'table', 'schema': 'tabl'},
{'type': 'view', 'schema': 'tabl'},
{'type': 'function', 'schema': 'tabl'}])
def test_dot_suggests_cols_of_an_alias():
suggestions = suggest_type('SELECT t1. FROM tabl1 t1, tabl2 t2',
'SELECT t1.')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': 't1'},
{'type': 'view', 'schema': 't1'},
{'type': 'column', 'tables': [(None, 'tabl1', 't1')]},
{'type': 'function', 'schema': 't1'}])
def test_dot_col_comma_suggests_cols_or_schema_qualified_table():
suggestions = suggest_type('SELECT t1.a, t2. FROM tabl1 t1, tabl2 t2',
'SELECT t1.a, t2.')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tabl2', 't2')]},
{'type': 'table', 'schema': 't2'},
{'type': 'view', 'schema': 't2'},
{'type': 'function', 'schema': 't2'}])
@pytest.mark.parametrize('expression', [
'SELECT * FROM (',
'SELECT * FROM foo WHERE EXISTS (',
'SELECT * FROM foo WHERE bar AND NOT EXISTS (',
])
def test_sub_select_suggests_keyword(expression):
suggestion = suggest_type(expression, expression)
assert suggestion == [{'type': 'keyword'}]
@pytest.mark.parametrize('expression', [
'SELECT * FROM (S',
'SELECT * FROM foo WHERE EXISTS (S',
'SELECT * FROM foo WHERE bar AND NOT EXISTS (S',
])
def test_sub_select_partial_text_suggests_keyword(expression):
suggestion = suggest_type(expression, expression)
assert suggestion == [{'type': 'keyword'}]
def test_outer_table_reference_in_exists_subquery_suggests_columns():
q = 'SELECT * FROM foo f WHERE EXISTS (SELECT 1 FROM bar WHERE f.'
suggestions = suggest_type(q, q)
assert suggestions == [
{'type': 'column', 'tables': [(None, 'foo', 'f')]},
{'type': 'table', 'schema': 'f'},
{'type': 'view', 'schema': 'f'},
{'type': 'function', 'schema': 'f'}]
@pytest.mark.parametrize('expression', [
'SELECT * FROM (SELECT * FROM ',
'SELECT * FROM foo WHERE EXISTS (SELECT * FROM ',
'SELECT * FROM foo WHERE bar AND NOT EXISTS (SELECT * FROM ',
])
def test_sub_select_table_name_completion(expression):
suggestion = suggest_type(expression, expression)
assert sorted_dicts(suggestion) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
def test_sub_select_col_name_completion():
suggestions = suggest_type('SELECT * FROM (SELECT FROM abc',
'SELECT * FROM (SELECT ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'abc', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
@pytest.mark.xfail
def test_sub_select_multiple_col_name_completion():
suggestions = suggest_type('SELECT * FROM (SELECT a, FROM abc',
'SELECT * FROM (SELECT a, ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'abc', None)]},
{'type': 'function', 'schema': []}])
def test_sub_select_dot_col_name_completion():
suggestions = suggest_type('SELECT * FROM (SELECT t. FROM tabl t',
'SELECT * FROM (SELECT t.')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'tabl', 't')]},
{'type': 'table', 'schema': 't'},
{'type': 'view', 'schema': 't'},
{'type': 'function', 'schema': 't'}])
@pytest.mark.parametrize('join_type', ['', 'INNER', 'LEFT', 'RIGHT OUTER'])
@pytest.mark.parametrize('tbl_alias', ['', 'foo'])
def test_join_suggests_tables_and_schemas(tbl_alias, join_type):
text = 'SELECT * FROM abc {0} {1} JOIN '.format(tbl_alias, join_type)
suggestion = suggest_type(text, text)
assert sorted_dicts(suggestion) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
@pytest.mark.parametrize('sql', [
'SELECT * FROM abc a JOIN def d ON a.',
'SELECT * FROM abc a JOIN def d ON a.id = d.id AND a.',
])
def test_join_alias_dot_suggests_cols1(sql):
suggestions = suggest_type(sql, sql)
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'abc', 'a')]},
{'type': 'table', 'schema': 'a'},
{'type': 'view', 'schema': 'a'},
{'type': 'function', 'schema': 'a'}])
@pytest.mark.parametrize('sql', [
'SELECT * FROM abc a JOIN def d ON a.id = d.',
'SELECT * FROM abc a JOIN def d ON a.id = d.id AND a.id2 = d.',
])
def test_join_alias_dot_suggests_cols2(sql):
suggestions = suggest_type(sql, sql)
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'def', 'd')]},
{'type': 'table', 'schema': 'd'},
{'type': 'view', 'schema': 'd'},
{'type': 'function', 'schema': 'd'}])
@pytest.mark.parametrize('sql', [
'select a.x, b.y from abc a join bcd b on ',
'select a.x, b.y from abc a join bcd b on a.id = b.id OR ',
])
def test_on_suggests_aliases(sql):
suggestions = suggest_type(sql, sql)
assert suggestions == [{'type': 'alias', 'aliases': ['a', 'b']}]
@pytest.mark.parametrize('sql', [
'select abc.x, bcd.y from abc join bcd on ',
'select abc.x, bcd.y from abc join bcd on abc.id = bcd.id AND ',
])
def test_on_suggests_tables(sql):
suggestions = suggest_type(sql, sql)
assert suggestions == [{'type': 'alias', 'aliases': ['abc', 'bcd']}]
@pytest.mark.parametrize('sql', [
'select a.x, b.y from abc a join bcd b on a.id = ',
'select a.x, b.y from abc a join bcd b on a.id = b.id AND a.id2 = ',
])
def test_on_suggests_aliases_right_side(sql):
suggestions = suggest_type(sql, sql)
assert suggestions == [{'type': 'alias', 'aliases': ['a', 'b']}]
@pytest.mark.parametrize('sql', [
'select abc.x, bcd.y from abc join bcd on ',
'select abc.x, bcd.y from abc join bcd on abc.id = bcd.id and ',
])
def test_on_suggests_tables_right_side(sql):
suggestions = suggest_type(sql, sql)
assert suggestions == [{'type': 'alias', 'aliases': ['abc', 'bcd']}]
@pytest.mark.parametrize('col_list', ['', 'col1, '])
def test_join_using_suggests_common_columns(col_list):
text = 'select * from abc inner join def using (' + col_list
assert suggest_type(text, text) == [
{'type': 'column',
'tables': [(None, 'abc', None), (None, 'def', None)],
'drop_unique': True}]
def test_2_statements_2nd_current():
suggestions = suggest_type('select * from a; select * from ',
'select * from a; select * from ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
suggestions = suggest_type('select * from a; select from b',
'select * from a; select ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'b', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
# Should work even if first statement is invalid
suggestions = suggest_type('select * from; select * from ',
'select * from; select * from ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
def test_2_statements_1st_current():
suggestions = suggest_type('select * from ; select * from b',
'select * from ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
suggestions = suggest_type('select from a; select * from b',
'select ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'a', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
def test_3_statements_2nd_current():
suggestions = suggest_type('select * from a; select * from ; select * from c',
'select * from a; select * from ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
suggestions = suggest_type('select * from a; select from b; select * from c',
'select * from a; select ')
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'column', 'tables': [(None, 'b', None)]},
{'type': 'function', 'schema': []},
{'type': 'keyword'},
])
def test_create_db_with_template():
suggestions = suggest_type('create database foo with template ',
'create database foo with template ')
assert sorted_dicts(suggestions) == sorted_dicts([{'type': 'database'}])
@pytest.mark.parametrize('initial_text', ['', ' ', '\t \t'])
def test_specials_included_for_initial_completion(initial_text):
suggestions = suggest_type(initial_text, initial_text)
assert sorted_dicts(suggestions) == \
sorted_dicts([{'type': 'keyword'}, {'type': 'special'}])
def test_specials_not_included_after_initial_token():
suggestions = suggest_type('create table foo (dt d',
'create table foo (dt d')
assert sorted_dicts(suggestions) == sorted_dicts([{'type': 'keyword'}])
def test_drop_schema_qualified_table_suggests_only_tables():
text = 'DROP TABLE schema_name.table_name'
suggestions = suggest_type(text, text)
assert suggestions == [{'type': 'table', 'schema': 'schema_name'}]
@pytest.mark.parametrize('text', [',', ' ,', 'sel ,'])
def test_handle_pre_completion_comma_gracefully(text):
suggestions = suggest_type(text, text)
assert iter(suggestions)
def test_cross_join():
text = 'select * from v1 cross join v2 JOIN v1.id, '
suggestions = suggest_type(text, text)
assert sorted_dicts(suggestions) == sorted_dicts([
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []},
{'type': 'schema'}])
| mdsrosa/mycli | tests/test_completion_engine.py | Python | bsd-3-clause | 17,138 |
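# Reads a month number and prints its Spanish name for months 1-3
# ("enero" = January, "febrero" = February, "marzo" = March);
# any other value prints "otro mes" ("another month").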
mes = int(raw_input())
if mes == 1:
print("enero")
elif mes == 2:
print("febrero")
elif mes == 3:
print("marzo")
else:
print("otro mes") | garciparedes/python-examples | miscellaneous/hello_worlds/control1.py | Python | mpl-2.0 | 143 |
#!/router/bin/python
import outer_packages
#from trex_stl_lib.trex_stl_hltapi import CTRexHltApi, CStreamsPerPort
from trex_stl_lib.trex_stl_hltapi import *
import traceback
import sys, time
from pprint import pprint
import argparse
def error(err = None):
if not err:
raise Exception('Unknown exception, look traceback')
if type(err) is str and not err.startswith('[ERR]'):
err = '[ERR] ' + err
print err
sys.exit(1)
def check_res(res):
if res['status'] == 0:
error('Encountered error:\n%s' % res['log'])
return res
def print_brief_stats(res):
title_str = ' '*3
tx_str = 'TX:'
rx_str = 'RX:'
for port_id, stat in res.iteritems():
if type(port_id) is not int:
continue
title_str += ' '*10 + 'Port%s' % port_id
tx_str += '%15s' % res[port_id]['aggregate']['tx']['total_pkts']
rx_str += '%15s' % res[port_id]['aggregate']['rx']['total_pkts']
print(title_str)
print(tx_str)
print(rx_str)
def wait_with_progress(seconds):
for i in range(0, seconds):
time.sleep(1)
sys.stdout.write('.')
sys.stdout.flush()
print('')
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description='Example of using stateless TRex via HLT API.', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-v', dest = 'verbose', default = 0, help='Stateless API verbosity:\n0: No prints\n1: Commands and their status\n2: Same as 1 + ZMQ in&out')
parser.add_argument('--device', dest = 'device', default = 'localhost', help='Address of TRex server')
args = parser.parse_args()
hlt_client = CTRexHltApi(verbose = int(args.verbose))
print('Connecting to %s...' % args.device)
res = check_res(hlt_client.connect(device = args.device, port_list = [0, 1], username = 'danklei', break_locks = True, reset = True))
port_handle = res['port_handle']
print('Connected, got port handles %s' % port_handle)
ports_streams_dict = CStreamsPerPort()
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
l3_protocol = 'ipv4',
#length_mode = 'imix', l3_length = 200,
ipv6_dst_mode = 'decrement', ipv6_dst_count = 300, ipv6_dst_addr = 'fe80:0:0:0:0:0:0:000f',
port_handle = port_handle, port_handle2 = port_handle[1],
#save_to_yaml = '/tmp/d1.yaml',
#stream_id = 1,
)
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_control(action = 'run')
print hlt_client.traffic_control(action = 'poll')
wait_with_progress(2)
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_control(action = 'stop')
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_stats(mode = 'aggregate')
print hlt_client.traffic_control(action = 'clear_stats')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
#print res
#print hlt_client._streams_history
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[1])
#ports_streams_dict.add_streams_from_res(res)
sys.exit(0)
res = check_res(hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
port_handle = port_handle[0], port_handle2 = port_handle[1], save_to_yaml = '/tmp/d1.yaml',
l4_protocol = 'udp',
#udp_src_port_mode = 'decrement',
#udp_src_port_count = 10, udp_src_port = 5,
))
ports_streams_dict.add_streams_from_res(res)
sys.exit(0)
#print ports_streams_dict
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
mac_src = '1-2-3:4:5:6', l4_protocol = 'udp', save_to_yaml = '/tmp/d2.yaml'))
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
#print hlt_client._streams_history
res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
mac_dst = '{ 7 7 7-7:7:7}', save_to_yaml = '/tmp/d3.yaml'))
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle))
res = check_res(hlt_client.traffic_config(mode = 'create', bidirectional = True, length_mode = 'fixed',
port_handle = port_handle[0], port_handle2 = port_handle[1],
transmit_mode = 'single_burst', pkts_per_burst = 100, rate_pps = 100,
mac_src = '1-2-3-4-5-6',
mac_dst = '6:5:4:4:5:6',
save_to_yaml = '/tmp/imix.yaml'))
ports_streams_dict.add_streams_from_res(res)
print('Create single_burst 100 packets rate_pps=100 on port 0')
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], transmit_mode = 'single_burst',
pkts_per_burst = 100, rate_pps = 100))
ports_streams_dict.add_streams_from_res(res)
# playground - creating various streams on port 1
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt2.yaml',
tcp_src_port_mode = 'decrement',
tcp_src_port_count = 10, tcp_dst_port_count = 10, tcp_dst_port_mode = 'random'))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt3.yaml',
l4_protocol = 'udp',
udp_src_port_mode = 'decrement',
udp_src_port_count = 10, udp_dst_port_count = 10, udp_dst_port_mode = 'random'))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt4.yaml',
length_mode = 'increment',
#ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt5.yaml',
length_mode = 'decrement', frame_size_min = 100, frame_size_max = 3000,
#ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
#ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2
))
ports_streams_dict.add_streams_from_res(res)
# remove the playground
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[1]))
print('Create continuous stream for port 1, rate_pps = 1')
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt1.yaml',
#length_mode = 'increment', l3_length_min = 200,
ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle))
wait_with_progress(1)
print('Sample after 1 seconds (only packets count)')
res = check_res(hlt_client.traffic_stats(mode = 'all', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Port 0 has finished the burst, put continuous instead with rate 1000. No stopping of other ports.')
check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[0]))
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[0]))
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], rate_pps = 1000))
ports_streams_dict.add_streams_from_res(res)
check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle[0]))
wait_with_progress(5)
print('Sample after another 5 seconds (only packets count)')
res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Stop traffic at port 1')
res = check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[1]))
wait_with_progress(5)
print('Sample after another %s seconds (only packets count)' % 5)
res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Full HLT stats:')
pprint(res)
check_res(hlt_client.cleanup_session())
except Exception as e:
print(traceback.print_exc())
print(e)
raise
finally:
print('Done.')
| kisel/trex-core | scripts/automation/regression/hltapi_playground.py | Python | apache-2.0 | 10,579 |
#!/usr/bin/env python
"""The main entry point. Invoke as `http' or `python -m httpie'.
"""
import sys
def main():
try:
from .core import main
sys.exit(main())
except KeyboardInterrupt:
from . import ExitStatus
sys.exit(ExitStatus.ERROR_CTRL_C)
if __name__ == '__main__':
main()
| Widiot/simpleblog | venv/lib/python3.5/site-packages/httpie/__main__.py | Python | mit | 327 |
def Call2():
print('Call2')
def Call1(a):
print('Call1')
if __name__ == '__main__':
Call1(Call2())
print('TEST SUCEEDED!')
| roscoeZA/GeoGigSync | pydev/tests_python/_debugger_case56.py | Python | cc0-1.0 | 154 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets workflow approval requests.
Workflow approval requests must be approved or rejected for a workflow to
finish.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
workflow_request_service = client.GetService(
'WorkflowRequestService', version='v201805')
# Create a statement to select workflow requests.
statement = (ad_manager.StatementBuilder(version='v201805')
.Where('type = :type')
.WithBindVariable('type', 'WORKFLOW_APPROVAL_REQUEST'))
# Retrieve a small amount of workflow requests at a time, paging
# through until all workflow requests have been retrieved.
while True:
response = workflow_request_service.getWorkflowRequestsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for workflow_request in response['results']:
# Print out some information for each workflow request.
print('Workflow request with ID "%d", entity type "%s", and entity ID '
'"%d" was found.\n' % (workflow_request['id'],
workflow_request['entityType'],
workflow_request['entityId']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| Aloomaio/googleads-python-lib | examples/ad_manager/v201805/workflow_request_service/get_workflow_approval_requests.py | Python | apache-2.0 | 2,213 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# budgetdatapackage - Load and manage Budget data packages
# Copyright (C) 2013 Tryggvi Björgvinsson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .utils import Granularities, Statuses, Types, Currencies, Countries
import datapackage
import datetime
import warnings
from datapackage import compat
class BudgetResource(datapackage.Resource):
BUDGET_DATA_PACKAGE_STANDARD = "1.0.0-alpha"
SPECIFICATION = {'currency': compat.str,
'dateLastUpdated': compat.str,
'datePublished': compat.str,
'fiscalYear': compat.str,
'granularity': compat.str,
'standard': compat.str,
'status': compat.str,
'type': compat.str,
'location': compat.str,
'url': compat.str,
'path': compat.str,
'data': None,
'name': compat.str,
'format': compat.str,
'mediatype': compat.str,
'encoding': compat.str,
'bytes': int,
'hash': compat.str,
'schema': (dict, datapackage.schema.Schema),
'sources': list,
'licenses': list}
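    # Interpretation only (not verified against datapackage.Resource): the
    # nested tuple below is taken to mean that at least one of 'url', 'path'
    # or 'data' must be provided, while the remaining entries are each
    # required on their own.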
REQUIRED = (('url', 'path', 'data'), 'currency', 'dateLastUpdated',
'datePublished', 'fiscalYear', 'granularity', 'status',
'type')
GRANULARITIES = Granularities()
STATUSES = Statuses()
TYPES = Types()
CURRENCIES = Currencies()
COUNTRIES = Countries()
def __init__(self, *args, **kwargs):
self.standard = kwargs.pop('standard',
self.BUDGET_DATA_PACKAGE_STANDARD)
super(BudgetResource, self).__init__(*args, **kwargs)
@property
def currency(self):
return self['currency']
@currency.setter
def currency(self, value):
if self.CURRENCIES.is_valid(value):
self['currency'] = value
return
raise ValueError("'{0}' is not a valid currency".format(value))
@property
def dateLastUpdated(self):
return datetime.datetime.strptime(
self['dateLastUpdated'], '%Y-%m-%d').date()
@dateLastUpdated.setter
def dateLastUpdated(self, value):
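        # Parsing validates the ISO date; only the original string is stored.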
datetime.datetime.strptime(value, '%Y-%m-%d')
self['dateLastUpdated'] = value
@property
def datePublished(self):
return datetime.datetime.strptime(
self['datePublished'], '%Y-%m-%d').date()
@datePublished.setter
def datePublished(self, value):
datetime.datetime.strptime(value, '%Y-%m-%d')
self['datePublished'] = value
@property
def fiscalYear(self):
return self['fiscalYear']
@fiscalYear.setter
def fiscalYear(self, value):
datetime.datetime.strptime(value, '%Y')
self['fiscalYear'] = value
@property
def granularity(self):
return self['granularity']
@granularity.setter
def granularity(self, value):
if self.GRANULARITIES.is_valid(value):
self['granularity'] = value
return
raise ValueError("'{0}' is not a valid granularity".format(value))
@property
def standard(self):
return self['standard']
@standard.setter
def standard(self, value):
if not value:
raise ValueError('standard is required')
if value != self.BUDGET_DATA_PACKAGE_STANDARD:
warnings.warn(
"BudgetDataPackage does not support multiple versions")
self['standard'] = datapackage.util.verify_version(value)
@property
def status(self):
return self['status']
@status.setter
def status(self, value):
if self.STATUSES.is_valid(value):
self['status'] = value
return
raise ValueError("'{0}' is not a valid status".format(value))
@property
def type(self):
return self['type']
@type.setter
def type(self, value):
if self.TYPES.is_valid(value):
self['type'] = value
return
raise ValueError("'{0}' is not a valid type".format(value))
@property
def location(self):
return self['location']
@location.setter
def location(self, value):
if self.COUNTRIES.is_valid(value):
self['location'] = value
return
raise ValueError("'{0}' is not a valid country code".format(value))
| trickvi/budgetdatapackage | budgetdatapackage/resource.py | Python | gpl-3.0 | 5,314 |
# pylint: skip-file
def get_cert_data(path, content):
'''get the data for a particular value'''
if not path and not content:
return None
rval = None
if path and os.path.exists(path) and os.access(path, os.R_OK):
rval = open(path, 'rU').read()
elif content:
rval = content
return rval
#pylint: disable=too-many-branches
def main():
'''
ansible oc module for route
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
tls_termination=dict(default=None, type='str'),
dest_cacert_path=dict(default=None, type='str'),
cacert_path=dict(default=None, type='str'),
cert_path=dict(default=None, type='str'),
key_path=dict(default=None, type='str'),
dest_cacert_content=dict(default=None, type='str'),
cacert_content=dict(default=None, type='str'),
cert_content=dict(default=None, type='str'),
key_content=dict(default=None, type='str'),
service_name=dict(default=None, type='str'),
host=dict(default=None, type='str'),
),
mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
('cacert_path', 'cacert_content'),
('cert_path', 'cert_content'),
('key_path', 'key_content'),
],
supports_check_mode=True,
)
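    # Gather TLS material either from files on disk or from inline content;
    # 'value' is only filled in below when tls_termination is requested.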
files = {'destcacert': {'path': module.params['dest_cacert_path'],
'content': module.params['dest_cacert_content'],
'value': None,
},
'cacert': {'path': module.params['cacert_path'],
'content': module.params['cacert_content'],
'value': None,
},
'cert': {'path': module.params['cert_path'],
'content': module.params['cert_content'],
'value': None,
},
'key': {'path': module.params['key_path'],
'content': module.params['key_content'],
'value': None,
},
}
if module.params['tls_termination']:
for key, option in files.items():
if key == 'destcacert' and module.params['tls_termination'] != 'reencrypt':
continue
option['value'] = get_cert_data(option['path'], option['content'])
if not option['value']:
module.fail_json(msg='Verify that you pass a value for %s' % key)
rconfig = RouteConfig(module.params['name'],
module.params['namespace'],
module.params['kubeconfig'],
files['destcacert']['value'],
files['cacert']['value'],
files['cert']['value'],
files['key']['value'],
module.params['host'],
module.params['tls_termination'],
module.params['service_name'],
)
oc_route = OCRoute(rconfig,
verbose=module.params['debug'])
state = module.params['state']
api_rval = oc_route.get()
#####
# Get
#####
if state == 'list':
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if oc_route.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = oc_route.delete()
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, state="absent")
if state == 'present':
########
# Create
########
if not oc_route.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
api_rval = oc_route.create()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
# return the created object
api_rval = oc_route.get()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
if oc_route.needs_update():
api_rval = oc_route.update()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
# return the created object
api_rval = oc_route.get()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| appuio/ansible-role-openshift-zabbix-monitoring | vendor/openshift-tools/ansible/roles/lib_openshift_3.2/build/ansible/oc_route.py | Python | apache-2.0 | 5,836 |
# -*- test-case-name: twisted.conch.test.test_filetransfer -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import errno
import struct
from zope.interface import implementer
from twisted.conch.interfaces import ISFTPServer, ISFTPFile
from twisted.conch.ssh.common import NS, getNS
from twisted.internet import defer, protocol
from twisted.python import failure, log
from twisted.python.compat import (
_PY3, range, itervalues, networkString, nativeString)
class FileTransferBase(protocol.Protocol):
versions = (3, )
packetTypes = {}
def __init__(self):
self.buf = b''
self.otherVersion = None # this gets set
def sendPacket(self, kind, data):
self.transport.write(struct.pack('!LB', len(data)+1, kind) + data)
def dataReceived(self, data):
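        # Each packet is framed as a uint32 length (covering the type byte
        # and payload), one type byte, then the payload; keep buffering until
        # at least one complete packet is available.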
self.buf += data
while len(self.buf) > 5:
length, kind = struct.unpack('!LB', self.buf[:5])
if len(self.buf) < 4 + length:
return
data, self.buf = self.buf[5:4+length], self.buf[4+length:]
packetType = self.packetTypes.get(kind, None)
if not packetType:
log.msg('no packet type for', kind)
continue
f = getattr(self, 'packet_%s' % packetType, None)
if not f:
log.msg('not implemented: %s' % packetType)
log.msg(repr(data[4:]))
reqId, = struct.unpack('!L', data[:4])
self._sendStatus(reqId, FX_OP_UNSUPPORTED,
"don't understand %s" % packetType)
#XXX not implemented
continue
try:
f(data)
except Exception:
log.err()
continue
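    # File attributes are encoded as a uint32 flags field followed by the
    # optional fields it announces: size (uint64), uid/gid (2 x uint32),
    # permissions (uint32), atime/mtime (2 x uint32), and a count-prefixed
    # list of extended name/data string pairs.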
def _parseAttributes(self, data):
flags ,= struct.unpack('!L', data[:4])
attrs = {}
data = data[4:]
if flags & FILEXFER_ATTR_SIZE == FILEXFER_ATTR_SIZE:
size ,= struct.unpack('!Q', data[:8])
attrs['size'] = size
data = data[8:]
if flags & FILEXFER_ATTR_OWNERGROUP == FILEXFER_ATTR_OWNERGROUP:
uid, gid = struct.unpack('!2L', data[:8])
attrs['uid'] = uid
attrs['gid'] = gid
data = data[8:]
if flags & FILEXFER_ATTR_PERMISSIONS == FILEXFER_ATTR_PERMISSIONS:
perms ,= struct.unpack('!L', data[:4])
attrs['permissions'] = perms
data = data[4:]
if flags & FILEXFER_ATTR_ACMODTIME == FILEXFER_ATTR_ACMODTIME:
atime, mtime = struct.unpack('!2L', data[:8])
attrs['atime'] = atime
attrs['mtime'] = mtime
data = data[8:]
if flags & FILEXFER_ATTR_EXTENDED == FILEXFER_ATTR_EXTENDED:
extended_count ,= struct.unpack('!L', data[:4])
data = data[4:]
for i in range(extended_count):
extended_type, data = getNS(data)
extended_data, data = getNS(data)
attrs['ext_%s' % nativeString(extended_type)] = extended_data
return attrs, data
def _packAttributes(self, attrs):
flags = 0
data = b''
if 'size' in attrs:
data += struct.pack('!Q', attrs['size'])
flags |= FILEXFER_ATTR_SIZE
if 'uid' in attrs and 'gid' in attrs:
data += struct.pack('!2L', attrs['uid'], attrs['gid'])
flags |= FILEXFER_ATTR_OWNERGROUP
if 'permissions' in attrs:
data += struct.pack('!L', attrs['permissions'])
flags |= FILEXFER_ATTR_PERMISSIONS
if 'atime' in attrs and 'mtime' in attrs:
data += struct.pack('!2L', attrs['atime'], attrs['mtime'])
flags |= FILEXFER_ATTR_ACMODTIME
extended = []
for k in attrs:
if k.startswith('ext_'):
ext_type = NS(networkString(k[4:]))
ext_data = NS(attrs[k])
extended.append(ext_type+ext_data)
if extended:
data += struct.pack('!L', len(extended))
data += b''.join(extended)
flags |= FILEXFER_ATTR_EXTENDED
return struct.pack('!L', flags) + data
class FileTransferServer(FileTransferBase):
def __init__(self, data=None, avatar=None):
FileTransferBase.__init__(self)
self.client = ISFTPServer(avatar) # yay interfaces
self.openFiles = {}
self.openDirs = {}
def packet_INIT(self, data):
version ,= struct.unpack('!L', data[:4])
self.version = min(list(self.versions) + [version])
data = data[4:]
ext = {}
while data:
ext_name, data = getNS(data)
ext_data, data = getNS(data)
ext[ext_name] = ext_data
our_ext = self.client.gotVersion(version, ext)
our_ext_data = b""
for (k,v) in our_ext.items():
our_ext_data += NS(k) + NS(v)
self.sendPacket(FXP_VERSION, struct.pack('!L', self.version) + \
our_ext_data)
def packet_OPEN(self, data):
requestId = data[:4]
data = data[4:]
filename, data = getNS(data)
flags ,= struct.unpack('!L', data[:4])
data = data[4:]
attrs, data = self._parseAttributes(data)
assert data == b'', 'still have data in OPEN: %s' % repr(data)
d = defer.maybeDeferred(self.client.openFile, filename, flags, attrs)
d.addCallback(self._cbOpenFile, requestId)
d.addErrback(self._ebStatus, requestId, b"open failed")
def _cbOpenFile(self, fileObj, requestId):
fileId = networkString(str(hash(fileObj)))
if fileId in self.openFiles:
raise KeyError('id already open')
self.openFiles[fileId] = fileObj
self.sendPacket(FXP_HANDLE, requestId + NS(fileId))
def packet_CLOSE(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
assert data == b'', 'still have data in CLOSE: %s' % repr(data)
if handle in self.openFiles:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.close)
d.addCallback(self._cbClose, handle, requestId)
d.addErrback(self._ebStatus, requestId, b"close failed")
elif handle in self.openDirs:
dirObj = self.openDirs[handle][0]
d = defer.maybeDeferred(dirObj.close)
d.addCallback(self._cbClose, handle, requestId, 1)
d.addErrback(self._ebStatus, requestId, b"close failed")
else:
self._ebClose(failure.Failure(KeyError()), requestId)
def _cbClose(self, result, handle, requestId, isDir = 0):
if isDir:
del self.openDirs[handle]
else:
del self.openFiles[handle]
self._sendStatus(requestId, FX_OK, b'file closed')
def packet_READ(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
(offset, length), data = struct.unpack('!QL', data[:12]), data[12:]
assert data == b'', 'still have data in READ: %s' % repr(data)
if handle not in self.openFiles:
self._ebRead(failure.Failure(KeyError()), requestId)
else:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.readChunk, offset, length)
d.addCallback(self._cbRead, requestId)
d.addErrback(self._ebStatus, requestId, b"read failed")
def _cbRead(self, result, requestId):
if result == b'': # python's read will return this for EOF
raise EOFError()
self.sendPacket(FXP_DATA, requestId + NS(result))
def packet_WRITE(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
offset, = struct.unpack('!Q', data[:8])
data = data[8:]
writeData, data = getNS(data)
assert data == b'', 'still have data in WRITE: %s' % repr(data)
if handle not in self.openFiles:
self._ebWrite(failure.Failure(KeyError()), requestId)
else:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.writeChunk, offset, writeData)
d.addCallback(self._cbStatus, requestId, b"write succeeded")
d.addErrback(self._ebStatus, requestId, b"write failed")
def packet_REMOVE(self, data):
requestId = data[:4]
data = data[4:]
filename, data = getNS(data)
assert data == b'', 'still have data in REMOVE: %s' % repr(data)
d = defer.maybeDeferred(self.client.removeFile, filename)
d.addCallback(self._cbStatus, requestId, b"remove succeeded")
d.addErrback(self._ebStatus, requestId, b"remove failed")
def packet_RENAME(self, data):
requestId = data[:4]
data = data[4:]
oldPath, data = getNS(data)
newPath, data = getNS(data)
assert data == b'', 'still have data in RENAME: %s' % repr(data)
d = defer.maybeDeferred(self.client.renameFile, oldPath, newPath)
d.addCallback(self._cbStatus, requestId, b"rename succeeded")
d.addErrback(self._ebStatus, requestId, b"rename failed")
def packet_MKDIR(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
attrs, data = self._parseAttributes(data)
assert data == b'', 'still have data in MKDIR: %s' % repr(data)
d = defer.maybeDeferred(self.client.makeDirectory, path, attrs)
d.addCallback(self._cbStatus, requestId, b"mkdir succeeded")
d.addErrback(self._ebStatus, requestId, b"mkdir failed")
def packet_RMDIR(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == b'', 'still have data in RMDIR: %s' % repr(data)
d = defer.maybeDeferred(self.client.removeDirectory, path)
d.addCallback(self._cbStatus, requestId, b"rmdir succeeded")
d.addErrback(self._ebStatus, requestId, b"rmdir failed")
def packet_OPENDIR(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == b'', 'still have data in OPENDIR: %s' % repr(data)
d = defer.maybeDeferred(self.client.openDirectory, path)
d.addCallback(self._cbOpenDirectory, requestId)
d.addErrback(self._ebStatus, requestId, b"opendir failed")
def _cbOpenDirectory(self, dirObj, requestId):
handle = networkString(str(hash(dirObj)))
if handle in self.openDirs:
raise KeyError("already opened this directory")
self.openDirs[handle] = [dirObj, iter(dirObj)]
self.sendPacket(FXP_HANDLE, requestId + NS(handle))
def packet_READDIR(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
assert data == b'', 'still have data in READDIR: %s' % repr(data)
if handle not in self.openDirs:
self._ebStatus(failure.Failure(KeyError()), requestId)
else:
dirObj, dirIter = self.openDirs[handle]
d = defer.maybeDeferred(self._scanDirectory, dirIter, [])
d.addCallback(self._cbSendDirectory, requestId)
d.addErrback(self._ebStatus, requestId, b"scan directory failed")
def _scanDirectory(self, dirIter, f):
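        # Collect up to 250 directory entries for a single FXP_NAME reply;
        # if the iterator yields a Deferred, resume scanning once it fires.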
while len(f) < 250:
try:
                info = next(dirIter)
except StopIteration:
if not f:
raise EOFError
return f
if isinstance(info, defer.Deferred):
info.addCallback(self._cbScanDirectory, dirIter, f)
return
else:
f.append(info)
return f
def _cbScanDirectory(self, result, dirIter, f):
f.append(result)
return self._scanDirectory(dirIter, f)
def _cbSendDirectory(self, result, requestId):
data = b''
for (filename, longname, attrs) in result:
data += NS(filename)
data += NS(longname)
data += self._packAttributes(attrs)
self.sendPacket(FXP_NAME, requestId +
struct.pack('!L', len(result))+data)
def packet_STAT(self, data, followLinks = 1):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == b'', 'still have data in STAT/LSTAT: %s' % repr(data)
d = defer.maybeDeferred(self.client.getAttrs, path, followLinks)
d.addCallback(self._cbStat, requestId)
d.addErrback(self._ebStatus, requestId, b'stat/lstat failed')
def packet_LSTAT(self, data):
self.packet_STAT(data, 0)
def packet_FSTAT(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
assert data == b'', 'still have data in FSTAT: %s' % repr(data)
if handle not in self.openFiles:
self._ebStatus(failure.Failure(KeyError('%s not in self.openFiles'
% handle)), requestId)
else:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.getAttrs)
d.addCallback(self._cbStat, requestId)
d.addErrback(self._ebStatus, requestId, b'fstat failed')
def _cbStat(self, result, requestId):
data = requestId + self._packAttributes(result)
self.sendPacket(FXP_ATTRS, data)
def packet_SETSTAT(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
attrs, data = self._parseAttributes(data)
if data != b'':
log.msg('WARN: still have data in SETSTAT: %s' % repr(data))
d = defer.maybeDeferred(self.client.setAttrs, path, attrs)
d.addCallback(self._cbStatus, requestId, b'setstat succeeded')
d.addErrback(self._ebStatus, requestId, b'setstat failed')
def packet_FSETSTAT(self, data):
requestId = data[:4]
data = data[4:]
handle, data = getNS(data)
attrs, data = self._parseAttributes(data)
assert data == b'', 'still have data in FSETSTAT: %s' % repr(data)
if handle not in self.openFiles:
self._ebStatus(failure.Failure(KeyError()), requestId)
else:
fileObj = self.openFiles[handle]
d = defer.maybeDeferred(fileObj.setAttrs, attrs)
d.addCallback(self._cbStatus, requestId, b'fsetstat succeeded')
d.addErrback(self._ebStatus, requestId, b'fsetstat failed')
def packet_READLINK(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == b'', 'still have data in READLINK: %s' % repr(data)
d = defer.maybeDeferred(self.client.readLink, path)
d.addCallback(self._cbReadLink, requestId)
d.addErrback(self._ebStatus, requestId, b'readlink failed')
def _cbReadLink(self, result, requestId):
self._cbSendDirectory([(result, b'', {})], requestId)
def packet_SYMLINK(self, data):
requestId = data[:4]
data = data[4:]
linkPath, data = getNS(data)
targetPath, data = getNS(data)
d = defer.maybeDeferred(self.client.makeLink, linkPath, targetPath)
d.addCallback(self._cbStatus, requestId, b'symlink succeeded')
d.addErrback(self._ebStatus, requestId, b'symlink failed')
def packet_REALPATH(self, data):
requestId = data[:4]
data = data[4:]
path, data = getNS(data)
assert data == b'', 'still have data in REALPATH: %s' % repr(data)
d = defer.maybeDeferred(self.client.realPath, path)
d.addCallback(self._cbReadLink, requestId) # same return format
d.addErrback(self._ebStatus, requestId, b'realpath failed')
def packet_EXTENDED(self, data):
requestId = data[:4]
data = data[4:]
extName, extData = getNS(data)
d = defer.maybeDeferred(self.client.extendedRequest, extName, extData)
d.addCallback(self._cbExtended, requestId)
d.addErrback(self._ebStatus, requestId, networkString(
'extended %s failed' % extName))
def _cbExtended(self, data, requestId):
self.sendPacket(FXP_EXTENDED_REPLY, requestId + data)
def _cbStatus(self, result, requestId, msg = b"request succeeded"):
self._sendStatus(requestId, FX_OK, msg)
def _ebStatus(self, reason, requestId, msg = b"request failed"):
code = FX_FAILURE
message = msg
if isinstance(reason.value, (IOError, OSError)):
if reason.value.errno == errno.ENOENT: # no such file
code = FX_NO_SUCH_FILE
message = networkString(reason.value.strerror)
elif reason.value.errno == errno.EACCES: # permission denied
code = FX_PERMISSION_DENIED
message = networkString(reason.value.strerror)
elif reason.value.errno == errno.EEXIST:
code = FX_FILE_ALREADY_EXISTS
else:
log.err(reason)
elif isinstance(reason.value, EOFError): # EOF
code = FX_EOF
if reason.value.args:
message = networkString(reason.value.args[0])
elif isinstance(reason.value, NotImplementedError):
code = FX_OP_UNSUPPORTED
if reason.value.args:
message = networkString(reason.value.args[0])
elif isinstance(reason.value, SFTPError):
code = reason.value.code
message = networkString(reason.value.message)
else:
log.err(reason)
self._sendStatus(requestId, code, message)
def _sendStatus(self, requestId, code, message, lang = b''):
"""
Helper method to send a FXP_STATUS message.
"""
data = requestId + struct.pack('!L', code)
data += NS(message)
data += NS(lang)
self.sendPacket(FXP_STATUS, data)
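    # Wire layout produced by _sendStatus above (editor's note, derived from the
    # code rather than quoted from the SFTP draft):
    #   uint32  requestId   -- 4 bytes copied verbatim from the request
    #   uint32  code        -- one of the FX_* status codes, packed '!L'
    #   string  message     -- NS(): 4-byte length prefix followed by the bytes
    #   string  lang        -- NS(): 4-byte length prefix followed by the bytes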
def connectionLost(self, reason):
"""
Clean all opened files and directories.
"""
for fileObj in self.openFiles.values():
fileObj.close()
self.openFiles = {}
for (dirObj, dirIter) in self.openDirs.values():
dirObj.close()
self.openDirs = {}
class FileTransferClient(FileTransferBase):
def __init__(self, extData = {}):
"""
@param extData: a dict of extended_name : extended_data items
to be sent to the server.
"""
FileTransferBase.__init__(self)
        self.extData = extData
self.counter = 0
self.openRequests = {} # id -> Deferred
self.wasAFile = {} # Deferred -> 1 TERRIBLE HACK
def connectionMade(self):
data = struct.pack('!L', max(self.versions))
        for k, v in self.extData.items():
data += NS(k) + NS(v)
self.sendPacket(FXP_INIT, data)
def _sendRequest(self, msg, data):
data = struct.pack('!L', self.counter) + data
d = defer.Deferred()
self.openRequests[self.counter] = d
self.counter += 1
self.sendPacket(msg, data)
return d
def _parseRequest(self, data):
(id,) = struct.unpack('!L', data[:4])
d = self.openRequests[id]
del self.openRequests[id]
return d, data[4:]
def openFile(self, filename, flags, attrs):
"""
Open a file.
This method returns a L{Deferred} that is called back with an object
that provides the L{ISFTPFile} interface.
@type filename: L{bytes}
@param filename: a string representing the file to open.
@param flags: an integer of the flags to open the file with, ORed together.
The flags and their values are listed at the bottom of this file.
@param attrs: a list of attributes to open the file with. It is a
dictionary, consisting of 0 or more keys. The possible keys are::
size: the size of the file in bytes
uid: the user ID of the file as an integer
gid: the group ID of the file as an integer
            permissions: the permissions of the file as an integer.
the bit representation of this field is defined by POSIX.
atime: the access time of the file as seconds since the epoch.
mtime: the modification time of the file as seconds since the epoch.
ext_*: extended attributes. The server is not required to
understand this, but it may.
NOTE: there is no way to indicate text or binary files. it is up
to the SFTP client to deal with this.
"""
data = NS(filename) + struct.pack('!L', flags) + self._packAttributes(attrs)
d = self._sendRequest(FXP_OPEN, data)
self.wasAFile[d] = (1, filename) # HACK
return d
def removeFile(self, filename):
"""
Remove the given file.
This method returns a Deferred that is called back when it succeeds.
@type filename: L{bytes}
@param filename: the name of the file as a string.
"""
return self._sendRequest(FXP_REMOVE, NS(filename))
def renameFile(self, oldpath, newpath):
"""
Rename the given file.
This method returns a Deferred that is called back when it succeeds.
@type oldpath: L{bytes}
@param oldpath: the current location of the file.
@type newpath: L{bytes}
@param newpath: the new file name.
"""
return self._sendRequest(FXP_RENAME, NS(oldpath)+NS(newpath))
def makeDirectory(self, path, attrs):
"""
Make a directory.
This method returns a Deferred that is called back when it is
created.
@type path: L{bytes}
@param path: the name of the directory to create as a string.
@param attrs: a dictionary of attributes to create the directory
with. Its meaning is the same as the attrs in the openFile method.
"""
return self._sendRequest(FXP_MKDIR, NS(path)+self._packAttributes(attrs))
def removeDirectory(self, path):
"""
Remove a directory (non-recursively)
It is an error to remove a directory that has files or directories in
it.
This method returns a Deferred that is called back when it is removed.
@type path: L{bytes}
@param path: the directory to remove.
"""
return self._sendRequest(FXP_RMDIR, NS(path))
def openDirectory(self, path):
"""
Open a directory for scanning.
This method returns a Deferred that is called back with an iterable
object that has a close() method.
The close() method is called when the client is finished reading
from the directory. At this point, the iterable will no longer
be used.
The iterable returns triples of the form (filename, longname, attrs)
or a Deferred that returns the same. The sequence must support
__getitem__, but otherwise may be any 'sequence-like' object.
filename is the name of the file relative to the directory.
        longname is an expanded format of the filename. The recommended format
is:
-rwxr-xr-x 1 mjos staff 348911 Mar 25 14:29 t-filexfer
1234567890 123 12345678 12345678 12345678 123456789012
The first line is sample output, the second is the length of the field.
The fields are: permissions, link count, user owner, group owner,
size in bytes, modification time.
attrs is a dictionary in the format of the attrs argument to openFile.
@type path: L{bytes}
@param path: the directory to open.
"""
d = self._sendRequest(FXP_OPENDIR, NS(path))
self.wasAFile[d] = (0, path)
return d
def getAttrs(self, path, followLinks=0):
"""
Return the attributes for the given path.
This method returns a dictionary in the same format as the attrs
argument to openFile or a Deferred that is called back with same.
@type path: L{bytes}
@param path: the path to return attributes for as a string.
@param followLinks: a boolean. if it is True, follow symbolic links
and return attributes for the real path at the base. if it is False,
return attributes for the specified path.
"""
if followLinks: m = FXP_STAT
else: m = FXP_LSTAT
return self._sendRequest(m, NS(path))
def setAttrs(self, path, attrs):
"""
Set the attributes for the path.
This method returns when the attributes are set or a Deferred that is
called back when they are.
@type path: L{bytes}
@param path: the path to set attributes for as a string.
@param attrs: a dictionary in the same format as the attrs argument to
openFile.
"""
data = NS(path) + self._packAttributes(attrs)
return self._sendRequest(FXP_SETSTAT, data)
def readLink(self, path):
"""
Find the root of a set of symbolic links.
This method returns the target of the link, or a Deferred that
returns the same.
@type path: L{bytes}
@param path: the path of the symlink to read.
"""
d = self._sendRequest(FXP_READLINK, NS(path))
return d.addCallback(self._cbRealPath)
def makeLink(self, linkPath, targetPath):
"""
Create a symbolic link.
This method returns when the link is made, or a Deferred that
returns the same.
@type linkPath: L{bytes}
@param linkPath: the pathname of the symlink as a string
@type targetPath: L{bytes}
@param targetPath: the path of the target of the link as a string.
"""
return self._sendRequest(FXP_SYMLINK, NS(linkPath)+NS(targetPath))
def realPath(self, path):
"""
Convert any path to an absolute path.
This method returns the absolute path as a string, or a Deferred
that returns the same.
@type path: L{bytes}
@param path: the path to convert as a string.
"""
d = self._sendRequest(FXP_REALPATH, NS(path))
return d.addCallback(self._cbRealPath)
def _cbRealPath(self, result):
name, longname, attrs = result[0]
if _PY3:
name = name.decode("utf-8")
return name
def extendedRequest(self, request, data):
"""
Make an extended request of the server.
The method returns a Deferred that is called back with
the result of the extended request.
@type request: L{bytes}
@param request: the name of the extended request to make.
@type data: L{bytes}
@param data: any other data that goes along with the request.
"""
return self._sendRequest(FXP_EXTENDED, NS(request) + data)
def packet_VERSION(self, data):
version, = struct.unpack('!L', data[:4])
data = data[4:]
d = {}
while data:
k, data = getNS(data)
v, data = getNS(data)
d[k]=v
self.version = version
self.gotServerVersion(version, d)
def packet_STATUS(self, data):
d, data = self._parseRequest(data)
code, = struct.unpack('!L', data[:4])
data = data[4:]
if len(data) >= 4:
msg, data = getNS(data)
if len(data) >= 4:
lang, data = getNS(data)
else:
lang = b''
else:
msg = b''
lang = b''
if code == FX_OK:
d.callback((msg, lang))
elif code == FX_EOF:
d.errback(EOFError(msg))
elif code == FX_OP_UNSUPPORTED:
d.errback(NotImplementedError(msg))
else:
d.errback(SFTPError(code, nativeString(msg), lang))
def packet_HANDLE(self, data):
d, data = self._parseRequest(data)
isFile, name = self.wasAFile.pop(d)
if isFile:
cb = ClientFile(self, getNS(data)[0])
else:
cb = ClientDirectory(self, getNS(data)[0])
cb.name = name
d.callback(cb)
def packet_DATA(self, data):
d, data = self._parseRequest(data)
d.callback(getNS(data)[0])
def packet_NAME(self, data):
d, data = self._parseRequest(data)
count, = struct.unpack('!L', data[:4])
data = data[4:]
files = []
for i in range(count):
filename, data = getNS(data)
longname, data = getNS(data)
attrs, data = self._parseAttributes(data)
files.append((filename, longname, attrs))
d.callback(files)
def packet_ATTRS(self, data):
d, data = self._parseRequest(data)
d.callback(self._parseAttributes(data)[0])
def packet_EXTENDED_REPLY(self, data):
d, data = self._parseRequest(data)
d.callback(data)
def gotServerVersion(self, serverVersion, extData):
"""
        Called when the server sends its version info.
        @param serverVersion: an integer representing the version of the SFTP
        protocol the server is claiming.
        @param extData: a dictionary of extended_name : extended_data items.
        These items are sent by the server to indicate additional features.
"""
@implementer(ISFTPFile)
class ClientFile:
def __init__(self, parent, handle):
self.parent = parent
self.handle = NS(handle)
def close(self):
return self.parent._sendRequest(FXP_CLOSE, self.handle)
def readChunk(self, offset, length):
data = self.handle + struct.pack("!QL", offset, length)
return self.parent._sendRequest(FXP_READ, data)
def writeChunk(self, offset, chunk):
data = self.handle + struct.pack("!Q", offset) + NS(chunk)
return self.parent._sendRequest(FXP_WRITE, data)
def getAttrs(self):
return self.parent._sendRequest(FXP_FSTAT, self.handle)
def setAttrs(self, attrs):
data = self.handle + self.parent._packAttributes(attrs)
        return self.parent._sendRequest(FXP_FSETSTAT, data)
class ClientDirectory:
def __init__(self, parent, handle):
self.parent = parent
self.handle = NS(handle)
self.filesCache = []
def read(self):
d = self.parent._sendRequest(FXP_READDIR, self.handle)
return d
def close(self):
return self.parent._sendRequest(FXP_CLOSE, self.handle)
def __iter__(self):
return self
def next(self):
if self.filesCache:
return self.filesCache.pop(0)
d = self.read()
d.addCallback(self._cbReadDir)
d.addErrback(self._ebReadDir)
return d
def _cbReadDir(self, names):
self.filesCache = names[1:]
return names[0]
def _ebReadDir(self, reason):
reason.trap(EOFError)
def _():
raise StopIteration
self.next = _
return reason
class SFTPError(Exception):
def __init__(self, errorCode, errorMessage, lang = ''):
Exception.__init__(self)
self.code = errorCode
self._message = errorMessage
self.lang = lang
def message(self):
"""
A string received over the network that explains the error to a human.
"""
# Python 2.6 deprecates assigning to the 'message' attribute of an
# exception. We define this read-only property here in order to
# prevent the warning about deprecation while maintaining backwards
# compatibility with object clients that rely on the 'message'
# attribute being set correctly. See bug #3897.
return self._message
message = property(message)
def __str__(self):
return 'SFTPError %s: %s' % (self.code, self.message)
FXP_INIT = 1
FXP_VERSION = 2
FXP_OPEN = 3
FXP_CLOSE = 4
FXP_READ = 5
FXP_WRITE = 6
FXP_LSTAT = 7
FXP_FSTAT = 8
FXP_SETSTAT = 9
FXP_FSETSTAT = 10
FXP_OPENDIR = 11
FXP_READDIR = 12
FXP_REMOVE = 13
FXP_MKDIR = 14
FXP_RMDIR = 15
FXP_REALPATH = 16
FXP_STAT = 17
FXP_RENAME = 18
FXP_READLINK = 19
FXP_SYMLINK = 20
FXP_STATUS = 101
FXP_HANDLE = 102
FXP_DATA = 103
FXP_NAME = 104
FXP_ATTRS = 105
FXP_EXTENDED = 200
FXP_EXTENDED_REPLY = 201
FILEXFER_ATTR_SIZE = 0x00000001
FILEXFER_ATTR_UIDGID = 0x00000002
FILEXFER_ATTR_OWNERGROUP = FILEXFER_ATTR_UIDGID
FILEXFER_ATTR_PERMISSIONS = 0x00000004
FILEXFER_ATTR_ACMODTIME = 0x00000008
FILEXFER_ATTR_EXTENDED = 0x80000000
FILEXFER_TYPE_REGULAR = 1
FILEXFER_TYPE_DIRECTORY = 2
FILEXFER_TYPE_SYMLINK = 3
FILEXFER_TYPE_SPECIAL = 4
FILEXFER_TYPE_UNKNOWN = 5
FXF_READ = 0x00000001
FXF_WRITE = 0x00000002
FXF_APPEND = 0x00000004
FXF_CREAT = 0x00000008
FXF_TRUNC = 0x00000010
FXF_EXCL = 0x00000020
FXF_TEXT = 0x00000040
FX_OK = 0
FX_EOF = 1
FX_NO_SUCH_FILE = 2
FX_PERMISSION_DENIED = 3
FX_FAILURE = 4
FX_BAD_MESSAGE = 5
FX_NO_CONNECTION = 6
FX_CONNECTION_LOST = 7
FX_OP_UNSUPPORTED = 8
FX_FILE_ALREADY_EXISTS = 11
# http://tools.ietf.org/wg/secsh/draft-ietf-secsh-filexfer/ defines more
# useful error codes, but so far OpenSSH doesn't implement them. We use them
# internally for clarity, but for now define them all as FX_FAILURE to be
# compatible with existing software.
FX_NOT_A_DIRECTORY = FX_FAILURE
FX_FILE_IS_A_DIRECTORY = FX_FAILURE
# initialize FileTransferBase.packetTypes:
g = globals()
for name in list(g.keys()):
if name.startswith('FXP_'):
value = g[name]
FileTransferBase.packetTypes[value] = name[4:]
del g, name, value
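# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal client-side flow, assuming `client` is an already-connected
# FileTransferClient; `print_data` is an illustrative callback name.
#
#     def print_data(data):
#         print(data)
#
#     def read_and_close(fileObj):
#         d = fileObj.readChunk(0, 1024)            # Deferred firing with bytes
#         d.addCallback(print_data)
#         d.addCallback(lambda _: fileObj.close())
#         return d
#
#     d = client.openFile(b"motd.txt", FXF_READ, {})  # Deferred firing with an ISFTPFile
#     d.addCallback(read_and_close)
#     d.addErrback(log.err)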
| whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/conch/ssh/filetransfer.py | Python | mit | 34,339 |
""" addons.xml generator """
import os
import md5
class Generator:
"""
    Generates a new addons.xml file from each add-on's addon.xml file
and a new addons.xml.md5 hash file. Must be run from the root of
the checked-out repo. Only handles single depth folder structure.
"""
def __init__( self ):
# generate files
self._generate_addons_file()
self._generate_md5_file()
# notify user
print "Finished updating addons xml and md5 files"
def _generate_addons_file( self ):
# addon list
addons = sorted(os.listdir( "." ))
# final addons text
addons_xml = u"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<addons>\n"
# loop thru and add each addons addon.xml file
for addon in addons:
try:
# skip any file or .svn folder
if ( not os.path.isdir( addon ) or addon.startswith('.') ): continue
# create path
_path = os.path.join( addon, "addon.xml" )
# split lines for stripping
xml_lines = open( _path, "r" ).read().splitlines()
# new addon
addon_xml = ""
# loop thru cleaning each line
for line in xml_lines:
# skip encoding format line
if ( line.find( "<?xml" ) >= 0 ): continue
# add line
addon_xml += unicode( line.rstrip() + "\n", "UTF-8" )
# we succeeded so add to our final addons.xml text
addons_xml += addon_xml.rstrip() + "\n\n"
except Exception, e:
# missing or poorly formatted addon.xml
print "Excluding %s for %s" % ( _path, e, )
# clean and add closing tag
addons_xml = addons_xml.strip() + u"\n</addons>\n"
# save file
self._save_file( addons_xml.encode( "UTF-8" ), file="addons.xml" )
def _generate_md5_file( self ):
try:
# create a new md5 hash
m = md5.new( open( "addons.xml" ).read() ).hexdigest()
# save file
self._save_file( m, file="addons.xml.md5" )
except Exception, e:
# oops
print "An error occurred creating addons.xml.md5 file!\n%s" % ( e, )
def _save_file( self, data, file ):
try:
# write data to the file
open( file, "w" ).write( data )
except Exception, e:
# oops
print "An error occurred saving %s file!\n%s" % ( file, e, )
if ( __name__ == "__main__" ):
# start
Generator()
| overflow-msgs/smartconnect | tools/addons_xml_generator.py | Python | gpl-2.0 | 2,748 |
"""
@file poi.py
@author Jakob Erdmann
@author Laura Bieker
@date 2014-02-13
@version $Id: poi.py 19649 2015-12-17 21:05:20Z behrisch $
This module includes functions for converting SUMO's fcd-output into
pois (useful for showing synthetic GPS disturbances)
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2014 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from collections import defaultdict
from sumolib.shapes.poi import PoI
from sumolib.color import RGBAColor
from sumolib.miscutils import Colorgen
LAYER = 100 # show above everything else
def fcd2poi(inpFCD, outSTRM, ignored):
colors = defaultdict(
lambda: RGBAColor(*Colorgen(("random", 1, 1)).byteTuple()))
outSTRM.write("<pois>\n")
for timestep in inpFCD:
for v in timestep.vehicle:
outSTRM.write(" %s\n" % PoI("%s_%s" % (
v.id, timestep.time), v.id, LAYER, colors[v.id], v.x, v.y, lonLat=True).toXML())
outSTRM.write("</pois>\n")
| 702nADOS/sumo | tools/sumolib/output/convert/poi.py | Python | gpl-3.0 | 1,293 |
#!/usr/bin/env python3
from __future__ import print_function
import sys, os, re, subprocess as sp
from setuptools import setup
if not sys.version_info[0] == 3:
sys.exit("Python 2.x is not supported; Python 3.x is required.")
########################################
# Based on this recipe, adapted for Python 3, Git 2.8.x, and PEP-440 version identifiers
# http://blogs.nopcode.org/brainstorm/2013/05/20/pragmatic-python-versioning-via-setuptools-and-git-tags/
# https://www.python.org/dev/peps/pep-0440/#version-scheme
# Fetch version from git tags, and write to version.py.
# Also, when git is not available (PyPi package), use stored version.py.
version_py = os.path.join(os.path.dirname(__file__), 'smxlogin', 'version.py')
try:
version_git = sp.check_output(["git", "describe", "--tags", "--dirty=_dirty"]).strip().decode('ascii')
final, dev, blob, dirty = re.match(r'v?((?:\d+\.)*\d+)(?:-(\d+)-(g[a-z0-9]+))?(_dirty)?', version_git).groups()
version_pep = final+('.dev%s+%s'%(dev,blob) if dev else '')+(dirty if dirty else '')
except:
d = {}
with open(version_py, 'r') as fh:
exec(fh.read(), d)
version_pep = d['__version__']
else:
with open(version_py, 'w') as fh:
print("# Do not edit this file, versioning is governed by git tags", file=fh)
print('__version__="%s"' % version_pep, file=fh)
########################################
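# Worked example (editor's addition, values illustrative): with the regex above,
# a description like "v0.3-5-g1a2b3c_dirty" yields final="0.3", dev="5",
# blob="g1a2b3c", dirty="_dirty", so version_pep becomes "0.3.dev5+g1a2b3c_dirty";
# a clean tagged build described as "v0.3" collapses to just "0.3".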
setup(name="smxlogin",
version=version_pep,
description=("Automatically enter SecureMatrix password image pattern for a Juniper VPNs"),
long_description=open('README.md').read(),
author="Daniel Lenski",
author_email="dlenski@gmail.com",
install_requires=[ 'robobrowser>=0.5.0', 'werkzeug<0.15' ],
license='GPL v3 or later',
url="https://github.com/dlenski/smxlogin",
packages=["smxlogin"],
entry_points={ 'console_scripts': [ 'smxlogin=smxlogin.__main__:main' ] }
)
| dlenski/smxlogin | setup.py | Python | gpl-3.0 | 1,944 |
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "PyTimer",
version = "0.0.1",
author = "Gertjan van den Burg",
author_email = "gertjanvandenburg@gmail.com",
description = ("A command line time tracking application"),
license = "GPL v2",
long_description = read("README.md"),
install_requires = [
'termcolor',
'readchar',
'dateutil'
],
py_modules = ['pytimer'],
)
| GjjvdBurg/PyTimer | setup.py | Python | gpl-2.0 | 583 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
import re
from jx_elasticsearch import elasticsearch
from jx_python import jx
from mo_dots import Null, coalesce, wrap
from mo_future import items, sort_using_key
from mo_json import CAN_NOT_DECODE_JSON, json2value, value2json
from mo_kwargs import override
from mo_logs import Log
from mo_logs.exceptions import Except
from mo_math.randoms import Random
from mo_threads import Lock, Thread
from mo_times.dates import Date, unicode2Date, unix2Date
from mo_times.durations import Duration
from mo_times.timer import Timer
from pyLibrary.aws.s3 import KEY_IS_WRONG_FORMAT, strip_extension
MAX_RECORD_LENGTH = 400000
DATA_TOO_OLD = "data is too old to be indexed"
DEBUG = False
class RolloverIndex(object):
"""
    MIMIC THE elasticsearch.Index, WITH EXTRA keys() FUNCTION,
    A THREADED QUEUE, AND DATA SPLIT INTO INDEXES BY THE rollover_field TIMESTAMP
"""
@override
def __init__(
self,
rollover_field, # the FIELD with a timestamp to use for determining which index to push to
rollover_interval, # duration between roll-over to new index
rollover_max, # remove old indexes, do not add old records
schema, # es schema
queue_size=10000, # number of documents to queue in memory
batch_size=5000, # number of documents to push at once
typed=None, # indicate if we are expected typed json
kwargs=None # plus additional ES settings
):
if kwargs.tjson != None:
Log.error("not expected")
if typed == None:
Log.error("not expected")
schema.settings.index.max_result_window = 100000 # REQUIRED FOR ACTIVEDATA NESTED QUERIES
schema.settings.index.max_inner_result_window = 100000 # REQUIRED FOR ACTIVEDATA NESTED QUERIES
self.settings = kwargs
self.locker = Lock("lock for rollover_index")
self.rollover_field = jx.get(rollover_field)
self.rollover_interval = self.settings.rollover_interval = Duration(rollover_interval)
self.rollover_max = self.settings.rollover_max = Duration(rollover_max)
self.known_queues = {} # MAP DATE TO INDEX
self.cluster = elasticsearch.Cluster(self.settings)
def __getattr__(self, item):
return getattr(self.cluster, item)
# Log.error("Not supported")
def _get_queue(self, row):
row = wrap(row)
if row.json:
row.value, row.json = json2value(row.json), None
timestamp = Date(self.rollover_field(row.value))
if timestamp == None:
return Null
elif timestamp < Date.today() - self.rollover_max:
return DATA_TOO_OLD
rounded_timestamp = timestamp.floor(self.rollover_interval)
with self.locker:
queue = self.known_queues.get(rounded_timestamp.unix)
if queue == None:
candidates = sort_using_key(
filter(
lambda r: re.match(
re.escape(self.settings.index) + r"\d\d\d\d\d\d\d\d_\d\d\d\d\d\d$",
r['index']
),
self.cluster.get_aliases()
),
key=lambda r: r['index']
)
best = None
for c in candidates:
c = wrap(c)
c.date = unicode2Date(c.index[-15:], elasticsearch.INDEX_DATE_FORMAT)
if timestamp > c.date:
best = c
if not best or rounded_timestamp > best.date:
if rounded_timestamp < wrap(candidates[-1]).date:
es = self.cluster.get_or_create_index(read_only=False, alias=best.alias, index=best.index, kwargs=self.settings)
else:
try:
es = self.cluster.create_index(create_timestamp=rounded_timestamp, kwargs=self.settings)
es.add_alias(self.settings.index)
except Exception as e:
e = Except.wrap(e)
if "IndexAlreadyExistsException" not in e:
Log.error("Problem creating index", cause=e)
return self._get_queue(row) # TRY AGAIN
else:
es = self.cluster.get_or_create_index(read_only=False, alias=best.alias, index=best.index, kwargs=self.settings)
def refresh(please_stop):
try:
es.set_refresh_interval(seconds=60 * 10, timeout=5)
except Exception:
Log.note("Could not set refresh interval for {{index}}", index=es.settings.index)
Thread.run("refresh", refresh)
self._delete_old_indexes(candidates)
threaded_queue = es.threaded_queue(max_size=self.settings.queue_size, batch_size=self.settings.batch_size, silent=True)
with self.locker:
queue = self.known_queues[rounded_timestamp.unix] = threaded_queue
return queue
def _delete_old_indexes(self, candidates):
for c in candidates:
timestamp = unicode2Date(c.index[-15:], "%Y%m%d_%H%M%S")
if timestamp + self.rollover_interval < Date.today() - self.rollover_max:
# Log.warning("Will delete {{index}}", index=c.index)
try:
self.cluster.delete_index(c.index)
except Exception as e:
Log.warning("could not delete index {{index}}", index=c.index, cause=e)
for t, q in items(self.known_queues):
if unix2Date(t) + self.rollover_interval < Date.today() - self.rollover_max:
with self.locker:
del self.known_queues[t]
pass
# ADD keys() SO ETL LOOP CAN FIND WHAT'S GETTING REPLACED
def keys(self, prefix=None):
from activedata_etl import etl2path, key2etl
path = jx.reverse(etl2path(key2etl(prefix)))
if self.cluster.version.startswith(("5.", "6.")):
stored_fields = "stored_fields"
else:
stored_fields = "fields"
result = self.es.search({
stored_fields: ["_id"],
"query": {
"bool": {
"query": {"match_all": {}},
"filter": {"and": [{"term": {"etl" + (".source" * i) + ".id": v}} for i, v in enumerate(path)]}
}
}
})
if result.hits.hits:
return set(result.hits.hits._id)
else:
return set()
def extend(self, documents, queue=None):
if len(documents) == 0:
return
i = 0
if queue == None:
for i, doc in enumerate(documents):
queue = self._get_queue(doc)
if queue != None:
break
else:
Log.note("All documents are too old")
return
queue.extend(documents[i::])
def add(self, doc, queue=None):
if queue == None:
queue = self._get_queue(doc)
if queue == None:
Log.note("Document not added: Too old")
return
queue.add(doc)
def delete(self, filter):
self.es.delete(filter)
def copy(self, keys, source, sample_only_filter=None, sample_size=None, done_copy=None):
"""
:param keys: THE KEYS TO LOAD FROM source
:param source: THE SOURCE (USUALLY S3 BUCKET)
:param sample_only_filter: SOME FILTER, IN CASE YOU DO NOT WANT TO SEND EVERYTHING
:param sample_size: FOR RANDOM SAMPLE OF THE source DATA
:param done_copy: CALLBACK, ADDED TO queue, TO FINISH THE TRANSACTION
:return: LIST OF SUB-keys PUSHED INTO ES
"""
num_keys = 0
queue = None
pending = [] # FOR WHEN WE DO NOT HAVE QUEUE YET
for key in keys:
timer = Timer("Process {{key}}", param={"key": key}, verbose=DEBUG)
try:
with timer:
for rownum, line in enumerate(source.read_lines(strip_extension(key))):
if not line:
continue
if rownum > 0 and rownum % 1000 == 0:
Log.note("Ingested {{num}} records from {{key}} in bucket {{bucket}}", num=rownum, key=key, bucket=source.name)
insert_me, please_stop = fix(key, rownum, line, source, sample_only_filter, sample_size)
if insert_me == None:
continue
value = insert_me['value']
if '_id' not in value:
Log.warning("expecting an _id in all S3 records. If missing, there can be duplicates")
if queue == None:
queue = self._get_queue(insert_me)
if queue == None:
pending.append(insert_me)
if len(pending) > 1000:
if done_copy:
done_copy()
Log.error("first 1000 (key={{key}}) records for {{alias}} have no indication what index to put data", key=tuple(keys)[0], alias=self.settings.index)
continue
elif queue is DATA_TOO_OLD:
break
if pending:
queue.extend(pending)
pending = []
num_keys += 1
queue.add(insert_me)
if please_stop:
break
except Exception as e:
if KEY_IS_WRONG_FORMAT in e:
Log.warning("Could not process {{key}} because bad format. Never trying again.", key=key, cause=e)
pass
elif CAN_NOT_DECODE_JSON in e:
Log.warning("Could not process {{key}} because of bad JSON. Never trying again.", key=key, cause=e)
pass
else:
Log.warning("Could not process {{key}} after {{duration|round(places=2)}}seconds", key=key, duration=timer.duration.seconds, cause=e)
done_copy = None
if done_copy:
if queue == None:
done_copy()
elif queue is DATA_TOO_OLD:
done_copy()
else:
queue.add(done_copy)
if [p for p in pending if wrap(p).value.task.state not in ('failed', 'exception')]:
Log.error("Did not find an index for {{alias}} to place the data for key={{key}}", key=tuple(keys)[0], alias=self.settings.index)
Log.note("{{num}} keys from {{key|json}} added", num=num_keys, key=keys)
return num_keys
def fix(source_key, rownum, line, source, sample_only_filter, sample_size):
"""
:param rownum:
:param line:
:param source:
:param sample_only_filter:
:param sample_size:
:return: (row, no_more_data) TUPLE WHERE row IS {"value":<data structure>} OR {"json":<text line>}
"""
value = json2value(line)
if rownum == 0:
if len(line) > MAX_RECORD_LENGTH:
_shorten(source_key, value, source)
value = _fix(value)
if sample_only_filter and Random.int(int(1.0/coalesce(sample_size, 0.01))) != 0 and jx.filter([value], sample_only_filter):
# INDEX etl.id==0, BUT NO MORE
if value.etl.id != 0:
Log.error("Expecting etl.id==0")
row = {"value": value}
return row, True
elif len(line) > MAX_RECORD_LENGTH:
_shorten(source_key, value, source)
value = _fix(value)
elif '"resource_usage":' in line:
value = _fix(value)
row = {"value": value}
return row, False
def _shorten(source_key, value, source):
if source.name.startswith("active-data-test-result"):
value.result.subtests = [s for s in value.result.subtests if s.ok is False]
value.result.missing_subtests = True
value.repo.changeset.files = None
shorter_length = len(value2json(value))
if shorter_length > MAX_RECORD_LENGTH:
result_size = len(value2json(value.result))
if source.name == "active-data-test-result":
if result_size > MAX_RECORD_LENGTH:
Log.warning("Epic test failure in {{name}} results in big record for {{id}} of length {{length}}", id=value._id, name=source.name, length=shorter_length)
else:
pass # NOT A PROBLEM
else:
Log.warning("Monstrous {{name}} record {{id}} of length {{length}}", id=source_key, name=source.name, length=shorter_length)
def _fix(value):
try:
if value.repo._source:
value.repo = value.repo._source
if not value.build.revision12:
value.build.revision12 = value.build.revision[0:12]
if value.resource_usage:
value.resource_usage = None
return value
except Exception as e:
Log.error("unexpected problem", cause=e)
| klahnakoski/SpotManager | vendor/jx_elasticsearch/rollover_index.py | Python | mpl-2.0 | 13,558 |
import asyncio
import time
import aiohttp
from . import utils
from . import static_data
DAY = static_data.DAY
REGIONS = static_data.LEAGUE_REGIONS
CACHE_UPDATE_TIMEOUT = static_data.LEAGUE_CACHE_UPDATE_TIMEOUT
REDIS_URL = static_data.REDIS_URL
class AsyncRateLeagueAPI:
def __init__(self, api_key):
self.api_key = api_key
self.session = aiohttp.ClientSession()
def __repr__(self):
return 'ARLeagueAPI:{}'.format(self.api_key[-4:])
async def _session_get_simple(self, url, params):
async with self.session.get(url, params=params) as response:
return await response.json()
async def _session_get(self, url, params):
response = await self._session_get_simple(url, params)
while response.get('status', dict()).get('status_code') == 429:
await asyncio.sleep(1)
response = await self._session_get_simple(url, params)
return response
def _request(self, api_url, region, **kwargs):
API_URL_BASE = 'https://{platform}.api.riotgames.com/{api_url}'
api_url = api_url.format(region=region, **kwargs)
url = API_URL_BASE.format(platform=REGIONS[region], api_url=api_url)
kwargs['api_key'] = self.api_key
return self._session_get(url, params=kwargs)
def get_summoner_by_name(self, region, summoner_name):
url = '/lol/summoner/v3/summoners/by-name/{summonerName}'
return self._request(url, region, summonerName=summoner_name)
@utils.cached(timeout=CACHE_UPDATE_TIMEOUT, redis_url=REDIS_URL)
async def get_summoner_revison_date(self, region, summoner) -> int:
summoner = await self.get_summoner_by_name(region, summoner)
try:
return int(summoner['revisionDate'])
except KeyError:
return 0
async def get_revision(self, region, summoner):
out = str()
revision_date = await self.get_summoner_revison_date(region, summoner)
if revision_date:
revision_date /= 1000
days_ago = int((time.time() - revision_date)//DAY)
if days_ago == 0:
out = 'Today'
else:
out = utils.get_string_from_countable('day', days_ago) + ' ago'
else:
out = '*Never (not found)!*'
return out
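# Hedged usage sketch (editor's addition): the region key and summoner name are
# illustrative and must match entries in static_data.LEAGUE_REGIONS.
#
#     async def demo():
#         api = AsyncRateLeagueAPI(api_key="RGAPI-...")
#         print(await api.get_revision("euw", "SomeSummoner"))
#
#     asyncio.get_event_loop().run_until_complete(demo())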
| festinuz/cmcb | cmcb/league.py | Python | mit | 2,319 |
from twistedbot.plugins.base import PluginChatBase
class Help(PluginChatBase):
@property
def command_verb(self):
return "help"
@property
def help(self):
return "without argument shows aviable commands or help for specfic command"
def command(self, sender, command, args):
if not args:
msg = ["%s [COMMAND]" % self.command_verb]
msg.append(" ".join(self.world.eventregister.chat_commands.keys()))
self.send_chat_message(msg)
else:
cmd = args[0]
if cmd in self.world.eventregister.chat_commands:
self.send_chat_message(self.world.eventregister.chat_commands[cmd].help)
else:
self.send_chat_message("unknown comamnd %s" % cmd)
plugin = Help
| lukleh/TwistedBot | twistedbot/plugins/core/chat_help.py | Python | mit | 801 |
import wx
from wx import xrc
from .base import ModuleBase, bind
class AntennaModule(ModuleBase):
def __init__(self, name, pos, antenna_controller):
self._antenna_controller = antenna_controller
self._panel = None
self.title = 'Antenna ({})'.format(name)
self.grid_pos = pos
self.grid_span = (1, 1)
def load(self, res, parent):
self._panel = res.LoadPanel(parent, 'AntennaModule')
self.bind_handlers()
self._antenna_arm_status = xrc.XRCCTRL(self._panel, 'antenna_arm_status')
self._antenna_deployment_in_progress = xrc.XRCCTRL(self._panel, 'antenna_deployment_in_progress')
self._antenna1_status = xrc.XRCCTRL(self._panel, 'antenna1_status')
self._antenna2_status = xrc.XRCCTRL(self._panel, 'antenna2_status')
self._antenna3_status = xrc.XRCCTRL(self._panel, 'antenna3_status')
self._antenna4_status = xrc.XRCCTRL(self._panel, 'antenna4_status')
def root(self):
return self._panel
def update(self):
if self._antenna_controller.armed:
self._antenna_arm_status.SetLabel("Armed")
else:
self._antenna_arm_status.SetLabel("Disarmed")
if self._antenna_controller.deployment_in_progress:
self._antenna_deployment_in_progress.SetLabel("Deployment in progress")
else:
self._antenna_deployment_in_progress.SetLabel("Idle")
self._antenna1_status.SetLabel(str(self._antenna_controller.antenna_state[0]))
self._antenna2_status.SetLabel(str(self._antenna_controller.antenna_state[1]))
self._antenna3_status.SetLabel(str(self._antenna_controller.antenna_state[2]))
self._antenna4_status.SetLabel(str(self._antenna_controller.antenna_state[3]))
@bind('antenna1_deploy', wx.EVT_BUTTON, args=(0,))
@bind('antenna2_deploy', wx.EVT_BUTTON, args=(1,))
@bind('antenna3_deploy', wx.EVT_BUTTON, args=(2,))
@bind('antenna4_deploy', wx.EVT_BUTTON, args=(3,))
def _on_antenna_deploy(self, evt, antenna_id):
self._antenna_controller.antenna_state[antenna_id].deployed = True
self._antenna_controller.antenna_state[antenna_id].is_being_deployed = False
@bind('antenna1_deploy_cancel', wx.EVT_BUTTON, args=(0,))
@bind('antenna2_deploy_cancel', wx.EVT_BUTTON, args=(1,))
@bind('antenna3_deploy_cancel', wx.EVT_BUTTON, args=(2,))
@bind('antenna4_deploy_cancel', wx.EVT_BUTTON, args=(3,))
def _on_antenna_deploy_cancel(self, evt, antenna_id):
self._antenna_controller.antenna_state[antenna_id].is_being_deployed = False
class AntennasModule(ModuleBase):
def __init__(self, system):
self.title = 'Antennas'
self.grid_pos = (0, 1)
self.grid_span = (1, 2)
self._primary = AntennaModule('Primary', (0, 0), system.primary_antenna)
self._backup = AntennaModule('Backup', (0, 1), system.backup_antenna)
def load(self, res, parent):
self._panel = res.LoadPanel(parent, 'AntennasModule') # type: wx.Panel
sizer = self._panel.GetSizer() # type: wx.Sizer
self._primary.load(res, self._panel)
self._backup.load(res, self._panel)
sizer.Add(self._primary.root())
sizer.Add(self._backup.root())
self._primary.root().Layout()
self._backup.root().Layout()
def root(self):
return self._panel
def update(self):
self._primary.update()
self._backup.update()
| PW-Sat2/PWSat2OBC | integration_tests/emulator/antenna.py | Python | agpl-3.0 | 3,567 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt, cint, nowdate, add_days, comma_and
from frappe import msgprint, _
from frappe.model.document import Document
from erpnext.manufacturing.doctype.bom.bom import validate_bom_no
from erpnext.manufacturing.doctype.production_order.production_order import get_item_details
class ProductionPlanningTool(Document):
def __init__(self, arg1, arg2=None):
super(ProductionPlanningTool, self).__init__(arg1, arg2)
self.item_dict = {}
def get_so_details(self, so):
"""Pull other details from so"""
so = frappe.db.sql("""select transaction_date, customer, base_grand_total
from `tabSales Order` where name = %s""", so, as_dict = 1)
ret = {
'sales_order_date': so and so[0]['transaction_date'] or '',
'customer' : so[0]['customer'] or '',
'grand_total': so[0]['base_grand_total']
}
return ret
def get_item_details(self, item_code):
return get_item_details(item_code)
def clear_so_table(self):
self.set('sales_orders', [])
def clear_item_table(self):
self.set('items', [])
def validate_company(self):
if not self.company:
frappe.throw(_("Please enter Company"))
def get_open_sales_orders(self):
""" Pull sales orders which are pending to deliver based on criteria selected"""
so_filter = item_filter = ""
if self.from_date:
so_filter += " and so.transaction_date >= %(from_date)s"
if self.to_date:
so_filter += " and so.transaction_date <= %(to_date)s"
if self.customer:
so_filter += " and so.customer = %(customer)s"
if self.fg_item:
item_filter += " and item.name = %(item)s"
open_so = frappe.db.sql("""
select distinct so.name, so.transaction_date, so.customer, so.base_grand_total
from `tabSales Order` so, `tabSales Order Item` so_item
where so_item.parent = so.name
and so.docstatus = 1 and so.status != "Stopped"
and so.company = %(company)s
and ifnull(so_item.qty, 0) > ifnull(so_item.delivered_qty, 0) {0}
and (exists (select name from `tabItem` item where item.name=so_item.item_code
and (item.is_pro_applicable = 1 or item.is_sub_contracted_item = 1 {1}))
or exists (select name from `tabPacked Item` pi
where pi.parent = so.name and pi.parent_item = so_item.item_code
and exists (select name from `tabItem` item where item.name=pi.item_code
and (item.is_pro_applicable = 1 or item.is_sub_contracted_item = 1) {2})))
""".format(so_filter, item_filter, item_filter), {
"from_date": self.from_date,
"to_date": self.to_date,
"customer": self.customer,
"item": self.fg_item,
"company": self.company
}, as_dict=1)
self.add_so_in_table(open_so)
def add_so_in_table(self, open_so):
""" Add sales orders in the table"""
self.clear_so_table()
so_list = [d.sales_order for d in self.get('sales_orders')]
for r in open_so:
if cstr(r['name']) not in so_list:
pp_so = self.append('sales_orders', {})
pp_so.sales_order = r['name']
pp_so.sales_order_date = cstr(r['transaction_date'])
pp_so.customer = cstr(r['customer'])
pp_so.grand_total = flt(r['base_grand_total'])
def get_items_from_so(self):
""" Pull items from Sales Order, only proction item
and subcontracted item will be pulled from Packing item
and add items in the table
"""
items = self.get_items()
self.add_items(items)
def get_items(self):
so_list = filter(None, [d.sales_order for d in self.get('sales_orders')])
if not so_list:
msgprint(_("Please enter sales order in the above table"))
return []
item_condition = ""
if self.fg_item:
item_condition = ' and so_item.item_code = "' + self.fg_item + '"'
items = frappe.db.sql("""select distinct parent, item_code, warehouse,
(qty - ifnull(delivered_qty, 0)) as pending_qty
from `tabSales Order Item` so_item
where parent in (%s) and docstatus = 1 and ifnull(qty, 0) > ifnull(delivered_qty, 0)
and exists (select * from `tabItem` item where item.name=so_item.item_code
and (item.is_pro_applicable = 1
or item.is_sub_contracted_item = 1)) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
if self.fg_item:
item_condition = ' and pi.item_code = "' + self.fg_item + '"'
packed_items = frappe.db.sql("""select distinct pi.parent, pi.item_code, pi.warehouse as warehouse,
(((so_item.qty - ifnull(so_item.delivered_qty, 0)) * pi.qty) / so_item.qty)
as pending_qty
from `tabSales Order Item` so_item, `tabPacked Item` pi
where so_item.parent = pi.parent and so_item.docstatus = 1
and pi.parent_item = so_item.item_code
and so_item.parent in (%s) and ifnull(so_item.qty, 0) > ifnull(so_item.delivered_qty, 0)
and exists (select * from `tabItem` item where item.name=pi.item_code
and (item.is_pro_applicable = 1
or item.is_sub_contracted_item = 1)) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
return items + packed_items
def add_items(self, items):
self.clear_item_table()
for p in items:
item_details = get_item_details(p['item_code'])
pi = self.append('items', {})
pi.sales_order = p['parent']
pi.warehouse = p['warehouse']
pi.item_code = p['item_code']
pi.description = item_details and item_details.description or ''
pi.stock_uom = item_details and item_details.stock_uom or ''
pi.bom_no = item_details and item_details.bom_no or ''
pi.so_pending_qty = flt(p['pending_qty'])
pi.planned_qty = flt(p['pending_qty'])
def validate_data(self):
self.validate_company()
for d in self.get('items'):
validate_bom_no(d.item_code, d.bom_no)
if not flt(d.planned_qty):
frappe.throw(_("Please enter Planned Qty for Item {0} at row {1}").format(d.item_code, d.idx))
def raise_production_order(self):
"""It will raise production order (Draft) for all distinct FG items"""
self.validate_data()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "planned_qty")
items = self.get_distinct_items_and_boms()[1]
pro = self.create_production_order(items)
if pro:
pro = ["""<a href="#Form/Production Order/%s" target="_blank">%s</a>""" % \
(p, p) for p in pro]
msgprint(_("{0} created").format(comma_and(pro)))
else :
msgprint(_("No Production Orders created"))
def get_distinct_items_and_boms(self):
""" Club similar BOM and item for processing
bom_dict {
bom_no: ['sales_order', 'qty']
}
"""
item_dict, bom_dict = {}, {}
for d in self.get("items"):
if d.bom_no:
bom_dict.setdefault(d.bom_no, []).append([d.sales_order, flt(d.planned_qty)])
if frappe.db.get_value("Item", d.item_code, "is_pro_applicable"):
item_dict[(d.item_code, d.sales_order, d.warehouse)] = {
"production_item" : d.item_code,
"sales_order" : d.sales_order,
"qty" : flt(item_dict.get((d.item_code, d.sales_order, d.warehouse),
{}).get("qty")) + flt(d.planned_qty),
"bom_no" : d.bom_no,
"description" : d.description,
"stock_uom" : d.stock_uom,
"company" : self.company,
"wip_warehouse" : "",
"fg_warehouse" : d.warehouse,
"status" : "Draft",
}
return bom_dict, item_dict
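	# Illustrative shapes of the two structures returned above (editor's note,
	# values are made up):
	#   bom_dict  = {"BOM-0001": [["SO-0001", 10.0], ["SO-0002", 5.0]]}
	#   item_dict = {("ITEM-A", "SO-0001", "Stores - C"): {"production_item": "ITEM-A",
	#                 "qty": 10.0, "bom_no": "BOM-0001", ...}}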
def create_production_order(self, items):
"""Create production order. Called from Production Planning Tool"""
from erpnext.manufacturing.doctype.production_order.production_order import OverProductionError, get_default_warehouse
warehouse = get_default_warehouse()
pro_list = []
for key in items:
pro = frappe.new_doc("Production Order")
pro.update(items[key])
pro.set_production_order_operations()
if warehouse:
pro.wip_warehouse = warehouse.get('wip_warehouse')
if not pro.fg_warehouse:
pro.fg_warehouse = warehouse.get('fg_warehouse')
frappe.flags.mute_messages = True
try:
pro.insert()
pro_list.append(pro.name)
except OverProductionError:
pass
frappe.flags.mute_messages = False
return pro_list
def download_raw_materials(self):
""" Create csv data for required raw material to produce finished goods"""
self.validate_data()
bom_dict = self.get_distinct_items_and_boms()[0]
self.get_raw_materials(bom_dict)
return self.get_csv()
def get_raw_materials(self, bom_dict):
""" Get raw materials considering sub-assembly items
{
"item_code": [qty_required, description, stock_uom, min_order_qty]
}
"""
item_list = []
for bom, so_wise_qty in bom_dict.items():
bom_wise_item_details = {}
if self.use_multi_level_bom:
# get all raw materials with sub assembly childs
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
for d in frappe.db.sql("""select fb.item_code,
ifnull(sum(ifnull(fb.qty, 0)/ifnull(bom.quantity, 1)), 0) as qty,
fb.description, fb.stock_uom, it.min_order_qty
from `tabBOM Explosion Item` fb, `tabBOM` bom, `tabItem` it
where bom.name = fb.parent and it.name = fb.item_code
and is_pro_applicable = 0
and is_sub_contracted_item = 0
and is_stock_item = 1
and fb.docstatus<2 and bom.name=%s
group by item_code, stock_uom""", bom, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
else:
# Get all raw materials considering SA items as raw materials,
# so no childs of SA items
for d in frappe.db.sql("""select bom_item.item_code,
ifnull(sum(ifnull(bom_item.qty, 0)/ifnull(bom.quantity, 1)), 0) as qty,
bom_item.description, bom_item.stock_uom, item.min_order_qty
from `tabBOM Item` bom_item, `tabBOM` bom, tabItem item
where bom.name = bom_item.parent and bom.name = %s and bom_item.docstatus < 2
and bom_item.item_code = item.name
and item.is_stock_item = 1
group by item_code""", bom, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
for item, item_details in bom_wise_item_details.items():
for so_qty in so_wise_qty:
item_list.append([item, flt(item_details.qty) * so_qty[1], item_details.description,
item_details.stock_uom, item_details.min_order_qty, so_qty[0]])
self.make_items_dict(item_list)
def make_items_dict(self, item_list):
for i in item_list:
self.item_dict.setdefault(i[0], []).append([flt(i[1]), i[2], i[3], i[4], i[5]])
def get_csv(self):
item_list = [['Item Code', 'Description', 'Stock UOM', 'Required Qty', 'Warehouse',
'Quantity Requested for Purchase', 'Ordered Qty', 'Actual Qty']]
for item in self.item_dict:
total_qty = sum([flt(d[0]) for d in self.item_dict[item]])
item_list.append([item, self.item_dict[item][0][1], self.item_dict[item][0][2], total_qty])
item_qty = frappe.db.sql("""select warehouse, indented_qty, ordered_qty, actual_qty
from `tabBin` where item_code = %s""", item, as_dict=1)
i_qty, o_qty, a_qty = 0, 0, 0
for w in item_qty:
i_qty, o_qty, a_qty = i_qty + flt(w.indented_qty), o_qty + flt(w.ordered_qty), a_qty + flt(w.actual_qty)
item_list.append(['', '', '', '', w.warehouse, flt(w.indented_qty),
flt(w.ordered_qty), flt(w.actual_qty)])
if item_qty:
item_list.append(['', '', '', '', 'Total', i_qty, o_qty, a_qty])
return item_list
def raise_purchase_request(self):
"""
Raise Material Request if projected qty is less than qty required
Requested qty should be shortage qty considering minimum order qty
"""
self.validate_data()
if not self.purchase_request_for_warehouse:
frappe.throw(_("Please enter Warehouse for which Material Request will be raised"))
bom_dict = self.get_distinct_items_and_boms()[0]
self.get_raw_materials(bom_dict)
if self.item_dict:
self.insert_purchase_request()
def get_requested_items(self):
item_projected_qty = self.get_projected_qty()
items_to_be_requested = frappe._dict()
for item, so_item_qty in self.item_dict.items():
requested_qty = 0
total_qty = sum([flt(d[0]) for d in so_item_qty])
if total_qty > item_projected_qty.get(item, 0):
# shortage
requested_qty = total_qty - flt(item_projected_qty.get(item))
# consider minimum order qty
if requested_qty < flt(so_item_qty[0][3]):
requested_qty = flt(so_item_qty[0][3])
# distribute requested qty SO wise
for item_details in so_item_qty:
if requested_qty:
sales_order = item_details[4] or "No Sales Order"
if requested_qty <= item_details[0]:
adjusted_qty = requested_qty
else:
adjusted_qty = item_details[0]
items_to_be_requested.setdefault(item, {}).setdefault(sales_order, 0)
items_to_be_requested[item][sales_order] += adjusted_qty
requested_qty -= adjusted_qty
else:
break
# requested qty >= total so qty, due to minimum order qty
if requested_qty:
items_to_be_requested.setdefault(item, {}).setdefault("No Sales Order", 0)
items_to_be_requested[item]["No Sales Order"] += requested_qty
return items_to_be_requested
def get_projected_qty(self):
items = self.item_dict.keys()
item_projected_qty = frappe.db.sql("""select item_code, sum(projected_qty)
from `tabBin` where item_code in (%s) and warehouse=%s group by item_code""" %
(", ".join(["%s"]*len(items)), '%s'), tuple(items + [self.purchase_request_for_warehouse]))
return dict(item_projected_qty)
def insert_purchase_request(self):
items_to_be_requested = self.get_requested_items()
purchase_request_list = []
if items_to_be_requested:
for item in items_to_be_requested:
item_wrapper = frappe.get_doc("Item", item)
pr_doc = frappe.new_doc("Material Request")
pr_doc.update({
"transaction_date": nowdate(),
"status": "Draft",
"company": self.company,
"requested_by": frappe.session.user,
"material_request_type": "Purchase"
})
for sales_order, requested_qty in items_to_be_requested[item].items():
pr_doc.append("items", {
"doctype": "Material Request Item",
"__islocal": 1,
"item_code": item,
"item_name": item_wrapper.item_name,
"description": item_wrapper.description,
"uom": item_wrapper.stock_uom,
"item_group": item_wrapper.item_group,
"brand": item_wrapper.brand,
"qty": requested_qty,
"schedule_date": add_days(nowdate(), cint(item_wrapper.lead_time_days)),
"warehouse": self.purchase_request_for_warehouse,
"sales_order_no": sales_order if sales_order!="No Sales Order" else None
})
pr_doc.flags.ignore_permissions = 1
pr_doc.submit()
purchase_request_list.append(pr_doc.name)
if purchase_request_list:
pur_req = ["""<a href="#Form/Material Request/%s" target="_blank">%s</a>""" % \
(p, p) for p in purchase_request_list]
msgprint(_("Material Requests {0} created").format(comma_and(pur_req)))
else:
msgprint(_("Nothing to request"))
| mbauskar/alec_frappe5_erpnext | erpnext/manufacturing/doctype/production_planning_tool/production_planning_tool.py | Python | agpl-3.0 | 14,964 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""events.py: event management"""
import pygame
class EventError(Exception):
pass
class EventManager(object):
"""Manages event subscriptions."""
_event_code_counter = pygame.NUMEVENTS
def __init__(self):
self._subscriptions = {}
self._user_subscriptions = {}
def subscribe(self, event_type, callback):
"""Subscribe to an event."""
if event_type < pygame.NUMEVENTS:
subscription_dict = self._subscriptions
else:
subscription_dict = self._user_subscriptions
if not event_type in subscription_dict:
subscription_dict[event_type] = set()
subscription_dict[event_type].add(callback)
def unsubscribe(self, event_type, callback):
"""Remove an event subscription."""
if event_type < pygame.NUMEVENTS:
subscription_dict = self._subscriptions
else:
subscription_dict = self._user_subscriptions
subscription_dict[event_type].remove(callback)
def update(self):
"""Query events and call subscribers."""
for event in pygame.event.get():
if event.type == pygame.USEREVENT:
self._handle_user_event(event)
try:
for action in self._subscriptions[event.type]:
action(event)
except KeyError:
pass
@staticmethod
def post(code, **kwargs):
if 'code' in kwargs:
raise EventError("user events may not define a code attribute")
kwargs['code'] = code
pygame.event.post(pygame.event.Event(pygame.USEREVENT, kwargs))
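    # Hedged usage sketch (editor's addition): assumes a pygame display has been
    # initialised elsewhere so the event queue is active; names are illustrative.
    #
    #     manager = EventManager()
    #     manager.subscribe(pygame.KEYDOWN, lambda e: print(e.key))
    #
    #     PING = EventManager.new_event_code()
    #     manager.subscribe(PING, lambda e: print("ping", e.payload))
    #     EventManager.post(PING, payload=42)
    #
    #     while True:
    #         manager.update()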
@staticmethod
def new_event_code():
EventManager._event_code_counter += 1
return EventManager._event_code_counter
def _handle_user_event(self, event):
try:
for action in self._user_subscriptions[event.code]:
action(event)
except KeyError:
            pass
 | nyrocron/pathdemo | game/events.py | Python | mpl-2.0 | 2,172 |
from app import db
import datetime
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(100), unique=True)
email = db.Column(db.String(120), unique=True)
password_hash = db.Column(db.String(128))
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration=6000):
s = Serializer('SECRET_KEY', expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer('SECRET_KEY')
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data['id'])
return user
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return "Username: %r\nEmail: %r" % (self.username, self.email)
def to_dict(self):
return {
"id": self.id,
"username": self.username,
"email": self.email
}
class Task(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=db.backref('tasks', lazy='dynamic'))
title = db.Column(db.String(100))
content = db.Column(db.String(500))
done = db.Column(db.Boolean(), default=False)
    timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)  # pass the callable so each row gets its own timestamp
def __init__(self, title, content, user, timestamp=None):
self.title = title
self.content = content
self.user = user
if timestamp is None:
timestamp = datetime.datetime.utcnow()
self.timestamp = timestamp
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"content": self.content,
"done": self.done,
"timestamp": self.timestamp,
"user_id": self.user_id
}
class Image(db.Model):
id = db.Column(db.Integer, primary_key=True)
task_id = db.Column(db.Integer, db.ForeignKey('task.id'), default=0)
caption = db.Column(db.String(500))
url = db.Column(db.String(300))
def __init__(self, caption, task_id):
self.caption = caption
self.task_id = task_id
def to_dict(self):
return {
"id": self.id,
"task_id": self.task_id,
"caption": self.caption,
"url": self.url
}
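# Hedged usage sketch (editor's addition): requires an application context and an
# initialised database session; the credentials shown are illustrative.
#
#     user = User("alice", "alice@example.com")
#     user.hash_password("s3cret")
#     db.session.add(user)
#     db.session.commit()
#
#     token = user.generate_auth_token()
#     assert user.verify_password("s3cret")
#     assert User.verify_auth_token(token).id == user.id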
| ibrahimGhailani/TodoFlask | app/models.py | Python | mit | 2,952 |
#!/usr/bin/env python
from setuptools import setup
setup(
name='shamir_bip39_2039',
version='0.0.1',
test_suite='nose2.collector.collector',
packages=['shamir_bip39_2039'],
package_data={'shamir_bip39_2039': ['english.txt']})
| klingebj/shamir_bip39_2039 | setup.py | Python | apache-2.0 | 248 |
from scipy import misc
from skimage import color
import numpy as np
import os as os
if __name__ == "__main__":
path = "USPSdata/Numerals/"
count = 0
for i in range(10):
new_path = path
new_path = new_path + str(i) + "/"
for name in os.listdir(new_path):
final_path = new_path
final_path = final_path + name
# print count
#print final_path
if ".list" not in name:
if (name != "Thumbs.db"):
# if count < 5:
img = misc.imread(final_path)
gray_img = color.rgb2gray(img)
resized_img = misc.imresize(gray_img,(28,28))
# print "resized img:"
# print len(resized_img)
# print np.shape(resized_img)
flat_img = np.ravel(resized_img)
                    #print "flattened img:"
#print len(flat_img)
#print np.shape(flat_img)
count = count + 1
print "count:"
print count | JruvikaBhimani/CSE-547---Introduction-to-Machine-Learning | Project 3 - Neural Network and Tensor Flow/usps.py | Python | apache-2.0 | 1,100 |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
#print sys.path
import math
import bld_geometry
from stat_corr import *
def evaluate_building(population_density,
area='NULL',
perimeter='NULL',
height='NULL',
length='NULL',
floors=5,
window_ratio='NULL',
year_of_construction=1960,
accumulated_heating_hours=80000,
common_walls='NULL'):
    # Most buildings have a rectangular floor projection. As the golden ratio is one of the most
    # common proportions in construction aesthetics, it is used here to estimate width, length and
    # - if necessary - the perimeter.
dimensions=bld_geometry.dimensions(area,perimeter,length)
    if height=='NULL':
        if floors==0: floors=5
        building_height=floors*3.3
    else:
        # A height was supplied: keep it and derive the number of floors from it.
        building_height=height
        floors=building_height/3.3
if window_ratio=='NULL':
window_ratio=window_wall_ratio_AVG_by_building_age_lookup.get(year_of_construction)
if common_walls=='NULL':
common_walls=common_walls_by_population_density_corr.get(population_density)
base_area=dimensions["AREA"]
base_uvalue_pres=present_base_uvalue_AVG_by_building_age_lookup.get(year_of_construction)
base_uvalue_contemp=contemporary_base_uvalue_by_building_age_lookup.get(year_of_construction)
base_loss_pres=base_area*base_uvalue_pres*0.6*accumulated_heating_hours/1000
base_loss_contemp=base_area*base_uvalue_contemp*0.6*accumulated_heating_hours/1000
wall_area=(dimensions["PERIMETER"]-common_walls*dimensions["WIDTH"])*building_height*(1-window_ratio)
wall_uvalue_pres=present_wall_uvalue_AVG_by_building_age_lookup.get(year_of_construction)
wall_uvalue_contemp=contemporary_wall_uvalue_by_building_age_lookup.get(year_of_construction)
wall_loss_pres=wall_area*wall_uvalue_pres*accumulated_heating_hours/1000
wall_loss_contemp=wall_area*wall_uvalue_contemp*accumulated_heating_hours/1000
window_area=(dimensions["PERIMETER"]-common_walls*dimensions["WIDTH"])*building_height*window_ratio
window_uvalue_pres=present_window_uvalue_AVG_by_building_age_lookup.get(year_of_construction)
window_uvalue_contemp=contemporary_window_uvalue_by_building_age_lookup.get(year_of_construction)
window_loss_pres=window_area*window_uvalue_pres*accumulated_heating_hours/1000
window_loss_contemp=window_area*window_uvalue_contemp*accumulated_heating_hours/1000
roof_area=dimensions["AREA"]
roof_uvalue_pres=present_roof_uvalue_AVG_by_building_age_lookup.get(year_of_construction)
roof_uvalue_contemp=contemporary_roof_uvalue_by_building_age_lookup.get(year_of_construction)
roof_loss_pres=roof_area*roof_uvalue_pres*accumulated_heating_hours/1000
roof_loss_contemp=roof_area*roof_uvalue_contemp*accumulated_heating_hours/1000
total_loss_pres=base_loss_pres+wall_loss_pres+window_loss_pres+roof_loss_pres
total_loss_contemp=base_loss_contemp+wall_loss_contemp+window_loss_contemp+roof_loss_contemp
volume=dimensions["AREA"]*building_height
envelope1=2*dimensions["AREA"]+dimensions["PERIMETER"]*building_height
envelope2= wall_area+base_area+window_area+roof_area
return {"WDT_AVG":dimensions["WIDTH"],
"LEN_AVG":dimensions["LENGTH"],
"HGT_AVG":building_height,
"AR_BASE":base_area,
"AR_WALL":wall_area,
"AR_WIND":window_area,
"AR_ROOF":roof_area,
"AR_ENV1":envelope1,
"AR_ENV2":envelope2,
"WAL_COM":common_walls,
"RT_WINWALL":window_ratio,
"RT_AV1":envelope1/volume,
"RT_AV2":envelope2/volume,
"UP_BASE":base_uvalue_pres,
"UP_WALL":wall_uvalue_pres,
"UP_WIND":window_uvalue_pres,
"UP_ROOF":roof_uvalue_pres,
"UC_BASE":base_uvalue_contemp,
"UC_WALL":wall_uvalue_contemp,
"UC_WIND":window_uvalue_contemp,
"UC_ROOF":roof_uvalue_contemp,
"HLP_BASE":base_loss_pres,
"HLP_WALL":wall_loss_pres,
"HLP_WIND":window_loss_pres,
"HLP_ROOF":roof_loss_pres,
"HLP_TOT":total_loss_contemp,
"HLC_BASE":base_loss_contemp,
"HLC_WALL":wall_loss_contemp,
"HLC_WIND":window_loss_contemp,
"HLC_ROOF":roof_loss_contemp,
"HLC_TOT":total_loss_pres
}
#print evaluate_building(15000,10000,year_of_construction=1970)
#round(12.34448,2)
| UdK-VPT/Open_eQuarter | mole/stat_util/energy_demand.py | Python | gpl-2.0 | 4,636 |
#!/usr/bin/env python
# This script does a deep dive into the data collected previously, and performs
# an analysis of the errors.
from __future__ import print_function
import sys
import subprocess
import json
import argparse
import hashlib
import requests
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from data_models import Base, Job, Error
parser = argparse.ArgumentParser()
parser.add_argument("config", help="The JSON config file to load.")
parser.add_argument("-v", "--verbose", help="Run verbosely",
action="store_true")
args = parser.parse_args()
config = {}
with open(args.config, 'r') as f:
config = json.load(f)
if not config.has_key('repos') or not config.has_key('database-url'):
print("Error parsing config file!")
print(config)
sys.exit(1)
engine = create_engine(config['database-url'])
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
BASE_API_URL = 'https://circleci.com/api/v1'
HEADERS = {'Accept' : 'application/json'}
def get_cmd_output(command):
output = subprocess.Popen(command.split(),
stdout=subprocess.PIPE).communicate()[0].decode('ascii')
return output.strip()
def log(line):
if args.verbose:
print(line)
def increment_point(step, hash_id):
running_joke = session.query(Error).filter(Error.repo_hash == hash_id)
running_joke = running_joke.filter(Error.step == step)
if running_joke.count() < 1:
e = Error(repo_hash=hash_id, step=step, count=1)
session.add(e)
else:
e = running_joke.first()
e.count = e.count + 1
session.add(e)
session.commit()
def process_steps(steps, hash_id):
for step in steps:
step_name = step['name']
for action in step['actions']:
if action['failed']:
increment_point(step_name, hash_id)
for repo in config['repos']:
if not repo.has_key('path') or not repo.has_key('highlight-branches'):
print("A repo entry is missing needed keys!")
print(repo)
sys.exit(1)
log("> Processing '{0}'".format(repo['path']))
if repo.has_key('hash'):
hash_id = repo['hash']
else:
hash_id = hashlib.md5(repo['path']).hexdigest()
    # First, clear out any previously recorded error rows (the table itself is kept)
delme = session.query(Error).delete()
q = session.query(Job).filter(Job.repo_hash == hash_id)
q = q.filter(Job.outcome == 'failed')
results = q.all()
circle_user = get_cmd_output('git -C {0} config git-circle.user'.format(
repo['path']))
circle_project = get_cmd_output(
'git -C {0} config git-circle.project'.format(repo['path']))
circle_token = get_cmd_output('git -C {0} config git-circle.token'.format(
repo['path']))
log(">--> user: {0}, project: {1}".format(circle_user, circle_project))
log("=================")
i = 0
for result in results:
i = i + 1
log("\tBuild: {0}\t\t{1}/{2}".format(result.build_number, i,
len(results)))
api_url = "{0}/project/{1}/{2}/{3}?circle-token={4}".format(
BASE_API_URL, circle_user, circle_project, result.build_number,
circle_token)
r = requests.get(api_url, headers=HEADERS)
if r.status_code != 200:
log("\t>>> ERROR RETRIEVING THAT BUILD!")
else:
data = r.json()
process_steps(data['steps'], hash_id)
| criswell/circle-stats | error-stats.py | Python | gpl-2.0 | 3,420 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/procamora/Documents/Gestor-Series/app/utils/../../app/views/ui/descarga_automatica.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.setEnabled(True)
Dialog.resize(580, 405)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/Iconos/Icons/fatcow/Principal.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
self.gridLayout = QtWidgets.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.textEditVistas = QtWidgets.QTextEdit(Dialog)
self.textEditVistas.setEnabled(False)
self.textEditVistas.setTextInteractionFlags(QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.textEditVistas.setObjectName("textEditVistas")
self.horizontalLayout.addWidget(self.textEditVistas)
self.textEditDescargadas = QtWidgets.QTextEdit(Dialog)
self.textEditDescargadas.setEnabled(False)
self.textEditDescargadas.setTextInteractionFlags(QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.textEditDescargadas.setObjectName("textEditDescargadas")
self.horizontalLayout.addWidget(self.textEditDescargadas)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setObjectName("widget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.progressBar = QtWidgets.QProgressBar(self.widget)
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.horizontalLayout_2.addWidget(self.progressBar)
self.pushButtonCerrar = QtWidgets.QPushButton(self.widget)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/Iconos/Icons/fatcow/cancel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButtonCerrar.setIcon(icon1)
self.pushButtonCerrar.setObjectName("pushButtonCerrar")
self.horizontalLayout_2.addWidget(self.pushButtonCerrar)
self.gridLayout.addWidget(self.widget, 1, 0, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.pushButtonCerrar.setText(_translate("Dialog", "Cerrar"))
import fatcow_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| procamora/Gestor-Series | app/views/ui/descarga_automatica_ui.py | Python | gpl-3.0 | 3,116 |
from ngo.urls import url
from . import views
app_name = 'app2'
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^hello/(?P<name>\w+)/$', views.hello, name='hello'),
]
| naritotakizawa/ngo | tests/project2/app2/urls.py | Python | mit | 196 |
from collections import OrderedDict, namedtuple
from conans.errors import NotFoundException, ConanException
from conans.search.search import (filter_outdated, search_packages, search_recipes,
filter_by_revision)
class Search(object):
def __init__(self, cache, remote_manager, remotes):
self._cache = cache
self._remote_manager = remote_manager
self._remotes = remotes
def search_recipes(self, pattern, remote_name=None, case_sensitive=False):
ignorecase = not case_sensitive
references = OrderedDict()
if not remote_name:
references[None] = search_recipes(self._cache, pattern, ignorecase)
return references
if remote_name == 'all':
# We have to check if there is a remote called "all"
# Deprecate: 2.0 can remove this check
if 'all' not in self._remotes:
for remote in self._remotes.values():
refs = self._remote_manager.search_recipes(remote, pattern, ignorecase)
if refs:
references[remote.name] = sorted(refs)
return references
# single remote
remote = self._remotes[remote_name]
refs = self._remote_manager.search_recipes(remote, pattern, ignorecase)
references[remote.name] = sorted(refs)
return references
remote_ref = namedtuple('remote_ref', 'ordered_packages recipe_hash')
    def search_packages(self, ref=None, remote_name=None, query=None, outdated=False):
        """ Return the stored information about all binary packages of a recipe
        reference, or only about the packages matching a query
        Arguments:
            ref = recipe reference whose binary packages are listed
            remote_name = search on another origin instead of the local cache
            query = string filtering binary packages by their
                    properties, e.g. "arch=x86 AND os=Windows"
            outdated = only return packages built from an outdated recipe
        (A usage sketch is appended at the end of this module.)
        """
if not remote_name:
return self._search_packages_in_local(ref, query, outdated)
if ref.revision and not self._cache.config.revisions_enabled:
raise ConanException("Revisions not enabled in the client, specify a "
"reference without revision")
if remote_name == 'all':
return self._search_packages_in_all(ref, query, outdated)
return self._search_packages_in(remote_name, ref, query, outdated)
def _search_packages_in_local(self, ref=None, query=None, outdated=False):
package_layout = self._cache.package_layout(ref, short_paths=None)
packages_props = search_packages(package_layout, query)
ordered_packages = OrderedDict(sorted(packages_props.items()))
try:
recipe_hash = package_layout.recipe_manifest().summary_hash
except IOError: # It could not exist in local
recipe_hash = None
if outdated:
ordered_packages = filter_outdated(ordered_packages, recipe_hash)
elif self._cache.config.revisions_enabled:
# With revisions, by default filter the packages not belonging to the recipe
# unless outdated is specified.
metadata = package_layout.load_metadata()
ordered_packages = filter_by_revision(metadata, ordered_packages)
references = OrderedDict()
references[None] = self.remote_ref(ordered_packages, recipe_hash)
return references
def _search_packages_in_all(self, ref=None, query=None, outdated=False):
references = OrderedDict()
# We have to check if there is a remote called "all"
# Deprecate: 2.0 can remove this check
if 'all' not in self._remotes:
for remote in self._remotes.values():
try:
packages_props = self._remote_manager.search_packages(remote, ref, query)
if packages_props:
ordered_packages = OrderedDict(sorted(packages_props.items()))
manifest, _ = self._remote_manager.get_recipe_manifest(ref, remote)
recipe_hash = manifest.summary_hash
if outdated and recipe_hash:
ordered_packages = filter_outdated(ordered_packages, recipe_hash)
references[remote.name] = self.remote_ref(ordered_packages, recipe_hash)
except NotFoundException:
continue
return references
return self._search_packages_in('all', ref, query, outdated)
def _search_packages_in(self, remote_name, ref=None, query=None, outdated=False):
remote = self._remotes[remote_name]
packages_props = self._remote_manager.search_packages(remote, ref, query)
ordered_packages = OrderedDict(sorted(packages_props.items()))
manifest, ref = self._remote_manager.get_recipe_manifest(ref, remote)
recipe_hash = manifest.summary_hash
if outdated and recipe_hash:
ordered_packages = filter_outdated(ordered_packages, recipe_hash)
references = OrderedDict()
references[remote.name] = self.remote_ref(ordered_packages, recipe_hash)
return references
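# --- Hedged usage sketch (editor addition, not part of the Conan code base). ---
# Illustrates the query syntax documented in Search.search_packages(); the caller
# is assumed to supply already-constructed Conan client objects and a recipe
# reference, so nothing here runs on import.
def _example_search_usage(cache, remote_manager, remotes, ref):
    search = Search(cache, remote_manager, remotes)
    # All recipes matching a pattern, searched in the local cache only.
    recipes = search.search_recipes("zlib/*", remote_name=None, case_sensitive=False)
    # Binary packages of `ref`, filtered by their settings.
    packages = search.search_packages(ref, query="arch=x86 AND os=Windows")
    return recipes, packages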
| conan-io/conan | conans/client/cmd/search.py | Python | mit | 5,320 |
from django.core.urlresolvers import reverse
from livinglots_friendlyowners.views import (BaseAddFriendlyOwnerView,
BaseAddFriendlyOwnerSuccessView)
from .forms import FriendlyOwnerForm
from .models import FriendlyOwner
class AddFriendlyOwnerView(BaseAddFriendlyOwnerView):
form_class = FriendlyOwnerForm
model = FriendlyOwner
def get_success_url(self):
return reverse('friendlyowners:add_success')
class AddFriendlyOwnerSuccessView(BaseAddFriendlyOwnerSuccessView):
pass
| 596acres/livinglots-la | livinglotsla/friendlyowners/views.py | Python | gpl-3.0 | 548 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
# +++your code here+++
for i in range(len(nums)-1, 0, -1):
if nums[i] == nums[i-1]:
del nums[i]
if i == 0:
break
return nums
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
# +++your code here+++
return sorted(list1 + list2)
# Note: the sorted() one-liner above is concise, but it runs in O(n log n),
# not linear time.  A "cute" alternative repeatedly takes the smaller front
# element with list.pop(0), but unfortunately pop(0) is not constant time
# with the standard python list implementation, so that is not strictly
# linear either.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.  A straightforward
# two-pointer merge, sketched below, is also linear.
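# A minimal sketch of such a linear-time two-pointer merge (an editor addition,
# not part of the original exercise): walk both sorted lists once, always
# appending the smaller front element.
def linear_merge_two_pointer(list1, list2):
    result = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list1[i] <= list2[j]:
            result.append(list1[i])
            i += 1
        else:
            result.append(list2[j])
            j += 1
    # At most one of these two tails is non-empty.
    result.extend(list1[i:])
    result.extend(list2[j:])
    return result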
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'remove_adjacent'
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print
print 'linear_merge'
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| pshanks01/learn_python | google-python-exercises/basic/list2.py | Python | gpl-3.0 | 2,300 |
#!/usr/bin/env python3
"""Home Assistant setup script."""
from datetime import datetime as dt
from setuptools import find_packages, setup
import homeassistant.const as hass_const
PROJECT_NAME = "Home Assistant"
PROJECT_PACKAGE_NAME = "homeassistant"
PROJECT_LICENSE = "Apache License 2.0"
PROJECT_AUTHOR = "The Home Assistant Authors"
PROJECT_COPYRIGHT = f" 2013-{dt.now().year}, {PROJECT_AUTHOR}"
PROJECT_URL = "https://www.home-assistant.io/"
PROJECT_EMAIL = "hello@home-assistant.io"
PROJECT_GITHUB_USERNAME = "home-assistant"
PROJECT_GITHUB_REPOSITORY = "core"
PYPI_URL = f"https://pypi.python.org/pypi/{PROJECT_PACKAGE_NAME}"
GITHUB_PATH = f"{PROJECT_GITHUB_USERNAME}/{PROJECT_GITHUB_REPOSITORY}"
GITHUB_URL = f"https://github.com/{GITHUB_PATH}"
DOWNLOAD_URL = f"{GITHUB_URL}/archive/{hass_const.__version__}.zip"
PROJECT_URLS = {
"Bug Reports": f"{GITHUB_URL}/issues",
"Dev Docs": "https://developers.home-assistant.io/",
"Discord": "https://discordapp.com/invite/c5DvZ4e",
"Forum": "https://community.home-assistant.io/",
}
PACKAGES = find_packages(exclude=["tests", "tests.*"])
REQUIRES = [
"aiohttp==3.7.4.post0",
"astral==2.2",
"async_timeout==3.0.1",
"attrs==20.3.0",
"awesomeversion==21.2.3",
"bcrypt==3.1.7",
"certifi>=2020.12.5",
"ciso8601==2.1.3",
"httpx==0.17.1",
"jinja2>=2.11.3",
"PyJWT==1.7.1",
# PyJWT has loose dependency. We want the latest one.
"cryptography==3.3.2",
"pip>=8.0.3,<20.3",
"python-slugify==4.0.1",
"pytz>=2021.1",
"pyyaml==5.4.1",
"requests==2.25.1",
"ruamel.yaml==0.15.100",
"voluptuous==0.12.1",
"voluptuous-serialize==2.4.0",
"yarl==1.6.3",
]
MIN_PY_VERSION = ".".join(map(str, hass_const.REQUIRED_PYTHON_VER))
setup(
name=PROJECT_PACKAGE_NAME,
version=hass_const.__version__,
url=PROJECT_URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
author=PROJECT_AUTHOR,
author_email=PROJECT_EMAIL,
packages=PACKAGES,
include_package_data=True,
zip_safe=False,
install_requires=REQUIRES,
python_requires=f">={MIN_PY_VERSION}",
test_suite="tests",
entry_points={"console_scripts": ["hass = homeassistant.__main__:main"]},
)
| w1ll1am23/home-assistant | setup.py | Python | apache-2.0 | 2,234 |
# requirements
from lib.request_handlers import *
from collections import Counter
from termcolor import *
import SocketServer
import logging
import json
import time
# logging configuration
LOG_DIRECTORY = "log/"
LOG_FILE = LOG_DIRECTORY + str(time.strftime("%Y%m%d-%H%M-")) + "manager_server.log"
logging.basicConfig(filename=LOG_FILE,level=logging.DEBUG)
# available commands
# this is a dictionary in which the keys are the available commands,
# while the values are lists of available parameters for that command
COMMANDS = {
"NewRemoteSIB" : ["owner"],
"NewVirtualMultiSIB": ["sib_list"],
"Discovery" : []
}
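# Hedged illustration (editor addition): the handler below expects each TCP
# request to be a JSON object carrying a "command" key plus exactly the
# parameters listed above.  This helper is a minimal client-side sketch; the
# parameter values shown in the docstring are made up.
def example_request(command, host="127.0.0.1", port=17714, **params):
    """Send a single command to the manager, e.g.:
    example_request("NewRemoteSIB", owner="some_owner")
    example_request("NewVirtualMultiSIB", sib_list=["sib0", "sib1"])
    example_request("Discovery")
    """
    import socket
    message = dict(params)
    message["command"] = command
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock.sendall(json.dumps(message))
    reply = json.loads(sock.recv(1024))
    sock.close()
    return reply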
# classes
class ManagerServer(SocketServer.ThreadingTCPServer):
print colored("Manager> ", "blue", attrs=["bold"]) + "sib manager started!"
allow_reuse_address = True
class ManagerServerHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
# Output the received message
print colored("Manager> ", "blue", attrs=["bold"]) + "incoming connection, received the following message:"
self.server.logger.info(" Incoming connection, received the following message:")
data = json.loads(self.request.recv(1024).strip())
print data
self.server.logger.info(" " + str(data))
# Decode the request
if data.has_key("command"):
if data["command"] in COMMANDS.keys():
# debug print
print colored("Manager> ", "blue", attrs=["bold"]) + "received the command " + colored(data["command"], "cyan", attrs=['bold'])
self.server.logger.info(" Received the command " + str(data))
# check the number of arguments
if len(data.keys())-1 == len(COMMANDS[data["command"]]):
# check the arguments
cd = data.keys()
cd.remove("command")
if Counter(cd) == Counter(COMMANDS[data["command"]]):
# decode
print colored("Manager> ", "blue", attrs=["bold"]) + "calling the proper method"
if data["command"] == "NewRemoteSIB":
#TODO: passare al metodo NewRemoteSIB
#l'owner della sib e fargli inserire
#nell'ancillary sib anche questo dato
virtual_sib_id = globals()[data["command"]]()
# send a reply
self.request.sendall(json.dumps({'return':'ok', 'virtual_sib_id':virtual_sib_id}))
elif data["command"] == "Discovery":
virtual_sib_list = globals()[data["command"]]()
# send a reply
self.request.sendall(json.dumps({'return':'ok', 'virtual_sib_list':virtual_sib_list}))
elif data["command"] == "NewVirtualMultiSIB":
sib_list = data['sib_list']
virtual_multi_sib_id = globals()[data["command"]](sib_list)
# send a reply
print "ritornato dalla funzione"
self.request.sendall(json.dumps({'return':'ok', 'virtual_multi_sib_id':virtual_multi_sib_id}))
else:
# debug print
print colored("Manager> ", "red", attrs=["bold"]) + "wrong arguments"
self.server.logger.info(" Wrong arguments, skipping message...")
# send a reply
self.request.sendall(json.dumps({'return':'fail', 'cause':'wrong arguments'}))
else:
# debug print
print colored("Manager> ", "red", attrs=["bold"]) + "wrong number of arguments"
self.server.logger.info(" Wrong number of arguments, skipping message...")
# send a reply
self.request.sendall(json.dumps({'return':'fail', 'cause':'wrong number of arguments'}))
else:
# debug print
print colored("Manager> ", "red", attrs=["bold"]) + "invalid command! Skipping message..."
self.server.logger.info(" Invalid command, skipping message...")
# send a reply
self.request.sendall(json.dumps({'return':'fail', 'cause':'invalid command'}))
# debug print
print colored("Manager> ", "blue", attrs=["bold"]) + "received the command " + colored(data["command"], "cyan", attrs=['bold'])
self.server.logger.info(" Received the command " + str(data))
# send a reply
self.request.sendall(json.dumps({'return':'ok'}))
else:
# debug print
print colored("Manager> ", "red", attrs=["bold"]) + "no command supplied, skipping message"
self.server.logger.info(" No command supplied, skipping message")
# send a reply
self.request.sendall(json.dumps({'return':'fail', 'cause':'no command supplied'}))
except ZeroDivisionError:# Exception, e:
print colored("Manager> ", "red", attrs=["bold"]) + "Exception while receiving message: "# + str(e)
self.server.logger.info(" Exception while receiving message: ")# + str(e))
self.request.sendall(json.dumps({'return':'fail'}))
except Exception, e:
print colored("Manager> ", "red", attrs=["bold"]) + "Exception while receiving message: " + str(e)
self.server.logger.info(" Exception while receiving message: " + str(e))
##############################################################
#
# main program
#
##############################################################
if __name__=='__main__':
try:
# Create a logger object
logger = logging.getLogger("manager_server")
# Start the manager server
server = ManagerServer(('127.0.0.1', 17714), ManagerServerHandler)
server.logger = logger
server.logger.info(" Starting server on IP 127.0.0.1, Port 17714")
server.serve_forever()
except KeyboardInterrupt:
print colored("Manager> ", "blue", attrs=["bold"]) + "Goodbye!"
| desmovalvo/virtualsib | manager/manager_server.py | Python | lgpl-3.0 | 6,754 |
#!/usr/bin/env python
# This file defines features used by PiGaugeOBD, such as turbo timer and rev matcher.
import time
import sys
# Feature class is the base class which is used to run feature logic in a loop.
# bEnabled sets whether the feature should be enabled or not
class Feature:
def __init__(self, bEnabled):
self.enabled = bEnabled
# Update the feature, passing in the sensor list and info textbox for the feature to read and write to
def update(self, sensorList, tInfoBox):
pass
# The manual turbo timer feature lets you know when it is safe to switch off your engine.
# The base cooldown is 90 seconds, with roughly 80 seconds added for every hour of engine
# running time, up to a maximum of 300 seconds (see calcCooldown below; a worked example
# follows the class).
class TurboTimer(Feature):
def __init__(self, bEnabled):
Feature.__init__(self, bEnabled)
# The time the engine started idling (in seconds)
self.timeStartedIdling = sys.maxint
self.currentlyIdling = False
self.idleRpm = 1000
# All in seconds
self.minCooldown = 90
self.maxCooldown = 300
self.cooldownIncrement = 80 # How much to increment the cooldown by per run time multiple
# Engine running time multiple to increase cooldown (in minutes)
self.runTimeMultiple = 60
# Calculates the optimal cooldown time in seconds
def calcCooldown(self, sensorList):
# Get engine run time
runTime = sensorList["engine_time"].value
hoursRan = runTime / self.runTimeMultiple
percentCurrentHour = float(runTime % self.runTimeMultiple) / self.runTimeMultiple
# Calculate
cooldown = self.minCooldown + (self.cooldownIncrement * hoursRan) + (self.cooldownIncrement * percentCurrentHour)
if cooldown > self.maxCooldown:
cooldown = self.maxCooldown
return cooldown
def update(self, sensorList, tInfoBox):
# Get current RPM
rpm = sensorList["rpm"].value
cooldown = self.calcCooldown(sensorList)
# Detect engine entering idle
if self.currentlyIdling == False and rpm < self.idleRpm:
self.timeStartedIdling = time.time()
self.currentlyIdling = True
# Detect engine leaving idle
if self.currentlyIdling and rpm > self.idleRpm:
self.timeStartedIdling = sys.maxint
self.currentlyIdling = False
# Start countdown after 30 seconds of idle
if self.currentlyIdling:
idlingTime = time.time() - self.timeStartedIdling
if idlingTime > 30:
if idlingTime > cooldown:
tInfoBox.AppendText("TurboTimer: SAFE.\n")
else:
timeLeft = int(cooldown - idlingTime)
tInfoBox.AppendText("TurboTimer: " + str(timeLeft) + "s\n")
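# --- Hedged example (editor addition, not part of the original feature set). ---
# Shows how TurboTimer.calcCooldown() grows with engine running time and caps at
# maxCooldown.  _FakeSensor is a made-up stand-in for the real PiGaugeOBD sensor
# objects, which expose their reading through a .value attribute.
if __name__ == '__main__':
    class _FakeSensor:
        def __init__(self, value):
            self.value = value
    demoTimer = TurboTimer(True)
    for runTime in (0, 60, 180, 600):
        sensors = {"engine_time": _FakeSensor(runTime)}
        print "engine_time=%4d -> cooldown=%.0f s" % (runTime, demoTimer.calcCooldown(sensors))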
| seanchapman/PiGaugeOBD | pigauge_features.py | Python | gpl-2.0 | 2,918 |
from rpitc.section import BareEntranceSection, BaseEntranceSection, BaseExitSection
class Entrance(BaseEntranceSection):
pass
class BareEntrance(BareEntranceSection):
pass
class Exit(BaseExitSection):
pass
| stefan-walluhn/RPi.TC | rpitc/station/gateway.py | Python | gpl-3.0 | 224 |
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc
from PyQt5 import QtGui as qtg
from . import utils
class LaunchButton(qtw.QPushButton):
""" This is the actual button you push to launch the program.
"""
def __init__(self, parent, config):
"""Construct a LaunchButton"""
super().__init__(parent)
self.config = config
self.setObjectName("LaunchButton")
self.name = self.config.name
self.comment = self.config.comment
self.icon = self.config.icon
self.command = self.config.command
self.process = None
self.error_log = list()
self.output_log = list()
# Create the layouts and widgets to hold the information
toplayout = qtw.QHBoxLayout()
leftlayout = qtw.QVBoxLayout()
# The button's title
title = qtw.QLabel(self.name)
title.setObjectName("LaunchButtonTitle")
leftlayout.addWidget(title)
# The button's descriptive comment
comment = qtw.QLabel(self.comment)
comment.setSizePolicy(
qtw.QSizePolicy.Expanding,
qtw.QSizePolicy.Expanding
)
comment.setWordWrap(True)
comment.setObjectName("LaunchButtonDescription")
leftlayout.addWidget(comment)
# The button's icon, if there is one
iconpane = qtw.QLabel()
icon = (
utils.icon_anyway_you_can(
self.icon,
self.config.aggressive_icon_search
)
if self.icon else qtg.QIcon()
)
# scale the icon
pixmap = icon.pixmap(*self.config.icon_size)
if not pixmap.isNull():
pixmap = pixmap.scaled(*self.config.icon_size)
iconpane.setPixmap(pixmap)
# Add everything to layouts and layouts to the button
toplayout.addWidget(iconpane)
toplayout.addLayout(leftlayout)
self.setLayout(toplayout)
# Set the button's size from config.
self.setSizePolicy(qtw.QSizePolicy.Fixed, qtw.QSizePolicy.Fixed)
self.setMinimumSize(qtc.QSize(*self.config.launcher_size))
# Connect the callback
self.clicked.connect(self.callback)
def enable(self, exit_code):
"""Enable the button widget"""
self.setDisabled(False)
def enable_with_error(self, error_code):
"""Enable the button, but display an error."""
self.setDisabled(False)
print(self.error_log)
print(self.output_log)
qtw.QMessageBox.critical(
None,
"Command Failed!",
"Sorry, this program isn't working!"
)
def log_error(self):
if self.process:
error = bytes(self.process.readAllStandardError())
self.error_log.append(error.decode('utf-8'))
def log_output(self):
if self.process:
output = bytes(self.process.readAllStandardOutput())
self.output_log.append(output.decode('utf-8'))
def callback(self):
"""Run the button's callback function
Commands are called in a separate thread using QProcess.
This way, they can indicate to us when they are finished,
or if they ran correctly, using signals.
        XDG commands in desktop files sometimes have placeholder
        (field-code) arguments like '%u' or '%f'.
        We strip these out, because they have no meaning in the
        context of a button-push; a standalone sketch of this stripping
        follows the class definition.
        """
self.error_log.clear()
self.output_log.clear()
self.command = ' '.join(
x for x in self.command.split()
if x not in ('%f', '%F', '%u', '%U')
)
self.process = qtc.QProcess()
# cannot be a kwarg
self.process.setWorkingDirectory(qtc.QDir.homePath())
self.process.finished.connect(self.enable)
self.process.errorOccurred.connect(self.enable_with_error)
# This should log standard error and standard output
# Doesn't always catch stuff though.
self.process.readyReadStandardError.connect(self.log_error)
self.process.readyReadStandardOutput.connect(self.log_output)
self.process.start(self.command)
if not self.process.state() == qtc.QProcess.NotRunning:
# Disable the button to prevent users clicking
# 200 times waiting on a slow program.
self.setDisabled(True)
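# Hedged illustration (editor addition): the XDG field-code stripping performed
# inside LaunchButton.callback(), extracted as a standalone helper so it can be
# exercised without a Qt event loop.  The sample Exec line below is made up.
def strip_xdg_field_codes(command):
    """Return `command` with the XDG placeholders %f, %F, %u and %U removed."""
    return ' '.join(
        x for x in command.split()
        if x not in ('%f', '%F', '%u', '%U')
    )
# strip_xdg_field_codes('gimp %U')  ->  'gimp'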
| alandmoore/KiLauncher | kilauncher/button.py | Python | gpl-3.0 | 4,425 |
#!/usr/bin/python
#
# sg-check.py - Performs various consistency checks against a running
# Sync Gateway's public REST API. It will also provide
# some useful summary statics, including channel counts
# for those using the `channel(doc.channels)` style.
#
# Author:
# Zachary Gramana <zack@couchbase.com>
#
# Copyright (c) 2016 Couchbase, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
import ijson # `pip install ijson` if you cannot load this module.
import sys
from urllib2 import urlopen
previous = 0
current = 0
skips = 0
if len(sys.argv) == 1:
print("You must pass either a URL to a SG database, e.g. http://foo.com/db, or a path to a directory containing 'all_docs.json' and 'changes.json'")
sys.exit(1)
useHttp = sys.argv[1].startswith('http')
if useHttp:
sgRoot = sys.argv[1]
alldocsPath = sgRoot + '/_all_docs?include_docs=true&revs=true&update_seq=true'
allDocs = urlopen(alldocsPath)
else:
jsonRoot = sys.argv[1]
alldocsPath = jsonRoot + "/all_docs.json"
allDocs = open(alldocsPath)
print('Getting ' + alldocsPath)
parser = ijson.parse(allDocs)
all_seqs = []
all_channels = { 'unassigned': 0 }
update_seq = ''
inDoc = False
hasChannels = False
for prefix, event, value in parser:
if (prefix, event) == ('rows.item.update_seq', 'number'):
all_seqs.append(value)
if (prefix, event) == ('rows.item.doc', 'start_map'):
inDoc = True
hasChannels = False
if (prefix, event) == ('rows.item.doc', 'end_map'):
inDoc = False
if not hasChannels:
all_channels['unassigned'] += 1
elif prefix.endswith('rows.item.doc.channels.item'):
hasChannels = True
if value in all_channels:
all_channels[value] += 1
else:
all_channels[value] = 1
elif prefix == 'update_seq':
update_seq = str(value)
all_seqs.sort()
if useHttp:
httpRoot = sys.argv[1]
changesPath = httpRoot + '/_changes?feed=longpoll&heartbeat=300000&style=all_docs&since=&include_docs=true'
jsonFile = urlopen(changesPath)
else:
jsonRoot = sys.argv[1]
changesPath = jsonRoot + "/changes.json"
jsonFile = open(changesPath)
print('Getting ' + changesPath)
parser = ijson.parse(jsonFile)
changes_seqs = []
changes_channels = { 'unassigned': 0 }
last_seq = ''
deletes = []
user_docs = []
for prefix, event, value in parser:
if (prefix, event) == ('results.item.seq', 'number'):
changes_seqs.append(value)
current = value
if (prefix, event) == ('results.item.id', 'string'):
if str(value).startswith('_user/'):
user_docs.append(current)
if (prefix, event) == ('results.item.deleted', 'boolean'):
deletes.append(current)
if (prefix, event) == ('results.item.doc', 'start_map'):
inDoc = True
hasChannels = False
if (prefix, event) == ('results.item.doc', 'end_map'):
inDoc = False
if not hasChannels:
changes_channels['unassigned'] += 1
elif prefix.endswith('results.item.doc.channels.item'):
hasChannels = True
if value in changes_channels:
changes_channels[value] += 1
else:
changes_channels[value] = 1
elif prefix == 'last_seq':
last_seq = str(value)
changes_seqs.sort()
deletes.sort()
print('\r\n_all_docs returned ' + str(len(all_seqs)) + ' rows')
print('_changes returned ' + str(len(changes_seqs)) + ' results')
for value in all_seqs:
if not value in changes_seqs:
        print('all_docs seq #' + str(value) + ' not found in _changes')
for value in changes_seqs:
if not value in deletes and not value in user_docs and not value in all_seqs:
print('_changes seq #' + str(value) + ' not found in all_docs')
print('\r\nupdate_seq: ' + update_seq)
print('last_seq: ' + last_seq)
print('\r\nall_docs channel counts:')
for key in sorted(all_channels.keys()):
if (key == "unassigned"):
print('')
print(' ' + key + ': ' + str(all_channels[key]))
delCount = len(deletes)
print('\r\nchanges channel counts:')
for key in sorted(changes_channels.keys()):
if (key == "unassigned"):
print('\r\n ' + key + ': ' + str(changes_channels[key] - delCount)) #+ '(' + str() + ')')
else:
print(' ' + key + ': ' + str(changes_channels[key]))
print('\r\ntotal user doc sequences: ' + str(len(user_docs)))
print('total tombstones: ' + str(delCount) + '\r\n')
| tophatch/sync_gateway | tools/sg-check.py | Python | apache-2.0 | 5,003 |
# $Id$
#
# Copyright (C) 2008 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""piddleQt
This module implements the PIDDLE/Sping API for a Qt4 canvas
Bits have been shamelessly cobbled from piddleSVG.py
Greg Landrum (glandrum@users.sourceforge.net)
"""
"""
Functionality implemented:
x drawLine
x drawPolygon
x drawString
x drawImage
Known problems:
"""
from rdkit.sping import pid
import types
from PyQt4 import QtCore, QtGui, QtSvg
from math import *
import copy
def _ColorToQt(color):
""" convenience function for converting a sping.pid color to a Qt color
"""
if color == pid.transparent:
return None
else:
return QtGui.QColor(int(color.red*255),
int(color.green*255),
int(color.blue*255))
#class QCanvasRotText(QCanvasText):
class QCanvasRotText:
""" used to draw (UGLY) rotated text
"""
def __init__(self,txt,canvas,angle=0):
QCanvasText.__init__(self,txt,canvas)
self._angle = angle
def draw(self,qP):
qP.save()
x = self.x()
y = self.y()
theta = -self._angle
qP.rotate(theta)
qP.translate(-x,-y)
thetaR = theta*pi/180.
newX = cos(-thetaR)*x - sin(-thetaR)*y
newY = sin(-thetaR)*x + cos(-thetaR)*y
qP.translate(newX,newY)
QCanvasText.draw(self,qP)
qP.restore()
class QtCanvas( pid.Canvas ):
def __init__(self,scene,size=None,name='QtCanvas'):
if size is None:
size = scene.width(),scene.height()
self.size=size
pid.Canvas.__init__(self, size, name)
self._scene = scene
self._brush = QtGui.QBrush()
self._pen = QtGui.QPen()
self._font = QtGui.QApplication.font()
self.objs = []
self._initOutput()
self.nObjs = 0
def _initOutput(self):
for obj in self.objs:
if type(obj)==types.TupleType:
self._scene.removeItem(obj[0])
else:
self._scene.removeItem(obj)
self.objs = []
self.nObjs = 0
def _adjustFont(self,font):
if font.face:
self._font.setFamily(font.face)
self._font.setBold(font.bold)
self._font.setItalic(font.italic)
self._font.setPointSize(font.size)
self._font.setUnderline(font.underline)
# public functions
def clear(self):
self._initOutput()
def flush(self):
self._scene.update()
def save(self, file=None, format=None):
self._scene.update()
#------------- drawing methods --------------
def drawLine(self, x1,y1, x2,y2, color=None, width=None, dash=None,
**kwargs ):
"Draw a straight line between x1,y1 and x2,y2."
# set color...
if color:
if color == pid.transparent: return
elif self.defaultLineColor == pid.transparent:
return
else:
color = self.defaultLineColor
qColor = _ColorToQt(color)
if width:
w = width
else:
w = self.defaultLineWidth
self._pen.setColor(qColor)
self._pen.setWidth(w)
if dash is not None:
self._pen.setStyle(QtCore.Qt.DashLine)
#dash = [float(x)/w for x in dash]
dash = list(dash)
self._pen.setDashPattern(dash)
else:
self._pen.setStyle(QtCore.Qt.SolidLine)
l = self._scene.addLine(x1,y1,x2,y2,self._pen)
if dash is not None:
self._pen.setStyle(QtCore.Qt.SolidLine)
self.nObjs+=1
self.objs.append(l)
def drawPolygon(self, pointlist,
edgeColor=None, edgeWidth=None, fillColor=pid.transparent,
closed=0,dash=None,**kwargs):
"""drawPolygon(pointlist) -- draws a polygon
pointlist: a list of (x,y) tuples defining vertices
"""
pts = [QtCore.QPointF(x[0],x[1]) for x in pointlist]
poly = QtGui.QPolygonF(pts)
# set color for fill...
filling = 0
if fillColor:
if fillColor != pid.transparent:
filling = 1
qColor = _ColorToQt(fillColor)
self._brush.setColor(qColor)
if filling:
self._brush.setStyle(QtCore.Qt.SolidPattern)
else:
self._brush.setStyle(QtCore.Qt.NoBrush)
# set color for edge...
if not edgeColor:
edgeColor = self.defaultLineColor
qColor = _ColorToQt(edgeColor)
if qColor:
self._pen.setColor(qColor)
# set edge width...
if edgeWidth is None: edgeWidth = self.defaultLineWidth
self._pen.setWidth(edgeWidth)
self._pen.setJoinStyle(QtCore.Qt.RoundJoin)
if dash is not None:
self._pen.setStyle(QtCore.Qt.DashLine)
else:
self._pen.setStyle(QtCore.Qt.SolidLine)
if not qColor:
self._pen.setStyle(QtCore.Qt.NoPen)
poly = self._scene.addPolygon(poly,self._pen,self._brush)
self.nObjs += 1
poly.setZValue(self.nObjs)
self.objs.append(poly)
if dash is not None:
self._pen.setStyle(QtCore.Qt.SolidLine)
self.nObjs+=1
def drawString(self, s, x,y, font=None, color=None, angle=0, **kwargs):
# set color...
if color:
if color == pid.transparent: return
elif self.defaultLineColor == pid.transparent:
return
else:
color = self.defaultLineColor
if font is None:
font = self.defaultFont
qColor = _ColorToQt(color)
if font is not None:
self._adjustFont(font)
txt=self._scene.addText(s,self._font)
txt.setDefaultTextColor(qColor)
txtH = txt.boundingRect().height()
#txt.setPos(QtCore.QPointF(x,y-txtH/2))
txt.setPos(QtCore.QPointF(x,y-txtH))
#txt.setPos(QtCore.QPointF(x,y))
if angle:
txt.rotate(angle)
#if angle != 0:
# txt = QCanvasRotText(s,self._scene,angle=angle)
#else:
# txt = QCanvasText(s,self._scene)
#txt.setColor(qColor)
#txt.setVisible(1)
#txt.setX(x)
#y -= font.size
#txt.setY(y)
txt.setZValue(self.nObjs)
self.nObjs += 1
self.objs.append(txt)
def drawImage(self, image, x,y, **kwargs):
"""
"""
    # PNG data is binary, so a bytes buffer is required here.
    from io import BytesIO
    sio = BytesIO()
    image.save(sio,format='png')
base = QtGui.QPixmap()
base.loadFromData(sio.getvalue())
pix = self._scene.addPixmap(base)
pix.setPos(QtCore.QPointF(x,y))
pix.setZValue(self.nObjs)
self.nObjs += 1
self.objs.append(pix)
def stringBox(self, s, font=None):
"Return the logical width and height of the string if it were drawn \
in the current font (defaults to self.font)."
if not font:
font = self.defaultFont
if font:
self._adjustFont(font)
t = QtGui.QGraphicsTextItem(s)
t.setFont(self._font)
rect = t.boundingRect()
return rect.width(),rect.height()
def stringWidth(self, s, font=None):
"Return the logical width of the string if it were drawn \
in the current font (defaults to self.font)."
if not font:
font = self.defaultFont
if font:
self._adjustFont(font)
t = QtGui.QGraphicsTextItem(s)
t.setFont(self._font)
rect = t.boundingRect()
return rect.width()
def fontAscent(self, font=None):
if not font:
font = self.defaultFont
if font:
self._adjustFont(font)
t = QtGui.QGraphicsTextItem('B')
t.setFont(self._font)
rect = t.boundingRect()
# FIX: this is a hack, but I can't immediately figure out how to solve the
# problem that the bounding rectangle includes the descent:
return 1.0*rect.height()
def fontDescent(self, font=None):
if not font:
font = self.defaultFont
if font:
self._adjustFont(font)
t = QtGui.QGraphicsTextItem('B')
t.setFont(self._font)
rect1 = t.boundingRect()
t = QtGui.QGraphicsTextItem('y')
t.setFont(self._font)
rect2 = t.boundingRect()
return 1.*(rect2.height()-rect1.height())
def test(canvas):
#... for testing...
canvas.defaultLineColor = Color(0.7,0.7,1.0) # light blue
canvas.drawLines( map(lambda i:(i*10,0,i*10,300), range(30)) )
canvas.drawLines( map(lambda i:(0,i*10,300,i*10), range(30)) )
canvas.defaultLineColor = black
canvas.drawLine(10,200, 20,190, color=red)
canvas.drawEllipse( 130,30, 200,100, fillColor=yellow, edgeWidth=4 )
canvas.drawArc( 130,30, 200,100, 45,50, fillColor=blue, edgeColor=navy, edgeWidth=4 )
canvas.defaultLineWidth = 4
canvas.drawRoundRect( 30,30, 100,100, fillColor=blue, edgeColor=maroon )
canvas.drawCurve( 20,20, 100,50, 50,100, 160,160 )
#canvas.drawString("This is a test!", 30,130, Font(face="times",size=16,bold=1),
# color=green, angle=-45)
#canvas.drawString("This is a test!", 30,130, color=red, angle=-45)
polypoints = [ (160,120), (130,190), (210,145), (110,145), (190,190) ]
canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1)
canvas.drawRect( 200,200,260,260, edgeColor=yellow, edgeWidth=5 )
canvas.drawLine( 200,260,260,260, color=green, width=5 )
canvas.drawLine( 260,200,260,260, color=red, width=5 )
canvas.flush()
def dashtest(canvas):
#... for testing...
canvas.defaultLineColor = Color(0.7,0.7,1.0) # light blue
canvas.drawLines( map(lambda i:(i*10,0,i*10,300), range(30)),dash=(3,3) )
canvas.drawLines( map(lambda i:(0,i*10,300,i*10), range(30)),dash=(3,3) )
canvas.defaultLineColor = black
canvas.drawLine(10,200, 20,190, color=red,dash=(3,3))
canvas.drawEllipse( 130,30, 200,100, fillColor=yellow, edgeWidth=4,dash=(3,3) )
canvas.drawArc( 130,30, 200,100, 45,50, fillColor=blue, edgeColor=navy, edgeWidth=4,dash=(3,3) )
canvas.defaultLineWidth = 4
canvas.drawRoundRect( 30,30, 100,100, fillColor=blue, edgeColor=maroon,dash=(3,3) )
canvas.drawCurve( 20,20, 100,50, 50,100, 160,160,dash=(3,3) )
canvas.drawString("This is a test!", 30,130, Font(face="times",size=16,bold=1),
color=green, angle=-45)
canvas.drawString("This is a test!", 30,130, color=red, angle=-45)
polypoints = [ (160,120), (130,190), (210,145), (110,145), (190,190) ]
canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1,dash=(3,3))
canvas.drawRect( 200,200,260,260, edgeColor=yellow, edgeWidth=5,dash=(3,3) )
canvas.drawLine( 200,260,260,260, color=green, width=5,dash=(3,3) )
canvas.drawLine( 260,200,260,260, color=red, width=5,dash=(3,3) )
canvas.flush()
if __name__=='__main__':
import sys
from rdkit.sping.pid import *
from PIL import Image
app = QtGui.QApplication(sys.argv)
w = QtGui.QGraphicsView()
scene= QtGui.QGraphicsScene(0,0,300,300)
canv = QtCanvas(scene)
test(canv)
w.setScene(scene)
w.show()
sys.exit(app.exec_())
| soerendip42/rdkit | rdkit/sping/Qt/pidQt4.py | Python | bsd-3-clause | 10,601 |
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class EdiskCz(SimpleHoster):
__name__ = "EdiskCz"
__type__ = "hoster"
__version__ = "0.22"
__pattern__ = r'http://(?:www\.)?edisk\.(cz|sk|eu)/(stahni|sk/stahni|en/download)/.+'
__description__ = """Edisk.cz hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
INFO_PATTERN = r'<span class="fl" title="(?P<N>[^"]+)">\s*.*?\((?P<S>[\d.,]+) (?P<U>[\w^_]+)\)</h1></span>'
OFFLINE_PATTERN = r'<h3>This file does not exist due to one of the following:</h3><ul><li>'
ACTION_PATTERN = r'/en/download/(\d+/.*\.html)'
LINK_PATTERN = r'http://.*edisk\.cz.*\.html'
def setup(self):
self.multiDL = False
def process(self, pyfile):
url = re.sub("/(stahni|sk/stahni)/", "/en/download/", pyfile.url)
self.logDebug("URL:" + url)
m = re.search(self.ACTION_PATTERN, url)
if m is None:
self.error(_("ACTION_PATTERN not found"))
action = m.group(1)
self.html = self.load(url, decode=True)
self.getFileInfo()
self.html = self.load(re.sub("/en/download/", "/en/download-slow/", url))
url = self.load(re.sub("/en/download/", "/x-download/", url), post={
"action": action
})
if not re.match(self.LINK_PATTERN, url):
self.fail(_("Unexpected server response"))
self.download(url)
getInfo = create_getInfo(EdiskCz)
| mariusbaumann/pyload | module/plugins/hoster/EdiskCz.py | Python | gpl-3.0 | 1,565 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for the registry flows."""
import os
from absl import app
from grr_response_client.client_actions import file_fingerprint
from grr_response_client.client_actions import searching
from grr_response_client.client_actions import standard
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import artifact
from grr_response_server import data_store
from grr_response_server.flows.general import registry
from grr_response_server.flows.general import transfer
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class RegistryFlowTest(flow_test_lib.FlowTestsBaseclass):
def setUp(self):
super().setUp()
vfs_overrider = vfs_test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.REGISTRY,
vfs_test_lib.FakeRegistryVFSHandler)
vfs_overrider.Start()
self.addCleanup(vfs_overrider.Stop)
class TestFakeRegistryFinderFlow(RegistryFlowTest):
"""Tests for the RegistryFinder flow."""
runkey = "HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"
def RunFlow(self, client_id, keys_paths=None, conditions=None):
if keys_paths is None:
keys_paths = [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"
]
if conditions is None:
conditions = []
client_mock = action_mocks.ActionMock(
searching.Find,
searching.Grep,
)
session_id = flow_test_lib.TestFlowHelper(
registry.RegistryFinder.__name__,
client_mock,
client_id=client_id,
keys_paths=keys_paths,
conditions=conditions,
creator=self.test_username)
return session_id
def testFindsNothingIfNothingMatchesTheGlob(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/NonMatch*"
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeysWithSingleGlobWithoutConditions(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeysWithTwoGlobsWithoutConditions(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Side*",
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Mct*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeyWithInterpolatedGlobWithoutConditions(self):
user = rdf_client.User(sid="S-1-5-20")
client_id = self.SetupClient(0, users=[user])
session_id = self.RunFlow(client_id, [
"HKEY_USERS/%%users.sid%%/Software/Microsoft/Windows/"
"CurrentVersion/*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
key = ("/HKEY_USERS/S-1-5-20/"
"Software/Microsoft/Windows/CurrentVersion/Run")
self.assertEqual(results[0].stat_entry.pathspec.CollapsePath(), key)
self.assertEqual(results[0].stat_entry.pathspec.path, key)
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfNothingMatchesLiteralMatchCondition(self):
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10, bytes_after=10, literal=b"CanNotFindMe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeyIfItMatchesLiteralMatchCondition(self):
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10,
bytes_after=10,
literal=b"Windows Sidebar\\Sidebar.exe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 15)
self.assertEqual(results[0].matches[0].data,
b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
self.assertEqual(
results[0].stat_entry.pathspec.CollapsePath(),
"/HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfRegexMatchesNothing(self):
value_regex_match = rdf_file_finder.FileFinderContentsRegexMatchCondition(
bytes_before=10, bytes_after=10, regex=b".*CanNotFindMe.*")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeyIfItMatchesRegexMatchCondition(self):
value_regex_match = rdf_file_finder.FileFinderContentsRegexMatchCondition(
bytes_before=10, bytes_after=10, regex=b"Windows.+\\.exe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 15)
self.assertEqual(results[0].matches[0].data,
b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
self.assertEqual(
results[0].stat_entry.pathspec.CollapsePath(),
"/HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/"
"CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
  def testFindsNothingIfModificationTimeConditionMatchesNothing(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0),
max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.MODIFICATION_TIME,
modification_time=modification_time)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeysIfModificationTimeConditionMatches(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 - 1),
max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 + 1))
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.MODIFICATION_TIME,
modification_time=modification_time)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeyWithLiteralAndModificationTimeConditions(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 - 1),
max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 + 1))
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10,
bytes_after=10,
literal=b"Windows Sidebar\\Sidebar.exe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.MODIFICATION_TIME,
modification_time=modification_time),
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
self.assertEqual(
results[0].stat_entry.pathspec.CollapsePath(),
"/HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Sidebar")
def testSizeCondition(self):
client_id = self.SetupClient(0)
# There are two values, one is 20 bytes, the other 53.
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type.SIZE,
size=rdf_file_finder.FileFinderSizeCondition(min_file_size=50))
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertGreater(results[0].stat_entry.st_size, 50)
class TestRegistryFlows(RegistryFlowTest):
"""Test the Run Key registry flows."""
@parser_test_lib.WithAllParsers
def testCollectRunKeyBinaries(self):
"""Read Run key from the client_fixtures to test parsing and storage."""
client_id = self.SetupClient(0, system="Windows", os_version="6.2")
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.FakeFullVFSHandler):
client_mock = action_mocks.ActionMock(
file_fingerprint.FingerprintFile,
searching.Find,
standard.GetFileStat,
)
# Get KB initialized
session_id = flow_test_lib.TestFlowHelper(
artifact.KnowledgeBaseInitializationFlow.__name__,
client_mock,
client_id=client_id,
creator=self.test_username)
kb = flow_test_lib.GetFlowResults(client_id, session_id)[0]
client = data_store.REL_DB.ReadClientSnapshot(client_id)
client.knowledge_base = kb
data_store.REL_DB.WriteClientSnapshot(client)
with test_lib.Instrument(transfer.MultiGetFile,
"Start") as getfile_instrument:
# Run the flow in the emulated way.
flow_test_lib.TestFlowHelper(
registry.CollectRunKeyBinaries.__name__,
client_mock,
client_id=client_id,
creator=self.test_username)
# Check MultiGetFile got called for our runkey file
download_requested = False
for pathspec in getfile_instrument.args[0][0].args.pathspecs:
if pathspec.path == u"C:\\Windows\\TEMP\\A.exe":
download_requested = True
self.assertTrue(download_requested)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| google/grr | grr/server/grr_response_server/flows/general/registry_test.py | Python | apache-2.0 | 12,992 |
"""
kombu.syn
=========
"""
from __future__ import absolute_import
import sys
__all__ = ['detect_environment']
_environment = None
def blocking(fun, *args, **kwargs):
return fun(*args, **kwargs)
def select_blocking_method(type):
pass
def _detect_environment():
## -eventlet-
if 'eventlet' in sys.modules:
try:
from eventlet.patcher import is_monkey_patched as is_eventlet
import socket
if is_eventlet(socket):
return 'eventlet'
except ImportError:
pass
# -gevent-
if 'gevent' in sys.modules:
try:
from gevent import socket as _gsocket
import socket
if socket.socket is _gsocket.socket:
return 'gevent'
except ImportError:
pass
return 'default'
def detect_environment():
global _environment
if _environment is None:
_environment = _detect_environment()
return _environment
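# Hedged usage sketch (added; not part of the original module). In a plain
# CPython process neither eventlet nor gevent is loaded, so the detected
# environment falls back to 'default'.
if __name__ == '__main__':  # pragma: no cover
    print(detect_environment())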
| mozilla/firefox-flicks | vendor-local/lib/python/kombu/syn.py | Python | bsd-3-clause | 996 |
#!/usr/bin/env python
import msshcopyid.cli
if __name__ == '__main__':
msshcopyid.cli.main()
| samuel-phan/mssh-copy-id | msshcopyid/bin/mssh-copy-id.py | Python | mit | 100 |
"""
Only works for 'basic type' properties (bool, int and float)!
Multi-dimensional arrays (like array of vectors) will be flattened into seq.
"""
collection.foreach_get(attr, some_seq)
# Python equivalent
for i in range(len(seq)):
some_seq[i] = getattr(collection[i], attr)
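# Hedged follow-up example (added; assumes a Blender mesh object named `mesh`
# and that numpy is available): vector attributes are flattened, so the flat
# buffer must hold len(collection) * 3 floats for the 3-component "co" vector.
import numpy as np
coords = np.empty(len(mesh.vertices) * 3, dtype=np.float32)
mesh.vertices.foreach_get("co", coords)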
| Passtechsoft/TPEAlpGen | blender/doc/python_api/examples/bpy.types.bpy_prop_collection.foreach_get.py | Python | gpl-3.0 | 281 |
print ("project euler problem 5 find the smallest number divisible by each number smaller than twenty")
print 16*9*5*7*11*13*17*19
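# Hedged cross-check (added; not part of the original solution): derive the
# same answer as lcm(1..20) instead of hand-picking the prime powers.
def _gcd(a, b):
    while b:
        a, b = b, a % b
    return a
answer = 1
for k in range(1, 21):
    answer = answer * k // _gcd(answer, k)
print(answer)  # 232792560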
| Greh/Project-Euler | euler5.py | Python | mit | 132 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variables.
See the [Variables](https://www.tensorflow.org/guide/variables) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
shared_name=""):
"""Deprecated. Used variable_op_v2 instead."""
if not set_shape:
shape = tensor_shape.unknown_shape()
ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name,
container=container, shared_name=shared_name)
# TODO(mrry): Move this to where it is used, so we can get rid of this op
# wrapper?
if set_shape:
ret.set_shape(shape)
return ret
def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""):
"""Create a variable Operation.
See also variables.Variable.
Args:
shape: The shape of the tensor managed by this variable
dtype: The underlying type of the tensor values.
name: optional name to use for the variable op.
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
A variable tensor.
"""
return gen_state_ops.variable_v2(
shape=shape,
dtype=dtype,
name=name,
container=container,
shared_name=shared_name)
def init_variable(v, init, name="init"):
"""Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
init: Tensor to assign to v,
Or an object convertible to Tensor e.g. nparray,
Or an Initializer that generates a tensor given the shape and type of v.
An "Initializer" is a callable that returns a tensor that "v" should be
set to. It will be called as init(shape, dtype).
name: Optional name for the op.
Returns:
The operation that initializes v.
"""
with ops.name_scope(None, v.op.name + "/", [v, init]):
with ops.name_scope(name) as scope:
with ops.colocate_with(v):
if callable(init):
assert v.get_shape().is_fully_defined(), "Variable shape unknown."
# TODO(mrry): Convert to v.shape when the property and
# accessor are reconciled (and all initializers support
# tf.TensorShape objects).
value = init(v.get_shape().as_list(), v.dtype.base_dtype)
value = ops.convert_to_tensor(value, name="value")
return gen_state_ops.assign(v, value, name=scope)
else:
init = ops.convert_to_tensor(init, name="init")
return gen_state_ops.assign(v, init, name=scope)
def is_variable_initialized(ref, name=None):
"""Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.is_variable_initialized(ref=ref, name=name)
# Handle resource variables.
return ref.is_initialized(name=name)
@tf_export(v1=["assign_sub"])
def assign_sub(ref, value, use_locking=None, name=None):
"""Update `ref` by subtracting `value` from it.
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Unlike `tf.math.subtract`, this op does not broadcast. `ref` and `value`
must have the same shape.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`,
`complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. Should be
from a `Variable` node.
value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to
be subtracted to the variable.
use_locking: An optional `bool`. Defaults to `False`. If True, the
subtraction will be protected by a lock; otherwise the behavior is
undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_sub(
ref, value, use_locking=use_locking, name=name)
return ref.assign_sub(value)
@tf_export(v1=["assign_add"])
def assign_add(ref, value, use_locking=None, name=None):
"""Update `ref` by adding `value` to it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Unlike `tf.math.add`, this op does not broadcast. `ref` and `value` must have
the same shape.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`,
`complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. Should be
from a `Variable` node.
value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to
be added to the variable.
use_locking: An optional `bool`. Defaults to `False`. If True, the addition
will be protected by a lock; otherwise the behavior is undefined, but may
exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_add(
ref, value, use_locking=use_locking, name=name)
return ref.assign_add(value)
@tf_export(v1=["assign"])
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
"""Update `ref` by assigning `value` to it.
This operation outputs a Tensor that holds the new value of `ref` after
the value has been assigned. This makes it easier to chain operations that
need to use the reset value.
Args:
ref: A mutable `Tensor`. Should be from a `Variable` node. May be
uninitialized.
value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to
be assigned to the variable.
validate_shape: An optional `bool`. Defaults to `True`. If true, the
operation will validate that the shape of 'value' matches the shape of the
Tensor being assigned to. If false, 'ref' will take on the shape of
'value'.
use_locking: An optional `bool`. Defaults to `True`. If True, the assignment
will be protected by a lock; otherwise the behavior is undefined, but may
exhibit less contention.
name: A name for the operation (optional).
Returns:
A `Tensor` that will hold the new value of `ref` after
the assignment has completed.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign(
ref, value, use_locking=use_locking, name=name,
validate_shape=validate_shape)
return ref.assign(value, name=name)
@tf_export(v1=["count_up_to"])
@deprecated(None, "Prefer Dataset.range instead.")
def count_up_to(ref, limit, name=None):
r"""Increments 'ref' until it reaches 'limit'.
Args:
ref: A Variable. Must be one of the following types: `int32`, `int64`.
Should be from a scalar `Variable` node.
limit: An `int`.
If incrementing ref would bring it above limit, instead generates an
'OutOfRange' error.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `ref`.
A copy of the input before increment. If nothing else modifies the
input, the values produced will all be distinct.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.count_up_to(ref, limit=limit, name=name)
return gen_state_ops.resource_count_up_to(
ref.handle, limit, T=ref.dtype, name=name)
@tf_export(v1=["scatter_update"])
def scatter_update(ref, indices, updates, use_locking=True, name=None):
# pylint: disable=line-too-long
r"""Applies sparse updates to a variable reference.
This operation computes
```python
# Scalar indices
ref[indices, ...] = updates[...]
# Vector indices (for each i)
ref[indices[i], ...] = updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
If values in `ref` is to be updated more than once, because there are
duplicate entries in `indices`, the order at which the updates happen
for each value is undefined.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
</div>
Args:
ref: A `Variable`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_update(ref, indices, updates,
use_locking=use_locking, name=name)
return ref._lazy_read(gen_resource_variable_ops.resource_scatter_update( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
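# Hedged worked example (added for illustration; assumes TF 1.x graph mode):
#   ref = tf.Variable([1, 2, 3, 4])
#   tf.compat.v1.scatter_update(ref, [1, 3], [9, 10])
# leaves ref holding [1, 9, 3, 10] once the op has been run in a session.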
@tf_export(v1=["scatter_nd_update"])
def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
r"""Applies sparse `updates` to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
  For example, say we want to update 4 scattered elements in a rank-1 tensor
  with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.compat.v1.scatter_nd_update(ref, indices, updates)
with tf.compat.v1.Session() as sess:
    print(sess.run(update))
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
ref: A Variable.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`. A tensor of
      updated values to store in `ref`.
    use_locking: An optional `bool`. Defaults to `True`. If True, the
      assignment will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
The value of the variable after the update.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_nd_update(
ref, indices, updates, use_locking, name)
return ref._lazy_read(gen_state_ops.resource_scatter_nd_update( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export(v1=["scatter_add"])
def scatter_add(ref, indices, updates, use_locking=False, name=None):
# pylint: disable=line-too-long
r"""Adds sparse updates to the variable referenced by `resource`.
This operation computes
```python
# Scalar indices
ref[indices, ...] += updates[...]
# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the updated value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
</div>
Args:
ref: A `Variable`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `False`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_add(ref, indices, updates,
use_locking=use_locking, name=name)
return ref._lazy_read(gen_resource_variable_ops.resource_scatter_add( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
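# Hedged worked example (added for illustration): duplicate indices accumulate,
# so with ref = [1, 1, 1], indices = [0, 0, 2] and updates = [5, 5, 5],
# running the op leaves ref holding [11, 1, 6].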
@tf_export(v1=["scatter_nd_add"])
def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):
r"""Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
```
  For example, say we want to add 4 scattered elements to a rank-1 tensor
  with 8 elements. In Python, that addition would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
add = tf.compat.v1.scatter_nd_add(ref, indices, updates)
with tf.compat.v1.Session() as sess:
    print(sess.run(add))
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
`qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
`uint32`, `uint64`. A mutable Tensor. Should be from a Variable node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into ref.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to add to ref.
use_locking: An optional `bool`. Defaults to `False`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_nd_add(
ref, indices, updates, use_locking, name)
return ref._lazy_read(gen_state_ops.resource_scatter_nd_add( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export(v1=["scatter_sub"])
def scatter_sub(ref, indices, updates, use_locking=False, name=None):
r"""Subtracts sparse updates to a variable reference.
```python
# Scalar indices
ref[indices, ...] -= updates[...]
# Vector indices (for each i)
ref[indices[i], ...] -= updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their (negated) contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]` or
`updates.shape = []`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%"
src="https://www.tensorflow.org/images/ScatterSub.png" alt>
</div>
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
`qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
`uint32`, `uint64`. Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to subtract from `ref`.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_sub(ref, indices, updates,
use_locking=use_locking, name=name)
return ref._lazy_read(gen_resource_variable_ops.resource_scatter_sub( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export(v1=["scatter_nd_sub"])
def scatter_nd_sub(ref, indices, updates, use_locking=False, name=None):
r"""Applies sparse subtraction to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
```
For example, say we want to subtract 4 scattered elements from a rank-1 tensor
with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = tf.compat.v1.scatter_nd_sub(ref, indices, updates)
with tf.compat.v1.Session() as sess:
    print(sess.run(op))
```
The resulting update to ref would look like this:
[1, -9, 3, -6, -6, 6, 7, -4]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
`qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
`uint32`, `uint64`. A mutable Tensor. Should be from a Variable node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to subtract from ref.
    use_locking: An optional `bool`. Defaults to `False`. If True, the
      subtraction will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_nd_sub(
ref, indices, updates, use_locking, name)
return ref._lazy_read(gen_state_ops.resource_scatter_nd_sub( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export(v1=["scatter_mul"])
def scatter_mul(ref, indices, updates, use_locking=False, name=None):
# pylint: disable=line-too-long
r"""Multiplies sparse updates into a variable reference.
This operation computes
```python
# Scalar indices
ref[indices, ...] *= updates[...]
# Vector indices (for each i)
ref[indices[i], ...] *= updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions multiply.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape =
[]`.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
`qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
`uint32`, `uint64`. Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A
tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated
values to multiply to `ref`.
use_locking: An optional `bool`. Defaults to `False`. If True, the operation
will be protected by a lock; otherwise the behavior is undefined, but may
exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
return gen_state_ops.scatter_mul(
ref=ref,
indices=indices,
updates=updates,
use_locking=use_locking,
name=name)
@tf_export(v1=["scatter_div"])
def scatter_div(ref, indices, updates, use_locking=False, name=None):
# pylint: disable=line-too-long
r"""Divides a variable reference by sparse updates.
This operation computes
```python
# Scalar indices
ref[indices, ...] /= updates[...]
# Vector indices (for each i)
ref[indices[i], ...] /= updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions divide.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape =
[]`.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
`qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
`uint32`, `uint64`. Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A
tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`. A tensor of values
that `ref` is divided by.
use_locking: An optional `bool`. Defaults to `False`. If True, the operation
will be protected by a lock; otherwise the behavior is undefined, but may
exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
return gen_state_ops.scatter_div(
ref=ref,
indices=indices,
updates=updates,
use_locking=use_locking,
name=name)
@tf_export(v1=["scatter_max"])
def scatter_max(ref, indices, updates, use_locking=False, name=None):
# pylint: disable=line-too-long
r"""Reduces sparse updates into a variable reference using the `max` operation.
This operation computes
# Scalar indices
ref[indices, ...] = max(ref[indices, ...], updates[...])
# Vector indices (for each i)
ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...],
updates[i, ..., j, ...])
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions combine.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape =
[]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png"
alt>
</div>
Args:
ref: A mutable `Tensor`. Must be one of the following types: `half`,
`bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a
`Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A
tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated
values to reduce into `ref`.
use_locking: An optional `bool`. Defaults to `False`. If True, the update
will be protected by a lock; otherwise the behavior is undefined, but may
exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
return gen_state_ops.scatter_max(
ref=ref,
indices=indices,
updates=updates,
use_locking=use_locking,
name=name)
@tf_export(v1=["scatter_min"])
def scatter_min(ref, indices, updates, use_locking=False, name=None):
# pylint: disable=line-too-long
r"""Reduces sparse updates into a variable reference using the `min` operation.
This operation computes
# Scalar indices
ref[indices, ...] = min(ref[indices, ...], updates[...])
# Vector indices (for each i)
ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...],
updates[i, ..., j, ...])
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions combine.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape =
[]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png"
alt>
</div>
Args:
ref: A mutable `Tensor`. Must be one of the following types: `half`,
`bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a
`Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A
tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated
values to reduce into `ref`.
use_locking: An optional `bool`. Defaults to `False`. If True, the update
will be protected by a lock; otherwise the behavior is undefined, but may
exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
return gen_state_ops.scatter_min(
ref=ref,
indices=indices,
updates=updates,
use_locking=use_locking,
name=name)
@tf_export(v1=["batch_scatter_update"])
@deprecation.deprecated(
"2018-11-29", "Use the batch_scatter_update method of Variable instead.")
def batch_scatter_update(ref, indices, updates, use_locking=True, name=None):
"""Generalization of `tf.compat.v1.scatter_update` to axis different than 0.
Analogous to `batch_gather`. This assumes that `ref`, `indices` and `updates`
have a series of leading dimensions that are the same for all of them, and the
updates are performed on the last dimension of indices. In other words, the
dimensions should be the following:
`num_prefix_dims = indices.ndims - 1`
`batch_dim = num_prefix_dims + 1`
`updates.shape = indices.shape + var.shape[batch_dim:]`
where
`updates.shape[:num_prefix_dims]`
`== indices.shape[:num_prefix_dims]`
`== var.shape[:num_prefix_dims]`
And the operation performed can be expressed as:
`var[i_1, ..., i_n, indices[i_1, ..., i_n, j]] = updates[i_1, ..., i_n, j]`
When indices is a 1D tensor, this operation is equivalent to
`tf.compat.v1.scatter_update`.
To avoid this operation there would be 2 alternatives:
1) Reshaping the variable by merging the first `ndims` dimensions. However,
this is not possible because `tf.reshape` returns a Tensor, which we
cannot use `tf.compat.v1.scatter_update` on.
2) Looping over the first `ndims` of the variable and using
`tf.compat.v1.scatter_update` on the subtensors that result of slicing the
first
dimension. This is a valid option for `ndims = 1`, but less efficient than
this implementation.
See also `tf.compat.v1.scatter_update` and `tf.compat.v1.scatter_nd_update`.
Args:
ref: `Variable` to scatter onto.
indices: Tensor containing indices as described above.
updates: Tensor of updates to apply to `ref`.
use_locking: Boolean indicating whether to lock the writing operation.
name: Optional scope name string.
Returns:
Ref to `variable` after it has been modified.
Raises:
ValueError: If the initial `ndims` of `ref`, `indices`, and `updates` are
not the same.
"""
with ops.name_scope(name):
indices = ops.convert_to_tensor(indices, name="indices")
indices_shape = array_ops.shape(indices)
indices_dimensions = indices.get_shape().ndims
if indices_dimensions is None:
raise ValueError("batch_gather does not allow indices with unknown "
"shape.")
nd_indices = array_ops.expand_dims(indices, axis=-1)
nd_indices_list = []
# Scatter ND requires indices to have an additional dimension, in which the
# coordinates of the updated things are specified. For this to be adapted to
# the scatter_update with several leading dimensions, we simply make use of
# a tf.range for all the leading dimensions followed by concat of all the
# coordinates we created with the original indices.
# For example if indices.shape = [2, 3, 4], we should generate the following
# indices for tf.compat.v1.scatter_nd_update:
# nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
# nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
# nd_indices[:, :, 2] = indices
for dimension in range(indices_dimensions - 1):
# In this loop we generate the following for the example (one for each
# iteration).
# nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
# nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
# This is done at every iteration with a tf.range over the size of the
# i-th dimension and using broadcasting over the desired shape.
dimension_size = indices_shape[dimension]
shape_to_broadcast = [1] * (indices_dimensions + 1)
shape_to_broadcast[dimension] = dimension_size
dimension_range = array_ops.reshape(
gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast)
if dimension_range.dtype.base_dtype != nd_indices.dtype:
dimension_range = gen_math_ops.cast(dimension_range, nd_indices.dtype)
nd_indices_list.append(
dimension_range * array_ops.ones_like(nd_indices))
# Add the original indices at the end, as described above, and concat.
nd_indices_list.append(nd_indices)
final_indices = array_ops.concat(nd_indices_list, axis=-1)
return scatter_nd_update(
ref, final_indices, updates, use_locking=use_locking)
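# Hedged worked example (added for illustration): starting from
#   var     = [[1, 2, 3], [4, 5, 6]]   # shape [2, 3]
#   indices = [[0, 2], [1, 0]]         # shape [2, 2]
#   updates = [[10, 20], [30, 40]]     # shape [2, 2]
# batch_scatter_update writes updates[i, j] into var[i, indices[i, j]],
# giving [[10, 2, 20], [40, 30, 6]].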
| ghchinoy/tensorflow | tensorflow/python/ops/state_ops.py | Python | apache-2.0 | 35,124 |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Keystone Identity Server - CLI Management Interface
"""
import sys
import logging
import optparse # deprecated in 2.7, in favor of argparse
import keystone
from keystone.common import config
from keystone.manage import api
import keystone.backends as db
# CLI feature set
OBJECTS = ['user', 'tenant', 'role', 'service',
'endpointTemplates', 'token', 'endpoint', 'credentials']
ACTIONS = ['add', 'list', 'disable', 'delete', 'grant',
'revoke']
# Messages
OBJECT_NOT_SPECIFIED = 'No object type specified for first argument'
ACTION_NOT_SPECIFIED = 'No action specified for second argument'
ID_NOT_SPECIFIED = 'No ID specified for third argument'
SUPPORTED_OBJECTS = "Supported objects: %s" % (", ".join(OBJECTS))
SUPPORTED_ACTIONS = "Supported actions: %s" % (", ".join(ACTIONS))
ACTION_NOT_SUPPORTED = 'Action not supported for %s'
class RaisingOptionParser(optparse.OptionParser):
def error(self, msg):
self.print_usage(sys.stderr)
raise optparse.OptParseError(msg)
def parse_args(args=None):
usage = """
Usage: keystone-manage [options] type action [id [attributes]]
type : %s
action : %s
id : name or id
attributes : depending on type...
users : password, tenant
tokens : user, tenant, expiration
role list [tenant] will list roles granted on that tenant
options
-c | --config-file : config file to use
-d | --debug : debug mode
Example: keystone-manage user add Admin P@ssw0rd
""" % (", ".join(OBJECTS), ", ".join(ACTIONS))
    # Initialize a parser for our configuration parameters
parser = RaisingOptionParser(usage, version='%%prog %s'
% keystone.version())
_common_group = config.add_common_options(parser)
config.add_log_options(parser)
# Parse command-line and load config
(options, args) = config.parse_options(parser, args)
_config_file, conf = config.load_paste_config('admin', options, args)
config.setup_logging(options, conf)
db.configure_backends(conf.global_conf)
return args
def process(*args):
# Check arguments
if len(args) == 0:
raise optparse.OptParseError(OBJECT_NOT_SPECIFIED)
else:
object_type = args[0]
if object_type not in OBJECTS:
raise optparse.OptParseError(SUPPORTED_OBJECTS)
if len(args) == 1:
raise optparse.OptParseError(ACTION_NOT_SPECIFIED)
else:
action = args[1]
if action not in ACTIONS:
raise optparse.OptParseError(SUPPORTED_ACTIONS)
if action not in ['list']:
if len(args) == 2:
raise optparse.OptParseError(ID_NOT_SPECIFIED)
else:
object_id = args[2]
# Helper functions
def require_args(args, min, msg):
"""Ensure there are at least `min` arguments"""
if len(args) < min:
raise optparse.OptParseError(msg)
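    # optional_arg(args, x) returns args[x] when that positional argument was
    # supplied (and non-empty), otherwise None.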
optional_arg = (lambda args, x: len(args) > x and args[x] or None)
def print_table(header_row, rows):
"""Prints a lists of lists as table in a human readable format"""
print "\t".join(header_row)
print '-' * 79
rows = [[str(col) for col in row] for row in rows]
print "\n".join(["\t".join(row) for row in rows])
# Execute command
if (object_type, action) == ('user', 'add'):
require_args(args, 4, 'No password specified for fourth argument')
if api.add_user(name=object_id, password=args[3],
tenant=optional_arg(args, 4)):
print "SUCCESS: User %s created." % object_id
elif (object_type, action) == ('user', 'list'):
print_table(('id', 'name', 'enabled', 'tenant'), api.list_users())
elif (object_type, action) == ('user', 'disable'):
if api.disable_user(name=object_id):
print "SUCCESS: User %s disabled." % object_id
elif object_type == 'user':
raise optparse.OptParseError(ACTION_NOT_SUPPORTED % ('users'))
elif (object_type, action) == ('tenant', 'add'):
if api.add_tenant(name=object_id):
print "SUCCESS: Tenant %s created." % object_id
elif (object_type, action) == ('tenant', 'list'):
print_table(('id', 'name', 'enabled'), api.list_tenants())
elif (object_type, action) == ('tenant', 'disable'):
if api.disable_tenant(name=object_id):
print "SUCCESS: Tenant %s disabled." % object_id
elif object_type == 'tenant':
raise optparse.OptParseError(ACTION_NOT_SUPPORTED % ('tenants'))
elif (object_type, action) == ('role', 'add'):
if api.add_role(name=object_id):
print "SUCCESS: Role %s created successfully." % object_id
elif (object_type, action) == ('role', 'list'):
tenant = optional_arg(args, 2)
if tenant:
# print with users
print 'Role assignments for tenant %s' % tenant
print_table(('User', 'Role'), api.list_roles(tenant=tenant))
else:
# print without tenants
print_table(('id', 'name'), api.list_roles())
elif (object_type, action) == ('role', 'grant'):
require_args(args, 4, "Missing arguments: role grant 'role' 'user' "
"'tenant (optional)'")
tenant = optional_arg(args, 4)
if api.grant_role(object_id, args[3], tenant):
print("SUCCESS: Granted %s the %s role on %s." %
(args[3], object_id, tenant))
elif object_type == 'role':
raise optparse.OptParseError(ACTION_NOT_SUPPORTED % ('roles'))
elif (object_type, action) == ('endpointTemplates', 'add'):
require_args(args, 9, "Missing arguments: endpointTemplates add "
"'region' 'service' 'publicURL' 'adminURL' 'internalURL' "
"'enabled' 'global'")
if api.add_endpoint_template(region=args[2], service=args[3],
public_url=args[4], admin_url=args[5], internal_url=args[6],
enabled=args[7], is_global=args[8]):
print("SUCCESS: Created EndpointTemplates for %s pointing to %s." %
(args[3], args[4]))
elif (object_type, action) == ('endpointTemplates', 'list'):
tenant = optional_arg(args, 2)
if tenant:
print 'Endpoints for tenant %s' % tenant
print_table(('service', 'region', 'Public URL'),
api.list_tenant_endpoints(tenant))
else:
print 'All EndpointTemplates'
print_table(('service', 'region', 'Public URL'),
api.list_endpoint_templates())
elif object_type == 'endpointTemplates':
raise optparse.OptParseError(ACTION_NOT_SUPPORTED % (
'endpointTemplates'))
elif (object_type, action) == ('endpoint', 'add'):
require_args(args, 4, "Missing arguments: endPoint add tenant "
"endPointTemplate")
if api.add_endpoint(tenant=args[2], endpoint_template=args[3]):
print("SUCCESS: Endpoint %s added to tenant %s." %
(args[3], args[2]))
elif object_type == 'endpoint':
raise optparse.OptParseError(ACTION_NOT_SUPPORTED % ('endpoints'))
elif (object_type, action) == ('token', 'add'):
require_args(args, 6, 'Creating a token requires a token id, user, '
'tenant, and expiration')
if api.add_token(token=object_id, user=args[3], tenant=args[4],
expires=args[5]):
print "SUCCESS: Token %s created." % (object_id,)
elif (object_type, action) == ('token', 'list'):
print_table(('token', 'user', 'expiration', 'tenant'),
api.list_tokens())
elif (object_type, action) == ('token', 'delete'):
if api.delete_token(token=object_id):
print 'SUCCESS: Token %s deleted.' % (object_id,)
elif object_type == 'token':
raise optparse.OptParseError(ACTION_NOT_SUPPORTED % ('tokens'))
elif (object_type, action) == ('service', 'add'):
require_args(args, 4, "Missing arguments: service add name "
"type")
type = optional_arg(args, 3)
desc = optional_arg(args, 4)
if api.add_service(name=object_id, type=type, desc=desc):
print "SUCCESS: Service %s created successfully." % (object_id,)
elif (object_type, action) == ('service', 'list'):
print_table(('id', 'name', 'type'), api.list_services())
elif object_type == 'service':
raise optparse.OptParseError(ACTION_NOT_SUPPORTED % ('services'))
elif (object_type, action) == ('credentials', 'add'):
        require_args(args, 6, 'Creating credentials requires a type, key, '
'secret, and tenant_id (id is user_id)')
if api.add_credentials(user=object_id, type=args[3], key=args[4],
secrete=args[5], tenant=optional_arg(args, 6)):
print "SUCCESS: Credentials %s created." % object_id
elif object_type == 'credentials':
raise optparse.OptParseError(ACTION_NOT_SUPPORTED % ('credentials'))
else:
# Command recognized but not handled: should *never* reach this
raise NotImplementedError()
def main(args=None):
try:
process(*parse_args(args))
except optparse.OptParseError as exc:
print >> sys.stderr, exc
sys.exit(2)
except Exception as exc:
try:
info = exc.args[1]
except IndexError:
print "ERROR: %s" % (exc,)
logging.error(str(exc))
else:
print "ERROR: %s: %s" % (exc.args[0], info)
logging.error(exc.args[0], exc_info=info)
raise exc
if __name__ == '__main__':
try:
main()
except Exception as exc:
sys.exit(1)
| rcbops/keystone-buildpackage | keystone/manage/__init__.py | Python | apache-2.0 | 10,543 |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mycroft.tts import TTS, TTSValidator
from mycroft.tts.remote_tts import RemoteTTSTimeoutException
from mycroft.util.log import LOG
from mycroft.util.format import pronounce_number
from mycroft.tts import cache_handler
from mycroft.util import play_wav, get_cache_directory
from requests_futures.sessions import FuturesSession
from requests.exceptions import (
ReadTimeout, ConnectionError, ConnectTimeout, HTTPError
)
from urllib import parse
from .mimic_tts import VISIMES
import math
import base64
import os
import re
import json
# Heuristic value, caps character length of a chunk of text to be spoken as a
# work around for current Mimic2 implementation limits.
_max_sentence_size = 170
def _break_chunks(l, n):
""" Yield successive n-sized chunks
Args:
        l (list): list of words to chunk
        n (int): chunk size
"""
for i in range(0, len(l), n):
yield " ".join(l[i:i + n])
def _split_by_chunk_size(text, chunk_size):
""" Split text into word chunks by chunk_size size
Args:
text (str): text to split
chunk_size (int): chunk size
Returns:
list: list of text chunks
"""
text_list = text.split()
if len(text_list) <= chunk_size:
return [text]
if chunk_size < len(text_list) < (chunk_size * 2):
return list(_break_chunks(
text_list,
int(math.ceil(len(text_list) / 2))
))
elif (chunk_size * 2) < len(text_list) < (chunk_size * 3):
return list(_break_chunks(
text_list,
int(math.ceil(len(text_list) / 3))
))
elif (chunk_size * 3) < len(text_list) < (chunk_size * 4):
return list(_break_chunks(
text_list,
int(math.ceil(len(text_list) / 4))
))
else:
return list(_break_chunks(
text_list,
int(math.ceil(len(text_list) / 5))
))
def _split_by_punctuation(chunks, puncs):
"""splits text by various punctionations
e.g. hello, world => [hello, world]
Args:
chunks (list or str): text (str) to split
puncs (list): list of punctuations used to split text
Returns:
list: list with split text
"""
if isinstance(chunks, str):
out = [chunks]
else:
out = chunks
for punc in puncs:
splits = []
for t in out:
# Split text by punctuation, but not embedded punctuation. E.g.
# Split: "Short sentence. Longer sentence."
# But not at: "I.B.M." or "3.424", "3,424" or "what's-his-name."
splits += re.split(r'(?<!\.\S)' + punc + r'\s', t)
out = splits
return [t.strip() for t in out]
def _add_punctuation(text):
""" Add punctuation at the end of each chunk.
Mimic2 expects some form of punctuation at the end of a sentence.
"""
punctuation = ['.', '?', '!', ';']
if len(text) >= 1 and text[-1] not in punctuation:
return text + '.'
else:
return text
def _sentence_chunker(text):
""" Split text into smaller chunks for TTS generation.
NOTE: The smaller chunks are needed due to current Mimic2 TTS limitations.
This stage can be removed once Mimic2 can generate longer sentences.
Args:
text (str): text to split
Returns:
list: list of text chunks
"""
if len(text) <= _max_sentence_size:
return [_add_punctuation(text)]
# first split by punctuations that are major pauses
first_splits = _split_by_punctuation(
text,
puncs=[r'\.', r'\!', r'\?', r'\:', r'\;']
)
# if chunks are too big, split by minor pauses (comma, hyphen)
second_splits = []
for chunk in first_splits:
if len(chunk) > _max_sentence_size:
second_splits += _split_by_punctuation(chunk,
puncs=[r'\,', '--', '-'])
else:
second_splits.append(chunk)
# if chunks are still too big, chop into pieces of at most 20 words
third_splits = []
for chunk in second_splits:
if len(chunk) > _max_sentence_size:
third_splits += _split_by_chunk_size(chunk, 20)
else:
third_splits.append(chunk)
return [_add_punctuation(chunk) for chunk in third_splits]
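# Hedged illustration (added; values indicative only): text under
# _max_sentence_size is returned as a single chunk with punctuation
# normalised, e.g. _sentence_chunker("hi there") -> ["hi there."]. Longer
# text is split first at sentence-ending punctuation, then at commas and
# hyphens, and finally into ~20-word pieces so each chunk stays speakable.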
class Mimic2(TTS):
def __init__(self, lang, config):
super(Mimic2, self).__init__(
lang, config, Mimic2Validator(self)
)
try:
LOG.info("Getting Pre-loaded cache")
cache_handler.main(config['preloaded_cache'])
LOG.info("Successfully downloaded Pre-loaded cache")
except Exception as e:
LOG.error("Could not get the pre-loaded cache ({})"
.format(repr(e)))
self.url = config['url']
self.session = FuturesSession()
def _save(self, data):
""" Save WAV files in tmp
Args:
data (byes): WAV data
"""
with open(self.filename, 'wb') as f:
f.write(data)
def _play(self, req):
""" Play WAV file after saving to tmp
Args:
req (object): requests object
"""
if req.status_code == 200:
self._save(req.content)
play_wav(self.filename).communicate()
else:
LOG.error(
'%s Http Error: %s for url: %s' %
(req.status_code, req.reason, req.url))
def _requests(self, sentence):
"""create asynchronous request list
Args:
chunks (list): list of text to synthesize
Returns:
list: list of FutureSession objects
"""
url = self.url + parse.quote(sentence)
req_route = url + "&visimes=True"
return self.session.get(req_route, timeout=5)
def viseme(self, phonemes):
""" Maps phonemes to appropriate viseme encoding
Args:
phonemes (list): list of tuples (phoneme, time_start)
Returns:
list: list of tuples (viseme_encoding, time_start)
"""
visemes = []
for pair in phonemes:
if pair[0]:
phone = pair[0].lower()
else:
                # if the phoneme doesn't exist, use 'z' as a placeholder
                # since its viseme ("3") is the most common one
phone = 'z'
vis = VISIMES.get(phone)
vis_dur = float(pair[1])
visemes.append((vis, vis_dur))
return visemes
    def _prepocess_sentence(self, sentence):
""" Split sentence in chunks better suited for mimic2. """
return _sentence_chunker(sentence)
def get_tts(self, sentence, wav_file):
""" Generate (remotely) and play mimic2 WAV audio
Args:
sentence (str): Phrase to synthesize to audio with mimic2
wav_file (str): Location to write audio output
"""
LOG.debug("Generating Mimic2 TSS for: " + str(sentence))
try:
req = self._requests(sentence)
results = req.result().json()
audio = base64.b64decode(results['audio_base64'])
vis = results['visimes']
with open(wav_file, 'wb') as f:
f.write(audio)
except (ReadTimeout, ConnectionError, ConnectTimeout, HTTPError):
raise RemoteTTSTimeoutException(
"Mimic 2 server request timed out. Falling back to mimic")
return (wav_file, vis)
def save_phonemes(self, key, phonemes):
"""
Cache phonemes
Args:
key: Hash key for the sentence
phonemes: phoneme string to save
"""
cache_dir = get_cache_directory("tts/" + self.tts_name)
pho_file = os.path.join(cache_dir, key + ".pho")
try:
with open(pho_file, "w") as cachefile:
cachefile.write(json.dumps(phonemes))
except Exception:
LOG.exception("Failed to write {} to cache".format(pho_file))
def load_phonemes(self, key):
"""
Load phonemes from cache file.
Args:
Key: Key identifying phoneme cache
"""
pho_file = os.path.join(get_cache_directory("tts/" + self.tts_name),
key + ".pho")
if os.path.exists(pho_file):
try:
with open(pho_file, "r") as cachefile:
phonemes = json.load(cachefile)
return phonemes
except Exception as e:
LOG.error("Failed to read .PHO from cache ({})".format(e))
return None
class Mimic2Validator(TTSValidator):
def __init__(self, tts):
super(Mimic2Validator, self).__init__(tts)
def validate_lang(self):
# TODO
pass
def validate_connection(self):
# TODO
pass
def get_tts_class(self):
return Mimic2
| Dark5ide/mycroft-core | mycroft/tts/mimic2_tts.py | Python | apache-2.0 | 9,665 |
import json
from collections import OrderedDict
from MoviesTemplateCJ import *
class MoviesCollectionTemplateCJ:
"""simple template to represent Collection+JSON Responses"""
def __init__(self, root):
self.container = dict()
self.href = root + 'movies'
self.version = '1.0'
self.links = []
self.template = {'data': MoviesTemplateCJ.create()}
self.queries = []
self.items = []
#queries
movie_name_query = {
'href': root + 'movies',
'rel': 'search',
'rt': 'movie',
'name': 'movie-search',
'prompt': 'Movie-Search By Name',
'data': [dict(name='name', value='', prompt='Name')]
}
self.queries.append(movie_name_query)
def to_json(self):
collection = OrderedDict([
('href', self.href),
('version', self.version),
])
if len(self.links) > 0:
collection['links'] = self.links
if len(self.items) > 0:
collection['items'] = self.items
if len(self.queries) > 0:
collection['queries'] = self.queries
        collection['template'] = self.template
self.container['collection'] = collection
return json.dumps(self.container)
def to_json_pretty(self):
collection = OrderedDict([
('href', self.href),
('version', self.version),
])
if len(self.links) > 0:
collection['links'] = self.links
if len(self.items) > 0:
collection['items'] = self.items
if len(self.queries) > 0:
collection['queries'] = self.queries
        collection['template'] = self.template
self.container['collection'] = collection
return json.dumps(self.container, sort_keys=False, indent=4, separators=(',', ': '))
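# Hedged usage sketch (added; assumes MoviesTemplateCJ.create() imported above
# is available at runtime):
#
#     collection = MoviesCollectionTemplateCJ('http://localhost:8000/api/')
#     print(collection.to_json_pretty())
#
# This emits a Collection+JSON document containing the href, version, the
# movie-search query and the write template, with no items yet.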
| bryanbarnard/RESTfulMoviesinPython | src/python/server/templates/MoviesCollectionTemplateCJ.py | Python | mit | 1,890 |
#! /usr/bin/python
import sys,socket
from threading import Thread
PORT=50607
USAGE="\n \t usage: tcpchat.py server|client <ip address of server>"
if len(sys.argv)==1:
print USAGE
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def ClientRecv(sock):
while True:
data = sock.recv(1024)
if not data: sys.exit(0)
if str(data)=="stop":
sys.exit(0)
print data, "\n"
def ClientSend(sock):
while 1:
message = raw_input(">>>")
str(message)
sock.sendall(message)
print "\n \t Welcome to TCP chat"
if sys.argv[1]=="server":
if len(sys.argv)<3:
print "\n \t Please specify your IP address"
print USAGE
else:
HOST=sys.argv[2]
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
while True:
SocketAddr=s.getsockname()
print "listening at ", SocketAddr
client, ClientAddr = s.accept()
print "\n Accepted connection from", ClientAddr
print "\n Connected is establishde between ", client.getsockname(), "and", client.getpeername()
message = client.recv(16)
print "Client's 16 bit message is", repr(message)
client.sendall("\nClosing Connection")
message = client.recv(16)
print "Client's 16 bit message is", repr(message)
client.close()
print "Socket Closed"
elif sys.argv[1]=="client":
if len(sys.argv)<3:
print "\n \t Please specify your IP address"
print USAGE
else:
HOST=sys.argv[2]
s.connect((HOST,PORT))
print "\n Connected"
ClientAddr=s.getsockname()
print "\nclient has been assigned the address ", ClientAddr
Thread(target=ClientRecv,args=(s,)).start()
ClientSend(s)
else:
        print USAGE
 | ghoshbishakh/filedrop | tests/tcpchat.py | Python | gpl-2.0 | 1725 |
import github3
from tests.utils import BaseCase, load
class TestThread(BaseCase):
def __init__(self, methodName='runTest'):
super(TestThread, self).__init__(methodName)
self.thread = github3.notifications.Thread(load('notification'))
self.api = ("https://api.github.com/notifications/threads/6169361")
def test_subscription(self):
self.response('subscription')
self.get(self.api + '/subscription')
assert isinstance(self.thread.subscription(),
github3.notifications.Subscription)
self.mock_assertions()
class TestSubscription(BaseCase):
def __init__(self, methodName='runTest'):
super(TestSubscription, self).__init__(methodName)
self.subscription = github3.notifications.Subscription(
load('subscription'))
self.api = ("https://api.github.com/notifications/threads/5864188/"
"subscription")
def test_set(self):
self.response('subscription')
self.put(self.api)
self.conf = {'data': {'subscribed': True, 'ignored': False}}
assert self.subscription.set(True, False) is None
self.mock_assertions()
| h4ck3rm1k3/github3.py | tests/test_notifications.py | Python | bsd-3-clause | 1,197 |
# -*- coding: utf-8 -*-
#
# application.py
#
# Copyright © 2016-2017 Antergos
#
# This file is part of whither.
#
# whither is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# whither is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with whither; If not, see <http://www.gnu.org/licenses/>.
""" Wrapper for QMainWindow """
# Standard Lib
import sys
# 3rd-Party Libs
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
# This Lib
from whither.base.objects import Application
class QtApplication(Application):
def __init__(self, *args, **kwargs) -> None:
super().__init__(name='_app', *args, **kwargs)
self.widget = QApplication(sys.argv)
self.is_qt, self.is_gtk = True, False
self.desktop = self.widget.desktop()
self.widget.setAttribute(Qt.AA_EnableHighDpiScaling)
def _set_window_size_position(self) -> None:
if self._config.window.no_desktop_env is False:
return
self._main_window.widget.setGeometry(self.desktop.availableGeometry())
def run(self) -> int:
super().run()
self.logger.debug('Setting window size and position')
self._set_window_size_position()
return self.widget.exec_()
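# Illustrative usage sketch (comment only, not part of the original file; it assumes
# the base Application can be constructed without extra arguments here):
#   app = QtApplication()
#   sys.exit(app.run())   # run() ends up in QApplication.exec_()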
| Antergos/whither | whither/toolkits/qt/application.py | Python | gpl-3.0 | 1,921 |
# -*- coding: utf-8 -*-
# Copyright 2010 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module contains code for defining and managing custom
commands.
'''
import gtk
import logging
from zim.gui.applications import CustomToolManager
from zim.gui.widgets import Dialog, IconButton, IconChooserButton
from zim.fs import File
logger = logging.getLogger('zim.gui')
class CustomToolManagerDialog(Dialog):
def __init__(self, ui):
Dialog.__init__(self, ui, _('Custom Tools'), buttons=gtk.BUTTONS_CLOSE) # T: Dialog title
self.set_help(':Help:Custom Tools')
self.manager = CustomToolManager()
self.add_help_text(_(
'You can configure custom tools that will appear\n'
'in the tool menu and in the tool bar or context menus.'
)) # T: help text in "Custom Tools" dialog
hbox = gtk.HBox(spacing=5)
self.vbox.add(hbox)
self.listview = CustomToolList(self.manager)
hbox.add(self.listview)
vbox = gtk.VBox(spacing=5)
hbox.pack_start(vbox, False)
for stock, handler, data in (
(gtk.STOCK_ADD, self.__class__.on_add, None),
(gtk.STOCK_EDIT, self.__class__.on_edit, None),
(gtk.STOCK_DELETE, self.__class__.on_delete, None),
(gtk.STOCK_GO_UP, self.__class__.on_move, -1),
(gtk.STOCK_GO_DOWN, self.__class__.on_move, 1),
):
button = IconButton(stock) # TODO tooltips for icon button
if data:
button.connect_object('clicked', handler, self, data)
else:
button.connect_object('clicked', handler, self)
vbox.pack_start(button, False)
def on_add(self):
properties = EditCustomToolDialog(self).run()
if properties:
self.manager.create(**properties)
self.listview.refresh()
def on_edit(self):
name = self.listview.get_selected()
if name:
tool = self.manager.get_tool(name)
properties = EditCustomToolDialog(self, tool=tool).run()
if properties:
tool.update(**properties)
tool.write()
self.listview.refresh()
def on_delete(self):
name = self.listview.get_selected()
if name:
self.manager.delete(name)
self.listview.refresh()
def on_move(self, step):
name = self.listview.get_selected()
if name:
i = self.manager.index(name)
self.manager.reorder(name, i + step)
self.listview.refresh()
self.listview.select(i + step)
class CustomToolList(gtk.TreeView):
PIXBUF_COL = 0
TEXT_COL = 1
NAME_COL = 2
def __init__(self, manager):
self.manager = manager
model = gtk.ListStore(gtk.gdk.Pixbuf, str, str)
# PIXBUF_COL, TEXT_COL, NAME_COL
gtk.TreeView.__init__(self, model)
self.get_selection().set_mode(gtk.SELECTION_BROWSE)
self.set_headers_visible(False)
cr = gtk.CellRendererPixbuf()
column = gtk.TreeViewColumn('_pixbuf_', cr, pixbuf=self.PIXBUF_COL)
self.append_column(column)
cr = gtk.CellRendererText()
column = gtk.TreeViewColumn('_text_', cr, markup=self.TEXT_COL)
self.append_column(column)
self.refresh()
def get_selected(self):
model, iter = self.get_selection().get_selected()
if model and iter:
return model[iter][self.NAME_COL]
else:
return None
def select(self, i):
path = (i, )
self.get_selection().select_path(path)
def refresh(self):
from zim.gui.widgets import encode_markup_text
model = self.get_model()
model.clear()
for tool in self.manager:
pixbuf = tool.get_pixbuf(gtk.ICON_SIZE_MENU)
text = '<b>%s</b>\n%s' % (encode_markup_text(tool.name), encode_markup_text(tool.comment))
model.append((pixbuf, text, tool.key))
class EditCustomToolDialog(Dialog):
def __init__(self, ui, tool=None):
Dialog.__init__(self, ui, _('Edit Custom Tool')) # T: Dialog title
self.set_help(':Help:Custom Tools')
self.vbox.set_spacing(12)
if tool:
name = tool.name
comment = tool.comment
execcmd = tool.execcmd
readonly = tool.isreadonly
toolbar = tool.showintoolbar
replaceselection = tool.replaceselection
else:
name = ''
comment = ''
execcmd = ''
readonly = False
toolbar = False
replaceselection = False
self.add_form((
('Name', 'string', _('Name')), # T: Input in "Edit Custom Tool" dialog
('Comment', 'string', _('Description')), # T: Input in "Edit Custom Tool" dialog
('X-Zim-ExecTool', 'string', _('Command')), # T: Input in "Edit Custom Tool" dialog
), {
'Name': name,
'Comment': comment,
'X-Zim-ExecTool': execcmd,
}, trigger_response=False)
# FIXME need ui builder to take care of this as well
self.iconbutton = IconChooserButton(stock=gtk.STOCK_EXECUTE)
if tool and tool.icon and tool.icon != gtk.STOCK_EXECUTE:
try:
self.iconbutton.set_file(File(tool.icon))
except Exception as error:
logger.exception('Could not load: %s', tool.icon)
label = gtk.Label(_('Icon') + ':') # T: Input in "Edit Custom Tool" dialog
label.set_alignment(0.0, 0.5)
hbox = gtk.HBox()
i = self.form.get_property('n-rows')
self.form.attach(label, 0, 1, i, i + 1, xoptions=0)
self.form.attach(hbox, 1, 2, i, i + 1)
hbox.pack_start(self.iconbutton, False)
self.form.add_inputs((
('X-Zim-ReadOnly', 'bool', _('Command does not modify data')), # T: Input in "Edit Custom Tool" dialog
('X-Zim-ReplaceSelection', 'bool', _('Output should replace current selection')), # T: Input in "Edit Custom Tool" dialog
('X-Zim-ShowInToolBar', 'bool', _('Show in the toolbar')), # T: Input in "Edit Custom Tool" dialog
))
self.form.update({
'X-Zim-ReadOnly': readonly,
'X-Zim-ReplaceSelection': replaceselection,
'X-Zim-ShowInToolBar': toolbar,
})
self.add_help_text(_('''\
The following parameters will be substituted
in the command when it is executed:
<tt>
<b>%f</b> the page source as a temporary file
<b>%d</b> the attachment directory of the current page
<b>%s</b> the real page source file (if any)
<b>%p</b> the page name
<b>%n</b> the notebook location (file or folder)
<b>%D</b> the document root (if any)
<b>%t</b> the selected text or word under cursor
<b>%T</b> the selected text including wiki formatting
</tt>
''') ) # T: Short help text in "Edit Custom Tool" dialog. The "%" is literal - please include the html formatting
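        # Illustrative example (comment added for clarity; tool names are hypothetical):
        # a custom tool command using these placeholders might be entered as
        #   gedit %f              (open the page source in an external editor)
        #   zip -r notes.zip %d   (archive the current page's attachment directory)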
def do_response_ok(self):
fields = self.form.copy()
fields['Icon'] = self.iconbutton.get_file() or None
self.result = fields
return True
| Osndok/zim-desktop-wiki | zim/gui/customtools.py | Python | gpl-2.0 | 6,194 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.fields
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtaildocs', '0002_initial_data'),
('wagtailcore', '0010_change_page_owner_to_null_on_delete'),
('events', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EventIndexPage',
fields=[
('page_ptr', models.OneToOneField(to='wagtailcore.Page', primary_key=True, auto_created=True, parent_link=True, serialize=False)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventIndexPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('title', models.CharField(help_text='Link title', max_length=255)),
],
options={
'abstract': False,
'ordering': ['sort_order'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPage',
fields=[
('page_ptr', models.OneToOneField(to='wagtailcore.Page', primary_key=True, auto_created=True, parent_link=True, serialize=False)),
('date_from', models.DateField(verbose_name='Start date')),
('date_to', models.DateField(blank=True, verbose_name='End date', null=True, help_text='Leave this empty if the event is on a single day')),
('time_from', models.TimeField(blank=True, verbose_name='Start time', null=True)),
('time_to', models.TimeField(blank=True, verbose_name='End time', null=True)),
('event_type', models.CharField(choices=[('meeting', 'Committee Meeting'), ('event', 'Event'), ('group', 'Group Activity'), ('private', 'Private'), ('other', 'Other')], max_length=255)),
('group', models.CharField(blank=True, verbose_name='Group/Committee', choices=[('propfin', 'Property & Finance Committee'), ('fundr', 'Fundraising Committee'), ('comms', 'Communications Committee')], max_length=255)),
('speaker', models.CharField(blank=True, max_length=255)),
('location', models.CharField(blank=True, max_length=255)),
('cost', models.CharField(blank=True, max_length=255)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('title', models.CharField(help_text='Link title', max_length=255)),
('link_document', models.ForeignKey(to='wagtaildocs.Document', null=True, related_name='+', blank=True)),
('link_page', models.ForeignKey(to='wagtailcore.Page', null=True, related_name='+', blank=True)),
('page', modelcluster.fields.ParentalKey(to='events.EventPage', related_name='related_links')),
],
options={
'abstract': False,
'ordering': ['sort_order'],
},
bases=(models.Model,),
),
]
| linuxsoftware/blue-wagtail-site | events/migrations/0002_eventindexpage_eventindexpagerelatedlink_eventpage_eventpagerelatedlink.py | Python | mit | 4,072 |
## \file
## \ingroup tutorial_pyroot
## This macro is an example of graphs in log scales with annotations.
##
## The presented results
## are predictions of invariant cross-section of Direct Photons produced
## at RHIC energies, based on the universality of scaling function H(z).
##
##
## These Figures were published in JINR preprint E2-98-64, Dubna,
## 1998 and submitted to CPC.
##
## \macro_image
## \macro_code
##
## \authors Michael Tokarev, Elena Potrebenikova (JINR Dubna)
import ROOT
from array import array
from math import *
NMAX = 20
Z = array( 'f', [0.]*NMAX )
HZ = array( 'f', [0.]*NMAX )
PT = array( 'f', [0.]*NMAX )
INVSIG = array( 'f', [0.]*NMAX )
NLOOP = 0
saves = {}
#_______________________________________________________________________________
def zdemo():
global NLOOP
global Z, HZ, PT, INVSIG
global saves
# Create a new canvas.
c1 = ROOT.TCanvas( 'zdemo', 'Monte Carlo Study of Z scaling', 10, 40, 800, 600 )
c1.Range( 0, 0, 25, 18 )
c1.SetFillColor( 40 )
   saves[ 'c1' ] = c1 # prevent deletion at end of zdemo
pl = ROOT.TPaveLabel( 1, 16.3, 24, 17.5,
'Z-scaling of Direct Photon Productions in pp Collisions at RHIC Energies', 'br' )
pl.SetFillColor(18)
pl.SetTextFont(32)
pl.SetTextColor(49)
pl.Draw()
saves[ 'pl' ] = pl
t = ROOT.TLatex()
t.SetTextFont(32)
t.SetTextColor(1)
t.SetTextSize(0.03)
t.SetTextAlign(12)
t.DrawLatex( 3.1, 15.5, 'M.Tokarev, E.Potrebenikova ')
t.DrawLatex( 14., 15.5, 'JINR preprint E2-98-64, Dubna, 1998 ')
saves[ 't' ] = t
pad1 = ROOT.TPad( 'pad1', 'This is pad1', 0.02, 0.02, 0.48, 0.83, 33 )
pad2 = ROOT.TPad( 'pad2', 'This is pad2', 0.52, 0.02, 0.98, 0.83, 33 )
pad1.Draw()
pad2.Draw()
saves[ 'pad1' ] = pad1; saves[ 'pad2' ] = pad2
#
   # Cross-section of direct photon production in pp collisions at 63 GeV vs Pt
#
energ = 63
dens = 1.766
tgrad = 90.
ptmin = 4.
ptmax = 24.
delp = 2.
hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
pad1.cd()
pad1.Range( -0.255174, -19.25, 2.29657, -6.75 )
pad1.SetLogx()
pad1.SetLogy()
# create a 2-d histogram to define the range
pad1.DrawFrame( 1, 1e-18, 110, 1e-8 )
pad1.GetFrame().SetFillColor( 19 )
t = ROOT.TLatex()
t.SetNDC()
t.SetTextFont( 62 )
t.SetTextColor( 36 )
t.SetTextSize( 0.08 )
t.SetTextAlign( 12 )
t.DrawLatex( 0.6, 0.85, 'p - p' )
t.SetTextSize( 0.05 )
t.DrawLatex( 0.6, 0.79, 'Direct #gamma' )
t.DrawLatex( 0.6, 0.75, '#theta = 90^{o}' )
t.DrawLatex( 0.20, 0.45, 'Ed^{3}#sigma/dq^{3}' )
t.DrawLatex( 0.18, 0.40, '(barn/Gev^{2})' )
t.SetTextSize( 0.045 )
t.SetTextColor( ROOT.kBlue )
t.DrawLatex( 0.22, 0.260, '#sqrt{s} = 63(GeV)' )
t.SetTextColor( ROOT.kRed )
t.DrawLatex( 0.22, 0.205,'#sqrt{s} = 200(GeV)' )
t.SetTextColor( 6 )
t.DrawLatex( 0.22, 0.15, '#sqrt{s} = 500(GeV)' )
t.SetTextSize( 0.05 )
t.SetTextColor( 1 )
t.DrawLatex( 0.6, 0.06, 'q_{T} (Gev/c)' )
saves[ 't2' ] = t # note the label that is used!
gr1 = ROOT.TGraph( NLOOP, PT, INVSIG )
gr1.SetLineColor( 38 )
gr1.SetMarkerColor( ROOT.kBlue )
gr1.SetMarkerStyle( 21 )
gr1.SetMarkerSize( 1.1 )
gr1.Draw( 'LP' )
saves[ 'gr1' ] = gr1
#
# Cross-section of direct photon production in pp collisions at 200 GeV vs Pt
#
energ = 200
dens = 2.25
tgrad = 90.
ptmin = 4.
ptmax = 64.
delp = 6.
hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
gr2 = ROOT.TGraph( NLOOP, PT, INVSIG )
gr2.SetLineColor( 38 )
gr2.SetMarkerColor( ROOT.kRed )
gr2.SetMarkerStyle( 29 )
gr2.SetMarkerSize( 1.5 )
gr2.Draw( 'LP' )
saves[ 'gr2' ] = gr2
#
# Cross-section of direct photon production in pp collisions at 500 GeV vs Pt
#
energ = 500
dens = 2.73
tgrad = 90.
ptmin = 4.
ptmax = 104.
delp = 10.
hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
gr3 = ROOT.TGraph( NLOOP, PT, INVSIG )
gr3.SetLineColor( 38 )
gr3.SetMarkerColor( 6 )
gr3.SetMarkerStyle( 8 )
gr3.SetMarkerSize( 1.1 )
gr3.Draw( 'LP' )
saves[ 'gr3' ] = gr3
dum = array( 'f', [0.] )
graph = ROOT.TGraph( 1, dum, dum )
graph.SetMarkerColor( ROOT.kBlue )
graph.SetMarkerStyle( 21 )
graph.SetMarkerSize( 1.1 )
graph.SetPoint( 0, 1.7, 1.e-16 )
graph.Draw( 'LP' )
saves[ 'graph' ] = graph
graph = ROOT.TGraph( 1, dum, dum )
graph.SetMarkerColor( ROOT.kRed )
graph.SetMarkerStyle( 29 )
graph.SetMarkerSize( 1.5 )
graph.SetPoint( 0, 1.7, 2.e-17 )
graph.Draw( 'LP' )
saves[ 'graph2' ] = graph # note the label that is used!
graph = ROOT.TGraph( 1, dum, dum )
graph.SetMarkerColor( 6 )
graph.SetMarkerStyle( 8 )
graph.SetMarkerSize( 1.1 )
graph.SetPoint( 0, 1.7, 4.e-18)
graph.Draw( 'LP' )
saves[ 'graph3' ] = graph # note the label that is used!
pad2.cd()
pad2.Range( -0.43642, -23.75, 3.92778, -6.25 )
pad2.SetLogx()
pad2.SetLogy()
pad2.DrawFrame( 1, 1e-22, 3100, 1e-8 )
pad2.GetFrame().SetFillColor( 19 )
gr = ROOT.TGraph( NLOOP, Z, HZ )
gr.SetTitle( 'HZ vs Z' )
gr.SetFillColor( 19 )
gr.SetLineColor( 9 )
gr.SetMarkerColor( 50 )
gr.SetMarkerStyle( 29 )
gr.SetMarkerSize( 1.5 )
gr.Draw( 'LP' )
saves[ 'gr' ] = gr
t = ROOT.TLatex()
t.SetNDC()
t.SetTextFont( 62 )
t.SetTextColor( 36 )
t.SetTextSize( 0.08 )
t.SetTextAlign( 12 )
t.DrawLatex( 0.6, 0.85, 'p - p' )
t.SetTextSize( 0.05 )
t.DrawLatex( 0.6, 0.79, 'Direct #gamma' )
t.DrawLatex( 0.6, 0.75, '#theta = 90^{o}' )
t.DrawLatex( 0.70, 0.55, 'H(z)' )
t.DrawLatex( 0.68, 0.50, '(barn)' )
t.SetTextSize( 0.045 )
t.SetTextColor( 46 )
t.DrawLatex( 0.20, 0.30, '#sqrt{s}, GeV' )
t.DrawLatex( 0.22, 0.26, '63' )
t.DrawLatex( 0.22, 0.22, '200' )
t.DrawLatex( 0.22, 0.18, '500' )
t.SetTextSize( 0.05 )
t.SetTextColor( 1 )
t.DrawLatex( 0.88, 0.06, 'z' )
saves[ 't3' ] = t # note the label that is used!
c1.Modified()
c1.Update()
#_______________________________________________________________________________
def hz_calc( ENERG, DENS, TGRAD, PTMIN, PTMAX, DELP ):
global NLOOP
global Z, HZ, PT, INVSIG
CSEFT= 1.
GM1 = 0.00001
GM2 = 0.00001
A1 = 1.
A2 = 1.
ALX = 2.
BETA = 1.
KF1 = 8.E-7
KF2 = 5.215
MN = 0.9383
DEGRAD=0.01745329
# print 'ENR= %f DENS= %f PTMIN= %f PTMAX= %f DELP= %f ' % (ENERG,DENS,PTMIN,PTMAX,DELP)
DNDETA= DENS
MB1 = MN*A1
MB2 = MN*A2
EB1 = ENERG/2.*A1
EB2 = ENERG/2.*A2
M1 = GM1
M2 = GM2
THET = TGRAD*DEGRAD
NLOOP = int((PTMAX-PTMIN)/DELP)
for I in range(NLOOP):
PT[I]=PTMIN+I*DELP
PTOT = PT[I]/sin(THET)
ETOT = sqrt(M1*M1 + PTOT*PTOT)
PB1 = sqrt(EB1*EB1 - MB1*MB1)
PB2 = sqrt(EB2*EB2 - MB2*MB2)
P2P3 = EB2*ETOT+PB2*PTOT*cos(THET)
P1P2 = EB2*EB1+PB2*PB1
P1P3 = EB1*ETOT-PB1*PTOT*cos(THET)
X1 = P2P3/P1P2
X2 = P1P3/P1P2
Y1 = X1+sqrt(X1*X2*(1.-X1)/(1.-X2))
Y2 = X2+sqrt(X1*X2*(1.-X2)/(1.-X1))
S = (MB1*MB1)+2.*P1P2+(MB2*MB2)
SMIN = 4.*((MB1*MB1)*(X1*X1) +2.*X1*X2*P1P2+(MB2*MB2)*(X2*X2))
SX1 = 4.*( 2*(MB1*MB1)*X1+2*X2*P1P2)
SX2 = 4.*( 2*(MB2*MB2)*X2+2*X1*P1P2)
SX1X2= 4.*(2*P1P2)
DELM = pow((1.-Y1)*(1.-Y2),ALX)
Z[I] = sqrt(SMIN)/DELM/pow(DNDETA,BETA)
Y1X1 = 1. +X2*(1-2.*X1)/(2.*(Y1-X1)*(1.-X2))
Y1X2 = X1*(1-X1)/(2.*(Y1-X1)*(1.-X2)*(1.-X2))
Y2X1 = X2*(1-X2)/(2.*(Y2-X2)*(1.-X1)*(1.-X1))
Y2X2 = 1. +X1*(1-2.*X2)/(2.*(Y2-X2)*(1.-X1))
Y2X1X2= Y2X1*( (1.-2.*X2)/(X2*(1-X2)) -( Y2X2-1.)/(Y2-X2))
Y1X1X2= Y1X2*( (1.-2.*X1)/(X1*(1-X1)) -( Y1X1-1.)/(Y1-X1))
KX1=-DELM*(Y1X1*ALX/(1.-Y1) + Y2X1*ALX/(1.-Y2))
KX2=-DELM*(Y2X2*ALX/(1.-Y2) + Y1X2*ALX/(1.-Y1))
ZX1=Z[I]*(SX1/(2.*SMIN)-KX1/DELM)
ZX2=Z[I]*(SX2/(2.*SMIN)-KX2/DELM)
H1=ZX1*ZX2
HZ[I]=KF1/pow(Z[I],KF2)
INVSIG[I]=(HZ[I]*H1*16.)/S
# run if loaded as script
if __name__ == '__main__':
zdemo()
| krafczyk/root | tutorials/pyroot/zdemo.py | Python | lgpl-2.1 | 8,180 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import six.moves.urllib.parse as urlparse
import swiftclient
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard.api import base
from openstack_dashboard.contrib.developer.profiler import api as profiler
FOLDER_DELIMITER = "/"
CHUNK_SIZE = getattr(settings, 'SWIFT_FILE_TRANSFER_CHUNK_SIZE', 512 * 1024)
# Swift ACL
GLOBAL_READ_ACL = ".r:*"
LIST_CONTENTS_ACL = ".rlistings"
class Container(base.APIDictWrapper):
pass
class StorageObject(base.APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
@property
def id(self):
return self.name
class PseudoFolder(base.APIDictWrapper):
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
@property
def id(self):
return '%s/%s' % (self.container_name, self.name)
@property
def name(self):
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
return 0
@property
def content_type(self):
return "application/pseudo-folder"
def _objectify(items, container_name):
"""Splits a listing of objects into their appropriate wrapper classes."""
objects = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("subdir", None) is not None:
object_cls = PseudoFolder
else:
object_cls = StorageObject
objects.append(object_cls(item, container_name))
return objects
def _metadata_to_header(metadata):
headers = {}
public = metadata.get('is_public')
if public is True:
public_container_acls = [GLOBAL_READ_ACL, LIST_CONTENTS_ACL]
headers['x-container-read'] = ",".join(public_container_acls)
elif public is False:
headers['x-container-read'] = ""
return headers
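# Example of the mapping above (comment added for clarity):
#   _metadata_to_header({'is_public': True})  -> {'x-container-read': '.r:*,.rlistings'}
#   _metadata_to_header({'is_public': False}) -> {'x-container-read': ''}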
def swift_api(request):
endpoint = base.url_for(request, 'object-store')
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
return swiftclient.client.Connection(None,
request.user.username,
None,
preauthtoken=request.user.token.id,
preauthurl=endpoint,
cacert=cacert,
insecure=insecure,
auth_version="2.0")
@profiler.trace
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
@profiler.trace
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
@profiler.trace
def swift_get_containers(request, marker=None, prefix=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
marker=marker,
prefix=prefix,
full_listing=True)
container_objs = [Container(c) for c in containers]
if(len(container_objs) > limit):
return (container_objs[0:-1], True)
else:
return (container_objs, False)
@profiler.trace
def swift_get_container(request, container_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name, "")
else:
data = None
headers = swift_api(request).head_container(container_name)
timestamp = None
is_public = False
public_url = None
try:
is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '')
if is_public:
swift_endpoint = base.url_for(request,
'object-store',
endpoint_type='publicURL')
parameters = urlparse.quote(container_name.encode('utf8'))
public_url = swift_endpoint + '/' + parameters
ts_float = float(headers.get('x-timestamp'))
timestamp = datetime.utcfromtimestamp(ts_float).isoformat()
except Exception:
pass
container_info = {
'name': container_name,
'container_object_count': headers.get('x-container-object-count'),
'container_bytes_used': headers.get('x-container-bytes-used'),
'timestamp': timestamp,
'data': data,
'is_public': is_public,
'public_url': public_url,
}
return Container(container_info)
@profiler.trace
def swift_create_container(request, name, metadata=({})):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
headers = _metadata_to_header(metadata)
swift_api(request).put_container(name, headers=headers)
return Container({'name': name})
@profiler.trace
def swift_update_container(request, name, metadata=({})):
headers = _metadata_to_header(metadata)
swift_api(request).post_container(name, headers=headers)
return Container({'name': name})
@profiler.trace
def swift_delete_container(request, name):
    # It cannot be deleted if it's not empty. The batch removal of objects
    # should be done in swiftclient instead of Horizon.
objects, more = swift_get_objects(request, name)
if objects:
error_msg = _("The container cannot be deleted "
"since it is not empty.")
exc = exceptions.Conflict(error_msg)
raise exc
swift_api(request).delete_container(name)
return True
@profiler.trace
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
kwargs = dict(prefix=prefix,
marker=marker,
limit=limit + 1,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = swift_api(request).get_container(container_name,
**kwargs)
object_objs = _objectify(objects, container_name)
if(len(object_objs) > limit):
return (object_objs[0:-1], True)
else:
return (object_objs, False)
@profiler.trace
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
# parameter here won't actually help the pagination. For now I am just
# getting the largest number of objects from a container and filtering
# based on those objects.
limit = 9999
objects = swift_get_objects(request,
container_name,
prefix=prefix,
marker=marker,
limit=limit)
filter_string_list = filter_string.lower().strip().split(' ')
def matches_filter(obj):
for q in filter_string_list:
return wildcard_search(obj.name.lower(), q)
return filter(matches_filter, objects[0])
def wildcard_search(string, q):
q_list = q.split('*')
if all(map(lambda x: x == '', q_list)):
return True
elif q_list[0] not in string:
return False
else:
if q_list[0] == '':
tail = string
else:
head, delimiter, tail = string.partition(q_list[0])
return wildcard_search(tail, '*'.join(q_list[1:]))
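# Illustrative behaviour of wildcard_search (comment added for clarity):
#   wildcard_search('report.txt', '*.txt')  -> True
#   wildcard_search('report.txt', 'img*')   -> False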
@profiler.trace
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
if swift_object_exists(request, new_container_name, new_object_name):
raise exceptions.AlreadyExists(new_object_name, 'object')
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
return swift_api(request).put_object(new_container_name,
new_object_name,
None,
headers=headers)
@profiler.trace
def swift_upload_object(request, container_name, object_name,
object_file=None):
headers = {}
size = 0
if object_file:
headers['X-Object-Meta-Orig-Filename'] = object_file.name
size = object_file.size
etag = swift_api(request).put_object(container_name,
object_name,
object_file,
content_length=size,
headers=headers)
obj_info = {'name': object_name, 'bytes': size, 'etag': etag}
return StorageObject(obj_info, container_name)
@profiler.trace
def swift_create_pseudo_folder(request, container_name, pseudo_folder_name):
# Make sure the folder name doesn't already exist.
if swift_object_exists(request, container_name, pseudo_folder_name):
name = pseudo_folder_name.strip('/')
raise exceptions.AlreadyExists(name, 'pseudo-folder')
headers = {}
etag = swift_api(request).put_object(container_name,
pseudo_folder_name,
None,
headers=headers)
obj_info = {
'name': pseudo_folder_name,
'etag': etag
}
return PseudoFolder(obj_info, container_name)
@profiler.trace
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
@profiler.trace
def swift_delete_folder(request, container_name, object_name):
objects, more = swift_get_objects(request, container_name,
prefix=object_name)
# In case the given object is pseudo folder,
# it can be deleted only if it is empty.
# swift_get_objects will return at least
# one object (i.e container_name) even if the
# given pseudo folder is empty. So if swift_get_objects
# returns more than one object then only it will be
# considered as non empty folder.
if len(objects) > 1:
error_msg = _("The pseudo folder cannot be deleted "
"since it is not empty.")
exc = exceptions.Conflict(error_msg)
raise exc
swift_api(request).delete_object(container_name, object_name)
return True
@profiler.trace
def swift_get_object(request, container_name, object_name, with_data=True,
resp_chunk_size=CHUNK_SIZE):
if with_data:
headers, data = swift_api(request).get_object(
container_name, object_name, resp_chunk_size=resp_chunk_size)
else:
data = None
headers = swift_api(request).head_object(container_name,
object_name)
orig_name = headers.get("x-object-meta-orig-filename")
timestamp = None
try:
ts_float = float(headers.get('x-timestamp'))
timestamp = datetime.utcfromtimestamp(ts_float).isoformat()
except Exception:
pass
obj_info = {
'name': object_name,
'bytes': headers.get('content-length'),
'content_type': headers.get('content-type'),
'etag': headers.get('etag'),
'timestamp': timestamp,
}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data)
@profiler.trace
def swift_get_capabilities(request):
try:
return swift_api(request).get_capabilities()
# NOTE(tsufiev): Ceph backend currently does not support '/info', even
# some Swift installations do not support it (see `expose_info` docs).
except swiftclient.exceptions.ClientException:
return {}
| BiznetGIO/horizon | openstack_dashboard/api/swift.py | Python | apache-2.0 | 13,395 |
CFNoQuitButton=256
CFPageButton=16
CFQuicktalker=4
CFQuitButton=32
CFReversed=64
CFSndOpenchat=128
CFSpeech=1
CFThought=2
CFTimeout=8
CCNormal = 0
CCNoChat = 1
CCNonPlayer = 2
CCSuit = 3
CCToonBuilding = 4
CCSuitBuilding = 5
CCHouseBuilding = 6
CCSpeedChat = 7
CCFreeChat = 8
NAMETAG_COLORS = {
CCNormal: (
# Normal FG BG
((0.3, 0.3, 0.7, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((0.3, 0.3, 0.7, 1.0), (0.2, 0.2, 0.2, 0.6), # Name
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((0.5, 0.5, 1.0, 1.0), (1.0, 1.0, 1.0, 1.0), # Name
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.3, 0.3, 0.7, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
CCNoChat: (
# Normal FG BG
((0.6, 0.3, 0.0, 0.65), (1.0, 1.0, 1.0, 1.0), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((1.0, 0.5, 0.5, 1.0), (0.2, 0.2, 0.2, 0.6), # Name
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((1.0, 0.5, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0), # Name
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.6, 0.3, 0.0, 0.65), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
CCNonPlayer: (
# Normal FG BG
((0.8, 0.4, 0.0, 0.5), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((0.8, 0.4, 0.0, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((0.8, 0.4, 0.0, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.8, 0.4, 0.0, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
CCSuit: (
# Normal FG BG
((0.0, 0.0, 0.0, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((0.2, 0.2, 0.2, 1.0), (0.2, 0.2, 0.2, 0.6), # Name
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 0.7), # Name
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.2, 0.2, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
CCSuitBuilding: (
# Normal FG BG
((0.5, 0.5, 0.5, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((0.5, 0.5, 0.5, 1.0), (0.2, 0.2, 0.2, 0.6), # Name
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((0.7, 0.7, 0.7, 1.0), (1.0, 1.0, 1.0, 0.7), # Name
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.5, 0.5, 0.5, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
CCToonBuilding: (
# Normal FG BG
((0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
CCHouseBuilding: (
# Normal FG BG
((0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((0.2, 0.2, 0.5, 1.0), (0.2, 0.2, 0.2, 0.6), # Name
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((0.5, 0.5, 1.0, 1.0), (1.0, 1.0, 1.0, 1.0), # Name
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.0, 0.6, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
CCSpeedChat: (
# Normal FG BG
((0.0, 0.6, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((0.0, 0.5, 0.0, 1.0), (0.5, 0.5, 0.5, 0.6), # Name
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((0.0, 0.7, 0.2, 1.0), (1.0, 1.0, 1.0, 0.7), # Name
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.0, 0.35, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
CCFreeChat: (
# Normal FG BG
((0.3, 0.3, 0.7, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Click FG BG
((0.2, 0.2, 0.5, 1.0), (0.2, 0.2, 0.2, 0.6), # Name
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Hover FG BG
((0.5, 0.5, 1.0, 1.0), (1.0, 1.0, 1.0, 1.0), # Name
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
# Disable FG BG
((0.3, 0.3, 0.7, 1.0), (0.8, 0.8, 0.8, 0.5), # Name
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)), # Chat
),
}
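# Access pattern (comment added for clarity):
#   NAMETAG_COLORS[colorCode][state] -> (nameFg, nameBg, chatFg, chatBg)
# where state is 0 normal, 1 clicked, 2 hovered, 3 disabled and each entry is an
# RGBA tuple; WHISPER_COLORS below uses the same state order with (fg, bg) pairs.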
ARROW_COLORS = {
CCSuit: (0.5, 0.5, 0.5, 1.0),
CCNonPlayer: (1.0, 0.4, 0.0, 1.0),
CCNoChat: (1.0, 0.4, 0.0, 1.0),
CCSpeedChat: (0.0, 0.6, 0.2, 1.0),
}
DEFAULT_WORDWRAPS = {
CCNormal: 7.5,
CCNoChat: 7.5,
CCNonPlayer: 7.5,
CCSuit: 7.5,
CCToonBuilding: 8.5,
CCSuitBuilding: 8.5,
CCHouseBuilding: 10.0,
CCSpeedChat: 7.5,
CCFreeChat: 7.5
}
WTNormal = 0
WTQuickTalker = 1
WTSystem = 2
WTBattleSOS = 3
WTEmote = 4
WTToontownBoardingGroup = 5
WHISPER_COLORS = {
WTNormal: (
# Normal FG BG
((0.0, 0.0, 0.0, 1.0), (0.2, 0.6, 0.8, 0.6)),
# Click FG BG
((1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 0.8)),
# Hover FG BG
((0.0, 0.0, 0.0, 1.0), (0.2, 0.7, 0.9, 0.6)),
# Disable FG BG
((0.0, 0.0, 0.0, 1.0), (0.2, 0.7, 0.8, 0.6)),
),
WTQuickTalker: (
# Normal FG BG
((0.0, 0.0, 0.0, 1.0), (0.2, 0.6, 0.8, 0.6)),
# Click FG BG
((1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 0.8)),
# Hover FG BG
((0.0, 0.0, 0.0, 1.0), (0.2, 0.7, 0.9, 0.6)),
# Disable FG BG
((0.0, 0.0, 0.0, 1.0), (0.2, 0.7, 0.8, 0.6)),
),
WTSystem: (
# Normal FG BG
((0.0, 0.0, 0.0, 1.0), (0.8, 0.3, 0.6, 0.6)),
# Click FG BG
((1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 0.8)),
# Hover FG BG
((0.0, 0.0, 0.0, 1.0), (0.8, 0.4, 1.0, 0.6)),
# Disable FG BG
((0.0, 0.0, 0.0, 1.0), (0.8, 0.3, 0.6, 0.6)),
),
# TODO: WTBattleSOS
WTEmote: (
# Normal FG BG
((0.0, 0.0, 0.0, 1.0), (0.9, 0.5, 0.1, 0.6)),
# Click FG BG
((1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 0.8)),
# Hover FG BG
((0.0, 0.0, 0.0, 1.0), (0.9, 0.6, 0.2, 0.6)),
# Disable FG BG
((0.0, 0.0, 0.0, 1.0), (0.9, 0.6, 0.1, 0.6)),
),
# TODO: WTToontownBoardingGroup
}
| silly-wacky-3-town-toon/SOURCE-COD | otp/nametag/NametagConstants.py | Python | apache-2.0 | 8,694 |
from office365.sharepoint.fields.field import Field
class FieldCalculated(Field):
"""
Specifies a calculated field in a list. To set properties, call the Update method (section 3.2.5.38.2.1.5).
The NoCrawl and SchemaXmlWithResourceTokens properties are not included in the default scalar property set
for this type.
"""
@property
def currency_locale_id(self):
"""
:rtype: int or None
"""
return self.properties.get('CurrencyLocaleId', None)
@property
def formula(self):
"""
:rtype: str or None
"""
return self.properties.get('Formula', None)
@formula.setter
def formula(self, val):
"""Sets a value that specifies the Formula.
:type val: str
"""
self.set_property('Formula', val)
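# Illustrative usage (hypothetical flow, not part of this module): given a
# FieldCalculated instance from a list's fields collection, the formula can be
# changed through the setter above, e.g.
#   field.formula = "=[Quantity]*[UnitPrice]"
# and then persisted through the client's usual update/execute calls.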
| vgrem/Office365-REST-Python-Client | office365/sharepoint/fields/field_calculated.py | Python | mit | 831 |
AuthorizedException = (
BufferError,
ArithmeticError,
AssertionError,
AttributeError,
EnvironmentError,
EOFError,
LookupError,
MemoryError,
ReferenceError,
RuntimeError,
SystemError,
TypeError,
ValueError
)
| ainafp/nilearn | nilearn/_utils/exceptions.py | Python | bsd-3-clause | 311 |
#!/usr/bin/env python
import sys
first = [ line.rstrip() for line in open(sys.argv[1],"r") ]
second = [ line.rstrip() for line in open(sys.argv[2],"r") ]
for f in first:
    # keep only the lines of the first file that are absent from the second
    if f not in second:
        print f | mclarke-icr/scripts | subtractLists.py | Python | gpl-3.0 | 242 |
# Generated by Django 2.0.13 on 2020-04-02 23:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myuw', '0016_myuw_notice_group'),
]
operations = [
migrations.AddField(
model_name='myuwnotice',
name='is_critical',
field=models.BooleanField(default=False),
),
]
| uw-it-aca/myuw | myuw/migrations/0017_critical_notice.py | Python | apache-2.0 | 393 |
import random
import math
import numpy as np
import pygame
from pygame.color import THECOLORS
import pymunk
from pymunk.vec2d import Vec2d
from pymunk.pygame_util import draw
# PyGame init
width = 1000
height = 700
pygame.init()
screen = pygame.display.set_mode((width, height))
clock = pygame.time.Clock()
# Turn off alpha since we don't use it.
screen.set_alpha(None)
# Showing sensors and redrawing slows things down.
show_sensors = True
draw_screen = True
class GameState:
def __init__(self):
# Global-ish.
self.crashed = 0
self.caught = 0
# Physics stuff.
self.space = pymunk.Space()
self.space.gravity = pymunk.Vec2d(0., 0.)
# Create the car.
self.create_car(100, 100, 0.5)
# Record steps.
self.num_steps = 0
# Create walls.
static = [
pymunk.Segment(
self.space.static_body,
(0, 1), (0, height), 1),
pymunk.Segment(
self.space.static_body,
(1, height), (width, height), 1),
pymunk.Segment(
self.space.static_body,
(width-1, height), (width-1, 1), 1),
pymunk.Segment(
self.space.static_body,
(1, 1), (width, 1), 1)
]
for s in static:
s.friction = 1.
s.group = 1
s.collision_type = 1
s.color = THECOLORS['red']
self.space.add(static)
# Create some obstacles, semi-randomly.
# We'll create three and they'll move around to prevent over-fitting.
# self.obstacles = []
# self.obstacles.append(self.create_obstacle(200, 350, 70))
# self.obstacles.append(self.create_obstacle(700, 200, 80))
# self.obstacles.append(self.create_obstacle(600, 600, 70))
# Create a cat.
self.create_cat()
# self.create_dog()
def create_obstacle(self, x, y, r):
c_body = pymunk.Body(pymunk.inf, pymunk.inf)
c_shape = pymunk.Circle(c_body, r)
c_shape.elasticity = 1.0
c_body.position = x, y
c_shape.color = THECOLORS["blue"]
self.space.add(c_body, c_shape)
return c_body
# def create_cat(self):
# inertia = pymunk.moment_for_circle(1, 0, 14, (0, 0))
# self.cat_body = pymunk.Body(1, inertia)
# self.cat_body.position = 50, height - 100
# self.cat_shape = pymunk.Circle(self.cat_body, 35)
# self.cat_shape.color = THECOLORS["orange"]
# self.cat_shape.elasticity = 1.0
# self.cat_shape.angle = 0.5
# direction = Vec2d(1, 0).rotated(self.cat_body.angle)
# self.space.add(self.cat_body, self.cat_shape)
# def create_dog(self):
# inertia = pymunk.moment_for_circle(1, 0, 14, (0, 0))
# self.dog_body = pymunk.Body(1, inertia)
# self.dog_body.position = 900, height - 600
# self.dog_shape = pymunk.Circle(self.dog_body, 40)
# self.dog_shape.color = THECOLORS["yellow"]
# self.dog_shape.elasticity = 2.0
# self.dog_shape.angle = 0.5
# direction = Vec2d(1, 0).rotated(self.dog_body.angle)
# self.space.add(self.dog_body, self.dog_shape)
def create_cat(self):
inertia = pymunk.moment_for_circle(1, 0, 14, (0, 0))
self.cat_body = pymunk.Body(1, inertia)
self.cat_body.position = 50, height - 100
self.cat_shape = pymunk.Circle(self.cat_body, 30)
self.cat_shape.color = THECOLORS["orange"]
self.cat_shape.elasticity = 2.0
self.cat_shape.angle = 0.5
moving_direction = Vec2d(1, 0).rotated(self.cat_body.angle)
self.cat_body.apply_impulse(moving_direction)
self.space.add(self.cat_body, self.cat_shape)
def create_car(self, x, y, r):
inertia = pymunk.moment_for_circle(1, 0, 14, (0, 0))
self.car_body = pymunk.Body(1, inertia)
self.car_body.position = x, y
self.car_shape = pymunk.Circle(self.car_body, 25)
self.car_shape.color = THECOLORS["green"]
self.car_shape.elasticity = 2.0
self.car_body.angle = r
driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
self.car_body.apply_impulse(driving_direction)
self.space.add(self.car_body, self.car_shape)
def frame_step(self, action, action2):
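        # Note (comment added for clarity): because the branches below form a single
        # if/elif chain, the cat's action2 is only applied on frames where the car
        # does not turn (i.e. when action is neither 0 nor 1).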
if action == 0: # Turn left.
self.car_body.angle -= .2
elif action == 1: # Turn right.
self.car_body.angle += .2
elif action2 == 0: # Turn right.
self.cat_body.angle += .2
elif action2 == 1: # Turn right.
self.cat_body.angle += .2
# Move obstacles.
# if self.num_steps % 20 == 0:
# self.move_obstacles()
# Move cat.
# if self.num_steps % 5 == 0:
# self.move_cat()
# if self.num_steps % 5 == 0:
# self.move_dog()
driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
self.car_body.velocity = 100 * driving_direction
moving_direction = Vec2d(1, 0).rotated(self.cat_body.angle)
self.cat_body.velocity = 80 * moving_direction
# Update the screen and stuff.
screen.fill(THECOLORS["black"])
draw(screen, self.space)
self.space.step(1./30) #original 1./10
if draw_screen:
pygame.display.flip()
clock.tick()
# Get the current location and the readings there.
x, y = self.car_body.position
# x_cat, y_cat = self.cat_body.position
readings = self.get_sonar_readings(x, y, self.car_body.angle)
# readings.append([x,y])
# readings.append([x_cat,y_cat])
state = np.array([readings])
print readings
# Set the reward.
# Car crashed when any reading == 1
if self.car_is_crashed(readings):
self.crashed = 1
reward = -500
self.recover_from_crash(driving_direction)
# elif self.cat_is_caught(readings):
# self.caught = 1
# reward = 500
# self.recover_from_caught(driving_direction)
elif self.cat_is_caught(readings):
self.caught = 1
reward = 500
self.recover_from_caught(moving_direction)
elif readings[0][1] == -5 or readings[1][1] == -5 or readings[2][1] == -5:
reward = 50 - int(self.sum_readings(readings) / 10)
else:
# Higher readings are better, so return the sum.
reward = -12 + int(self.sum_readings(readings) / 10)
print("current reward: %s" % reward)
self.num_steps += 1
return reward, state
def move_obstacles(self):
# Randomly move obstacles around.
for obstacle in self.obstacles:
speed = random.randint(1, 5)
direction = Vec2d(1, 0).rotated(self.car_body.angle + random.randint(-2, 2))
obstacle.velocity = speed * direction
def move_cat(self):
speed = random.randint(50, 100)
self.cat_body.angle -= random.randint(-1, 1)
direction = Vec2d(1, 0).rotated(self.cat_body.angle)
self.cat_body.velocity = speed * direction
def move_dog(self):
speed = random.randint(40, 60)
self.dog_body.angle -= random.randint(-1, 1)
direction = Vec2d(1, 0).rotated(self.dog_body.angle)
self.dog_body.velocity = speed * direction
def car_is_crashed(self, readings):
if (readings[0][0]==1 and readings[0][1] != -5) \
or (readings[1][0] == 1 and readings[1][1] != -5 ) \
or (readings[2][0] == 1 and readings[2][1] != -5 ) :
return 1
else:
return 0
def cat_is_caught(self, readings):
if (readings[0][0]==1 and readings[0][1] == -5) \
or (readings[1][0] == 1 and readings[1][1] == -5 ) \
or (readings[2][0] == 1 and readings[2][1] == -5 ) :
return True
else:
return False
def recover_from_crash(self, driving_direction):
"""
We hit something, so recover.
"""
while self.crashed:
# Go backwards.
self.car_body.velocity = -100 * driving_direction
self.crashed = False
for i in range(10):
self.car_body.angle += .2 # Turn a little.
screen.fill(THECOLORS["red"]) # Red is scary!
draw(screen, self.space)
self.space.step(1./10)
if draw_screen:
pygame.display.flip()
clock.tick()
def recover_from_caught(self, moving_direction):
"""
        We caught the cat, so reset it and recover.
"""
while self.caught:
            # Respawn the cat at a random position.
self.cat_body.position = random.randint(1,1000), random.randint(1,700)
#self.car_body.velocity = -100 * driving_direction
self.caught = False
for i in range(10):
self.car_body.angle += .2 # Turn a little.
screen.fill(THECOLORS["green"]) # green is satisfying!
draw(screen, self.space)
self.space.step(1./10)
if draw_screen:
pygame.display.flip()
clock.tick()
def sum_readings(self, readings):
"""Sum the number of non-zero readings."""
readings = np.asarray(readings)
a = np.transpose(readings)
#print a[0],a[1]
p1 = np.array(self.cat_body.position)
p2 = np.array(self.car_body.position)
tot = 0
#dis = np.dot(a[0][:3], a[1][:3]) - np.linalg.norm(p1-p2)/100
# return np.dot(a[0], a[1])
return sum (a[0][:3])
def get_sonar_readings(self, x, y, angle):
readings = []
"""
Instead of using a grid of boolean(ish) sensors, sonar readings
simply return N "distance" readings, one for each sonar
we're simulating. The distance is a count of the first non-zero
reading starting at the object. For instance, if the fifth sensor
in a sonar "arm" is non-zero, then that arm returns a distance of 5.
"""
# Make our arms.
arm_left = self.make_sonar_arm(x, y)
arm_middle = arm_left
arm_right = arm_left
# Rotate them and get readings.
readings.append(self.get_arm_distance(arm_left, x, y, angle, 0.75))
readings.append(self.get_arm_distance(arm_middle, x, y, angle, 0))
readings.append(self.get_arm_distance(arm_right, x, y, angle, -0.75))
if show_sensors:
pygame.display.update()
return readings
def get_arm_distance(self, arm, x, y, angle, offset):
# Used to count the distance.
i = 0
obs = 0
# Look at each point and see if we've hit something.
for point in arm:
i += 1
# Move the point to the right spot.
rotated_p = self.get_rotated_point(
x, y, point[0], point[1], angle + offset
)
# Check if we've hit something. Return the current i (distance)
# if we did.
if rotated_p[0] <= 0 or rotated_p[1] <= 0 \
or rotated_p[0] >= width or rotated_p[1] >= height:
return i, self.get_track_or_not(obs) # Sensor is off the screen.
else:
obs = screen.get_at(rotated_p)
if self.get_track_or_not(obs) != 0:
if show_sensors:
pygame.draw.circle(screen, (255, 255, 255), (rotated_p), 10)
return i, self.get_track_or_not(obs)
if show_sensors:
pygame.draw.circle(screen, (255, 255, 255), (rotated_p), 2)
# Return the distance for the arm.
return i, self.get_track_or_not(obs)
def make_sonar_arm(self, x, y):
spread = 10 # Default spread.
distance = 20 # Gap before first sensor.
arm_points = []
# Make an arm. We build it flat because we'll rotate it about the
# center later.
for i in range(1, 40):
arm_points.append((distance + x + (spread * i), y))
return arm_points
def get_rotated_point(self, x_1, y_1, x_2, y_2, radians):
# Rotate x_2, y_2 around x_1, y_1 by angle.
x_change = (x_2 - x_1) * math.cos(radians) + \
(y_2 - y_1) * math.sin(radians)
y_change = (y_1 - y_2) * math.cos(radians) - \
(x_1 - x_2) * math.sin(radians)
new_x = x_change + x_1
new_y = height - (y_change + y_1)
return int(new_x), int(new_y)
def get_track_or_not(self, reading):
if reading == THECOLORS['black']:
return 0
elif reading == THECOLORS['yellow']:
return 5
elif reading == THECOLORS['blue']:
return 1
elif reading == THECOLORS['orange']:
return -5
else :
return 1 # red
if __name__ == "__main__":
game_state = GameState()
while True:
game_state.frame_step((random.randint(0, 2)))
| coreknowledge2016/multi-agent-hrl | carmunk.py | Python | mit | 13,313 |
# coding: utf-8
#
# This file is part of Progdupeupl.
#
# Progdupeupl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Progdupeupl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Progdupeupl. If not, see <http://www.gnu.org/licenses/>.
"""Module for handling forums, their topics and answers."""
| progdupeupl/pdp_website | pdp/forum/__init__.py | Python | agpl-3.0 | 768 |
#
# usage: python k44.py {file name} {number}
#
import sys
import pydot
from k41 import *
from k42 import get_relation_pairs
if __name__ == '__main__':
fn, nos = sys.argv[1], int(sys.argv[2])
sl = load_cabocha(fn)
pl = get_relation_pairs([sl[nos-1]])
g = pydot.graph_from_edges(pl)
g.write_png('result.png', prog='dot')
| WatsonDNA/nlp100 | chap05/k44.py | Python | unlicense | 343 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, throw, _
from frappe.utils import scrub_urls
import email.utils
import inlinestyler.utils
def get_email(recipients, sender='', msg='', subject='[No Subject]',
text_content = None, footer=None, print_html=None, formatted=None):
"""send an html email as multipart with attachments and all"""
emailobj = EMail(sender, recipients, subject)
if (not '<br>' in msg) and (not '<p>' in msg) and (not '<div' in msg):
msg = msg.replace('\n', '<br>')
emailobj.set_html(msg, text_content, footer=footer, print_html=print_html, formatted=formatted)
return emailobj
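# Illustrative usage (example comment, not part of the original module):
#   mail = get_email(["user@example.com"], sender="noreply@example.com",
#       msg="<p>Hello</p>", subject="Greetings")
#   raw_message = mail.as_string()   # validates, builds and serialises the email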
class EMail:
"""
Wrapper on the email module. Email object represents emails to be sent to the client.
Also provides a clean way to add binary `FileData` attachments
Also sets all messages as multipart/alternative for cleaner reading in text-only clients
"""
def __init__(self, sender='', recipients=[], subject='', alternative=0, reply_to=None):
from email.mime.multipart import MIMEMultipart
from email import Charset
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
if isinstance(recipients, basestring):
recipients = recipients.replace(';', ',').replace('\n', '')
recipients = recipients.split(',')
# remove null
recipients = filter(None, (r.strip() for r in recipients))
self.sender = sender
self.reply_to = reply_to or sender
self.recipients = recipients
self.subject = subject
self.msg_root = MIMEMultipart('mixed')
self.msg_multipart = MIMEMultipart('alternative')
self.msg_root.attach(self.msg_multipart)
self.cc = []
self.html_set = False
def set_html(self, message, text_content = None, footer=None, print_html=None, formatted=None):
"""Attach message in the html portion of multipart/alternative"""
if not formatted:
formatted = get_formatted_html(self.subject, message, footer, print_html)
# this is the first html part of a multi-part message,
# convert to text well
if not self.html_set:
if text_content:
self.set_text(text_content)
else:
self.set_html_as_text(message)
self.set_part_html(formatted)
self.html_set = True
def set_text(self, message):
"""
Attach message in the text portion of multipart/alternative
"""
from email.mime.text import MIMEText
part = MIMEText(message, 'plain', 'utf-8')
self.msg_multipart.attach(part)
def set_part_html(self, message):
from email.mime.text import MIMEText
part = MIMEText(message, 'html', 'utf-8')
self.msg_multipart.attach(part)
def set_html_as_text(self, html):
"""return html2text"""
import HTMLParser
from frappe.utils.email_lib.html2text import html2text
try:
self.set_text(html2text(html))
except HTMLParser.HTMLParseError:
pass
def set_message(self, message, mime_type='text/html', as_attachment=0, filename='attachment.html'):
"""Append the message with MIME content to the root node (as attachment)"""
from email.mime.text import MIMEText
maintype, subtype = mime_type.split('/')
part = MIMEText(message, _subtype = subtype)
if as_attachment:
part.add_header('Content-Disposition', 'attachment', filename=filename)
self.msg_root.attach(part)
def attach_file(self, n):
"""attach a file from the `FileData` table"""
from frappe.utils.file_manager import get_file
res = get_file(n)
if not res:
return
self.add_attachment(res[0], res[1])
def add_attachment(self, fname, fcontent, content_type=None):
"""add attachment"""
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import mimetypes
if not content_type:
content_type, encoding = mimetypes.guess_type(fname)
if content_type is None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
content_type = 'application/octet-stream'
maintype, subtype = content_type.split('/', 1)
if maintype == 'text':
# Note: we should handle calculating the charset
if isinstance(fcontent, unicode):
fcontent = fcontent.encode("utf-8")
part = MIMEText(fcontent, _subtype=subtype, _charset="utf-8")
elif maintype == 'image':
part = MIMEImage(fcontent, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(fcontent, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(fcontent)
# Encode the payload using Base64
from email import encoders
encoders.encode_base64(part)
# Set the filename parameter
if fname:
part.add_header(b'Content-Disposition',
("attachment; filename=%s" % fname).encode('utf-8'))
self.msg_root.attach(part)
def validate(self):
"""validate the email ids"""
from frappe.utils import validate_email_add
def _validate(email):
"""validate an email field"""
if email and not validate_email_add(email):
throw(_("{0} is not a valid email id").format(email))
return email
if not self.sender:
self.sender = frappe.db.get_value('Outgoing Email Settings', None,
'auto_email_id') or frappe.conf.get('auto_email_id') or None
if not self.sender:
msgprint(_("Please specify 'Auto Email Id' in Setup > Outgoing Email Settings"))
if not "expires_on" in frappe.conf:
msgprint(_("Alternatively, you can also specify 'auto_email_id' in site_config.json"))
raise frappe.ValidationError
self.sender = _validate(self.sender)
self.reply_to = _validate(self.reply_to)
for e in self.recipients + (self.cc or []):
_validate(e.strip())
def make(self):
"""build into msg_root"""
self.msg_root['Subject'] = self.subject.encode("utf-8")
self.msg_root['From'] = self.sender.encode("utf-8")
self.msg_root['To'] = ', '.join([r.strip() for r in self.recipients]).encode("utf-8")
self.msg_root['Date'] = email.utils.formatdate()
if not self.reply_to:
self.reply_to = self.sender
self.msg_root['Reply-To'] = self.reply_to.encode("utf-8")
if self.cc:
self.msg_root['CC'] = ', '.join([r.strip() for r in self.cc]).encode("utf-8")
def as_string(self):
"""validate, build message and convert to string"""
self.validate()
self.make()
return self.msg_root.as_string()
def get_formatted_html(subject, message, footer=None, print_html=None):
message = scrub_urls(message)
rendered_email = frappe.get_template("templates/emails/standard.html").render({
"content": message,
"footer": get_footer(footer),
"title": subject,
"print_html": print_html
})
# if in a test case, do not inline css
if frappe.local.flags.in_test:
return rendered_email
return inlinestyler.utils.inline_css(rendered_email)
def get_footer(footer=None):
"""append a footer (signature)"""
footer = footer or ""
# control panel
footer += frappe.db.get_default('mail_footer') or ''
# hooks
for f in frappe.get_hooks("mail_footer"):
footer += frappe.get_attr(f)
footer += "<!--unsubscribe link here-->"
return footer
| cadencewatches/frappe | frappe/utils/email_lib/email_body.py | Python | mit | 7,076 |
#!/usr/bin/env python
import os, sys
import json
from collections import namedtuple
Key = namedtuple("Key", ["name","version"])
class InstrumentationBase(object):
"""
Pre-computed patterns
"""
def __init__(self, name, version, description, supported=[]):
self.enable = 'y'
self.name = name
self.version = version
self.description = description
self.support = supported + [name]
self.initialize()
def initialize(self):
return
def config(self, what='get', params=None):
return
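# Illustrative subclass sketch (hypothetical names, not part of this module):
#   class GitInstrumentation(InstrumentationBase):
#       def __init__(self):
#           super(GitInstrumentation, self).__init__(
#               name="git", version="0.1.0",
#               description="Capture repository metadata",
#               supported=["commit"])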
| pingali/dgit | dgitcore/plugins/instrumentation.py | Python | isc | 568 |
def main(request, response):
response.headers.set("Content-Security-Policy", "default-src 'none'")
response.headers.set("X-Content-Security-Policy", "default-src 'none'")
response.headers.set("X-WebKit-CSP", "default-src 'none'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_default-src_none_script</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="default-src 'none'"/>
<meta charset="utf-8"/>
<script>
function show_fail() {
document.getElementById("log").innerHTML = "FAIL";
}
</script>
</head>
<body onload="show_fail()">
<p>Test passes if text "PASS" appears below.</p>
<div id="log">PASS</div>
</body>
</html> """
| kaixinjxq/web-testing-service | wts/tests/csp/csp_default-src_none_script-manual.py | Python | bsd-3-clause | 2,381 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
""" Test script for running python commands within MantidPlot.
This will test the interface to SliceViewer widgets.
Note: the SliceViewerPythonInterfaceTest.py offers
more tests of specific functions. This module
tests (primarily) the plotSlice() helper methods that is available
only within mantidplot
"""
import sys
import os
import unittest
import mantidplottests
from mantidplottests import *
from mantid.kernel import *
import time
CreateMDWorkspace(Dimensions='3',Extents='0,10,0,10,0,10',Names='x,y,z',Units='m,m,m',SplitInto='5',MaxRecursionDepth='20',OutputWorkspace='mdw')
FakeMDEventData(InputWorkspace="mdw", UniformParams="1e5")
FakeMDEventData(InputWorkspace="mdw", PeakParams="1e4, 2,4,6, 1.5")
BinMD(InputWorkspace="mdw", OutputWorkspace="uniform", AxisAligned=True, AlignedDim0="x,0,10,30", AlignedDim1="y,0,10,30", AlignedDim2="z,0,10,30", IterateEvents="1", Parallel="0")
CreateMDWorkspace(Dimensions='3',Extents='0,10,0,10,0,10',Names='x,y,z',Units='m,m,m',SplitInto='5',MaxRecursionDepth='20',OutputWorkspace='empty')
CreateMDWorkspace(Dimensions='4',Extents='0,10,0,10,0,10,0,10',Names='x,y,z,e',Units='m,m,m,meV',SplitInto='5',MaxRecursionDepth='20',OutputWorkspace='md4')
class MantidPlotSliceViewerTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
closeAllSliceViewers()
pass
def test_plotSlice(self):
""" Basic plotSlice() usage """
svw = plotSlice('uniform')
self.assertEqual(svw.getSlicer().getWorkspaceName(), "uniform")
def test_mouseMove(self):
""" Move the mouse over the slice viewer """
svw = plotSlice('uniform')
svw.setSlicePoint(2, 2.5)
moveMouseToCentre(svw._getHeldObject())
def test_plotSlice_empty(self):
""" Plot slice on an empty workspace """
svw = plotSlice('empty')
self.assertEqual(svw.getSlicer().getWorkspaceName(), "empty")
def test_closingWindowIsSafe(self):
svw = plotSlice('uniform', label='closing!')
svw.close()
def test_methods_pass_through(self):
""" Methods called on SliceViewerWindow pass-through to the SliceViewer widget"""
svw = plotSlice('uniform')
svw.setSlicePoint(0, 2.5)
self.assertAlmostEqual(svw.getSlicePoint(0), 2.5, 3)
svw.setXYDim("z", "x")
self.assertEqual(svw.getDimX(), 2)
self.assertEqual(svw.getDimY(), 0)
def test_plot4D_workspace(self):
svw = plotSlice('md4')
svw.setSlicePoint(2, 2.5)
svw.setSlicePoint(3, 7.5)
self.assertAlmostEqual(svw.getSlicePoint(2), 2.5, 3)
self.assertAlmostEqual(svw.getSlicePoint(3), 7.5, 3)
svw.setXYDim("z", "e")
self.assertEqual(svw.getDimX(), 2)
self.assertEqual(svw.getDimY(), 3)
def test_plotSlice_arguments(self):
""" Pass arguments to plotSlice """
svw = plotSlice('uniform', label='test_label', xydim=[1,2],
slicepoint=[2.5, 0, 0], colormin=20, colormax=5000, colorscalelog=True,
limits=[2, 8, 3, 9])
self.assertEqual(svw.getLabel(), "test_label")
self.assertEqual(svw.getDimX(), 1)
self.assertEqual(svw.getDimY(), 2)
self.assertAlmostEqual(svw.getSlicePoint(0), 2.5, 3)
self.assertAlmostEqual(svw.getColorScaleMin(), 20, 2)
self.assertAlmostEqual(svw.getColorScaleMax(), 5000, 2)
assert svw.getColorScaleLog()
self.assertEqual(svw.getXLimits(), [2,8])
self.assertEqual(svw.getYLimits(), [3,9])
def test_plotSlice_arguments2(self):
""" Another way to pass xydim """
svw = plotSlice('uniform', xydim=["y", "z"])
self.assertEqual(svw.getDimX(), 1)
self.assertEqual(svw.getDimY(), 2)
def test_getSliceViewer(self):
""" Retrieving an open SliceViewer """
svw1 = plotSlice('uniform')
svw2 = getSliceViewer('uniform')
assert svw2 is not None
self.assertEqual(svw2.getSlicer().getWorkspaceName(), "uniform")
def test_getSliceViewer_failure(self):
""" Retrieving an open SliceViewer, cases where it fails """
self.assertRaises(Exception, getSliceViewer, 'nonexistent')
svw = plotSlice('uniform', label='alabel')
self.assertRaises(Exception, getSliceViewer, 'uniform', 'different_label')
def test_saveImage(self):
""" Save the rendered image """
svw = plotSlice('uniform')
svw.setSlicePoint(2,6.0)
dest = get_screenshot_dir()
if not dest is None:
filename = "SliceViewerSaveImage"
filepath = os.path.join(dest, filename+".png")
# Remove any old file
if os.path.isfile(filepath):
os.remove(filepath)
# Save
svw.saveImage(filepath)
self.assertEquals(os.path.isfile(filepath), True,
"Screenshot was not written out as expected.")
if os.path.isfile(filepath):
os.remove(filepath)
def test_showLine(self):
svw = plotSlice('uniform')
svw.setSlicePoint(2,6.0)
liner = svw.showLine([1,1], [7,9], width=0.88, num_bins=200)
self.assertTrue( not (liner is None), "Returns a LineViewer proxy object")
# Plot the X units
liner.setPlotAxis(2);
liner.apply()
# Check that the values are there
self.assertEqual(liner.getNumBins(), 200)
# Length of 10 with 200 bins = 0.05 width
self.assertAlmostEqual(liner.getBinWidth(), 0.05, 3)
# Width was set
# TODO: The new behavior for constructor is center+/-(width/2)
# but setPlanarWidth and getPlanarWidth still have old behavior.
# This will be fixed in a later mantid release.
self.assertAlmostEqual(liner.getPlanarWidth(), 0.44, 3)
# Now turn it off
svw.toggleLineMode(False)
self.assertFalse( liner.isVisible(), "LineViewer was hidden")
def test_showPeakOverlays(self):
        from PyQt4 import QtGui
qLab = CreateMDWorkspace(Dimensions='3',EventType='MDEvent',Extents='-10,10,-10,10,-10,10',Names='Q_lab_x,Q_lab_y,Q_lab_z',Units='A,B,C')
FakeMDEventData(InputWorkspace=qLab, PeakParams=[1000, 1, 1, 1, 1])
qLab = BinMD(InputWorkspace=qLab, AxisAligned=True, AlignedDim0="Q_lab_x,-10,10,100", AlignedDim1="Q_lab_y,-10,10,100", AlignedDim2="Q_lab_z,-10,10,100", IterateEvents="1", Parallel="0")
SetSpecialCoordinates(qLab, 'Q (lab frame)')
pathToInstrument = os.path.join(config["instrumentDefinition.directory"], 'CRISP_Definition.xml') # Note that the instrument doesn't matter. Choose a small one.
instWS = LoadEmptyInstrument(Filename=pathToInstrument) # Only do this so that we can provide the parameter to CreatePeaksWorkspace
pw = CreatePeaksWorkspace(InstrumentWorkspace=instWS, NumberOfPeaks=1)
peak = pw.getPeak(0)
peak.setQLabFrame(V3D(1, 1, 1), 1)
svw = plotSlice(qLab.name(), slicepoint=[1, 1, 1], colormin=1, colormax=5000, colorscalelog=True)
sv = svw.getSlicer()
# Show the PeaksOverlays
allPeaksPresenters = sv.setPeaksWorkspaces([pw.name()])
# Get first peaks presenter.
peaksPresenter = allPeaksPresenters.getPeaksPresenter(pw.name())
# Set the Foreground Colour
peaksPresenter.setForegroundColor(QtGui.QColor(255, 0, 0, 255))
# Zoom in on peak.
peaksPresenter.zoomToPeak(0)
# Hide it
peaksPresenter.setShown(False)
# Now clear the PeaksOverlays
sv.clearPeaksWorkspaces()
def test_integrateMDHistoWorkspace(self):
CreateMDWorkspace(Dimensions=4, Extents='-10,10,-10,10,-10,10,-10,10', Names='a,b,c,d', Units='u,u,u,u', OutputWorkspace='ws')
FakeMDEventData(InputWorkspace='ws', PeakParams='10000,0,0,0,0,1')
CutMD(InputWorkspace='ws', P1Bin='1', P2Bin='1', P3Bin='1', P4Bin='1', OutputWorkspace='ws2', NoPix=True)
svw = plotSlice('ws2')
sv = svw.getSlicer()
lv = svw.getLiner()
lv.setStartXY(-3, 0)
lv.setEndXY(3, 0)
lv.setPlanarWidth(2)
lv.apply()
time.sleep(1)
lws = mtd['ws2_line']
ranHisto = False
for alg in lws.getHistory():
if alg.name() == "IntegrateMDHistoWorkspace":
ranHisto = True
break
self.assertTrue(ranHisto, "IntegrateMDHistoWorkspace was used when possible")
# Run the unit tests
mantidplottests.runTests(MantidPlotSliceViewerTest)
| mganeva/mantid | MantidPlot/test/MantidPlotSliceViewerTest.py | Python | gpl-3.0 | 8,849 |
bl_info = {
"name": "MSG",
"author": "jameswilddev",
"version": (0, 0, 0),
"blender": (2, 7, 0),
"location": "File > Export > MasSplat Geometry (.msg)",
"description": "Export triangles as MasSplat Geometry (.msg)",
"category": "Import-Export"
}
import bpy, struct, math, random
class ExportMSG(bpy.types.Operator):
bl_idname = "export.msg"
bl_label = "Export MSG"
filepath = bpy.props.StringProperty(name="File Path", description="The path to a file to export to.", maxlen=1024, default="")
def execute(self, context):
vertices = []
indices = []
for obj in bpy.context.selected_objects:
if obj.type != "MESH":
continue
mesh = obj.to_mesh(bpy.context.scene, True, "PREVIEW")
mesh.transform(obj.matrix_world)
colLayer = mesh.vertex_colors[0]
i = 0
for poly in mesh.polygons:
if len(poly.loop_indices) != 3:
raise RuntimeError("Please triangulate your meshes")
for index in poly.loop_indices:
vertex = mesh.vertices[mesh.loops[index].vertex_index].co
color = colLayer.data[i].color
built = ((vertex[0], vertex[2], vertex[1]), (color[0], color[1], color[2]))
if not built in vertices:
vertices.append(built)
indices.append(vertices.index(built))
i = i + 1
file = open(self.properties.filepath, "wb")
file.write(struct.pack("H", len(vertices)))
file.write(struct.pack("H", len(indices) // 3))
for vertex in vertices:
for axis in vertex[0]:
file.write(struct.pack("f", axis))
for vertex in vertices:
for channel in vertex[1]:
file.write(struct.pack("B", int(channel * 255)))
for index in indices:
file.write(struct.pack("H", index))
file.close()
return {"FINISHED"}
def invoke(self, context, event):
wm = context.window_manager
self.properties.filepath = ""
wm.fileselect_add(self)
return {"RUNNING_MODAL"}
def menu_func(self, context):
self.layout.operator(ExportMSG.bl_idname, text="MasSplat Geometry (.msg)")
def register():
bpy.utils.register_class(ExportMSG)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_class(ExportMSG)
bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == "__main__":
    register()
| jameswilddev/WalkingSimulator | tools/blenderMSG.py | Python | mit | 2,526 |
# -*- coding: utf-8 -*-
'''
Using the state crime dataset separately plot the effect of the each
variable on the on the outcome, murder rate while accounting for the effect
of all other variables in the model visualized with a grid of partial
regression plots.
'''
from statsmodels.graphics.regressionplots import plot_partregress_grid
import statsmodels.api as sm
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
fig = plt.figure(figsize=(8, 6))
crime_data = sm.datasets.statecrime.load_pandas()
results = smf.ols('murder ~ hs_grad + urban + poverty + single',
data=crime_data.data).fit()
plot_partregress_grid(results, fig=fig)
plt.show()
| statsmodels/statsmodels.github.io | v0.11.1/plots/graphics_regression_partregress_grid.py | Python | bsd-3-clause | 683 |
# Code Coverage
# ---------------
# Achieve full statement coverage and parameter value coverage for
# strings, integers, and booleans on this enhanced Queue class.
#
# You will need to:
# 1) Write your test code in the test function.
# 2) Press submit. The grader will tell you if you
# fail to cover any specific part of the code.
# 3) Update your test function until you cover the
# entire code base.
# This specific Queue class can enqueue not only integers,
# but strings and booleans as well. You will need to provide
# parameter value coverage for these data types by adding
# each type of data into your Queue.
#
# Furthermore, this Queue class has the additional methods
# clear() and enqueueall(). The enqueueall method takes
# in a list or tuple and enqueues each item of the collection
# individually, returning True if all enqueues succeed, and
# False if the number of items in the collection will overfill
# the Queue.
# Enhanced Queue class
class Queue:
def __init__(self,size_max):
assert size_max > 0
self.max = size_max
self.head = 0
self.tail = 0
self.size = 0
self.data = {}
def __str__(self):
return str(self.data)
def clear(self):
self.__init__(self.max)
def empty(self):
return self.size == 0
def full(self):
return self.size == self.max
def enqueue(self,x):
if type(x) is not int and type(x) is not str and type(x) is not bool:
return False
if self.size == self.max:
return False
self.data[self.tail] = x
self.size += 1
self.tail += 1
if self.tail == self.max:
self.tail = 0
return True
def enqueueall(self, c):
if type(c) is tuple or type(c) is list:
if not self.size + len(c) > self.max:
for itm in c:
self.enqueue(itm)
return True
return False
def dequeue(self):
if self.size == 0:
return None
x = self.data[self.head]
self.size -= 1
self.head += 1
if self.head == self.max:
self.head = 0
return x
def checkRep(self):
assert self.tail >= 0
assert self.tail < self.max
assert self.head >= 0
assert self.head < self.max
if self.tail > self.head:
assert (self.tail-self.head) == self.size
if self.tail < self.head:
assert (self.head-self.tail) == (self.max-self.size)
if self.head == self.tail:
assert (self.size==0) or (self.size==self.max)
# Provide full statement and parameter value coverage of the Queue class
def test():
q = Queue(2)
q.checkRep()
q.dequeue()
q.clear()
q.enqueue(4.3)
q.checkRep()
q.enqueue("dsaaf")
q.checkRep()
q.enqueue(2414)
q.checkRep()
q.dequeue()
q.enqueue(True)
q.enqueue("dsaaf")
q.empty()
q.full()
q.enqueueall((4324,'3432'))
q.dequeue()
q.checkRep()
q.dequeue()
q.checkRep()
q.enqueueall((432,324))
q.checkRep()
str(q)
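# A small illustrative check of the enqueueall() overfill rule described in the
# comments above (sketch only; it assumes nothing beyond the Queue class in this
# file and is not called by test()).
def _enqueueall_overfill_example():
    q = Queue(3)
    assert q.enqueueall([1, "two", True])  # fits exactly, so every item is added
    assert not q.enqueueall([4])           # would overfill a full queue, nothing is added
    assert not q.enqueueall("not a list")  # only lists and tuples are accepted
    return q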
| Quexint/Assignment-Driven-Learning | OCW/[Udacity]Software_Testing/src/6.CodeCoverage.py | Python | mit | 3,205 |
import os
from core.unknownSubstance import UnknownSubstance
class Languages(UnknownSubstance):
def __init__(self):
self.Neutral = 'Neutral'
self.Arabic = 'Arabic'
self.Chinese_Simplified = 'Chinese (Simplified)'
self.Chinese_Traditional = 'Chinese (Traditional)'
self.Czech = 'Czech'
self.Danish = 'Danish'
self.Dutch = 'Dutch'
self.English = 'English'
self.Finnish = 'Finnish'
self.French = 'French'
self.German = 'German'
self.Greek = 'Greek'
self.Hebrew = 'Hebrew'
self.Hungarian = 'Hungarian'
self.Italian = 'Italian'
self.Japanese = 'Japanese'
self.Korean = 'Korean'
self.Norwegian = 'Norwegian'
self.Polish = 'Polish'
self.Portuguese_Brazil = 'Portuguese (Brazil)'
self.Portuguese_Portugal = 'Portuguese (Portugal)'
self.Russian = 'Russian'
self.Spanish = 'Spanish'
self.Swedish = 'Swedish'
self.Turkish = 'Turkish'
languages = {}
languages[os.sep + 'Neutral' + os.sep] = self.Neutral
languages[os.sep + 'Arabic' + os.sep] = self.Arabic
languages[os.sep + 'Chinese (Simplified)' + os.sep] = (
self.Chinese_Simplified)
languages[os.sep + 'Chinese (Traditional)' + os.sep] = (
self.Chinese_Traditional)
languages[os.sep + 'Czech' + os.sep] = self.Czech
languages[os.sep + 'Danish' + os.sep] = self.Danish
languages[os.sep + 'Dutch' + os.sep] = self.Dutch
languages[os.sep + 'English' + os.sep] = self.English
languages[os.sep + 'Finnish' + os.sep] = self.Finnish
languages[os.sep + 'French' + os.sep] = self.French
languages[os.sep + 'German' + os.sep] = self.German
languages[os.sep + 'Greek' + os.sep] = self.Greek
languages[os.sep + 'Hebrew' + os.sep] = self.Hebrew
languages[os.sep + 'Hungarian' + os.sep] = self.Hungarian
languages[os.sep + 'Italian' + os.sep] = self.Italian
languages[os.sep + 'Japanese (Japan)' + os.sep] = self.Japanese
languages[os.sep + 'Japanese' + os.sep] = self.Japanese
languages[os.sep + 'Korean' + os.sep] = self.Korean
languages[os.sep + 'Norwegian'] = self.Norwegian
languages[os.sep + 'Polish' + os.sep] = self.Polish
languages[os.sep + 'Portuguese (Brazil)' + os.sep] = (
self.Portuguese_Brazil)
languages[os.sep + 'Portuguese (Portugal)' + os.sep] = (
self.Portuguese_Portugal)
languages[os.sep + 'Russian' + os.sep] = self.Russian
languages[os.sep + 'Spanish (Traditional Sort)' + os.sep] = (
self.Spanish)
languages[os.sep + 'Swedish' + os.sep] = self.Swedish
languages[os.sep + 'Turkish' + os.sep] = self.Turkish
self.mCalligraphicLanguages = dict(languages)
languages[os.sep + 'neutral' + os.sep] = self.Neutral
languages[os.sep + 'arabic' + os.sep] = self.Arabic
languages[os.sep + 'chinese (simplified)' + os.sep] = (
self.Chinese_Simplified)
languages[os.sep + 'chinese (traditional)' + os.sep] = (
self.Chinese_Traditional)
languages[os.sep + 'czech' + os.sep] = self.Czech
languages[os.sep + 'danish' + os.sep] = self.Danish
languages[os.sep + 'dutch' + os.sep] = self.Dutch
languages[os.sep + 'english' + os.sep] = self.English
languages[os.sep + 'finnish' + os.sep] = self.Finnish
languages[os.sep + 'french' + os.sep] = self.French
languages[os.sep + 'german' + os.sep] = self.German
languages[os.sep + 'greek' + os.sep] = self.Greek
languages[os.sep + 'hebrew' + os.sep] = self.Hebrew
languages[os.sep + 'hungarian' + os.sep] = self.Hungarian
languages[os.sep + 'italian' + os.sep] = self.Italian
languages[os.sep + 'japanese (japan)' + os.sep] = self.Japanese
languages[os.sep + 'japanese' + os.sep] = self.Japanese
languages[os.sep + 'korean' + os.sep] = self.Korean
languages[os.sep + 'norwegian'] = self.Norwegian
languages[os.sep + 'polish' + os.sep] = self.Polish
languages[os.sep + 'portuguese (brazil)' + os.sep] = (
self.Portuguese_Brazil)
languages[os.sep + 'portuguese (portugal)' + os.sep] = (
self.Portuguese_Portugal)
languages[os.sep + 'russian' + os.sep] = self.Russian
languages[os.sep + 'spanish (traditional sort)' + os.sep] = (
self.Spanish)
languages[os.sep + 'swedish' + os.sep] = self.Swedish
languages[os.sep + 'turkish' + os.sep] = self.Turkish
languages[os.sep + 'NEUTRAL' + os.sep] = self.Neutral
languages[os.sep + 'ARABIC' + os.sep] = self.Arabic
languages[os.sep + 'CHINESE (SIMPLIFIED)' + os.sep] = (
self.Chinese_Simplified)
languages[os.sep + 'CHINESE (TRADITIONAL)' + os.sep] = (
self.Chinese_Traditional)
languages[os.sep + 'CZECH' + os.sep] = self.Czech
languages[os.sep + 'DANISH' + os.sep] = self.Danish
languages[os.sep + 'DUTCH' + os.sep] = self.Dutch
languages[os.sep + 'ENGLISH' + os.sep] = self.English
languages[os.sep + 'FINNISH' + os.sep] = self.Finnish
languages[os.sep + 'FRENCH' + os.sep] = self.French
languages[os.sep + 'GERMAN' + os.sep] = self.German
languages[os.sep + 'GREEK' + os.sep] = self.Greek
languages[os.sep + 'HEBREW' + os.sep] = self.Hebrew
languages[os.sep + 'HUNGARIAN' + os.sep] = self.Hungarian
languages[os.sep + 'ITALIAN' + os.sep] = self.Italian
languages[os.sep + 'JAPANESE (JAPAN)' + os.sep] = self.Japanese
languages[os.sep + 'JAPANESE' + os.sep] = self.Japanese
languages[os.sep + 'KOREAN' + os.sep] = self.Korean
languages[os.sep + 'NORWEGIAN'] = self.Norwegian
languages[os.sep + 'POLISH' + os.sep] = self.Polish
languages[os.sep + 'PORTUGUESE (BRAZIL)' + os.sep] = (
self.Portuguese_Brazil)
languages[os.sep + 'PORTUGUESE (PORTUGAL)' + os.sep] = (
self.Portuguese_Portugal)
languages[os.sep + 'RUSSIAN' + os.sep] = self.Russian
languages[os.sep + 'SPANISH (TRADITIONAL SORT)' + os.sep] = (
self.Spanish)
languages[os.sep + 'SWEDISH' + os.sep] = self.Swedish
languages[os.sep + 'TURKISH' + os.sep] = self.Turkish
languages[os.sep + 'NEU' + os.sep] = self.Neutral
languages[os.sep + 'ARA' + os.sep] = self.Arabic
languages[os.sep + 'CHS' + os.sep] = self.Chinese_Simplified
languages[os.sep + 'CHT' + os.sep] = self.Chinese_Traditional
languages[os.sep + 'CSY' + os.sep] = self.Czech
languages[os.sep + 'DAN' + os.sep] = self.Danish
languages[os.sep + 'NLD' + os.sep] = self.Dutch
languages[os.sep + 'ENU' + os.sep] = self.English
languages[os.sep + 'FIN' + os.sep] = self.Finnish
languages[os.sep + 'FRA' + os.sep] = self.French
languages[os.sep + 'DEU' + os.sep] = self.German
languages[os.sep + 'ELL' + os.sep] = self.Greek
languages[os.sep + 'HEB' + os.sep] = self.Hebrew
languages[os.sep + 'HUN' + os.sep] = self.Hungarian
languages[os.sep + 'ITA' + os.sep] = self.Italian
languages[os.sep + 'JPN' + os.sep] = self.Japanese
languages[os.sep + 'KOR' + os.sep] = self.Korean
languages[os.sep + 'NOR' + os.sep] = self.Norwegian
languages[os.sep + 'PLK' + os.sep] = self.Polish
languages[os.sep + 'PTB' + os.sep] = self.Portuguese_Brazil
languages[os.sep + 'PTG' + os.sep] = self.Portuguese_Portugal
languages[os.sep + 'RUS' + os.sep] = self.Russian
languages[os.sep + 'ESN' + os.sep] = self.Spanish
languages[os.sep + 'SVE' + os.sep] = self.Swedish
languages[os.sep + 'TRK' + os.sep] = self.Turkish
languages[os.sep + 'neu' + os.sep] = self.Neutral
languages[os.sep + 'ara' + os.sep] = self.Arabic
languages[os.sep + 'chs' + os.sep] = self.Chinese_Simplified
languages[os.sep + 'cht' + os.sep] = self.Chinese_Traditional
languages[os.sep + 'csy' + os.sep] = self.Czech
languages[os.sep + 'dan' + os.sep] = self.Danish
languages[os.sep + 'nld' + os.sep] = self.Dutch
languages[os.sep + 'enu' + os.sep] = self.English
languages[os.sep + 'fin' + os.sep] = self.Finnish
languages[os.sep + 'fra' + os.sep] = self.French
languages[os.sep + 'deu' + os.sep] = self.German
languages[os.sep + 'ell' + os.sep] = self.Greek
languages[os.sep + 'heb' + os.sep] = self.Hebrew
languages[os.sep + 'hun' + os.sep] = self.Hungarian
languages[os.sep + 'ita' + os.sep] = self.Italian
languages[os.sep + 'jpn' + os.sep] = self.Japanese
languages[os.sep + 'kor' + os.sep] = self.Korean
languages[os.sep + 'nor' + os.sep] = self.Norwegian
languages[os.sep + 'plk' + os.sep] = self.Polish
languages[os.sep + 'ptb' + os.sep] = self.Portuguese_Brazil
languages[os.sep + 'ptg' + os.sep] = self.Portuguese_Portugal
languages[os.sep + 'rus' + os.sep] = self.Russian
languages[os.sep + 'esn' + os.sep] = self.Spanish
languages[os.sep + 'sve' + os.sep] = self.Swedish
languages[os.sep + 'trk' + os.sep] = self.Turkish
self.mLanguages = languages
def getLanguage(self, aPath):
language = UnknownSubstance.getItemByPath(self.mLanguages, aPath)
if language is not None:
return language
return UnknownSubstance.unknown('UNKNOWN LANGUAGE', aPath)
def getPathKey(self, aValue):
return UnknownSubstance.getKeyPathByValue(
self.mCalligraphicLanguages, aValue)
| TheVice/WinUpdatesManager | Python/core/languages.py | Python | apache-2.0 | 9,869 |
import itertools
import json
import os
import zipfile
from collections import defaultdict
from cStringIO import StringIO
from datetime import datetime
from slyd.projecttemplates import templates
REQUIRED_FILES = {'setup.py', 'scrapy.cfg', 'extractors.json', 'items.json',
'project.json', 'spiders/__init__.py', 'spiders/settings.py'}
FILE_TEMPLATES = {
'extractors.json': '{}',
'items.json': '{}',
'project.json': templates['PROJECT'],
'scrapy.cfg': templates['SCRAPY'],
'setup.py': templates['SETUP'],
'spiders/__init__.py': '',
'spiders/settings.py': templates['SETTINGS']
}
class ProjectArchiver(object):
required_files = frozenset(REQUIRED_FILES)
file_templates = FILE_TEMPLATES
def __init__(self, project, version=None, required_files=None):
if version is None:
version = (0, 10)
self.separator = os.path.sep
self.version = version
self.project = project
if required_files is not None:
self.required_files = required_files
def archive(self, spiders=None):
"""
Zip the contents or a subset of the contents in this project together
"""
zbuff = StringIO()
self._archive = zipfile.ZipFile(zbuff, "w", zipfile.ZIP_DEFLATED)
self._add_files(spiders)
self._archive.close()
zbuff.reset()
return zbuff
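    # Illustrative usage (hypothetical project name and paths, not part of
    # slyd's API surface): write the archive produced above to disk.
    #
    #   archiver = FileSystemProjectArchiver('myproject', base_dir='/data/projects')
    #   zip_buffer = archiver.archive(spiders=['example_spider'])
    #   with open('myproject.zip', 'wb') as f:
    #       f.write(zip_buffer.read())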
def _add_files(self, spiders):
"""
Add all selected spiders and other files to the project
"""
now = datetime.now().timetuple()[:6]
extractors = self.read_file('extractors.json', deserialize=True) or {}
files, all_files, spider_templates = self._paths(spiders)
seen_files = set()
for file_path in files:
if file_path in seen_files:
continue
if (file_path.startswith('spiders/') and
file_path.endswith('.json')):
path, contents, added = self._add_spider(file_path,
spider_templates,
extractors)
seen_files.update(added)
if contents is not None:
self._add_file(file_path, contents, now)
else:
self._add_file(file_path, self.read_file(file_path), now)
seen_files.add(file_path)
missing = (set(self.file_templates) & self.required_files) - seen_files
for file_path in missing:
self._add_file(file_path, self.file_templates[file_path], now)
def _add_file(self, filename, contents, tstamp):
"""
Add a file to the zip archive.
"""
if filename is None or contents in (None, 'null'):
return
fileinfo = zipfile.ZipInfo(filename, tstamp)
fileinfo.external_attr = 0666 << 16L
self._archive.writestr(fileinfo, contents, zipfile.ZIP_DEFLATED)
def _add_spider(self, file_path, templates, extractors):
"""
Add a spider or template to the archive. If the slybot version is less
than 0.10 a spider and all of its templates are added as a single file.
"""
if self.version > (0, 9):
data = self.read_file(file_path, deserialize=True)
added = {file_path}
else:
file_path, data, added = self._add_legacy_spider(file_path,
templates,
extractors)
if data is not None and data.get('deleted'):
return self._deleted_spider(file_path, data, templates)
spider_content = json.dumps(data, sort_keys=True, indent=4)
return file_path, spider_content, added
def _add_legacy_spider(self, file_path, templates, extractors):
"""
Build a legacy spider and add all templates to a single spider object
"""
spider = self._spider_name(file_path)
file_path = self._spider_path(file_path)
spider_data = self.read_file(file_path, deserialize=True)
if spider_data.get('deleted'):
return file_path, spider_data, {file_path}
names = set(spider_data.pop('template_names', []))
spider_templates = [tp for tp in templates.get(spider, [])
if self._name(tp) in names]
loaded_templates, added = self._spider_templates(spider_templates,
extractors)
added.add(file_path)
spider_data['templates'] = loaded_templates
return file_path, spider_data, added
def _deleted_spider(self, file_path, spider_data, templates):
"""
Add information about a deleted spider.
"""
spider = self._spider_name(file_path)
file_path = self._spider_path(file_path)
added = {file_path}
added.update(set(templates.get(spider, [])))
if self.ignore_deleted:
return None, None, added
spider_content = json.dumps(spider_data, sort_keys=True, indent=4)
return file_path, spider_content, added
def _spider_templates(self, spider_templates, extractors):
"""
Find all templates for a legacy spider and combine them into a single
list.
"""
templates, added = [], set()
for template_path in spider_templates:
added.add(template_path)
existing = {}
template = self.read_file(template_path, deserialize=True)
if template is None:
continue
template_extractors = template.get('extractors', {})
for field, eids in template_extractors.items():
existing[field] = [eid for eid in eids
if eid in extractors]
template['extractors'] = existing
templates.append(template)
return templates, added
def _spider_name(self, file_path):
"""
Get the name of a spider for a template or spider path.
"""
split = file_path.split(self.separator)
if len(split) > 2:
return split[1]
return split[1][:-5]
def _name(self, file_path):
"""
Get the name for the current json path
"""
split = file_path.split(self.separator)
if split[-1].endswith('.json'):
return split[-1][:-5]
return ''
def _spider_path(self, file_path):
if len(file_path.split(self.separator)) > 2:
return 'spiders/%s.json' % self._spider_name(file_path)
return file_path
def _paths(self, spiders):
"""
Build a collection of paths needed to build the archive.
"""
if spiders is None or spiders == '*':
all_files = self.list_files()
return all_files, all_files, self._template_paths(None, all_files)
if isinstance(spiders, basestring):
spiders = [spiders]
spider_paths = set('spiders/%s.json' % spider for spider in spiders)
all_files = self.list_files()
template_paths = self._template_paths(spiders, all_files)
if self.version > (0, 9):
templates = set(itertools.chain(*template_paths.itervalues()))
spider_paths = spider_paths | templates
files = list(set(spider_paths) | self.required_files)
return files, all_files, template_paths
def _template_paths(self, spiders, files):
"""
Map all template paths to the corresponding spider.
"""
spider_templates = defaultdict(list)
for file_path in files:
split_file_path = file_path.split('/')
if len(split_file_path) > 2 and (spiders is None or
split_file_path[1] in spiders):
spider_templates[split_file_path[1]].append(file_path)
return spider_templates
def list_files(self):
raise NotImplementedError
def read_file(self, filename, deserialize=False):
raise NotImplementedError
class FileSystemProjectArchiver(ProjectArchiver):
def __init__(self, project, version=None, required_files=None,
base_dir='.'):
self.base_dir = os.path.join(base_dir, '')
super(FileSystemProjectArchiver, self).__init__(project, version,
required_files)
self.separator = os.path.sep
def list_files(self):
file_paths = []
project_dir = os.path.join(self.base_dir, self.project)
for dir, _, files in os.walk(project_dir):
dir = dir.split(project_dir)[1]
dir = dir[1:] if dir.startswith(os.path.sep) else dir
for filename in files:
if filename.endswith(('.json', '.cfg', '.py')):
file_paths.append(os.path.join(dir, filename))
return file_paths
def read_file(self, filename, deserialize=False):
file_path = os.path.join(self.base_dir, self.project, filename)
if not os.path.isfile(file_path):
return
with open(file_path, 'r') as f:
contents = f.read()
if deserialize and contents:
return json.loads(contents)
return contents
class GitProjectArchiver(ProjectArchiver):
def __init__(self, project, version=None, ignore_deleted=True,
required_files=None, branch='master'):
self.branch = branch
self.ignore_deleted = ignore_deleted
super(GitProjectArchiver, self).__init__(project, version,
required_files)
self.separator = '/'
def list_files(self):
return list(set(self.project.list_files_for_branch('master')) |
set(self.project.list_files_for_branch(self.branch)))
def read_file(self, filename, deserialize=False):
contents = self.project.file_contents_for_branch(filename, self.branch)
if contents is None and self.branch != 'master':
contents = self.project.file_contents_for_branch(filename,
'master')
if contents is None and not self.ignore_deleted:
contents = json.dumps({'deleted': True})
if deserialize and contents is not None:
return json.loads(contents)
return contents
| nju520/portia | slyd/slyd/utils/download.py | Python | bsd-3-clause | 10,547 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 15:32:58 2015
@author: hanbre
"""
from __future__ import print_function
import sys
import numpy as np
import pandas as pd
import xray
import datetime
import netCDF4
from mpl_toolkits.basemap import Basemap
import matplotlib
from matplotlib.pylab import *
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
import seaborn as sns
from IPython import embed
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
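# Minimal usage sketch (assumed values; not executed here): with
#   norm = MidpointNormalize(vmin=-5, vmax=15, midpoint=0)
# the midpoint value 0 maps to 0.5, i.e. the centre of a diverging colormap
# such as 'seismic', even though vmin/vmax are asymmetric. plotter() below
# relies on this to keep zero values on the centre of the colour scale.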
def read_data(id_in):
data = xray.open_dataset(id_in)
return data
def plotter(vm,x,y):
#fig=figure()
print('plotter')
xx,yy=np.meshgrid(x,y)
if shape(xx)!=shape(vm):
vm=vm.transpose()
gases = ['O3','HCL','CL','CLY','']
if var in gases:
CF = contourf(x,y,vm,linspace(np.amin(vm.values),np.amax(vm.values),10),cmap=matplotlib.cm.jet)
CS=contour(x, y, vm,linspace(np.amin(vm.values),np.amax(vm.values),10),colors='k')
elif var == 'T':
CF = contourf(x,y,vm,linspace(np.amin(vm.values),400,10),cmap=matplotlib.cm.jet)
CS=contour(x, y, vm,linspace(np.amin(vm.values),400,10),colors='k')
else:
norm = MidpointNormalize(midpoint=0)
CF=contourf(x,y,vm,np.linspace(np.amin(vm.values),np.amax(vm.values),1000),norm=norm,cmap='seismic')
CS=contour(x, y, vm,10,colors='k')
xlabel(x.units);ylabel(y.units)
clb = colorbar(CF); clb.set_label('('+v.units+')')
#title=('{0} at {1}={2} and {3}={4}'.format(var,getattr(v,pvar1)[p1],getattr(v,pvar1)[p1].values,getattr(v,pvar2)[p2],getattr(v,pvar2)[p2].values))
#close(fig)
return
def meaner(v,mvars):
vm = v.mean(dim=mvars)
return vm
def pointextr(v,pvar1,p1,pvar2,p2,pvars):
vm = v[pvars]
return vm
if __name__=='__main__':
i=0
#case_id = id_in.split('/')
with open(sys.argv[1], 'r') as file_in:
header=next(file_in)
for line in file_in:
i+=1
l=line.strip('\n').split(' ')
id_in=l[0]
ds=read_data(id_in)
typ = l[1]
print(typ)
var = l[2]
xvar = l[3]; yvar = l[4]
v=getattr(ds,var)
x=getattr(ds,xvar)
y=getattr(ds,yvar)
if typ == 'm':
print('here')
mvar1 = l[5]; mvar2 = l[6]
if size(v.dims)==4:
mvars = [mvar1,mvar2]
else:
mvars = [mvar1]
vm=meaner(v,mvars)
savestring = '{0}{1}{2}{3}{4}{5}{6}.png'.format(id_in,typ,var,xvar,yvar,mvar1,mvar2)
print(savestring)
elif typ == 'p':
print('there')
pvar1=l[5]; p1=int(l[7])
pvar2=l[6]; p2=int(l[8])
pvars = {pvar1: p1, pvar2: p2}
vm=pointextr(v,pvar1,p1,pvar2,p2,pvars)
savestring = '{0}{1}{2}{3}{4}{5}{6}{7}{8}.png'.format(id_in,typ,var,xvar,yvar,pvar1,pvar2,p1,p2)
print(savestring)
            fig = figure(num = i, figsize=(10.,5.), dpi=None, facecolor='w', edgecolor='k')
            xis = axes([0.09, 0.1, 0.85, 0.82], axisbg = 'white')
plotter(vm,x,y)
if yvar == 'lev':
print('log=True')
xis.set_yscale("log")
savefig(savestring,dpi=100, facecolor='w', edgecolor='w', orientation='portrait')
print('again')
close(fig)
del(ds)
| hansbrenna/NetCDF_postprocessor | plotter3.py | Python | gpl-3.0 | 4,021 |
#!/usr/bin/env python
# coding: utf-8
import os
import re
from setuptools import setup
path = os.path.dirname(__file__)
desc_fd = os.path.join(path, 'README.rst')
hist_fd = os.path.join(path, 'HISTORY.rst')
long_desc = ''
short_desc = 'A Kerberos authentication handler for python-requests'
if os.path.isfile(desc_fd):
with open(desc_fd) as fd:
long_desc = fd.read()
if os.path.isfile(hist_fd):
with open(hist_fd) as fd:
long_desc = '\n\n'.join([long_desc, fd.read()])
def get_version():
"""
Simple function to extract the current version using regular expressions.
"""
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
with open('requests_kerberos/__init__.py') as fd:
matches = list(filter(lambda x: x, map(reg.match, fd)))
if not matches:
raise RuntimeError(
'Could not find the version information for requests_kerberos'
)
return matches[0].group(1)
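# Illustrative example (not executed): for a line such as
#   __version__ = '0.9.0'
# in requests_kerberos/__init__.py, the pattern above captures the quoted
# value and get_version() returns the string '0.9.0'.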
setup(
name='requests-kerberos',
description=short_desc,
long_description=long_desc,
author='Ian Cordasco, Cory Benfield, Michael Komitee',
author_email='graffatcolmingov@gmail.com',
url='https://github.com/requests/requests-kerberos',
packages=['requests_kerberos'],
package_data={'': ['LICENSE', 'AUTHORS']},
include_package_data=True,
version=get_version(),
install_requires=[
'requests>=1.1.0',
],
extras_require={
':sys_platform=="win32"': ['kerberos-sspi>=0.2'],
':sys_platform!="win32"': ['pykerberos>=1.1.8,<2.0.0'],
},
test_suite='test_requests_kerberos',
tests_require=['mock'],
)
| rbcarson/requests-kerberos | setup.py | Python | isc | 1,640 |
"""
`SQLite`_ database specific implementations of changeset classes.
.. _`SQLite`: http://www.sqlite.org/
"""
try: # Python 3
from collections import MutableMapping as DictMixin
except ImportError: # Python 2
from UserDict import DictMixin
from copy import copy
import re
from sqlalchemy.databases import sqlite as sa_base
from sqlalchemy.schema import ForeignKeyConstraint
from sqlalchemy.schema import UniqueConstraint
from migrate import exceptions
from migrate.changeset import ansisql
SQLiteSchemaGenerator = sa_base.SQLiteDDLCompiler
class SQLiteCommon(object):
def _not_supported(self, op):
raise exceptions.NotSupportedError("SQLite does not support "
"%s; see http://www.sqlite.org/lang_altertable.html" % op)
class SQLiteHelper(SQLiteCommon):
def _filter_columns(self, cols, table):
"""Splits the string of columns and returns those only in the table.
:param cols: comma-delimited string of table columns
:param table: the table to check
:return: list of columns in the table
"""
columns = []
for c in cols.split(","):
if c in table.columns:
# There was a bug in reflection of SQLite columns with
# reserved identifiers as names (SQLite can return them
# wrapped with double quotes), so strip double quotes.
                columns.append(c.strip(' "'))
return columns
def _get_constraints(self, table):
"""Retrieve information about existing constraints of the table
This feature is needed for recreate_table() to work properly.
"""
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]
        UNIQUE_PATTERN = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
constraints = []
for name, cols in re.findall(UNIQUE_PATTERN, data):
# Filter out any columns that were dropped from the table.
columns = self._filter_columns(cols, table)
if columns:
                constraints.append(UniqueConstraint(*columns, name=name))
        FKEY_PATTERN = r"CONSTRAINT (\w+) FOREIGN KEY \(([^\)]+)\)"
for name, cols in re.findall(FKEY_PATTERN, data):
# Filter out any columns that were dropped from the table.
columns = self._filter_columns(cols, table)
if columns:
                constraints.append(ForeignKeyConstraint(*columns, name=name))
return constraints
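    # Illustrative example (sketch only, hypothetical DDL): if sqlite_master
    # stores the table as
    #   CREATE TABLE t (id INTEGER, name VARCHAR,
    #                   CONSTRAINT uq_name UNIQUE (name),
    #                   CONSTRAINT fk_id FOREIGN KEY (id) REFERENCES other (id))
    # then UNIQUE_PATTERN extracts ('uq_name', 'name') and FKEY_PATTERN
    # extracts ('fk_id', 'id'), which are rebuilt into constraint objects above.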
def recreate_table(self, table, column=None, delta=None,
omit_constraints=None):
table_name = self.preparer.format_table(table)
# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()
# reflect existing constraints
for constraint in self._get_constraints(table):
table.append_constraint(constraint)
# omit given constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_constraints is None or cons.name not in omit_constraints
])
self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()
insertion_string = self._modify_table(table, column, delta)
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()
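    # Illustrative outline (sketch only) of what recreate_table() emits for a
    # table "t": indexes are dropped up front, then roughly
    #   ALTER TABLE t RENAME TO migration_tmp;
    #   CREATE TABLE t (...);                               -- new definition
    #   INSERT INTO t (...) SELECT ... FROM migration_tmp;
    #   DROP TABLE migration_tmp;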
def visit_column(self, delta):
if isinstance(delta, DictMixin):
column = delta.result_column
table = self._to_table(delta.table)
else:
column = delta
table = self._to_table(column.table)
self.recreate_table(table,column,delta)
class SQLiteColumnGenerator(SQLiteSchemaGenerator,
ansisql.ANSIColumnGenerator,
# at the end so we get the normal
# visit_column by default
SQLiteHelper,
SQLiteCommon
):
"""SQLite ColumnGenerator"""
def _modify_table(self, table, column, delta):
columns = ' ,'.join(map(
self.preparer.format_column,
[c for c in table.columns if c.name!=column.name]))
return ('INSERT INTO %%(table_name)s (%(cols)s) '
'SELECT %(cols)s from migration_tmp')%{'cols':columns}
def visit_column(self,column):
if column.foreign_keys:
SQLiteHelper.visit_column(self,column)
else:
super(SQLiteColumnGenerator,self).visit_column(column)
class SQLiteColumnDropper(SQLiteHelper, ansisql.ANSIColumnDropper):
"""SQLite ColumnDropper"""
def _modify_table(self, table, column, delta):
columns = ' ,'.join(map(self.preparer.format_column, table.columns))
return 'INSERT INTO %(table_name)s SELECT ' + columns + \
' from migration_tmp'
def visit_column(self,column):
# For SQLite, we *have* to remove the column here so the table
# is re-created properly.
column.remove_from_table(column.table,unset_table=False)
super(SQLiteColumnDropper,self).visit_column(column)
class SQLiteSchemaChanger(SQLiteHelper, ansisql.ANSISchemaChanger):
"""SQLite SchemaChanger"""
def _modify_table(self, table, column, delta):
return 'INSERT INTO %(table_name)s SELECT * from migration_tmp'
def visit_index(self, index):
"""Does not support ALTER INDEX"""
self._not_supported('ALTER INDEX')
class SQLiteConstraintGenerator(ansisql.ANSIConstraintGenerator, SQLiteHelper, SQLiteCommon):
def visit_migrate_primary_key_constraint(self, constraint):
tmpl = "CREATE UNIQUE INDEX %s ON %s ( %s )"
cols = ', '.join(map(self.preparer.format_column, constraint.columns))
tname = self.preparer.format_table(constraint.table)
name = self.get_constraint_name(constraint)
msg = tmpl % (name, tname, cols)
self.append(msg)
self.execute()
def _modify_table(self, table, column, delta):
return 'INSERT INTO %(table_name)s SELECT * from migration_tmp'
def visit_migrate_foreign_key_constraint(self, *p, **k):
self.recreate_table(p[0].table)
def visit_migrate_unique_constraint(self, *p, **k):
self.recreate_table(p[0].table)
class SQLiteConstraintDropper(ansisql.ANSIColumnDropper,
SQLiteHelper,
ansisql.ANSIConstraintCommon):
def _modify_table(self, table, column, delta):
return 'INSERT INTO %(table_name)s SELECT * from migration_tmp'
def visit_migrate_primary_key_constraint(self, constraint):
tmpl = "DROP INDEX %s "
name = self.get_constraint_name(constraint)
msg = tmpl % (name)
self.append(msg)
self.execute()
def visit_migrate_foreign_key_constraint(self, *p, **k):
self.recreate_table(p[0].table, omit_constraints=[p[0].name])
def visit_migrate_check_constraint(self, *p, **k):
self._not_supported('ALTER TABLE DROP CONSTRAINT')
def visit_migrate_unique_constraint(self, *p, **k):
self.recreate_table(p[0].table, omit_constraints=[p[0].name])
# TODO: technically primary key is a NOT NULL + UNIQUE constraint, should add NOT NULL to index
class SQLiteDialect(ansisql.ANSIDialect):
columngenerator = SQLiteColumnGenerator
columndropper = SQLiteColumnDropper
schemachanger = SQLiteSchemaChanger
constraintgenerator = SQLiteConstraintGenerator
constraintdropper = SQLiteConstraintDropper
| odubno/microblog | venv/lib/python2.7/site-packages/migrate/changeset/databases/sqlite.py | Python | bsd-3-clause | 7,967 |
"""Simple App configuration for hs_core module."""
from django.apps import AppConfig
class HSCoreAppConfig(AppConfig):
"""Configures options for hs_core app."""
name = "hs_core"
def ready(self):
"""On application ready, import receivers for Django signals."""
from . import receivers # noqa
| hydroshare/hydroshare | hs_core/apps.py | Python | bsd-3-clause | 325 |
#
#
# Copyright (C) 2006, 2007, 2008, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Fake hypervisor
"""
import os
import os.path
import logging
from ganeti import utils
from ganeti import constants
from ganeti import errors
from ganeti import objects
from ganeti import pathutils
from ganeti.hypervisor import hv_base
class FakeHypervisor(hv_base.BaseHypervisor):
"""Fake hypervisor interface.
This can be used for testing the ganeti code without having to have
a real virtualisation software installed.
"""
PARAMETERS = {
constants.HV_MIGRATION_MODE: hv_base.MIGRATION_MODE_CHECK,
}
CAN_MIGRATE = True
_ROOT_DIR = pathutils.RUN_DIR + "/fake-hypervisor"
def __init__(self):
hv_base.BaseHypervisor.__init__(self)
utils.EnsureDirs([(self._ROOT_DIR, constants.RUN_DIRS_MODE)])
def ListInstances(self, hvparams=None):
"""Get the list of running instances.
"""
return os.listdir(self._ROOT_DIR)
def GetInstanceInfo(self, instance_name, hvparams=None):
"""Get instance properties.
@type instance_name: string
@param instance_name: the instance name
@type hvparams: dict of strings
@param hvparams: hvparams to be used with this instance
@return: tuple of (name, id, memory, vcpus, stat, times)
"""
file_name = self._InstanceFile(instance_name)
if not os.path.exists(file_name):
return None
try:
fh = open(file_name, "r")
try:
inst_id = fh.readline().strip()
memory = utils.TryConvert(int, fh.readline().strip())
vcpus = utils.TryConvert(int, fh.readline().strip())
stat = hv_base.HvInstanceState.RUNNING
times = 0
return (instance_name, inst_id, memory, vcpus, stat, times)
finally:
fh.close()
except IOError, err:
raise errors.HypervisorError("Failed to list instance %s: %s" %
(instance_name, err))
def GetAllInstancesInfo(self, hvparams=None):
"""Get properties of all instances.
@type hvparams: dict of strings
@param hvparams: hypervisor parameter
@return: list of tuples (name, id, memory, vcpus, stat, times)
"""
data = []
for file_name in os.listdir(self._ROOT_DIR):
try:
fh = open(utils.PathJoin(self._ROOT_DIR, file_name), "r")
inst_id = "-1"
memory = 0
vcpus = 1
stat = hv_base.HvInstanceState.SHUTDOWN
times = -1
try:
inst_id = fh.readline().strip()
memory = utils.TryConvert(int, fh.readline().strip())
vcpus = utils.TryConvert(int, fh.readline().strip())
stat = hv_base.HvInstanceState.RUNNING
times = 0
finally:
fh.close()
data.append((file_name, inst_id, memory, vcpus, stat, times))
except IOError, err:
raise errors.HypervisorError("Failed to list instances: %s" % err)
return data
@classmethod
def _InstanceFile(cls, instance_name):
"""Compute the instance file for an instance name.
"""
return utils.PathJoin(cls._ROOT_DIR, instance_name)
def _IsAlive(self, instance_name):
"""Checks if an instance is alive.
"""
file_name = self._InstanceFile(instance_name)
return os.path.exists(file_name)
def _MarkUp(self, instance, memory):
"""Mark the instance as running.
This does no checks, which should be done by its callers.
"""
file_name = self._InstanceFile(instance.name)
fh = file(file_name, "w")
try:
fh.write("0\n%d\n%d\n" %
(memory,
instance.beparams[constants.BE_VCPUS]))
finally:
fh.close()
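  # Illustrative note (not part of the ganeti API): _MarkUp() writes a
  # three-line fake runtime file -- instance id ("0"), memory and VCPU count
  # (memory in MiB under ganeti's usual conventions) -- which is exactly what
  # GetInstanceInfo() parses back. For 512 MiB and 2 VCPUs the file contains:
  #   0
  #   512
  #   2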
def _MarkDown(self, instance_name):
"""Mark the instance as running.
This does no checks, which should be done by its callers.
"""
file_name = self._InstanceFile(instance_name)
utils.RemoveFile(file_name)
def StartInstance(self, instance, block_devices, startup_paused):
"""Start an instance.
For the fake hypervisor, it just creates a file in the base dir,
    raising an exception if it already exists. We don't actually
handle race conditions properly, since these are *FAKE* instances.
"""
if self._IsAlive(instance.name):
raise errors.HypervisorError("Failed to start instance %s: %s" %
(instance.name, "already running"))
try:
self._MarkUp(instance, self._InstanceStartupMemory(instance))
except IOError, err:
raise errors.HypervisorError("Failed to start instance %s: %s" %
(instance.name, err))
def StopInstance(self, instance, force=False, retry=False, name=None,
timeout=None):
"""Stop an instance.
For the fake hypervisor, this just removes the file in the base
dir, if it exist, otherwise we raise an exception.
"""
assert(timeout is None or force is not None)
if name is None:
name = instance.name
if not self._IsAlive(name):
raise errors.HypervisorError("Failed to stop instance %s: %s" %
(name, "not running"))
self._MarkDown(name)
def RebootInstance(self, instance):
"""Reboot an instance.
For the fake hypervisor, this does nothing.
"""
return
def BalloonInstanceMemory(self, instance, mem):
"""Balloon an instance memory to a certain value.
@type instance: L{objects.Instance}
@param instance: instance to be accepted
@type mem: int
@param mem: actual memory size to use for instance runtime
"""
if not self._IsAlive(instance.name):
raise errors.HypervisorError("Failed to balloon memory for %s: %s" %
(instance.name, "not running"))
try:
self._MarkUp(instance, mem)
except EnvironmentError, err:
raise errors.HypervisorError("Failed to balloon memory for %s: %s" %
(instance.name, utils.ErrnoOrStr(err)))
def GetNodeInfo(self, hvparams=None):
"""Return information about the node.
See L{BaseHypervisor.GetLinuxNodeInfo}.
"""
result = self.GetLinuxNodeInfo()
    # subtract running instances
all_instances = self.GetAllInstancesInfo()
result["memory_free"] -= min(result["memory_free"],
sum([row[2] for row in all_instances]))
return result
@classmethod
def GetInstanceConsole(cls, instance, primary_node, node_group,
hvparams, beparams):
"""Return information for connecting to the console of an instance.
"""
return objects.InstanceConsole(instance=instance.name,
kind=constants.CONS_MESSAGE,
message=("Console not available for fake"
" hypervisor"))
def Verify(self, hvparams=None):
"""Verify the hypervisor.
For the fake hypervisor, it just checks the existence of the base
dir.
@type hvparams: dict of strings
@param hvparams: hypervisor parameters to be verified against; not used
for fake hypervisors
@return: Problem description if something is wrong, C{None} otherwise
"""
if os.path.exists(self._ROOT_DIR):
return None
else:
return "The required directory '%s' does not exist" % self._ROOT_DIR
@classmethod
def PowercycleNode(cls, hvparams=None):
"""Fake hypervisor powercycle, just a wrapper over Linux powercycle.
@type hvparams: dict of strings
@param hvparams: hypervisor params to be used on this node
"""
cls.LinuxPowercycle()
def AcceptInstance(self, instance, info, target):
"""Prepare to accept an instance.
@type instance: L{objects.Instance}
@param instance: instance to be accepted
@type info: string
@param info: instance info, not used
@type target: string
@param target: target host (usually ip), on this node
"""
if self._IsAlive(instance.name):
raise errors.HypervisorError("Can't accept instance, already running")
def MigrateInstance(self, cluster_name, instance, target, live):
"""Migrate an instance.
@type cluster_name: string
@param cluster_name: name of the cluster
@type instance: L{objects.Instance}
@param instance: the instance to be migrated
@type target: string
@param target: hostname (usually ip) of the target node
@type live: boolean
@param live: whether to do a live or non-live migration
"""
logging.debug("Fake hypervisor migrating %s to %s (live=%s)",
instance, target, live)
def FinalizeMigrationDst(self, instance, info, success):
"""Finalize the instance migration on the target node.
For the fake hv, this just marks the instance up.
@type instance: L{objects.Instance}
@param instance: instance whose migration is being finalized
@type info: string/data (opaque)
@param info: migration information, from the source node
@type success: boolean
@param success: whether the migration was a success or a failure
"""
if success:
self._MarkUp(instance, self._InstanceStartupMemory(instance))
else:
# ensure it's down
self._MarkDown(instance.name)
def PostMigrationCleanup(self, instance):
"""Clean-up after a migration.
To be executed on the source node.
@type instance: L{objects.Instance}
@param instance: the instance that was migrated
"""
pass
def FinalizeMigrationSource(self, instance, success, live):
"""Finalize the instance migration on the source node.
@type instance: L{objects.Instance}
@param instance: the instance that was migrated
@type success: bool
@param success: whether the migration succeeded or not
@type live: bool
@param live: whether the user requested a live migration or not
"""
# pylint: disable=W0613
if success:
self._MarkDown(instance.name)
def GetMigrationStatus(self, instance):
"""Get the migration status
The fake hypervisor migration always succeeds.
@type instance: L{objects.Instance}
@param instance: the instance that is being migrated
@rtype: L{objects.MigrationStatus}
@return: the status of the current migration (one of
L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
progress info that can be retrieved from the hypervisor
"""
return objects.MigrationStatus(status=constants.HV_MIGRATION_COMPLETED)
| leshchevds/ganeti | lib/hypervisor/hv_fake.py | Python | bsd-2-clause | 11,785 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Mock batch system interface, reading submitted jobs from a history file.
"""
# Copyright (C) 2011, 2012 ETH Zurich and University of Zurich. All rights reserved.
#
# Authors:
# Riccardo Murri <riccardo.murri@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
__docformat__ = 'reStructuredText'
__version__ = '$Revision$'
# stdlib imports
import csv
import time
# local imports
from vmmad import log
from vmmad.orchestrator import JobInfo
class JobsFromFile(object):
"""
Mock batch system interface, replaying submitted jobs info from a CSV file.
"""
def __init__(self, filename, timer=time.time, start_time=-1):
"""
Construct a `JobsFromFile` object.
Read job history from `filename`, which must be a CSV file
with the following format:
- the first line is a header line with the column names;
- column names are all uppercase;
- the number and type of columns is abritrary,
but at least these columns *must* be present:
+ ``JOBID``: a string uniquely identifying the job
+ ``SUBMITTED_AT``: time the job was submitted, as a UNIX epoch
+ ``RUN_DURATION``: duration of the job, in seconds
Only jobs that were submitted after `start_time` are loaded;
if `start_time` is `None`, then all jobs are loaded and the
`.start_time` attribute is set to the first submitted job in
the list.
Passing of time is controlled via the `timer` parameter: this
must be a callable that returns the 'current time' in UNIX
epoch format (just like Pythons `time.time`).
:param str filename: The CSV file to read job history from.
:param timer: A function returning the (possibly simulated) current time in UNIX epoch format.
:param int start_time: Replay start time, as a UNIX epoch.
"""
self.timer = timer
self.time_last_checked = 0
# auto-detect CSV delimiter, etc.
with open(filename, 'r') as input_file:
sample = input_file.read(1024)
input_file.seek(0)
rows = csv.DictReader(input_file,
dialect = csv.Sniffer().sniff(sample))
# load all jobs into memory, ordered by descending
# submission time, so that the last item is the next job
# to be submitted and we can just .pop() it
self.future_jobs = list(
sorted((JobInfo(jobid=row['JOBID'],
state=JobInfo.PENDING,
submitted_at=float(row['SUBMITTED_AT']),
duration=float(row['RUN_DURATION']))
for row in rows if float(row['SUBMITTED_AT']) > start_time),
# sort list of jobs by submission time
cmp=(lambda x,y: cmp(x.submitted_at, y.submitted_at)),
reverse=True))
log.info("Loaded %d jobs from file '%s'", len(self.future_jobs), filename)
assert (len(self.future_jobs) < 2
or (self.future_jobs[0].submitted_at >= self.future_jobs[1].submitted_at))
# if `start_time` has not been set, then use earliest job
# submission time as starting point
if start_time == -1:
self.start_time = self.future_jobs[-1].submitted_at
else:
self.start_time = start_time
# list of jobs currently in the (simulated) batch system
self.jobs = [ ]
def get_sched_info(self):
"""
Return a list of `JobInfo` objects representing the jobs in
the batch queue system.
Each invocation of `get_sched_info` returns the list of jobs
that are either running or submitted to the batch system in
the interval between the 'current time' (as returned by the
`timer` function) and the time of the previous invocation.
"""
now = self.timer()
# add jobs that were submitted since last check
for job in self.next_jobs(self.time_last_checked, now):
self.jobs.append(job)
self.time_last_checked = now
# remove jobs that have terminated since
for i, job in enumerate(self.jobs):
if job.submitted_at + job.duration < now:
del self.jobs[i]
return self.jobs
def next_jobs(self, since, until):
assert until > since
while len(self.future_jobs) > 0:
job = self.future_jobs.pop()
if until >= job.submitted_at:
if job.submitted_at >= since:
yield job
else:
# job is in the past (relative to the Simulator's concept of time) so ignore it
pass
else:
# job is in the future: put it back for next round
self.future_jobs.append(job)
                return
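# Minimal usage sketch (added for illustration, not part of the original
# module).  It assumes a file named 'accounting.csv' exists with the
# JOBID / SUBMITTED_AT / RUN_DURATION columns described in the docstring.
if __name__ == '__main__':
    batchsys = JobsFromFile('accounting.csv')

    class _SimulatedClock(object):
        """Return a 'current time' that advances 60 seconds per call."""
        def __init__(self, start):
            self.now = start
        def __call__(self):
            self.now += 60
            return self.now

    # Replace the wall clock with a simulated one starting at the
    # submission time of the earliest job in the trace.
    batchsys.timer = _SimulatedClock(batchsys.start_time)
    for _ in range(10):
        jobs = batchsys.get_sched_info()
        print("%d job(s) pending or running" % len(jobs))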
| uzh/vm-mad | vmmad/batchsys/replay.py | Python | apache-2.0 | 5,584 |
#
# Copyright 2009-2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import collections
import os
import errno
import logging
import glob
import fnmatch
import re
import sd
import storage_exception as se
import fileUtils
import fileVolume
import misc
import outOfProcess as oop
from remoteFileHandler import Timeout
from persistentDict import PersistentDict, DictValidator
from vdsm import constants
from vdsm.utils import stripNewLines
import supervdsm
import mount
REMOTE_PATH = "REMOTE_PATH"
FILE_SD_MD_FIELDS = sd.SD_MD_FIELDS.copy()
# TBD: Do we really need this key?
FILE_SD_MD_FIELDS[REMOTE_PATH] = (str, str)
METADATA_PERMISSIONS = 0o660
# On file domains IDS and LEASES volumes don't need a fixed size (they are
# allocated as we use them)
FILE_SPECIAL_VOLUME_SIZES_MIB = sd.SPECIAL_VOLUME_SIZES_MIB.copy()
FILE_SPECIAL_VOLUME_SIZES_MIB.update({
sd.IDS: 0,
sd.LEASES: 0,
})
# Specific stat(2) block size as defined in the man page
ST_BYTES_PER_BLOCK = 512
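# Illustration (added): getVAllocSize() below multiplies st_blocks by this
# constant, so e.g. a sparse file with 8 allocated 512-byte blocks is
# reported as 4096 bytes, independently of its apparent (st_size) length.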
_MOUNTLIST_IGNORE = ('/' + sd.BLOCKSD_DIR, '/' + sd.GLUSTERSD_DIR)
getProcPool = oop.getGlobalProcPool
def validateDirAccess(dirPath):
try:
getProcPool().fileUtils.validateAccess(dirPath)
supervdsm.getProxy().validateAccess(
constants.QEMU_PROCESS_USER,
(constants.DISKIMAGE_GROUP, constants.METADATA_GROUP), dirPath,
(os.R_OK | os.X_OK))
except OSError as e:
if e.errno == errno.EACCES:
raise se.StorageServerAccessPermissionError(dirPath)
raise
return True
def validateFileSystemFeatures(sdUUID, mountDir):
try:
# Don't unlink this file, we don't have the cluster lock yet as it
# requires direct IO which is what we are trying to test for. This
        # means that unlinking the file might cause a race. We don't care
        # what the content of the file is, only that we managed to open it
        # with O_DIRECT.
testFilePath = os.path.join(mountDir, "__DIRECT_IO_TEST__")
oop.getProcessPool(sdUUID).directTouch(testFilePath)
except OSError as e:
if e.errno == errno.EINVAL:
log = logging.getLogger("Storage.fileSD")
            log.error("Underlying file system doesn't support "
                      "direct IO")
raise se.StorageDomainTargetUnsupported()
raise
def getDomUuidFromMetafilePath(metafile):
# Metafile path has pattern:
# /rhev/data-center/mnt/export-path/sdUUID/dom_md/metadata
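    # e.g. (hypothetical path):
    #   /rhev/data-center/mnt/server:_export/a28b3f2e-.../dom_md/metadata
    # -> the third-from-last component, 'a28b3f2e-...', is the domain UUID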
metaList = os.path.normpath(metafile).split('/')
return metaList[-3]
class FileMetadataRW(object):
"""
    FileMetadataRW implements a metadata extractor/committer over a simple
    file; it is wrapped by the FileSDMetadata factory defined below.
"""
def __init__(self, metafile):
# FileSDMetadata is kept in the file
self._metafile = metafile
self._sdUUID = getDomUuidFromMetafilePath(metafile)
self._oop = oop.getProcessPool(self._sdUUID)
def readlines(self):
try:
return stripNewLines(self._oop.directReadLines(self._metafile))
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
return []
def writelines(self, metadata):
for i, line in enumerate(metadata):
if isinstance(line, unicode):
line = line.encode('utf-8')
metadata[i] = line
metadata = [i + '\n' for i in metadata]
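        # Write the new content to a temporary file and rename(2) it over
        # the metadata file, so readers never observe a partially written
        # file; the write is retried once if the mount went stale (ESTALE)
        # in the meantime.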
tmpFilePath = self._metafile + ".new"
try:
self._oop.writeLines(tmpFilePath, metadata)
except IOError as e:
if e.errno != errno.ESTALE:
raise
self._oop.writeLines(tmpFilePath, metadata)
self._oop.os.rename(tmpFilePath, self._metafile)
FileSDMetadata = lambda metafile: DictValidator(
PersistentDict(FileMetadataRW(metafile)), FILE_SD_MD_FIELDS)
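# Note (added): FileSDMetadata composes three layers -- FileMetadataRW does
# the raw line-based file I/O, PersistentDict (presumably) layers
# transactional dictionary semantics on top of it, and DictValidator
# restricts keys and types to FILE_SD_MD_FIELDS.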
class FileStorageDomainManifest(sd.StorageDomainManifest):
def __init__(self, domainPath, metadata=None):
        # Using glob might look like the simplest thing to do, but it isn't:
        # if one of the mounts is stuck, it would cause the entire glob to
        # fail and you wouldn't be able to access any domain.
self.log.debug("Reading domain in path %s", domainPath)
self.mountpoint = os.path.dirname(domainPath)
self.remotePath = os.path.basename(self.mountpoint)
self.metafile = os.path.join(domainPath, sd.DOMAIN_META_DATA,
sd.METADATA)
sdUUID = os.path.basename(domainPath)
domaindir = os.path.join(self.mountpoint, sdUUID)
sd.StorageDomainManifest.__init__(self, sdUUID, domaindir)
if metadata is None:
metadata = FileSDMetadata(self.metafile)
self.replaceMetadata(metadata)
if not self.oop.fileUtils.pathExists(self.metafile):
raise se.StorageDomainMetadataNotFound(self.sdUUID, self.metafile)
def getReadDelay(self):
stats = misc.readspeed(self.metafile, 4096)
return stats['seconds']
def getVSize(self, imgUUID, volUUID):
""" Returns file volume size in bytes. """
volPath = os.path.join(self.mountpoint, self.sdUUID, 'images',
imgUUID, volUUID)
return self.oop.os.stat(volPath).st_size
def getLeasesFilePath(self):
return os.path.join(self.getMDPath(), sd.LEASES)
def getIdsFilePath(self):
return os.path.join(self.getMDPath(), sd.IDS)
class FileStorageDomain(sd.StorageDomain):
manifestClass = FileStorageDomainManifest
def __init__(self, domainPath):
manifest = self.manifestClass(domainPath)
# We perform validation here since filesystem features are relevant to
# construction of an actual Storage Domain. Direct users of
# FileStorageDomainManifest should call this explicitly if required.
validateFileSystemFeatures(manifest.sdUUID, manifest.mountpoint)
sd.StorageDomain.__init__(self, manifest)
self.imageGarbageCollector()
self._registerResourceNamespaces()
@property
def supportsSparseness(self):
"""
This property advertises whether the storage domain supports
sparseness or not.
"""
return True
def setMetadataPermissions(self):
procPool = oop.getProcessPool(self.sdUUID)
for metaFile in (sd.LEASES, sd.IDS, sd.INBOX, sd.OUTBOX):
try:
fpath = os.path.join(self.getMDPath(), metaFile)
procPool.os.chmod(fpath, METADATA_PERMISSIONS)
except Exception as e:
                raise se.StorageDomainMetadataCreationError(
                    "Changing permissions on metadata file '%s' failed: %s"
                    % (metaFile, e))
def prepareMailbox(self):
for mailboxFile in (sd.INBOX, sd.OUTBOX):
mailboxByteSize = (FILE_SPECIAL_VOLUME_SIZES_MIB[mailboxFile] *
constants.MEGAB)
mailboxFilePath = os.path.join(self.domaindir,
sd.DOMAIN_META_DATA, mailboxFile)
try:
mailboxStat = self.oop.os.stat(mailboxFilePath)
except OSError as e:
                if e.errno != errno.ENOENT:
raise
prevMailboxFileSize = None
else:
prevMailboxFileSize = mailboxStat.st_size
if (prevMailboxFileSize is None
or prevMailboxFileSize < mailboxByteSize):
                self.log.info('preparing storage domain %s mailbox file %s '
                              '(%s bytes)', self.sdUUID, mailboxFile,
                              mailboxByteSize)
self.oop.truncateFile(
mailboxFilePath, mailboxByteSize, METADATA_PERMISSIONS)
@classmethod
def _prepareMetadata(cls, domPath, sdUUID, domainName, domClass,
remotePath, storageType, version):
"""
Prepare all domain's special volumes and metadata
"""
# create domain metadata folder
metadataDir = os.path.join(domPath, sd.DOMAIN_META_DATA)
procPool = oop.getProcessPool(sdUUID)
procPool.fileUtils.createdir(metadataDir, 0o775)
for metaFile, metaSize in FILE_SPECIAL_VOLUME_SIZES_MIB.iteritems():
try:
procPool.truncateFile(
os.path.join(metadataDir, metaFile),
metaSize * constants.MEGAB, METADATA_PERMISSIONS)
except Exception as e:
raise se.StorageDomainMetadataCreationError(
"create meta file '%s' failed: %s" % (metaFile, str(e)))
metaFile = os.path.join(metadataDir, sd.METADATA)
md = FileSDMetadata(metaFile)
# initialize domain metadata content
# FIXME : This is 99% like the metadata in block SD
# Do we really need to keep the EXPORT_PATH?
# no one uses it
md.update({
sd.DMDK_VERSION: version,
sd.DMDK_SDUUID: sdUUID,
sd.DMDK_TYPE: storageType,
sd.DMDK_CLASS: domClass,
sd.DMDK_DESCRIPTION: domainName,
sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
sd.DMDK_POOLS: [],
sd.DMDK_LOCK_POLICY: '',
sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC:
sd.DEFAULT_LEASE_PARAMS[sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
sd.DMDK_LEASE_TIME_SEC: sd.DEFAULT_LEASE_PARAMS[
sd.DMDK_LEASE_TIME_SEC],
sd.DMDK_IO_OP_TIMEOUT_SEC:
sd.DEFAULT_LEASE_PARAMS[sd.DMDK_IO_OP_TIMEOUT_SEC],
sd.DMDK_LEASE_RETRIES:
sd.DEFAULT_LEASE_PARAMS[sd.DMDK_LEASE_RETRIES],
REMOTE_PATH: remotePath
})
def getFileList(self, pattern, caseSensitive):
"""
        Return a dict of the files in the ISO domain images directory that
        match `pattern`, mapping each file name to its size, ctime and
        access status.
"""
basedir = self.getIsoDomainImagesDir()
filesList = self.oop.simpleWalk(basedir)
if pattern != '*':
if caseSensitive:
filesList = fnmatch.filter(filesList, pattern)
else:
regex = fnmatch.translate(pattern)
reobj = re.compile(regex, re.IGNORECASE)
filesList = [f for f in filesList if reobj.match(f)]
filesDict = {}
filePrefixLen = len(basedir) + 1
for entry in filesList:
st = self.oop.os.stat(entry)
stats = {'size': str(st.st_size), 'ctime': str(st.st_ctime)}
try:
self.oop.fileUtils.validateQemuReadable(entry)
stats['status'] = 0 # Status OK
except OSError as e:
if e.errno != errno.EACCES:
raise
stats['status'] = se.StorageServerAccessPermissionError.code
fileName = entry[filePrefixLen:]
filesDict[fileName] = stats
return filesDict
def getVolumeClass(self):
"""
Return a type specific volume generator object
"""
return fileVolume.FileVolume
def getVAllocSize(self, imgUUID, volUUID):
""" Returns file volume allocated size in bytes. """
volPath = os.path.join(self.mountpoint, self.sdUUID, 'images',
imgUUID, volUUID)
stat = self.oop.os.stat(volPath)
return stat.st_blocks * ST_BYTES_PER_BLOCK
def getVolumeLease(self, imgUUID, volUUID):
"""
Return the volume lease (leasePath, leaseOffset)
"""
if self.hasVolumeLeases():
vol = self.produceVolume(imgUUID, volUUID)
volumePath = vol.getVolumePath()
leasePath = volumePath + fileVolume.LEASE_FILEEXT
return leasePath, fileVolume.LEASE_FILEOFFSET
return None, None
def validate(self):
"""
Validate that the storage domain is accessible.
"""
self.log.info("sdUUID=%s", self.sdUUID)
self.invalidateMetadata()
if not len(self.getMetadata()):
raise se.StorageDomainAccessError(self.sdUUID)
def validateMasterMount(self):
return self.oop.fileUtils.pathExists(self.getMasterDir())
def getAllImages(self):
"""
Fetch the set of the Image UUIDs in the SD.
"""
# Get Volumes of an image
pattern = os.path.join(self.storage_repository,
# ISO domains don't have images,
# we can assume single domain
self.getPools()[0],
self.sdUUID, sd.DOMAIN_IMAGES)
pattern = os.path.join(pattern, constants.UUID_GLOB_PATTERN)
files = self.oop.glob.glob(pattern)
images = set()
for i in files:
if self.oop.os.path.isdir(i):
images.add(os.path.basename(i))
return images
def getImagePath(self, imgUUID):
return os.path.join(self.domaindir, sd.DOMAIN_IMAGES, imgUUID)
def deleteImage(self, sdUUID, imgUUID, volsImgs):
currImgDir = self.getImagePath(imgUUID)
dirName, baseName = os.path.split(currImgDir)
toDelDir = os.path.join(dirName, sd.REMOVED_IMAGE_PREFIX + baseName)
self.log.debug("Renaming dir %s to %s", currImgDir, toDelDir)
try:
self.oop.os.rename(currImgDir, toDelDir)
except OSError as e:
self.log.error("image: %s can't be moved", currImgDir)
raise se.ImageDeleteError("%s %s" % (imgUUID, str(e)))
for volUUID in volsImgs:
volPath = os.path.join(toDelDir, volUUID)
try:
self.log.debug("Removing file: %s", volPath)
self.oop.os.remove(volPath)
metaFile = volPath + '.meta'
self.log.debug("Removing file: %s", metaFile)
self.oop.os.remove(metaFile)
leaseFile = volPath + '.lease'
self.log.debug("Removing file: %s", leaseFile)
self.oop.os.remove(leaseFile)
except OSError:
self.log.error("vol: %s can't be removed.",
volPath, exc_info=True)
self.log.debug("Removing directory: %s", toDelDir)
try:
self.oop.os.rmdir(toDelDir)
except OSError as e:
self.log.error("removed image dir: %s can't be removed", toDelDir)
raise se.ImageDeleteError("%s %s" % (imgUUID, str(e)))
def zeroImage(self, sdUUID, imgUUID, volsImgs):
        self.log.warning("image %s on a fileSD %s won't be zeroed.",
                         imgUUID, sdUUID)
self.deleteImage(sdUUID, imgUUID, volsImgs)
def deactivateImage(self, imgUUID):
"""
Deactivate all the volumes belonging to the image.
imgUUID: the image to be deactivated.
"""
pass
def getAllVolumes(self):
"""
Return dict {volUUID: ((imgUUIDs,), parentUUID)} of the domain.
        (imgUUIDs,) is a tuple of all the images that contain a certain
        volUUID. For non-template volumes, this tuple consists of a single
        image. For a template volume, it consists of all the images that
        are based on that template; in that case, the first imgUUID in the
        tuple is the self-image of the template.
        The parent of a non-template volume cannot be determined in a file
        domain without reading the metadata. However, in order to produce
        output compatible with block domains, we report the parent as None.
        Template volumes have no parent, and thus we report BLANK_UUID as
        their parentUUID.
"""
volMetaPattern = os.path.join(self.mountpoint, self.sdUUID,
sd.DOMAIN_IMAGES, "*", "*.meta")
volMetaPaths = self.oop.glob.glob(volMetaPattern)
# First create mapping from images to volumes
images = collections.defaultdict(list)
for metaPath in volMetaPaths:
head, tail = os.path.split(metaPath)
volUUID, volExt = os.path.splitext(tail)
imgUUID = os.path.basename(head)
images[imgUUID].append(volUUID)
# Using images to volumes mapping, we can create volumes to images
# mapping, detecting template volumes and template images, based on
# these rules:
#
# Template volumes are hard linked in every image directory
# which is derived from that template, therefore:
#
# 1. A template volume which is in use will appear at least twice
# (in the template image dir and in the derived image dir)
#
# 2. Any volume which appears more than once in the dir tree is
# by definition a template volume.
#
# 3. Any image which has more than 1 volume is not a template
# image.
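        # Worked example (added; hypothetical UUIDs): if template volume T
        # is hard linked into image dir A (the template's own image) and
        # image dir B (a VM based on it), and B also holds a leaf volume L,
        # the result is {T: ((A, B), BLANK_UUID), L: ((B,), None)}.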
volumes = {}
for imgUUID, volUUIDs in images.iteritems():
for volUUID in volUUIDs:
if volUUID in volumes:
# This must be a template volume (rule 2)
volumes[volUUID]['parent'] = sd.BLANK_UUID
if len(volUUIDs) > 1:
# This image is not a template (rule 3)
volumes[volUUID]['imgs'].append(imgUUID)
else:
# This image is a template (rule 3)
volumes[volUUID]['imgs'].insert(0, imgUUID)
else:
volumes[volUUID] = {'imgs': [imgUUID], 'parent': None}
return dict((k, sd.ImgsPar(tuple(v['imgs']), v['parent']))
for k, v in volumes.iteritems())
def linkBCImage(self, imgPath, imgUUID):
# Nothing to do here other than returning the path
return self.getLinkBCImagePath(imgUUID)
def createImageLinks(self, srcImgPath, imgUUID):
"""
        The qcow chain is built by reading each qcow header and following
        the path to the parent. When creating a qcow layer, we pass a
        relative path, which allows us to build a directory with links to
        all volumes in the chain anywhere we want. This method creates a
        directory named after the image UUID under /var/run/vdsm and
        creates symlinks to all the volumes in the chain.
        srcImgPath: directory where the image volumes are.
"""
sdRunDir = os.path.join(constants.P_VDSM_STORAGE, self.sdUUID)
fileUtils.createdir(sdRunDir)
imgRunDir = os.path.join(sdRunDir, imgUUID)
self.log.debug("Creating symlink from %s to %s", srcImgPath, imgRunDir)
try:
os.symlink(srcImgPath, imgRunDir)
except OSError as e:
if e.errno == errno.EEXIST:
self.log.debug("img run dir already exists: %s", imgRunDir)
else:
self.log.error("Failed to create img run dir: %s", imgRunDir)
raise
return imgRunDir
def activateVolumes(self, imgUUID, volUUIDs):
"""
Activate all the volumes listed in volUUIDs
"""
        # Volume leaves created in 2.2 did not have the group-writable bit
        # set. We have to set it here if we want qemu-kvm to write to old
        # NFS volumes. In theory it would be enough to fix the permissions
        # of the leaf only, but to avoid introducing an additional
        # requirement (ordered volUUIDs) we fix them all.
imgDir = os.path.join(self.mountpoint, self.sdUUID, sd.DOMAIN_IMAGES,
imgUUID)
volPaths = tuple(os.path.join(imgDir, v) for v in volUUIDs)
for volPath in volPaths:
self.log.debug("Fixing permissions on %s", volPath)
self.oop.fileUtils.copyUserModeToGroup(volPath)
return self.createImageLinks(imgDir, imgUUID)
@classmethod
def format(cls, sdUUID):
"""
Format detached storage domain.
This removes all data from the storage domain.
"""
cls.log.info("Formatting domain %s", sdUUID)
try:
domaindir = cls.findDomainPath(sdUUID)
except (se.StorageDomainDoesNotExist):
pass
else:
try:
oop.getProcessPool(sdUUID).fileUtils.cleanupdir(
domaindir, ignoreErrors=False)
except RuntimeError as e:
raise se.MiscDirCleanupFailure(str(e))
return True
def getRemotePath(self):
return self._manifest.remotePath
def getRealPath(self):
"""
Return the actual path to the underlying storage.
This function needs to be overloaded by the child classes.
"""
return ""
def getInfo(self):
"""
Get storage domain info
"""
# self.log.info("sdUUID=%s", self.sdUUID)
# First call parent getInfo() - it fills in all the common details
info = sd.StorageDomain.getInfo(self)
# Now add fileSD specific data
info['remotePath'] = self.getRealPath()
return info
def getStats(self):
"""
Get storage domain statistics
"""
# self.log.info("sdUUID=%s", self.sdUUID)
stats = {'disktotal': '',
'diskfree': '',
'mdavalid': True,
'mdathreshold': True,
'mdasize': 0,
'mdafree': 0}
try:
st = self.oop.os.statvfs(self.domaindir)
stats['disktotal'] = str(st.f_frsize * st.f_blocks)
stats['diskfree'] = str(st.f_frsize * st.f_bavail)
except OSError as e:
self.log.info("sdUUID=%s %s", self.sdUUID, str(e))
if e.errno == errno.ESTALE:
raise se.FileStorageDomainStaleNFSHandle
raise se.StorageDomainAccessError(self.sdUUID)
return stats
def mountMaster(self):
"""
Mount the master metadata file system. Should be called only by SPM.
"""
masterdir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
if not self.oop.fileUtils.pathExists(masterdir):
self.log.debug("Creating master directory: %s", masterdir)
self.oop.os.mkdir(masterdir, 0o755)
def unmountMaster(self):
"""
Unmount the master metadata file system. Should be called only by SPM.
"""
pass
def selftest(self):
"""
Run internal self test
"""
try:
self.oop.os.statvfs(self.domaindir)
except OSError as e:
if e.errno == errno.ESTALE:
# In case it is "Stale NFS handle" we are taking preventive
# measures and unmounting this NFS resource. Chances are
# that is the most intelligent thing we can do in this
# situation anyway.
self.log.debug("Unmounting stale file system %s",
self.mountpoint)
mount.getMountFromTarget(self.mountpoint).umount()
raise se.FileStorageDomainStaleNFSHandle()
raise
def imageGarbageCollector(self):
"""
Image Garbage Collector
        Remove the remnants of removed images; they can sometimes be left
        behind (mostly on NFS) due to lazy file removal.
"""
removedPattern = os.path.join(self.domaindir, sd.DOMAIN_IMAGES,
sd.REMOVED_IMAGE_PREFIX + '*')
removedImages = self.oop.glob.glob(removedPattern)
        self.log.debug("Removing remnants of deleted images %s",
                       removedImages)
for imageDir in removedImages:
self.oop.fileUtils.cleanupdir(imageDir)
def templateRelink(self, imgUUID, volUUID):
"""
Relink all hardlinks of the template 'volUUID' in all VMs based on it.
This function assumes that template image is used by other volumes.
"""
allVols = self.getAllVolumes()
tImgs = allVols[volUUID].imgs
if len(tImgs) < 2:
self.log.debug("Volume %s is an unused template or a regular "
"volume. Found in images: %s allVols: %s", volUUID,
tImgs, allVols)
return
templateImage = tImgs[0]
relinkImgs = tuple(tImgs[1:])
repoPath = self._getRepoPath()
basePath = os.path.join(repoPath, self.sdUUID, sd.DOMAIN_IMAGES)
volFiles = [volUUID, volUUID + fileVolume.META_FILEEXT]
if self.hasVolumeLeases():
volFiles.append(volUUID + fileVolume.LEASE_FILEEXT)
for rImg in relinkImgs:
# This function assumes that all relevant images and template
# namespaces are locked.
for volFile in volFiles:
tLink = os.path.join(basePath, rImg, volFile)
tVol = os.path.join(basePath, templateImage, volFile)
self.log.debug("Force linking %s to %s", tVol, tLink)
self.oop.utils.forceLink(tVol, tLink)
def _getMountsList(pattern="*"):
fileDomPattern = os.path.join(
sd.StorageDomain.storage_repository, sd.DOMAIN_MNT_POINT,
pattern)
# For pattern='*' in mixed pool (block and file domains)
# glob will return sd.BLOCKSD_DIR and sd.GLUSTERSD_DIR among
# real mount points. Remove these directories from glob results.
mntList = [mnt for mnt in glob.iglob(fileDomPattern)
if not mnt.endswith(_MOUNTLIST_IGNORE)]
glusterDomPattern = os.path.join(
sd.StorageDomain.storage_repository, sd.DOMAIN_MNT_POINT,
sd.GLUSTERSD_DIR, pattern)
mntList.extend(glob.glob(glusterDomPattern))
return mntList
def scanDomains(pattern="*"):
log = logging.getLogger("Storage.scanDomains")
mntList = _getMountsList(pattern)
def collectMetaFiles(possibleDomain):
try:
metaFiles = oop.getProcessPool(possibleDomain).glob.glob(
os.path.join(possibleDomain,
constants.UUID_GLOB_PATTERN,
sd.DOMAIN_META_DATA))
for metaFile in metaFiles:
if (os.path.basename(os.path.dirname(metaFile)) !=
sd.MASTER_FS_DIR):
sdUUID = os.path.basename(os.path.dirname(metaFile))
return (sdUUID, os.path.dirname(metaFile))
except Timeout:
log.warn("Metadata collection for domain path %s timedout",
possibleDomain, exc_info=True)
except Exception:
log.warn("Could not collect metadata file for domain path %s",
possibleDomain, exc_info=True)
    # Run collectMetaFiles in external processes.
    # The number of processes that can be started at the same time is the
    # number of stuck domains we are willing to handle + 1.
    # We use 30% of the available slots.
    # TODO: calculate this properly; for now we use the same value as the
    # maximum number of processes per domain.
for res in misc.itmap(collectMetaFiles, mntList, oop.HELPERS_PER_DOMAIN):
if res is None:
continue
yield res
def getStorageDomainsList():
return [item[0] for item in scanDomains()]
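# Illustration (added; hypothetical values): scanDomains() yields pairs of
# (sdUUID, domain directory), e.g.
#     ('5b7a9c1e-...', '/rhev/data-center/mnt/server:_export/5b7a9c1e-...')
# and getStorageDomainsList() keeps only the UUIDs.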
| kvaps/vdsm | vdsm/storage/fileSD.py | Python | gpl-2.0 | 28,044 |