import unittest
import uuid
from mock import patch, Mock
import omnijson as json
import requests
from brockman.client import FocusLabClient
from brockman.exceptions import (BadAPIKey, BadRequest, ResourceNotFound,
ServerError, UnknownError)
class ClientTests(unittest.TestCase):
def get_client(self, **kwargs):
client_kwargs = {
'api_key': 'testing-key'
}
client_kwargs.update(kwargs)
return FocusLabClient(**client_kwargs)
def test_simple_init(self):
key = 'testing-key'
endpoint = 'https://api.focuslab.io/api/v1/'
client = FocusLabClient(api_key=key)
self.assertEquals(key, client.api_key)
self.assertEquals(endpoint, client.endpoint)
def test_full_init(self):
key = 'testing-key'
endpoint = 'https://api.focuslab-dev.io/api/v1/'
client = FocusLabClient(api_key=key, endpoint=endpoint)
self.assertEquals(key, client.api_key)
self.assertEquals(endpoint, client.endpoint)
def test_get_simple_url(self):
client = self.get_client()
resource = 'trigger'
result = client.get_url(resource)
expected = 'https://api.focuslab.io/api/v1/trigger/'
self.assertEquals(expected, result)
def test_get_item_url(self):
client = self.get_client()
resource = 'trigger/32'
result = client.get_url(resource)
expected = 'https://api.focuslab.io/api/v1/trigger/32/'
self.assertEquals(expected, result)
def test_get_basic_data(self):
client = self.get_client()
action = 'viewed'
obj = 'blog post'
actor_id = uuid.uuid4()
result = client.get_trigger_data(action=action, obj=obj, actor_id=actor_id)
expected = {
'action': unicode(action),
'object': unicode(obj),
'actor_id': unicode(actor_id)
}
self.assertEquals(expected, result)
def test_get_full_data(self):
client = self.get_client()
action = 'viewed'
obj = 'blog post'
actor_id = uuid.uuid4()
identities = {
'email': ['test1@test.com', 'test2@test.com'],
'user_id': 42,
}
attributes = {
'tags': ['high-risk', 'big-spender'],
'plan': 'basic',
}
variables = {
'tags': ['sales', 'promo'],
'author': 'bob',
}
result = client.get_trigger_data(
action=action,
obj=obj,
actor_id=actor_id,
identities=identities,
attributes=attributes,
variables=variables,
)
expected = {
'action': unicode(action),
'object': unicode(obj),
'actor_id': unicode(actor_id),
'captured_identities': identities,
'captured_attributes': attributes,
'variables': variables,
}
self.assertEquals(expected, result)
def test_get_headers(self):
client = self.get_client()
result = client.get_headers()
expected = {
'Content-Type': 'application/json',
'X-FL-API-KEY': str(client.api_key),
}
self.assertEquals(expected, result)
def record_trigger(self, client, post_mock, response_status=201, check_post_call=True):
action = 'viewed'
obj = 'blog post'
actor_id = uuid.uuid4()
identities = {
'email': ['test1@test.com', 'test2@test.com'],
'user_id': 42,
}
attributes = {
'tags': ['high-risk', 'big-spender'],
'plan': 'basic',
}
variables = {
'tags': ['sales', 'promo'],
}
response_mock = Mock()
response_mock.status_code = response_status
post_mock.return_value = response_mock
client.record_trigger(
actor_id=actor_id,
action=action,
obj=obj,
identities=identities,
attributes=attributes,
variables=variables,
)
if check_post_call:
expected_post_data = json.dumps({
'action': action,
'object': obj,
'actor_id': str(actor_id),
'captured_identities': identities,
'captured_attributes': attributes,
'variables': variables,
})
expected_headers = {
'Content-Type': 'application/json',
'X-FL-API-KEY': str(client.api_key),
}
post_mock.assert_called_with(
'https://api.focuslab.io/api/v1/triggers/',
data=expected_post_data,
headers=expected_headers,
)
@patch.object(requests, 'post')
def test_record_trigger(self, post_mock):
client = self.get_client()
self.record_trigger(client, post_mock)
@patch.object(requests, 'post')
def test_record_trigger_401(self, post_mock):
client = self.get_client()
with self.assertRaises(BadAPIKey):
self.record_trigger(
client,
post_mock,
response_status=401,
check_post_call=False,
)
@patch.object(requests, 'post')
def test_record_trigger_403(self, post_mock):
client = self.get_client()
with self.assertRaises(BadAPIKey):
self.record_trigger(
client,
post_mock,
response_status=403,
check_post_call=False,
)
@patch.object(requests, 'post')
def test_record_trigger_400(self, post_mock):
client = self.get_client()
with self.assertRaises(BadRequest):
self.record_trigger(
client,
post_mock,
response_status=400,
check_post_call=False,
)
@patch.object(requests, 'post')
def test_record_trigger_404(self, post_mock):
client = self.get_client()
with self.assertRaises(ResourceNotFound):
self.record_trigger(
client,
post_mock,
response_status=404,
check_post_call=False,
)
@patch.object(requests, 'post')
def test_record_trigger_500(self, post_mock):
client = self.get_client()
with self.assertRaises(ServerError):
self.record_trigger(
client,
post_mock,
response_status=500,
check_post_call=False,
)
@patch.object(requests, 'post')
def test_record_trigger_unknown_error(self, post_mock):
client = self.get_client()
with self.assertRaises(UnknownError):
self.record_trigger(
client,
post_mock,
response_status=417,
check_post_call=False,
)
| FocusLab/brockman | tests/client.py | Python | bsd-3-clause | 7,040 |
from bitboard import *
from ldata import *
from pychess.Utils.const import *
#
# Caveat: Many functions in this module have very similar code. If you fix a
# bug, or write a performance enhancement, please update all functions.
# Apologies for the inconvenience.
#
def isAttacked (board, cord, color):
""" To determine if cord is attacked by any pieces from color. """
pboards = board.boards[color]
# Knights
if pboards[KNIGHT] & moveArray[KNIGHT][cord]:
return True
rayto = fromToRay[cord]
blocker = board.blocker
# Bishops & Queens
bisque = (pboards[BISHOP] | pboards[QUEEN]) & moveArray[BISHOP][cord]
others = ~bisque & blocker
for t in iterBits(bisque):
# If there is a path and no other piece stands in our way
ray = rayto[t]
if ray and not ray & others:
return True
# Rooks & Queens
rooque = (pboards[ROOK] | pboards[QUEEN]) & moveArray[ROOK][cord]
others = ~rooque & blocker
for t in iterBits(rooque):
# If there is a path and no other piece stands in our way
ray = rayto[t]
if ray and not ray & others:
return True
# Pawns
# Would a pawn of the opposite color, standing at our king's cord, be able
# to attack any of our pawns?
ptype = color == WHITE and BPAWN or PAWN
if pboards[PAWN] & moveArray[ptype][cord]:
return True
# King
if pboards[KING] & moveArray[KING][cord]:
return True
return False
def getAttacks (board, cord, color):
""" To create a bitboard of pieces of color, which attacks cord """
pieces = board.boards[color]
# Knights
bits = pieces[KNIGHT] & moveArray[KNIGHT][cord]
# Kings
bits |= pieces[KING] & moveArray[KING][cord]
# Pawns
bits |= pieces[PAWN] & moveArray[color == WHITE and BPAWN or PAWN][cord]
rayto = fromToRay[cord]
blocker = board.blocker
# Bishops and Queens
bisque = (pieces[BISHOP] | pieces[QUEEN]) & moveArray[BISHOP][cord]
for c in iterBits(bisque):
ray = rayto[c]
if ray and not clearBit(ray & blocker, c):
bits |= bitPosArray[c]
# Rooks and queens
rooque = (pieces[ROOK] | pieces[QUEEN]) & moveArray[ROOK][cord]
for c in iterBits(rooque):
ray = rayto[c]
if ray and not clearBit(ray & blocker, c):
bits |= bitPosArray[c]
return bits
def getPieceMoves (board, cord, color, piece):
""" To create a bitboard of specified pieces of color, which can move to
cord """
color = board.color
pieces = board.boards[color]
if piece == KNIGHT or piece == KING:
return pieces[piece] & moveArray[piece][cord]
rayto = fromToRay[cord]
blocker = board.blocker
if sliders[piece]:
cords = pieces[piece] & moveArray[piece][cord]
bits = 0
for c in iterBits(cords):
ray = rayto[c]
if ray and not clearBit(ray & blocker, c):
bits |= bitPosArray[c]
return bits
if piece == PAWN:
pawns = pieces[PAWN]
bits = pawns & moveArray[color == WHITE and BPAWN or PAWN][cord]
bits |= pawns & bitPosArray[cord + (color == WHITE and -8 or 8)]
if not blocker & bitPosArray[cord + (color == WHITE and -8 or 8)]:
bits |= pawns & rankBits[color == WHITE and 1 or 6]
return bits
def pinnedOnKing (board, cord, color):
# Determine if the piece on cord is pinned against its color's king.
# In chess, a pin is a situation in which a piece is forced to stay put
# because moving it would expose a more valuable piece behind it to
# capture.
# Caveat: pinnedOnKing should only be called by genCheckEvasions().
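# Illustrative position (an assumption for this sketch, not taken from the
# code): with the white king on e1, a white knight on e2 and a black rook
# on e8, the knight is pinned -- moving it off the e-file would expose the
# king to the rook along their shared ray.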
kingCord = board.kings[color]
dir = directions[kingCord][cord]
if dir == -1: return False
opcolor = 1 - color
blocker = board.blocker
# Path from piece to king is blocked, so no pin
if clearBit(fromToRay[kingCord][cord], cord) & blocker:
return False
b = (rays[kingCord][dir] ^ fromToRay[kingCord][cord]) & blocker
if not b: return False
cord1 = cord > kingCord and firstBit (b) or lastBit (b)
# If diagonal
if dir <= 3 and bitPosArray[cord1] & \
(board.boards[opcolor][QUEEN] | board.boards[opcolor][BISHOP]):
return True
# Rank / file
if dir >= 4 and bitPosArray[cord1] & \
(board.boards[opcolor][QUEEN] | board.boards[opcolor][ROOK]):
return True
return False
def staticExchangeEvaluate (board, moveOrTcord, color=None):
""" The GnuChess Static Exchange Evaluator (or SEE for short).
First determine the target square. Create a bitboard of all squares
attacking the target square for both sides. Using these 2 bitboards,
we take turn making captures from smallest piece to largest piece.
When a sliding piece makes a capture, we check behind it to see if
another attacker piece has been exposed. If so, add this to the bitboard
as well. When performing the "captures", we stop if one side is ahead
and doesn't need to capture, a form of pseudo-minimaxing. """
#
# Notice: If you use the tcord version, the color is the color attacked, and
# the color to which the score is relative.
#
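# Illustrative walk-through (the centipawn values pawn=100, knight=325,
# rook=500 are assumptions for this sketch): a pawn captures a knight, so
# swaplist starts as [325]; a rook recaptures the pawn, appending
# 325 - 100 = 225; a bishop recaptures the rook, appending 225 + 500 = 725.
# The mini-max pass at the end folds [325, 225, 725] back to 325: the rook
# recapture would lose material, so the exchange simply wins a knight.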
swaplist = [0]
if color == None:
move = moveOrTcord
flag = move >> 12
fcord = (move >> 6) & 63
tcord = move & 63
color = board.friends[BLACK] & bitPosArray[fcord] and BLACK or WHITE
opcolor = 1-color
boards = board.boards[color]
opboards = board.boards[opcolor]
ours = getAttacks (board, tcord, color)
ours = clearBit (ours, fcord)
theirs = getAttacks (board, tcord, opcolor)
if xray[board.arBoard[fcord]]:
ours, theirs = addXrayPiece (board, tcord, fcord, color, ours, theirs)
if flag in PROMOTIONS:
swaplist = [PIECE_VALUES[flag-3] - PAWN_VALUE]
lastval = -PIECE_VALUES[flag-3]
else:
if flag == ENPASSANT:
swaplist = [PAWN_VALUE]
else: swaplist = [PIECE_VALUES[board.arBoard[tcord]]]
lastval = -PIECE_VALUES[board.arBoard[fcord]]
else:
tcord = moveOrTcord
opcolor = 1-color
boards = board.boards[color]
opboards = board.boards[opcolor]
ours = getAttacks (board, tcord, color)
theirs = getAttacks (board, tcord, opcolor)
lastval = -PIECE_VALUES[board.arBoard[tcord]]
while theirs:
for piece in range(PAWN, KING+1):
r = theirs & opboards[piece]
if r:
cord = firstBit(r)
theirs = clearBit(theirs, cord)
if xray[piece]:
ours, theirs = addXrayPiece (board, tcord, cord,
color, ours, theirs)
swaplist.append(swaplist[-1] + lastval)
lastval = PIECE_VALUES[piece]
break
if not ours:
break
for piece in range(PAWN, KING+1):
r = ours & boards[piece]
if r:
cord = firstBit(r)
ours = clearBit(ours, cord)
if xray[piece]:
ours, theirs = addXrayPiece (board, tcord, cord,
color, ours, theirs)
swaplist.append(swaplist[-1] + lastval)
lastval = -PIECE_VALUES[piece]
break
# At this stage, we have the swap scores in a list. We just need to
# mini-max the scores from the bottom up to the top of the list.
for n in xrange(len(swaplist)-1, 0, -1):
if n & 1:
if swaplist[n] <= swaplist[n-1]:
swaplist[n-1] = swaplist[n]
else:
if swaplist[n] >= swaplist[n-1]:
swaplist[n-1] = swaplist[n]
return swaplist[0]
xray = (False, True, False, True, True, True, False)
def addXrayPiece (board, tcord, fcord, color, ours, theirs):
""" This is used by swapOff.
The purpose of this routine is to find a piece which attack through
another piece (e.g. two rooks, Q+B, B+P, etc.) Color is the side attacking
the square where the swapping is to be done. """
dir = directions[tcord][fcord]
a = rays[fcord][dir] & board.blocker
if not a: return ours, theirs
if tcord < fcord:
ncord = firstBit(a)
else: ncord = lastBit(a)
piece = board.arBoard[ncord]
if piece == QUEEN or (piece == ROOK and dir > 3) or \
(piece == BISHOP and dir < 4):
bit = bitPosArray[ncord]
if bit & board.friends[color]:
ours |= bit
else:
theirs |= bit
return ours, theirs
def defends (board, fcord, tcord):
""" Could fcord attack tcord if the piece on tcord wasn't on the team of
fcord?
Doesn't test check. """
# Work on a board copy, as we are going to change some stuff
board = board.clone()
if board.friends[WHITE] & bitPosArray[fcord]:
color = WHITE
else: color = BLACK
opcolor = 1-color
boards = board.boards[color]
opboards = board.boards[opcolor]
# To see if we now defend the piece, we have to "give" it to the other team
piece = board.arBoard[tcord]
#backup = boards[piece]
#opbackup = opboards[piece]
boards[piece] &= notBitPosArray[tcord]
opboards[piece] |= bitPosArray[tcord]
board.friends[color] &= notBitPosArray[tcord]
board.friends[opcolor] |= bitPosArray[tcord]
# Can we "attack" the piece now?
backupColor = board.color
board.setColor(color)
from lmove import newMove
from validator import validateMove
islegal = validateMove (board, newMove(fcord, tcord))
board.setColor(backupColor)
# We don't need to set the board back, as we work on a copy
#boards[piece] = backup
#opboards[piece] = opbackup
#board.friends[color] |= bitPosArray[tcord]
#board.friends[opcolor] &= notBitPosArray[tcord]
return islegal
| jskurka/PyChess-Learning-Module | lib/pychess/Utils/lutils/attack.py | Python | gpl-3.0 | 10,373 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteEnvironment
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Environments_DeleteEnvironment_async]
from google.cloud import dialogflow_v2
async def sample_delete_environment():
# Create a client
client = dialogflow_v2.EnvironmentsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteEnvironmentRequest(
name="name_value",
)
# Make the request
await client.delete_environment(request=request)
# [END dialogflow_v2_generated_Environments_DeleteEnvironment_async]
| googleapis/python-dialogflow | samples/generated_samples/dialogflow_v2_generated_environments_delete_environment_async.py | Python | apache-2.0 | 1,441 |
# -*- coding: utf-8 -*-
from robot.libraries import BuiltIn
from keywordgroup import KeywordGroup
BUILTIN = BuiltIn.BuiltIn()
class _RunOnFailureKeywords(KeywordGroup):
def __init__(self):
self._run_on_failure_keyword = None
self._running_on_failure_routine = False
# Public
def register_keyword_to_run_on_failure(self, keyword):
"""Sets the keyword to execute when a AppiumLibrary keyword fails.
`keyword_name` is the name of a keyword (from any available
libraries) that will be executed if an AppiumLibrary keyword fails.
It is not possible to use a keyword that requires arguments.
Using the value "Nothing" will disable this feature altogether.
The initial keyword to use is set in `importing`, and the
keyword that is used by default is `Capture Page Screenshot`.
Taking a screenshot when something failed is a very useful
feature, but notice that it can slow down the execution.
This keyword returns the name of the previously registered
failure keyword. It can be used to restore the original
value later.
Example:
| Register Keyword To Run On Failure | Log Source | # Run `Log Source` on failure. |
| ${previous kw}= | Register Keyword To Run On Failure | Nothing | # Disables run-on-failure functionality and stores the previous kw name in a variable. |
| Register Keyword To Run On Failure | ${previous kw} | # Restore to the previous keyword. |
This run-on-failure functionality only works when running tests on Python/Jython 2.4
or newer and it does not work on IronPython at all.
"""
old_keyword = self._run_on_failure_keyword
old_keyword_text = old_keyword if old_keyword is not None else "No keyword"
new_keyword = keyword if keyword.strip().lower() != "nothing" else None
new_keyword_text = new_keyword if new_keyword is not None else "No keyword"
self._run_on_failure_keyword = new_keyword
self._info('%s will be run on failure.' % new_keyword_text)
return old_keyword_text
# Private
def _run_on_failure(self):
if self._run_on_failure_keyword is None:
return
if self._running_on_failure_routine:
return
self._running_on_failure_routine = True
try:
BUILTIN.run_keyword(self._run_on_failure_keyword)
except Exception, err:
self._run_on_failure_error(err)
finally:
self._running_on_failure_routine = False
def _run_on_failure_error(self, err):
err = "Keyword '%s' could not be run on failure: %s" % (self._run_on_failure_keyword, err)
if hasattr(self, '_warn'):
self._warn(err)
return
raise Exception(err)
| michaelmendoza42/robotframework-appiumlibrary | src/AppiumLibrary/keywords/_runonfailure.py | Python | apache-2.0 | 2,924 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2021: SCS Software
from collections import OrderedDict
from io_scs_tools.exp.pim.piece_stream import Stream
from io_scs_tools.internals.structure import SectionData as _SectionData
from io_scs_tools.utils.printout import lprint
class Piece:
__index = 0
__vertex_count = 0
__triangle_count = 0
__stream_count = 0
__material = None # save whole material reference to get index out of it when packing
__streams = OrderedDict() # dict of Stream class
__triangles = [] # list of Triangle class
__vertices_hash = {}
__global_piece_count = 0
__global_vertex_count = 0
__global_triangle_count = 0
@staticmethod
def reset_counters():
Piece.__global_piece_count = 0
Piece.__global_triangle_count = 0
Piece.__global_vertex_count = 0
@staticmethod
def get_global_piece_count():
return Piece.__global_piece_count
@staticmethod
def get_global_vertex_count():
return Piece.__global_vertex_count
@staticmethod
def get_global_triangle_count():
return Piece.__global_triangle_count
@staticmethod
def __calc_vertex_hash(index, uvs, rgba, tangent):
"""Calculates vertex hash from original vertex index, uvs components and vertex color.
:param index: original index from Blender mesh
:type index: int
:param uvs: list of uvs used on vertex (each uv must be in SCS coordinates)
:type uvs: list of (tuple | mathutils.Vector)
:param rgba: rgba representation of vertex color in SCS values
:type rgba: tuple | mathutils.Color
:param tangent: vertex tangent in SCS coordinates or none, if piece doesn't have tangents
:type tangent: tuple | None
:return: calculated vertex hash
:rtype: str
"""
vertex_hash = str(index)
for uv in uvs:
vertex_hash = "%s%.4f%.4f" % (vertex_hash, uv[0], uv[1])
vertex_hash = "%s%.4f%.4f%.4f%.4f" % (vertex_hash, rgba[0], rgba[1], rgba[2], rgba[3])
if tangent:
vertex_hash = "%s%.4f%.4f%.4f%.4f" % (vertex_hash, tangent[0], tangent[1], tangent[2], tangent[3])
return vertex_hash
def __init__(self, index, material):
"""Constructs empty piece.
NOTE: an empty piece will contain four mandatory streams which will be empty: POSITION, NORMAL, UV0, RGBA
:param index:
:type index:
:param material: material that should be used on for this piece
:type material: io_scs_tools.exp.pim.material.Material
"""
self.__vertex_count = 0
self.__triangle_count = 0
self.__stream_count = 0
self.__streams = OrderedDict()
self.__triangles = []
self.__vertices_hash = {}
self.__index = index
self.__material = material
# CONSTRUCT ALL MANDATORY STREAMS
stream = Stream(Stream.Types.POSITION, -1)
self.__streams[Stream.Types.POSITION] = stream
stream = Stream(Stream.Types.NORMAL, -1)
self.__streams[Stream.Types.NORMAL] = stream
Piece.__global_piece_count += 1
def add_triangle(self, triangle):
"""Adds new triangle to piece
NOTE: if the length of the given triangle iterable is different from 3 it will be refused!
:param triangle: tuple of 3 integers representing vertex indices
:type triangle: tuple
:return: True if added; False otherwise
:rtype:
"""
# expensive safety checks not needed for release but keep them for debug purposes
# if len(triangle) != 3:
# return False
# check the integrity of the indices
for vertex in triangle:
if vertex < 0 or vertex >= self.__vertex_count:
return False
self.__triangles.append(tuple(triangle))
Piece.__global_triangle_count += 1
return True
def add_vertex(self, vert_index, position, normal, uvs, uvs_aliases, rgba, tangent):
"""Adds new vertex to position and normal streams
:param vert_index: original vertex index from Blender mesh
:type vert_index: int | str
:param position: vector or tuple of vertex position in SCS coordinates
:type position: tuple | mathutils.Vector
:param normal: vector or tuple of vertex normal in SCS coordinates
:type normal: tuple | mathutils.Vector
:param uvs: list of uvs used on vertex (each uv must be in SCS coordinates)
:type uvs: list of (tuple | mathutils.Vector)
:param uvs_aliases: list of uv aliases names per uv layer
:type uvs_aliases: list[list[str]]
:param rgba: rgba representation of vertex color in SCS values
:type rgba: tuple | mathutils.Color
:param tangent: tuple representation of vertex tangent in SCS values or None if piece doesn't have tangents
:type tangent: tuple | None
:return: vertex index inside piece streams ( use it for adding triangles )
:rtype: int
"""
vertex_hash = self.__calc_vertex_hash(vert_index, uvs, rgba, tangent)
# save vertex if a vertex with the same properties doesn't exist yet in streams
if vertex_hash not in self.__vertices_hash:
stream = self.__streams[Stream.Types.POSITION]
stream.add_entry(position)
stream = self.__streams[Stream.Types.NORMAL]
stream.add_entry(normal)
for i, uv in enumerate(uvs):
uv_type = Stream.Types.UV + str(i)
# create more uv streams on demand
if uv_type not in self.__streams:
self.__streams[uv_type] = Stream(Stream.Types.UV, i)
stream = self.__streams[uv_type]
""":type: Stream"""
stream.add_entry(uv)
for alias in uvs_aliases[i]:
stream.add_alias(alias)
if tangent:
# create tangent stream on demand
if Stream.Types.TANGENT not in self.__streams:
self.__streams[Stream.Types.TANGENT] = Stream(Stream.Types.TANGENT, -1)
stream = self.__streams[Stream.Types.TANGENT]
stream.add_entry(tangent)
if Stream.Types.RGBA not in self.__streams:
self.__streams[Stream.Types.RGBA] = Stream(Stream.Types.RGBA, -1)
stream = self.__streams[Stream.Types.RGBA]
stream.add_entry(rgba)
vert_index_internal = stream.get_size() - 1 # streams have to be aligned, so we can take the last one for the index
self.__vertices_hash[vertex_hash] = vert_index_internal
self.__vertex_count = vert_index_internal + 1
Piece.__global_vertex_count += 1
return self.__vertices_hash[vertex_hash]
def get_index(self):
return self.__index
def get_vertex_count(self):
return self.__streams[Stream.Types.POSITION].get_size()
def get_as_section(self):
"""Gets piece represented with SectionData structure class.
:return: packed piece as section data
:rtype: io_scs_tools.internals.structure.SectionData
"""
# UPDATE COUNTERS
self.__vertex_count = self.__streams[Stream.Types.POSITION].get_size()
self.__triangle_count = len(self.__triangles)
self.__stream_count = len(self.__streams)
section = _SectionData("Piece")
section.props.append(("Index", self.__index))
if not self.__material or self.__material.get_index() == -1:
lprint("W Piece with index %s doesn't have data about material, expect errors in game!", (self.__index,))
section.props.append(("Material", -1))
else:
section.props.append(("Material", self.__material.get_index()))
section.props.append(("VertexCount", self.__vertex_count))
section.props.append(("TriangleCount", self.__triangle_count))
section.props.append(("StreamCount", self.__stream_count))
stream_size = None
for stream_tag in self.__streams:
stream = self.__streams[stream_tag]
# CHECK SYNC OF STREAMS
if not stream_size:
stream_size = stream.get_size()
elif stream_size != stream.get_size():
lprint("W Piece with index %s has desynced stream sizes, expect errors in game!", (self.__index,))
break
# APPEND STREAMS
section.sections.append(stream.get_as_section())
# APPEND TRIANGLES
triangle_section = _SectionData("Triangles")
for triangle in self.__triangles:
triangle_section.data.append(triangle)
section.sections.append(triangle_section)
return section
| SCSSoftware/BlenderTools | addon/io_scs_tools/exp/pim/piece.py | Python | gpl-2.0 | 9,608 |
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
import sys
from ..metadata import generic, helpers, kodi, kodi_12plus, mede8er, media_browser, ps3, tivo, wdtv
__all__ = ['generic', 'helpers', 'kodi', 'kodi_12plus', 'media_browser', 'ps3', 'wdtv', 'tivo', 'mede8er']
def available_generators():
return [x for x in __all__ if x not in ['generic', 'helpers']]
def _getMetadataModule(name):
name = name.lower()
prefix = __name__ + '.'
if name in available_generators() and prefix + name in sys.modules:
return sys.modules[prefix + name]
else:
return None
def _getMetadataClass(name):
module = _getMetadataModule(name)
if not module:
return None
return module.metadata_class()
def get_metadata_generator_dict():
result = {}
for cur_generator_id in available_generators():
cur_generator = _getMetadataClass(cur_generator_id)
if not cur_generator:
continue
result[cur_generator.name] = cur_generator
return result
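# Illustrative usage (the exact generator names are assumptions here):
#
# generators = get_metadata_generator_dict()
# # e.g. {'KODI': <kodi metadata object>, 'TIVO': <tivo metadata object>},
# # keyed by each generator's name attribute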
| FireBladeNooT/Medusa_1_6 | medusa/metadata/__init__.py | Python | gpl-3.0 | 1,680 |
from b import C
class D(C):
def <warning descr="'b.C.method' is marked as '@final' and should not be overridden">method</warning>(self):
pass | siosio/intellij-community | python/testData/inspections/PyFinalInspection/OverridingFinalMethod/a.py | Python | apache-2.0 | 153 |
from django.db import models
import simplejson
from time import localtime, strftime
from datetime import datetime, timedelta
import re
from django_thermostat.utils import gen_comparing_time
from django_thermometer.temperature import read_temperatures
#from django_thermostat.mappings import get_mappings
from django.db.models import Avg
import logging
from django.conf import settings
logger = logging.getLogger("thermostat.models")
logger.setLevel(settings.LOG_LEVEL)
class Thermometer(models.Model):
tid = models.CharField(max_length=30, unique=True)
caption = models.CharField(max_length=30, null=True, blank=True, unique=True)
is_internal_reference = models.NullBooleanField(unique=True)
is_external_reference = models.NullBooleanField(unique=True)
def __unicode__(self):
return u"%s" % self.caption if self.caption is not None else self.tid
def read(self, ):
return read_temperatures(self.tid)[self.tid]["celsius"]
class Context(models.Model):
confort_temperature = models.DecimalField(default=22, decimal_places=2, max_digits=4)
economic_temperature = models.DecimalField(default=18, decimal_places=2, max_digits=4)
tuned_temperature = models.DecimalField(null=True, blank=True, decimal_places=2, max_digits=4)
heat_on = models.BooleanField(default=False)
manual = models.BooleanField(default=True)
flame = models.BooleanField(default=False)
def to_json(self, ):
return simplejson.dumps({
"status": "ON" if self.heat_on else "OFF",
"confort": self.confort_temperature,
"economic": self.economic_temperature,
"manual": self.manual,
"flame": self.flame,
"tuned": self.tuned_temperature,
"time": "%s" % strftime("%H:%M:%S", localtime()),
})
class Day(models.Model):
name = models.CharField(max_length=10)
value = models.CharField(max_length=3)
def __unicode__(self, ):
return u"%s" % self.name
def to_pypelib(self, ):
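# e.g. a Day with name "Monday" and value "mon" (illustrative values)
# renders as "(current_day_of_week = mon)"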
return u"(current_day_of_week = %s)" % self.value
class TimeRange(models.Model):
start = models.TimeField()
end = models.TimeField()
def __unicode__(self, ):
return u"%s - %s" % (self.start, self.end)
def start_to_comparing(self, ):
return u"%s" % gen_comparing_time(
self.start.hour,
self.start.minute,
self.start.second,
)
def end_to_comparing(self, ):
return u"%s" % gen_comparing_time(
self.end.hour,
self.end.minute,
self.end.second,
)
def to_pypelib(self, ):
#((current_time > %f) && (current_time < %f))
return u"((current_time > %s ) && (current_time < %s))" % (
self.start_to_comparing(),
self.end_to_comparing())
TEMP_CHOICES = (
("None", "Do nothing"),
("luz_pasillo_off", "Turn off hallway light"),
("luz_pasillo_on", "Turn on hallway light"),
("luz_descansillo_off", "Turn off landing light"),
("luz_descansillo_on", "Turn on landing light"),
("tune_to_confort", "Comfort"),
("tune_to_economic", "Economic"),
("salon_on", "Raise living room"),
("salon_off", "Lower living room"),
("pasillo_off", "Lower hallway"),
("pasillo_on", "Raise hallway"),
("cuarto_oeste_off", "Lower west room"),
("cuarto_oeste_on", "Raise west room"),
("cuarto_este_off", "Lower east room"),
("cuarto_este_on", "Raise east room"),
("a_lights_on", "Raise group A"),
("a_lights_off", "Lower group A"),
("set_heater_off", "Turn off boiler"),
("set_heater_on", "Turn on boiler"),
("tune_economic_to_19", "Economic to 19"),
("tune_economic_to_18", "Economic to 18"),
)
COND_CHOICES = (
("is_at_night", "Is at night"),
("current_external_temperature", "Current external temperature"),
("current_internal_temperature", "Current internal temperature"),
)
OPERATOR_CHOICES = (
("=", "="),
("<", "<"),
(">", ">"),
(">=", ">="),
("<=", "<="),
)
class Conditional(models.Model):
statement = models.CharField(max_length=60,choices=COND_CHOICES)
operator = models.CharField(max_length=2,choices=OPERATOR_CHOICES)
statement2 = models.CharField(max_length=60, choices=COND_CHOICES, null=True, blank=True)
value = models.CharField(max_length=10, null=True, blank=True)
ocurred = models.BooleanField(default=False)
def __unicode__(self):
return self.to_pypelib()
def to_pypelib(self):
# from django_thermostat.mappings import get_mappings
# if not self.statement2 is None:
# self.value = get_mappings()[self.statement2]()
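# e.g. statement "is_at_night", operator "=", value "1" (illustrative
# values) renders as "(is_at_night = 1)"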
return u"(%s %s %s)" % (
self.statement,
self.operator,
self.value if self.statement2 is None else self.statement2)
def save(self):
if self.statement2 is None and self.value == "":
raise AttributeError("Either statment2 or value must not be none")
super(Conditional, self).save()
class Rule(models.Model):
days = models.ManyToManyField(Day, null=True, blank=True)
ranges = models.ManyToManyField(TimeRange, null=True, blank=True)
conditionals = models.ManyToManyField(Conditional, null=True, blank=True)
action = models.CharField(max_length=25, choices=TEMP_CHOICES, null=True, blank=True)
active = models.BooleanField(default=True)
thermostat = models.BooleanField(default=False)
def __unicode__(self, ):
return "[%s] therm: %s; days: %s; time ranges: %s; conditionals: %s; action: %s" % (
self.active,
self.thermostat,
self.days.all(),
self.ranges.all(),
self.conditionals.all(),
self.action,
)
def to_pypelib(self, ):
if self.thermostat:
out = "if (heater_manual = 0 ) && "
else:
out = "if "
days = self.days.all()
ranges = self.ranges.all()
conds = self.conditionals.all()
if days.count():
out = "%s (" % out
for day in days:
out = "%s %s || " % (out, day.to_pypelib())
out = re.sub("\|\|\s$", " ) &&", out)
if ranges.count():
out = "%s (" % out
for trang in ranges.all():
out = "%s %s || " % (out, trang.to_pypelib())
out = re.sub("\|\|\s$", ") &&", out)
if conds.count():
out = "%s (" % out
for c in conds:
out = "%s %s || " % (out, c.to_pypelib())
out = re.sub("\|\|\s$", ") &&", out)
out = re.sub("&&$", "", out)
if ranges.count() == 0 and days.count() == 0 and conds.count() == 0:
out = "%s 1 = 1 " % out
if self.action == "None":
return "%s then ACCEPT" % out
return "%s then accept nonterminal do %s" % (out, self.action)
class ThermometerDataManager(models.Manager):
@staticmethod
def get_last_n_days(n):
"""
Method NOT returning QuerySet
"""
ffin = datetime.utcnow().replace(minute=0, second=0)
fini = ffin - timedelta(days=n)
data = {}
for d in ThermometerData.objects.filter(timestamp__gt=fini, timestamp__lt=ffin):
if d.thermometer.caption not in data:
data[d.thermometer.caption] = {}
data[d.thermometer.caption][d.timestamp.strftime('%s')] = d.value
return data
@staticmethod
def get_last_n_weeks(n):
"""
Method NOT returning QuerySet
"""
data = {}
for i in reversed(range(7*n)):
ffin = datetime.utcnow().replace(hour=0, minute=0, second=0) - timedelta(days=i)
fini = ffin - timedelta(days=1)
logger.debug("inteval: %s - %s" % (fini, ffin))
for therm in Thermometer.objects.all():
if therm.caption not in data:
data[therm.caption] = {}
d = ThermometerData.objects.filter(
thermometer=therm,
timestamp__gt=fini,
timestamp__lt=ffin).aggregate(Avg('value'))
data[therm.caption][fini.strftime('%s')] = d['value__avg']
logger.debug("thermomentro: %s, data: %s" % (therm.id, d['value__avg']))
return data
@staticmethod
def get_last_n_months(n):
pass
@staticmethod
def get_last_year():
pass
class ThermometerData(models.Model):
objects = ThermometerDataManager()
thermometer = models.ForeignKey(Thermometer)
timestamp = models.DateTimeField(auto_now_add=True)
value = models.DecimalField(max_digits=6, decimal_places=2)
def __unicode__(self):
return u"%s, %s: %0.2f" % (self.thermometer, self.timestamp, self.value)
| jpardobl/django-thermostat | django_thermostat/models.py | Python | bsd-3-clause | 8,988 |
#***
#*********************************************************************
#*************************************************************************
#***
#*** GizmoDaemon Config Script
#*** Powermate ButtonTimeout config
#***
#*****************************************
#*****************************************
#***
"""
Copyright (c) 2007, Gizmo Daemon Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
############################
# Imports
##########################
from GizmoDaemon import *
from GizmoScriptDefault import *
from GizmoDeviceStrings import *
import time
ENABLED = True
VERSION_NEEDED = 3.3
INTERESTED_CLASSES = [GizmoEventClass.Powermate]
############################
# PowermateButtonTimeout Class definition
##########################
class PowermateButtonTimeout(GizmoScriptDefault):
"""
ButtonTimeout Powermate Event Mapping
"""
############################
# Public Functions
##########################
def onDeviceEvent(self, Event, Gizmo = None):
"""
Called from Base Class' onEvent method.
See GizmodDispatcher.onEvent documentation for an explanation of this function
"""
# Check for rotations
if Event.Type == GizmoEventType.EV_KEY:
if Event.Value == 1:
#print "Powermate Button Timeout Timer Created [" + str(POWERMATE_BUTTON_TIMEOUT) + "s]"
self.ButtonTimeoutTimers[Gizmo.FileName] = GizmodTimer(POWERMATE_BUTTON_TIMEOUT, self.timerCallback, Gizmo)
self.ButtonTimeoutTimers[Gizmo.FileName].start()
self.ButtonTimeoutEatNexts[Gizmo.FileName] = False
else:
if not self.ButtonTimeoutEatNexts[Gizmo.FileName]:
#print "Powermate Button Timeout Timer Canceled"
self.ButtonTimeoutTimers[Gizmo.FileName].cancel()
else:
#print "Powermate Button Event Eaten due to Button Timeout"
self.ButtonTimeoutEatNexts[Gizmo.FileName] = False
# loop through all the user scripts and fire off the event
# if they handle it
for UserScript in Gizmod.Dispatcher.userScripts:
if UserScript.__class__.__name__.find("Powermate") == -1:
continue
if "onDeviceEventEaten" in dir(UserScript):
if UserScript.onDeviceEventEaten(Event, Gizmo):
break
return True
elif Event.Type == GizmoEventType.EV_REL:
try:
self.ButtonTimeoutTimers[Gizmo.FileName].cancel()
except KeyError:
# no big deal!
pass
return False
def onDeviceEventButtonTimeout(self, Gizmo):
"""
Called when a Powermate's button times out.
This is generated from 200-Powermate-ButtonTimeout.py
"""
#print "Button Timeout in [" + self.__class__.__name__ + "] from [" + str(Gizmo.Type) + " " + str(Gizmo.DeviceClassID) + "]"
return False
def onDeviceEventEaten(self, Event, Gizmo):
"""
Called when a Powermate's button is released after timing out.
This is generated from 200-Powermate-ButtonTimeout.py
"""
#print "Device Event Eaten in [" + self.__class__.__name__ + "] from [" + str(Gizmo.Type) + " " + str(Gizmo.DeviceClassID) + "]"
return False
def timerCallback(self, UserData):
"""
Timer function callback
"""
# set the eat next state
self.ButtonTimeoutEatNexts[UserData.FileName] = True
# loop through all the user scripts and fire off the event
# if they handle it
for UserScript in Gizmod.Dispatcher.userScripts:
if UserScript.__class__.__name__.find("Powermate") == -1:
continue
if "onDeviceEventButtonTimeout" in dir(UserScript):
if UserScript.onDeviceEventButtonTimeout(UserData):
break
############################
# Private Functions
##########################
def __init__(self):
"""
Default Constructor
"""
GizmoScriptDefault.__init__(self, ENABLED, VERSION_NEEDED, INTERESTED_CLASSES)
self.ButtonTimeoutTimers = {}
self.ButtonTimeoutEatNexts = {}
############################
# PowermateButtonTimeout class end
##########################
# register the user script
PowermateButtonTimeout()
| jtriley/gizmod | scripts/modules.d/200-Powermate-ButtonTimeout.py | Python | apache-2.0 | 4,462 |
from p1_support import load_level, show_level
from math import sqrt
from heapq import heappush, heappop
import operator
VERBOSE = False
def debug(*args):
if VERBOSE:
print ''.join([str(arg) for arg in args])
def dijkstras_shortest_path(src, dst, graph, adj):
dist = {}
prev = {}
dist[src] = 0
prev[src] = None # parent of the source node
queue = []
# Python heapq (heap, item) : item can be a tuple or single value
# If tuple is used, the first element will be used as key (key, data)
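# e.g. heappush(queue, (1.5, (3, 4))) orders the entry by the 1.5 cost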
heappush(queue, (dist[src], src))
while queue:
pathCost, node = heappop(queue)
if node == dst:
break
adjacent = adj(graph, node)
# Extract (position, cost) from list of adjacent states
for neighbor, cost in adjacent:
totalCost = pathCost + cost
#print totalCost
if neighbor not in dist or totalCost < dist[neighbor]:
dist[neighbor] = totalCost
prev[neighbor] = node # parent of [ neighbor ] is node
heappush(queue, ( totalCost, neighbor))
path = []
# Path found build it, else return empty path
if node == dst:
# Traverse up the parent tree
while node: # while there is a parent (prev[src] = None)
path.append(node)
node = prev[node] # update to the parent
# Path is from dst to src, reverse it
path.reverse()
if path:
debug("Path: ", path)
debug("Path cost: ", pathCost)
return path
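# Illustrative usage (the 2x2 all-open grid below is an assumption for
# this sketch, not a real level file):
#
# level = {'spaces': {(0, 0): 1, (0, 1): 1, (1, 0): 1, (1, 1): 1}}
# dijkstras_shortest_path((0, 0), (1, 1), level, navigation_edges)
# # -> [(0, 0), (1, 1)], taking one diagonal step of cost sqrt(2)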
def navigation_edges(level, cell):
# Valid movement deltas
deltas = {
'LEFT_DOWN': (-1, -1),
'LEFT': (-1, 0),
'LEFT_UP': (-1, 1),
'DOWN': (0, -1),
'UP': (0, 1),
'RIGHT_DOWN': (1, -1),
'RIGHT': (1, 0),
'RIGHT_UP': (1, 1)
}
validMoves = []
for delta in deltas.values():
# Calculate new position
position = (cell[0]+delta[0], cell[1]+delta[1])
if position in level['spaces']:
# Calculate edge cost
cost = sqrt(delta[0] ** 2 + delta[1] ** 2)
# Valid move is a tuple (nextState, edgeCost)
validMoves.append((position, cost))
return validMoves
def test_route(filename, src_waypoint, dst_waypoint):
level = load_level(filename)
if VERBOSE:
print("Level layout:")
show_level(level)
src = level['waypoints'][src_waypoint]
dst = level['waypoints'][dst_waypoint]
path = dijkstras_shortest_path(src, dst, level, navigation_edges)
if path:
show_level(level, path)
else:
print "No path possible!"
# Show the level if the user hasn't already seen it
if not VERBOSE:
show_level(level, [])
if __name__ == '__main__':
import sys
# Use command line options
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] level_file src_waypoint dst_waypoint")
parser.add_option("-v", "--verbose", dest="verbose", help="use verbose logging", action="store_true", default=False)
(options, args) = parser.parse_args()
# Make sure the appropriate number of arguments was supplied
if (len(args) != 3):
print "Unexpected argument count."
parser.print_help()
else:
VERBOSE = options.verbose
filename, src_waypoint, dst_waypoint = args
test_route(filename, src_waypoint, dst_waypoint)
| jtsommers/dijkd | p1.py | Python | gpl-2.0 | 3,077 |
#-------------------------------------------------------------------------------
# Name: base_converter
# Purpose: Provides Basic MARC21 RDA converter for use in other classes
#
# Author: Jeremy Nelson
#
# Created: 2014/09/15
# Copyright: (c) Jeremy Nelson, Colorado College 2014
# Licence: MIT
#-------------------------------------------------------------------------------
from pymarc import Field
class BaseMARC21Conversion(object):
def __init__(self):
pass
def __format245__(self, field245):
"""Method takes a 245 field from a MARC record and returns properly
formatted subfields. By not copying subfield 'h', it applies the first
PCC conversion recommendation.
Args:
field245(pymarc.Field): 245 field
Returns:
pymarc.Field
"""
if field245.tag != '245':
return
subfield_a, subfield_c = '', ''
a_subfields = field245.get_subfields('a')
indicator1,indicator2 = field245.indicators
if len(a_subfields) > 0:
subfield_a = a_subfields[0]
if len(subfield_a) > 0:
if ['.','\\'].count(subfield_a[-1]) > 0:
subfield_a = subfield_a[:-1].strip()
new245 = Field(tag='245',
indicators=[indicator1,indicator2],
subfields = ['a', u'{0} '.format(subfield_a)])
b_subfields = field245.get_subfields('b')
c_subfields = field245.get_subfields('c')
n_subfields = field245.get_subfields('n')
p_subfields = field245.get_subfields('p')
# Order for 245 subfields are:
# $a $n $p $b $c
if len(n_subfields) > 0:
for subfield_n in n_subfields:
new245.add_subfield('n', subfield_n)
if len(p_subfields) > 0:
for subfield_p in p_subfields:
new245.add_subfield('p', subfield_p)
if len(c_subfields) > 0 and len(b_subfields) < 1:
if 'a' in new245.subfields:
new245['a'] = u'{0} /'.format(new245['a'].strip())
elif len(b_subfields) > 0:
if 'a' in new245.subfields:
new245['a'] = u'{0} :'.format(new245['a'].strip())
if len(b_subfields) > 0:
for subfield_b in b_subfields:
new245.add_subfield('b',subfield_b)
if len(c_subfields) > 0:
for subfield_c in c_subfields:
new245.add_subfield('c',subfield_c)
return new245
def main():
pass
if __name__ == '__main__':
main()
| Tutt-Library/rda-enhancement | rda_enhancement/base_converter.py | Python | mit | 2,599 |
"""
TODO
Would be nice to load the entire config at once rather
than each time per method
"""
import json
import os.path
class UserData():
LOCATION = os.path.join('assets', 'game-data', 'saved-games' + os.sep)
FILENAME = 'game-data.json'
FULL_PATH = LOCATION + FILENAME
def __init__(self):
self.create_settings_file()
def create_settings_file(self):
if not os.path.exists(UserData.FULL_PATH):
settings = {
"settings": {
"music": True,
"controls": {
"up": "K_UP",
"down": "K_DOWN",
"left": "K_LEFT",
"right": "K_RIGHT",
"select": "K_RETURN",
"action": "K_SPACE",
"secondary": "K_TAB",
"escape": "K_ESCAPE"
}
}
}
with open( UserData.FULL_PATH, 'w') as saved_game:
saved_game.write(json.dumps(settings))
def get_controls(self):
path = UserData.LOCATION + UserData.FILENAME
with open(path) as saved_game:
game_data = json.load(saved_game)
return game_data['settings']['controls']
def has_completed_level(self, level: str) -> bool:
path = UserData.LOCATION + UserData.FILENAME
with open(path) as saved_game:
game_data = json.load(saved_game)
if level in game_data:
if game_data[level]['completed_level'] == True:
return True
return False
def get_has_seen_introduction(self):
"""
Brings back whether we've seen the intro text or not
"""
path = UserData.LOCATION + UserData.FILENAME
with open(path) as saved_game:
game_data = json.load(saved_game)
try:
return game_data['has_seen_introduction']
except KeyError:
return None
def register_has_seen_introduction(self):
path = UserData.LOCATION + UserData.FILENAME
with open(path) as saved_game:
game_data = json.load(saved_game)
game_data['has_seen_introduction'] = True
with open(path, 'w') as saved_game:
saved_game.write(json.dumps(game_data))
def get_last_played_level(self) -> str:
"""
Returns the name of the level we last played so that
when we head to the level select, we're positioned over it
"""
path = UserData.LOCATION + UserData.FILENAME
with open(path) as saved_game:
game_data = json.load(saved_game)
try:
return game_data['last_played_level']
except KeyError:
return None
def register_last_played_level(self, level: str):
"""
Saves the name of the level we last played so that
when we head to the level select, we're positioned over it
"""
path = UserData.LOCATION + UserData.FILENAME
with open(path) as saved_game:
game_data = json.load(saved_game)
game_data['last_played_level'] = level
with open(path, 'w') as saved_game:
saved_game.write(json.dumps(game_data))
def save(self, completed_level: str, collected_tape=None):
"""Saves our game"""
path = UserData.LOCATION + UserData.FILENAME
with open(path) as saved_game:
game_data = json.load(saved_game)
game_data[completed_level] = {
'completed_level': True,
}
if collected_tape is not None:
game_data[completed_level]['collected_tape'] = collected_tape
with open(path, 'w') as saved_game:
saved_game.write(json.dumps(game_data))
def delete_save_data(self):
"""Removes the save data but preserves our settings"""
with open( UserData.FULL_PATH) as saved_game:
game_data = json.load(saved_game)
settings = game_data['settings']
game_data.clear()
game_data['settings'] = settings
with open( UserData.FULL_PATH, 'w') as saved_game:
saved_game.write(json.dumps(game_data))
def toggle_music_option(self):
with open( UserData.FULL_PATH) as saved_game:
game_data = json.load(saved_game)
settings = game_data['settings']
settings['music'] = not settings['music']
with open( UserData.FULL_PATH, 'w') as saved_game:
saved_game.write(json.dumps(game_data))
def get_music_option(self):
with open( UserData.FULL_PATH) as saved_game:
game_data = json.load(saved_game)
return game_data['settings']['music']
def has_video_for_level(self, level: str):
with open( UserData.FULL_PATH) as saved_game:
game_data = json.load(saved_game)
if level in game_data:
if 'collected_tape' in game_data[level] and game_data[level]['collected_tape'] == True:
return True
| joereynolds/Mr-Figs | src/user_data.py | Python | gpl-3.0 | 5,064 |
import unittest
from sql import Sql
class SqlTest(unittest.TestCase):
def test_create_word_table_sql_correct(self):
self.assertEqual(Sql().create_word_table_sql(3), 'CREATE TABLE IF NOT EXISTS word (word1, word2, word3, count)')
def test_create_param_table_sql_correct(self):
self.assertEqual(Sql().create_param_table_sql(), 'CREATE TABLE IF NOT EXISTS param (name, value)')
def test_set_param_sql_correct(self):
self.assertEqual(Sql().set_param_sql(), 'INSERT INTO param (name, value) VALUES (?, ?)')
def test_create_index_sql_correct(self):
self.assertEqual(Sql().create_index_sql(3), 'CREATE INDEX IF NOT EXISTS i_word ON word (word1, word2, word3)')
def test_select_count_for_words_sql_correct(self):
self.assertEqual(Sql().select_count_for_words_sql(3), 'SELECT count FROM word WHERE word1=? AND word2=? AND word3=?')
def test_update_count_for_words_sql_correct(self):
self.assertEqual(Sql().update_count_for_words_sql(3), 'UPDATE word SET count=? WHERE word1=? AND word2=? AND word3=?')
def test_insert_row_for_words_sql_correct(self):
self.assertEqual(Sql().insert_row_for_words_sql(3), 'INSERT INTO word (word1, word2, word3, count) VALUES (?, ?, ?, ?)')
def test_select_words_and_counts_sql_correct(self):
self.assertEqual(Sql().select_words_and_counts_sql(3), 'SELECT word3, count FROM word WHERE word1=? AND word2=?')
def test_delete_words_sql_correct(self):
self.assertEqual(Sql().delete_words_sql(), 'DELETE FROM word')
if __name__ == '__main__':
unittest.main() | codebox/markov-text | test/sql_test.py | Python | mit | 1,618 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# FIXME: Cleanups
# - Change all class to be "new style" classes
# - Consolidate ListChoice and DualListChoice to use the same class
# and rename to a better name
# - Consolidate RadioChoice and DropdownChoice to use the same class
# and rename to a better name
# - Consolidate ListOf and ListOfStrings/ListOfIntegers
# - Checkbox
# -> rename to Boolean
# -> Add alternative rendering "dropdown"
import math
import os
import time
import re
import sre_constants
import urlparse
import types
import base64
import hashlib
import socket
import ipaddress
from Cryptodome.PublicKey import RSA
from UserDict import DictMixin
import json
import cmk.paths
import cmk.defines as defines
import livestatus
import forms
import utils
from gui_exceptions import MKUserError, MKGeneralException
def type_name(v):
try:
return type(v).__name__
except:
return html.attrencode(type(v))
seconds_per_day = 86400
# Abstract base class of all value declaration classes.
class ValueSpec(object):
def __init__(self, **kwargs):
super(ValueSpec, self).__init__()
self._title = kwargs.get("title")
self._help = kwargs.get("help")
if "default_value" in kwargs:
self._default_value = kwargs.get("default_value")
self._validate = kwargs.get("validate")
# debug display allows viewing the class name of a certain element in the GUI.
# If set to False, the corresponding div is present, but hidden.
self.debug_display = False
def title(self):
return self._title
def help(self):
return self._help
# Create HTML-form elements that represent a given
# value and let the user edit that value. The varprefix
# is prepended to the HTML variable names and is needed
# in order to make the variable unique in case that another
# Value of the same type is being used as well.
# The function may assume that the type of the value is valid.
def render_input(self, varprefix, value):
pass
# Sets the input focus (cursor) into the most prominent
# field of the HTML code previously rendered with render_input()
def set_focus(self, varprefix):
html.set_focus(varprefix)
# Create a canonical, minimal, default value that
# matches the datatype of the value specification and
# fulfills also data validation.
def canonical_value(self):
return None
# Return a default value for this variable. This
# is optional and only used in the value editor
# for same cases where the default value is known.
def default_value(self):
try:
if type(self._default_value) in [types.FunctionType, types.MethodType]:
return self._default_value()
else:
return self._default_value
except:
return self.canonical_value()
# Creates a text representation of the value that can be
# used in tables and other contexts. It is to be read
# by the user and need not be parsable.
# The function may assume that the type of the value is valid.
#
# In the current implementation this function is only used to
# render the object for html code. So it is allowed to add
# html code for better layout in the GUI.
def value_to_text(self, value):
return repr(value)
# Create a value from the current settings of the
# HTML variables. This function must also check the validity
# and may raise a MKUserError in case of invalid set variables.
def from_html_vars(self, varprefix):
return None
# Check if a given value matches the
# datatype of described by this class. This method will
# be used by cmk -X on the command line in order to
# validate main.mk (some happy day in future)
def validate_datatype(self, value, varprefix):
pass
# Check if a given value is within the ranges that are
# allowed for this type of value. This function should
# assume that the data type is valid (either because it
# has been returned by from_html_vars() or because it has
# been checked with validate_datatype()).
def validate_value(self, value, varprefix):
self.custom_validate(value, varprefix)
# Needed for the implementation of custom validation
# functions that are configured via the user argument
# validate = .... Problem: this function must be
# called by *every* validate_value() function in all
# subclasses - explicitly.
def custom_validate(self, value, varprefix):
if self._validate:
self._validate(value, varprefix)
def classtype_info(self):
superclass = " ".join(base.__name__ for base in self.__class__.__bases__)
if not self.debug_display:
return
html.div("Check_MK-Type: %s %s" % (superclass, type(self).__name__), class_="legend")
# A fixed non-editable value, e.g. to be used in "Alternative"
class FixedValue(ValueSpec):
def __init__(self, value, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._value = value
self._totext = kwargs.get("totext")
def canonical_value(self):
return self._value
def render_input(self, varprefix, value):
html.write(self.value_to_text(value))
def value_to_text(self, value):
if self._totext != None:
return self._totext
elif type(value) == unicode:
return value
else:
return str(value)
def from_html_vars(self, varprefix):
return self._value
def validate_datatype(self, value, varprefix):
if not self._value == value:
raise MKUserError(varprefix, _("Invalid value, must be '%r' but is '%r'") %
(self._value, value))
def validate_value(self, value, varprefix):
self.validate_datatype(value, varprefix)
ValueSpec.custom_validate(self, value, varprefix)
# Time in seconds
class Age(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._label = kwargs.get("label")
self._minvalue = kwargs.get("minvalue")
self._display = kwargs.get("display", ["days", "hours", "minutes", "seconds"])
def canonical_value(self):
if self._minvalue:
return self._minvalue
else:
return 0
def render_input(self, varprefix, value):
self.classtype_info()
days, rest = divmod(value, 60*60*24)
hours, rest = divmod(rest, 60*60)
minutes, seconds = divmod(rest, 60)
html.open_div()
if self._label:
html.write(self._label + " ")
takeover = 0
first = True
for uid, title, value, tkovr_fac in [ ("days", _("days"), days, 24),
("hours", _("hours"), hours, 60),
("minutes", _("mins"), minutes, 60),
("seconds", _("secs"), seconds, 60) ]:
if uid in self._display:
value += takeover
takeover = 0
html.number_input(varprefix + "_" + uid, value, 3 if first else 2)
html.write(" %s " % title)
first = False
else:
takeover = (takeover + value) * tkovr_fac
html.close_div()
def from_html_vars(self, varprefix):
# TODO: Validate for correct numbers!
return (
utils.saveint(html.var(varprefix + '_days', 0)) * 3600 * 24
+ utils.saveint(html.var(varprefix + '_hours', 0)) * 3600
+ utils.saveint(html.var(varprefix + '_minutes', 0)) * 60
+ utils.saveint(html.var(varprefix + '_seconds', 0))
)
def value_to_text(self, value):
days, rest = divmod(value, 60*60*24)
hours, rest = divmod(rest, 60*60)
minutes, seconds = divmod(rest, 60)
parts = []
for title, count in [
( _("days"), days, ),
( _("hours"), hours, ),
( _("minutes"), minutes, ),
( _("seconds"), seconds, )]:
if count:
parts.append("%d %s" % (count, title))
if parts:
return " ".join(parts)
else:
return _("no time")
def validate_datatype(self, value, varprefix):
if type(value) != int:
raise MKUserError(varprefix, _("The value %r has type %s, but must be of type int") %
(value, type_name(value)))
    def validate_value(self, value, varprefix):
        if self._minvalue != None and value < self._minvalue:
            raise MKUserError(varprefix, _("%s is too low. The minimum allowed value is %s.") % (
                              value, self._minvalue))
        # Like all other subclasses, explicitly run the user-configured validation
        ValueSpec.custom_validate(self, value, varprefix)
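# Illustrative usage sketch for Age (comment only; the values are made up):
#
#   vs = Age(title=_("Maximum age"), minvalue=60)
#   vs.value_to_text(3661)        # -> "1 hours 1 minutes 1 seconds"
#   vs.validate_value(30, "age")  # raises MKUserError: below minvalue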
# Editor for a single integer
class Integer(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._size = kwargs.get("size", 5)
# TODO: inconsistency with default_value. All should be named with underscore
self._minvalue = kwargs.get("minvalue")
self._maxvalue = kwargs.get("maxvalue")
self._label = kwargs.get("label")
self._unit = kwargs.get("unit", "")
self._thousand_sep = kwargs.get("thousand_sep")
self._display_format = kwargs.get("display_format", "%d")
self._align = kwargs.get("align", "left")
if "size" not in kwargs and "maxvalue" in kwargs and kwargs["maxvalue"] != None:
self._size = 1 + int(math.log10(self._maxvalue)) + \
(3 if type(self._maxvalue) == float else 0)
def canonical_value(self):
if self._minvalue:
return self._minvalue
else:
return 0
def render_input(self, varprefix, value, convfunc = utils.saveint):
self.classtype_info()
if self._label:
html.write(self._label)
html.nbsp()
if self._align == "right":
style = "text-align: right;"
else:
style = ""
if value == "": # This is needed for ListOfIntegers
html.text_input(varprefix, "", "number", size = self._size, style = style)
else:
html.number_input(varprefix, self._display_format % convfunc(value), size = self._size, style = style)
if self._unit:
html.nbsp()
html.write(self._unit)
def from_html_vars(self, varprefix):
try:
return int(html.var(varprefix))
except:
raise MKUserError(varprefix,
_("The text <b><tt>%s</tt></b> is not a valid integer number.") % html.var(varprefix))
def value_to_text(self, value):
text = self._display_format % value
if self._thousand_sep:
sepped = ""
rest = text
while len(rest) > 3:
sepped = self._thousand_sep + rest[-3:] + sepped
rest = rest[:-3]
sepped = rest + sepped
text = sepped
if self._unit:
text += " " + self._unit
return text
def validate_datatype(self, value, varprefix):
if type(value) not in [ int, long ]:
raise MKUserError(varprefix, _("The value %r has the wrong type %s, but must be of type int")
% (value, type_name(value)))
def validate_value(self, value, varprefix):
if self._minvalue != None and value < self._minvalue:
raise MKUserError(varprefix, _("%s is too low. The minimum allowed value is %s.") % (
value, self._minvalue))
if self._maxvalue != None and value > self._maxvalue:
raise MKUserError(varprefix, _("%s is too high. The maximum allowed value is %s.") % (
value, self._maxvalue))
ValueSpec.custom_validate(self, value, varprefix)
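# Illustrative usage sketch for Integer (comment only; the values are made up):
#
#   vs = Integer(minvalue=0, maxvalue=100000, unit="MB", thousand_sep=",")
#   vs.value_to_text(1234)         # -> "1,234 MB"
#   vs.validate_value(-1, "size")  # raises MKUserError: below minvalue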
# File size in Byte, KByte, MByte, GByte and TByte
class Filesize(Integer):
def __init__(self, **kwargs):
Integer.__init__(self, **kwargs)
self._names = [ 'Byte', 'KByte', 'MByte', 'GByte', 'TByte', ]
    def get_exponent(self, value):
        if value == 0:
            return 0, 0
        for exp, unit_name in reversed(list(enumerate(self._names))):
            if value % (1024 ** exp) == 0:
                return exp, value / (1024 ** exp)
def render_input(self, varprefix, value):
self.classtype_info()
exp, count = self.get_exponent(value)
html.number_input(varprefix + '_size', count, size = self._size)
html.nbsp()
choices = [ (str(nr), name) for (nr, name) in enumerate(self._names) ]
html.dropdown(varprefix + '_unit', choices, deflt=str(exp))
def from_html_vars(self, varprefix):
try:
return int(html.var(varprefix + '_size')) * (1024 ** int(html.var(varprefix + '_unit')))
except:
raise MKUserError(varprefix + '_size', _("Please enter a valid integer number"))
def value_to_text(self, value):
exp, count = self.get_exponent(value)
return "%s %s" % (count, self._names[exp])
# Editor for a line of text
class TextAscii(ValueSpec):
def __init__(self, **kwargs):
super(TextAscii, self).__init__(**kwargs)
self._label = kwargs.get("label")
self._size = kwargs.get("size", 25) # also possible: "max"
self._try_max_width = kwargs.get("try_max_width", False) # If set, uses calc(100%-10px)
self._cssclass = kwargs.get("cssclass", "text")
self._strip = kwargs.get("strip", True)
self._attrencode = kwargs.get("attrencode", True)
        self._allow_empty = kwargs.get("allow_empty", True)
self._empty_text = kwargs.get("empty_text", "")
self._read_only = kwargs.get("read_only")
self._none_is_empty = kwargs.get("none_is_empty", False)
self._forbidden_chars = kwargs.get("forbidden_chars", "")
self._regex = kwargs.get("regex")
self._regex_error = kwargs.get("regex_error",
_("Your input does not match the required format."))
self._minlen = kwargs.get('minlen', None)
if type(self._regex) == str:
self._regex = re.compile(self._regex)
self._onkeyup = kwargs.get("onkeyup")
self._autocomplete = kwargs.get("autocomplete", True)
def canonical_value(self):
return ""
def render_input(self, varprefix, value, hidden=False):
self.classtype_info()
if value == None:
value = ""
else:
value = "%s" % value
if self._label:
html.write(self._label)
html.nbsp()
if hidden:
type_ = "password"
else:
type_ = "text"
attrs = {}
if self._onkeyup:
attrs["onkeyup"] = self._onkeyup
html.text_input(varprefix, value,
size=self._size,
try_max_width=self._try_max_width,
read_only=self._read_only,
cssclass=self._cssclass,
type=type_,
attrs=attrs,
autocomplete="off" if not self._autocomplete else None,
)
def value_to_text(self, value):
if not value:
return self._empty_text
else:
if self._attrencode:
return html.attrencode(value)
else:
return value
def from_html_vars(self, varprefix):
value = html.var(varprefix, "")
if self._strip:
value = value.strip()
if self._none_is_empty and not value:
return None
else:
return value
def validate_datatype(self, value, varprefix):
if self._none_is_empty and value == None:
return
if type(value) != str:
raise MKUserError(varprefix, _("The value must be of type str, but it has type %s") %
type_name(value))
def validate_value(self, value, varprefix):
try:
unicode(value)
except:
raise MKUserError(varprefix, _("Non-ASCII characters are not allowed here."))
if self._forbidden_chars:
for c in self._forbidden_chars:
if c in value:
raise MKUserError(varprefix, _("The character <tt>%s</tt> is not allowed here.") % c)
if self._none_is_empty and value == "":
raise MKUserError(varprefix, _("An empty value must be represented with None here."))
if not self._allow_empty and value.strip() == "":
raise MKUserError(varprefix, _("An empty value is not allowed here."))
if value and self._regex:
if not self._regex.match(value):
raise MKUserError(varprefix, self._regex_error)
if self._minlen != None and len(value) < self._minlen:
raise MKUserError(varprefix, _("You need to provide at least %d characters.") % self._minlen)
ValueSpec.custom_validate(self, value, varprefix)
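# Illustrative usage sketch for TextAscii (comment only; the regex and
# messages are made-up examples):
#
#   vs = TextAscii(title=_("Service"), allow_empty=False,
#                  regex="^[A-Z]+$", regex_error=_("Only capital letters, please."))
#   vs.validate_value("ABC", "svc")  # passes
#   vs.validate_value("abc", "svc")  # raises MKUserError with the regex_error text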
class TextUnicode(TextAscii):
def __init__(self, **kwargs):
TextAscii.__init__(self, **kwargs)
def from_html_vars(self, varprefix):
return html.get_unicode_input(varprefix, "").strip()
def validate_datatype(self, value, varprefix):
if type(value) not in [ str, unicode ]:
raise MKUserError(varprefix, _("The value must be of type str or unicode, but it has type %s") %
type_name(value))
# Internal ID as used in many places (for contact names, group names,
# and so on)
class ID(TextAscii):
def __init__(self, **kwargs):
TextAscii.__init__(self, **kwargs)
self._regex = re.compile('^[a-zA-Z_][-a-zA-Z0-9_]*$')
self._regex_error = _("An identifier must only consist of letters, digits, dash and "
"underscore and it must start with a letter or underscore.")
# Same as the ID class, but allowing unicode objects
class UnicodeID(TextUnicode):
def __init__(self, **kwargs):
TextUnicode.__init__(self, **kwargs)
self._regex = re.compile(r'^[\w][-\w0-9_]*$', re.UNICODE)
self._regex_error = _("An identifier must only consist of letters, digits, dash and "
"underscore and it must start with a letter or underscore.")
class UserID(TextUnicode):
def __init__(self, **kwargs):
TextUnicode.__init__(self, **kwargs)
self._regex = re.compile(r'^[\w][-\w0-9_\.@]*$', re.UNICODE)
self._regex_error = _("An identifier must only consist of letters, digits, dash, dot, "
"at and underscore. But it must start with a letter or underscore.")
class RegExp(TextAscii):
infix = "infix"
prefix = "prefix"
complete = "complete"
def __init__(self, mode, **kwargs):
self._mode = mode
self._case_sensitive = kwargs.get("case_sensitive", True)
TextAscii.__init__(self,
attrencode = True,
cssclass = self._css_classes(),
**kwargs
)
self._mingroups = kwargs.get("mingroups", 0)
self._maxgroups = kwargs.get("maxgroups")
def help(self):
help_text = []
default_help_text = TextAscii.help(self)
if default_help_text != None:
help_text.append(default_help_text + "<br><br>")
help_text.append(_("The text entered here is handled as a regular expression pattern."))
if self._mode == RegExp.infix:
help_text.append(_("The pattern is applied as infix search. Add a leading <tt>^</tt> "
"to make it match from the beginning and/or a tailing <tt>$</tt> "
"to match till the end of the text."))
elif self._mode == RegExp.prefix:
help_text.append(_("The pattern is matched from the beginning. Add a tailing "
"<tt>$</tt> to change it to a whole text match."))
elif self._mode == RegExp.complete:
help_text.append(_("The pattern is matching the whole text. You can add <tt>.*</tt> "
"in front or at the end of your pattern to make it either a prefix "
"or infix search."))
if self._case_sensitive == True:
help_text.append(_("The match is performed case sensitive."))
elif self._case_sensitive == False:
help_text.append(_("The match is performed case insensitive."))
help_text.append(
_("Please note that any backslashes need to be escaped using a backslash, "
"for example you need to insert <tt>C:\\\\windows\\\\</tt> if you want to match "
"<tt>c:\windows\</tt>.")
)
return " ".join(help_text)
def _css_classes(self):
classes = [ "text", "regexp" ]
if self._case_sensitive == True:
classes.append("case_sensitive")
elif self._case_sensitive == False:
classes.append("case_insensitive")
if self._mode != None:
classes.append(self._mode)
return " ".join(classes)
def validate_value(self, value, varprefix):
TextAscii.validate_value(self, value, varprefix)
# Check if the string is a valid regex
try:
compiled = re.compile(value)
except sre_constants.error, e:
raise MKUserError(varprefix, _('Invalid regular expression: %s') % e)
        if compiled.groups < self._mingroups:
            raise MKUserError(varprefix, _("Your regular expression contains <b>%d</b> groups. "
                 "You need at least <b>%d</b> groups.") % (compiled.groups, self._mingroups))
        if self._maxgroups != None and compiled.groups > self._maxgroups:
            raise MKUserError(varprefix, _("Your regular expression contains <b>%d</b> groups. "
                 "It must have at most <b>%d</b> groups.") % (compiled.groups, self._maxgroups))
ValueSpec.custom_validate(self, value, varprefix)
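# Illustrative usage sketch for RegExp (comment only):
#
#   vs = RegExp(RegExp.prefix, mingroups=1)
#   vs.validate_value("(foo|bar).*", "pat")  # passes: pattern has one group
#   vs.validate_value("foo.*", "pat")        # raises: at least one group required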
class RegExpUnicode(TextUnicode, RegExp):
def __init__(self, **kwargs):
TextUnicode.__init__(self, attrencode = True, **kwargs)
RegExp.__init__(self, **kwargs)
def validate_value(self, value, varprefix):
TextUnicode.validate_value(self, value, varprefix)
RegExp.validate_value(self, value, varprefix)
ValueSpec.custom_validate(self, value, varprefix)
class EmailAddress(TextAscii):
def __init__(self, **kwargs):
kwargs.setdefault("size", 40)
TextAscii.__init__(self, **kwargs)
# The "new" top level domains are very unlimited in length. Theoretically they can be
# up to 63 chars long. But currently the longest is 24 characters. Check this out with:
# wget -qO - http://data.iana.org/TLD/tlds-alpha-by-domain.txt | tail -n+2 | wc -L
self._regex = re.compile('^[A-Z0-9._%&+-]+@(localhost|[A-Z0-9.-]+\.[A-Z]{2,24})$', re.I)
self._make_clickable = kwargs.get("make_clickable", False)
def value_to_text(self, value):
if not value:
return TextAscii.value_to_text(self, value)
elif self._make_clickable:
# TODO: This is a workaround for a bug. This function needs to return str objects right now.
return "%s" % html.render_a( HTML(value) , href="mailto:%s" % value)
else:
return value
class EmailAddressUnicode(TextUnicode, EmailAddress):
def __init__(self, **kwargs):
TextUnicode.__init__(self, **kwargs)
EmailAddress.__init__(self, **kwargs)
self._regex = re.compile(r'^[\w.%&+-]+@(localhost|[\w.-]+\.[\w]{2,24})$', re.I | re.UNICODE)
def validate_value(self, value, varprefix):
TextUnicode.validate_value(self, value, varprefix)
EmailAddress.validate_value(self, value, varprefix)
ValueSpec.custom_validate(self, value, varprefix)
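# Illustrative usage sketch for EmailAddress (comment only; the address is
# a made-up example):
#
#   vs = EmailAddress(make_clickable=True)
#   vs.validate_value("user@example.com", "mail")  # passes
#   vs.value_to_text("user@example.com")           # renders a mailto: link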
# Same as IPv4Network, but allowing both IPv4 and IPv6
class IPNetwork(TextAscii):
def __init__(self, **kwargs):
kwargs.setdefault("size", 34)
super(IPNetwork, self).__init__(**kwargs)
def _ip_class(self):
return ipaddress.ip_interface
def validate_value(self, value, varprefix):
super(IPNetwork, self).validate_value(value, varprefix)
try:
self._ip_class()(value.decode("utf-8"))
except ValueError, e:
raise MKUserError(varprefix, _("Invalid address: %s") % e)
# Network as used in routing configuration, such as
# "10.0.0.0/8" or "192.168.56.1"
class IPv4Network(IPNetwork):
def __init__(self, **kwargs):
kwargs.setdefault("size", 18)
super(IPv4Network, self).__init__(**kwargs)
def _ip_class(self):
return ipaddress.IPv4Interface
class IPv4Address(IPNetwork):
def __init__(self, **kwargs):
kwargs.setdefault("size", 16)
super(IPv4Address, self).__init__(**kwargs)
def _ip_class(self):
return ipaddress.IPv4Address
class TextAsciiAutocomplete(TextAscii):
def __init__(self, completion_ident, completion_params, **kwargs):
kwargs["onkeyup"] = "vs_autocomplete(this, %s, %s, %s);%s" % \
(json.dumps(completion_ident),
json.dumps(completion_params),
json.dumps(kwargs.get("onkeyup")),
kwargs.get("onkeyup", ""))
kwargs["autocomplete"] = False
super(TextAsciiAutocomplete, self).__init__(**kwargs)
@classmethod
def idents(cls):
idents = {}
for type_class in cls.__subclasses__(): # pylint: disable=no-member
idents[type_class.ident] = type_class
return idents
@classmethod
def ajax_handler(cls):
ident = html.var("ident")
if not ident:
raise MKUserError("ident", _("You need to set the \"%s\" parameter.") % "ident")
if ident not in cls.idents():
raise MKUserError("ident", _("Invalid ident: %s") % ident)
raw_params = html.var("params")
if not raw_params:
raise MKUserError("params", _("You need to set the \"%s\" parameter.") % "params")
try:
params = json.loads(raw_params)
except ValueError, e: # Python 3: json.JSONDecodeError
raise MKUserError("params", _("Invalid parameters: %s") % e)
value = html.var("value")
if value is None:
raise MKUserError("params", _("You need to set the \"%s\" parameter.") % "value")
result_data = cls.idents()[ident].autocomplete_choices(value, params)
# Check for correct result_data format
assert type(result_data) == list
if result_data:
assert type(result_data[0]) in [list, tuple]
assert len(result_data[0]) == 2
html.write(json.dumps(result_data))
# Renders an input field for entering a host name while providing
# an auto completion dropdown field
class MonitoredHostname(TextAsciiAutocomplete):
ident = "hostname"
def __init__(self, from_active_config=False, **kwargs):
super(MonitoredHostname, self).__init__("hostname", {
# Autocomplete from active config via livestatus or WATO world
"from_active_config": from_active_config,
}, **kwargs)
    # Called by the webservice with the current input field value and
    # the completion_params to get the list of choices
@classmethod
def autocomplete_choices(cls, value, params):
if params["from_active_config"]:
return cls._get_choices_via_livestatus(value)
else:
return cls._get_choices_via_wato(value)
@classmethod
def _get_choices_via_livestatus(cls, value):
import sites
query = (
"GET hosts\n"
"Columns: host_name\n"
"Filter: host_name ~~ %s" % livestatus.lqencode(value)
)
hosts = sorted(sites.live().query_column_unique(query))
return [ (h, h) for h in hosts ]
@classmethod
def _get_choices_via_wato(cls, value):
raise NotImplementedError()
# A host name with or without domain part. Also allow IP addresses
class Hostname(TextAscii):
def __init__(self, **kwargs):
TextAscii.__init__(self, **kwargs)
self._regex = re.compile('^[-0-9a-zA-Z_.]+$')
self._regex_error = _("Please enter a valid hostname or IPv4 address. "
"Only letters, digits, dash, underscore and dot are allowed.")
if "allow_empty" not in kwargs:
self._allow_empty = False
# Use this for all host / ip address input fields!
class HostAddress(TextAscii):
def __init__(self, **kwargs):
TextAscii.__init__(self, **kwargs)
self._allow_host_name = kwargs.get("allow_host_name", True)
self._allow_ipv4_address = kwargs.get("allow_ipv4_address", True)
self._allow_ipv6_address = kwargs.get("allow_ipv6_address", True)
def validate_value(self, value, varprefix):
if value and self._allow_host_name and self._is_valid_host_name(value):
pass
elif value and self._allow_ipv4_address and self._is_valid_ipv4_address(value):
pass
elif value and self._allow_ipv6_address and self._is_valid_ipv6_address(value):
pass
elif not self._allow_empty:
raise MKUserError(varprefix, _("Invalid host address. You need to specify the address "
"either as %s.") % ", ".join(self._allowed_type_names()))
ValueSpec.custom_validate(self, value, varprefix)
def _is_valid_host_name(self, hostname):
# http://stackoverflow.com/questions/2532053/validate-a-hostname-string/2532344#2532344
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1] # strip exactly one dot from the right, if present
        # Must not be all-numeric, so that it can't be confused with an IPv4 address.
        # Host names may start with numbers (RFC 1123 section 2.1), but the final part
        # never can, since TLDs are alphabetic.
if re.match(r"[\d.]+$", hostname):
return False
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
def _is_valid_ipv4_address(self, address):
# http://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python/4017219#4017219
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
except socket.error: # not a valid address
return False
return True
def _is_valid_ipv6_address(self, address):
# http://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python/4017219#4017219
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error: # not a valid address
return False
return True
def _allowed_type_names(self):
allowed = []
if self._allow_host_name:
allowed.append(_("Host- or DNS name"))
if self._allow_ipv4_address:
allowed.append(_("IPv4 address"))
if self._allow_ipv6_address:
allowed.append(_("IPv6 address"))
return allowed
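# Illustrative usage sketch for HostAddress (comment only):
#
#   vs = HostAddress(allow_ipv6_address=False, allow_empty=False)
#   vs.validate_value("10.1.2.3", "addr")  # passes: valid IPv4 address
#   vs.validate_value("fe80::1", "addr")   # raises: IPv6 is not allowed here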
class AbsoluteDirname(TextAscii):
def __init__(self, **kwargs):
TextAscii.__init__(self, **kwargs)
self._regex = re.compile('^(/|(/[^/]+)+)$')
        self._regex_error = _("Please enter a valid absolute pathname with / as a path separator.")
# Valuespec for an HTTP or HTTPS URL that
# automatically adds http:// to the value if no protocol has
# been specified
class HTTPUrl(TextAscii):
def __init__(self, **kwargs):
kwargs.setdefault("size", 64)
TextAscii.__init__(self, **kwargs)
self._target = kwargs.get("target")
def validate_value(self, value, varprefix):
TextAscii.validate_value(self, value, varprefix)
if value:
if not value.startswith("http://") and not value.startswith("https://"):
raise MKUserError(varprefix, _("The URL must begin with http:// or https://"))
ValueSpec.custom_validate(self, value, varprefix)
def from_html_vars(self, varprefix):
value = TextAscii.from_html_vars(self, varprefix)
if value:
if not "://" in value:
value = "http://" + value
return value
def value_to_text(self, url):
if not url.startswith("http://") and not url.startswith("https://"):
url = "http://" + url
        try:
            parts = urlparse.urlparse(url)
            # Omit the path component (including a sole trailing "/")
            # if the URL does not contain any real path
            if parts.path in [ '', '/' ]:
                text = parts.netloc
            else:
                text = parts.netloc + parts.path
        except:
            text = url[7:]
        return html.render_a(text, href=url, target=self._target if self._target else None)
def CheckMKVersion(**args):
args = args.copy()
args["regex"] = "[0-9]+\.[0-9]+\.[0-9]+([bpi][0-9]+|i[0-9]+p[0-9]+)?$"
args["regex_error"] = _("This is not a valid Check_MK version number")
return TextAscii(**args)
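# Illustrative usage sketch for CheckMKVersion (comment only):
#
#   vs = CheckMKVersion()
#   vs.validate_value("1.4.0p19", "ver")  # passes
#   vs.validate_value("latest", "ver")    # raises: not a valid version number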
class TextAreaUnicode(TextUnicode):
def __init__(self, **kwargs):
TextUnicode.__init__(self, **kwargs)
self._cols = kwargs.get("cols", 60)
self._try_max_width = kwargs.get("try_max_width", False) # If set, uses calc(100%-10px)
self._rows = kwargs.get("rows", 20) # Allowed: "auto" -> Auto resizing
self._minrows = kwargs.get("minrows", 0) # Minimum number of initial rows when "auto"
self._monospaced = kwargs.get("monospaced", False) # select TT font
def value_to_text(self, value):
if self._monospaced:
# TODO: This is a workaround for a bug. This function needs to return str objects right now.
return "%s" % html.render_pre( HTML(value) , class_="ve_textarea")
else:
return html.attrencode(value).replace("\n", "<br>")
def render_input(self, varprefix, value):
self.classtype_info()
if value == None:
value = "" # should never happen, but avoids exception for invalid input
if self._rows == "auto":
func = 'valuespec_textarea_resize(this);'
attrs = { "onkeyup" : func, "onmousedown" : func, "onmouseup" : func, "onmouseout" : func }
if html.has_var(varprefix):
rows = len(html.var(varprefix).splitlines())
else:
rows = len(value.splitlines())
rows = max(rows, self._minrows)
else:
attrs = {}
rows = self._rows
if self._monospaced:
attrs["class"] = "tt"
html.text_area(varprefix, value, rows=rows, cols=self._cols,
attrs = attrs, try_max_width=self._try_max_width)
    # Overridden because we do not want to strip() here and remove '\r'
def from_html_vars(self, varprefix):
text = html.get_unicode_input(varprefix, "").replace('\r', '')
if text and not text.endswith("\n"):
text += "\n" # force newline at end
return text
# A variant of TextAscii() that validates a path to a filename that
# lies in an existing directory.
# TODO: Rename the valuespec here to ExistingFilename or something similar
class Filename(TextAscii):
def __init__(self, **kwargs):
TextAscii.__init__(self, **kwargs)
if "size" not in kwargs:
self._size = 60
if "default" in kwargs:
self._default_path = kwargs["default"]
else:
self._default_path = "/tmp/foo"
if "trans_func" in kwargs:
self._trans_func = kwargs["trans_func"]
else:
self._trans_func = None
def canonical_value(self):
return self._default_path
def validate_value(self, value, varprefix):
# The transformation function only changes the value for validation. This is
# usually a function which is later also used within the code which uses
# this variable to e.g. replace macros
if self._trans_func:
value = self._trans_func(value)
if len(value) == 0:
raise MKUserError(varprefix, _("Please enter a filename."))
if value[0] != "/":
raise MKUserError(varprefix, _("Sorry, only absolute filenames are allowed. "
"Your filename must begin with a slash."))
if value[-1] == "/":
raise MKUserError(varprefix, _("Your filename must not end with a slash."))
        directory = value.rsplit("/", 1)[0] or "/"  # a file directly below / has an empty prefix
        if not os.path.isdir(directory):
            raise MKUserError(varprefix, _("The directory %s does not exist or is not a directory.") % directory)
# Write permissions to the file cannot be checked here since we run with Apache
# permissions and the file might be created with Nagios permissions (on OMD this
# is the same, but for others not)
ValueSpec.custom_validate(self, value, varprefix)
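# Illustrative usage sketch for Filename (comment only; the paths are
# made-up examples):
#
#   vs = Filename(default="/var/log/messages")
#   vs.validate_value("/etc/hosts", "path")     # passes if /etc exists
#   vs.validate_value("relative/path", "path")  # raises: must start with a slash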
class ListOfStrings(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
if "valuespec" in kwargs:
self._valuespec = kwargs.get("valuespec")
elif "size" in kwargs:
self._valuespec = TextAscii(size=kwargs["size"])
else:
self._valuespec = TextAscii()
self._vertical = kwargs.get("orientation", "vertical") == "vertical"
self._allow_empty = kwargs.get("allow_empty", True)
self._empty_text = kwargs.get("empty_text", "")
self._max_entries = kwargs.get("max_entries")
self._separator = kwargs.get("separator", "") # in case of float
def help(self):
help_text = ValueSpec.help(self)
field_help = self._valuespec.help()
if help_text and field_help:
return help_text + " " + field_help
elif field_help:
return field_help
else:
return help_text
def render_input(self, vp, value):
self.classtype_info()
# Form already submitted?
if html.has_var(vp + "_0"):
value = self.from_html_vars(vp)
# Remove variables from URL, so that they do not appear
# in hidden_fields()
nr = 0
while html.has_var(vp + "_%d" % nr):
html.del_var(vp + "_%d" % nr)
nr += 1
class_ = ["listofstrings"]
if self._vertical:
class_.append("vertical")
else:
class_.append("horizontal")
html.open_div(id_=vp, class_=class_)
for nr, s in enumerate(value + [""]):
html.open_div()
self._valuespec.render_input(vp + "_%d" % nr, s)
            if not self._vertical and self._separator:
html.nbsp()
html.write(self._separator)
html.nbsp()
html.close_div()
html.close_div()
html.div('', style="clear:left;")
html.help(self.help())
html.javascript("list_of_strings_init('%s');" % vp)
def canonical_value(self):
return []
def value_to_text(self, value):
if not value:
return self._empty_text
if self._vertical:
# TODO: This is a workaround for a bug. This function needs to return str objects right now.
s = map(lambda v: html.render_tr(html.render_td(HTML(self._valuespec.value_to_text(v)))), value)
return "%s" % html.render_table(HTML().join(s))
else:
return ", ".join([ self._valuespec.value_to_text(v) for v in value ])
def from_html_vars(self, vp):
value = []
nr = 0
while True:
varname = vp + "_%d" % nr
if not html.has_var(varname):
break
if html.var(varname, "").strip():
value.append(self._valuespec.from_html_vars(varname))
nr += 1
return value
def validate_datatype(self, value, vp):
if type(value) != list:
raise MKUserError(vp, _("Expected data type is list, but your type is %s.") %
type_name(value))
for nr, s in enumerate(value):
self._valuespec.validate_datatype(s, vp + "_%d" % nr)
def validate_value(self, value, vp):
if len(value) == 0 and not self._allow_empty:
if self._empty_text:
msg = self._empty_text
else:
msg = _("Please specify at least one value")
raise MKUserError(vp + "_0", msg)
if self._max_entries != None and len(value) > self._max_entries:
raise MKUserError(vp + "_%d" % self._max_entries,
_("You can specify at most %d entries") % self._max_entries)
if self._valuespec:
for nr, s in enumerate(value):
self._valuespec.validate_value(s, vp + "_%d" % nr)
ValueSpec.custom_validate(self, value, vp)
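# Illustrative usage sketch for ListOfStrings (comment only):
#
#   vs = ListOfStrings(valuespec=TextAscii(size=12), allow_empty=False)
#   vs.validate_value(["a", "b"], "tags")  # passes
#   vs.validate_value([], "tags")          # raises: at least one value required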
class ListOfIntegers(ListOfStrings):
def __init__(self, **kwargs):
int_args = {}
for key in [ "minvalue", "maxvalue" ]:
if key in kwargs:
int_args[key] = kwargs[key]
int_args["display_format"] = "%s"
int_args["convfunc"] = lambda x: x if x == '' else utils.saveint(x)
int_args["minvalue"] = 17
int_args["default_value"] = 34
valuespec = Integer(**int_args)
kwargs["valuespec"] = valuespec
ListOfStrings.__init__(self, **kwargs)
# Generic list-of-valuespec ValueSpec with Javascript-based
# add/delete/move
class ListOf(ValueSpec):
def __init__(self, valuespec, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._valuespec = valuespec
self._magic = kwargs.get("magic", "@!@")
self._rowlabel = kwargs.get("row_label")
self._add_label = kwargs.get("add_label", _("Add new element"))
self._del_label = kwargs.get("del_label", _("Delete this entry"))
self._movable = kwargs.get("movable", True)
self._totext = kwargs.get("totext") # pattern with option %d
self._text_if_empty = kwargs.get("text_if_empty", _("No entries"))
self._allow_empty = kwargs.get("allow_empty", True)
self._empty_text = kwargs.get("empty_text") # complain text if empty
        # Makes a sort button visible that can be used to sort the list in the GUI
        # (without submitting the form). But this currently only works for lists of
        # tuples that contain input field elements directly. The value of sort_by
        # refers to the index of the sort values in the tuple
self._sort_by = kwargs.get("sort_by")
if not self._empty_text:
self._empty_text = _("Please specify at least one entry")
    # Implementation idea: we render our element-valuespec
    # once in a hidden div that is not evaluated. All occurrences
    # of a magic string are replaced with the actual number
    # of the entry, starting with 1 (this makes visual
    # numbering in labels, etc. possible). The current number
    # of entries is stored in the hidden variable 'varprefix'
def render_input(self, varprefix, value):
self.classtype_info()
# Beware: the 'value' is only the default value in case the form
# has not yet been filled in. In the complain phase we must
# ignore 'value' but reuse the input from the HTML variables -
# even if they are not syntactically correct. Calling from_html_vars
# here is *not* an option since this might not work in case of
# a wrong user input.
# Render reference element for cloning
self._show_reference_entry(varprefix, self._magic, self._valuespec.default_value())
# In the 'complain' phase, where the user already saved the
# form but the validation failed, we must not display the
# original 'value' but take the value from the HTML variables.
if html.has_var("%s_count" % varprefix):
filled_in = True
count = len(self.get_indexes(varprefix))
value = [None] * count # dummy for the loop
else:
filled_in = False
count = len(value)
html.hidden_field('%s_count' % varprefix,
str(count),
id = '%s_count' % varprefix,
add_var = True)
# Actual table of currently existing entries
self._show_current_entries(varprefix, value)
html.br()
html.jsbutton(varprefix + "_add", self._add_label,
"valuespec_listof_add('%s', '%s')" % (varprefix, self._magic))
if self._sort_by is not None:
html.jsbutton(varprefix + "_sort", _("Sort"),
"valuespec_listof_sort(%s, %s, %s)" %
(json.dumps(varprefix), json.dumps(self._magic), json.dumps(self._sort_by)))
html.javascript("valuespec_listof_fixarrows(document.getElementById('%s_table').childNodes[0]);" % varprefix)
def del_button(self, vp, nr):
js = "valuespec_listof_delete(this, '%s', '%s')" % (vp, nr)
html.icon_button("#", self._del_label, "delete", onclick=js)
def _show_reference_entry(self, varprefix, index, value):
html.open_table(id_="%s_prototype" % varprefix, style="display:none;")
self._show_entry(varprefix, index, value)
html.close_table()
def _show_current_entries(self, varprefix, value):
html.open_table(id_="%s_table" % varprefix, class_=["valuespec_listof"])
for nr, v in enumerate(value):
self._show_entry(varprefix, "%d" % (nr + 1), v)
html.close_table()
def _show_entry(self, varprefix, index, value):
html.open_tr()
html.open_td(class_="vlof_buttons")
html.hidden_field(varprefix + "_indexof_" + index, "",
add_var=True, class_="index") # reconstruct order after moving stuff
html.hidden_field(varprefix + "_orig_indexof_" + index, "",
add_var=True, class_="orig_index")
if self._movable:
html.element_dragger_js("tr", drop_handler="vs_listof_drop_handler",
handler_args={"cur_index": index, "varprefix": varprefix})
self.del_button(varprefix, index)
html.close_td()
html.open_td(class_="vlof_content")
self._valuespec.render_input(varprefix + "_" + index, value)
html.close_td()
html.close_tr()
def canonical_value(self):
return []
def value_to_text(self, value):
if self._totext:
if "%d" in self._totext:
return self._totext % len(value)
else:
return self._totext
elif not value:
return self._text_if_empty
else:
# TODO: This is a workaround for a bug. This function needs to return str objects right now.
s = map(lambda v: html.render_tr(html.render_td( HTML(self._valuespec.value_to_text(v)) )), value)
return "%s" % html.render_table(HTML().join(s))
def get_indexes(self, varprefix):
count = html.get_integer_input(varprefix + "_count", 0)
n = 1
indexes = {}
while n <= count:
indexof = html.var(varprefix + "_indexof_%d" % n)
# for deleted entries, we have removed the whole row, therefore indexof is None
if indexof != None:
indexes[int(indexof)] = n
n += 1
return indexes
def from_html_vars(self, varprefix):
indexes = self.get_indexes(varprefix)
value = []
k = indexes.keys()
k.sort()
for i in k:
val = self._valuespec.from_html_vars(varprefix + "_%d" % indexes[i])
value.append(val)
return value
def validate_datatype(self, value, varprefix):
if type(value) != list:
raise MKUserError(varprefix, _("The type must be list, but is %s") % type_name(value))
for n, v in enumerate(value):
self._valuespec.validate_datatype(v, varprefix + "_%d" % (n+1))
def validate_value(self, value, varprefix):
if not self._allow_empty and len(value) == 0:
raise MKUserError(varprefix, self._empty_text)
for n, v in enumerate(value):
self._valuespec.validate_value(v, varprefix + "_%d" % (n+1))
ValueSpec.custom_validate(self, value, varprefix)
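# Illustrative usage sketch for ListOf (comment only):
#
#   vs = ListOf(TextAscii(), add_label=_("Add pattern"), allow_empty=False)
#   vs.validate_datatype(["foo", "bar"], "pats")  # passes
#   vs.validate_datatype("foo", "pats")           # raises: value must be a list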
# A generic valuespec where the user can choose from a list of sub-valuespecs.
# Each sub-valuespec can be added only once
class ListOfMultiple(ValueSpec):
def __init__(self, choices, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._choices = choices
self._choice_dict = dict(choices)
self._size = kwargs.get("size")
self._add_label = kwargs.get("add_label", _("Add element"))
self._del_label = kwargs.get("del_label", _("Delete this entry"))
self._delete_style = kwargs.get("delete_style", "default") # or "filter"
def del_button(self, varprefix, ident):
js = "vs_listofmultiple_del('%s', '%s')" % (varprefix, ident)
html.icon_button("#", self._del_label, "delete", onclick=js)
def render_input(self, varprefix, value):
self.classtype_info()
# Beware: the 'value' is only the default value in case the form
# has not yet been filled in. In the complain phase we must
# ignore 'value' but reuse the input from the HTML variables -
# even if they are not syntactically correct. Calling from_html_vars
# here is *not* an option since this might not work in case of
# a wrong user input.
# Special styling for filters
extra_css = "filter" if self._delete_style == "filter" else None
# In the 'complain' phase, where the user already saved the
# form but the validation failed, we must not display the
# original 'value' but take the value from the HTML variables.
if html.var("%s_active" % varprefix):
value = self.from_html_vars(varprefix)
# Save all selected items
html.hidden_field('%s_active' % varprefix,
';'.join([ k for k in value.keys() if k in self._choice_dict]),
id = '%s_active' % varprefix, add_var = True)
# Actual table of currently existing entries
html.open_table(id_="%s_table" % varprefix, class_=["valuespec_listof", extra_css])
def render_content():
html.open_td( class_=["vlof_content", extra_css])
vs.render_input(prefix, value.get(ident))
html.close_td()
def render_del():
html.open_td(class_=["vlof_buttons", extra_css])
self.del_button(varprefix, ident)
html.close_td()
for ident, vs in self._choices:
cls = 'unused' if ident not in value else ''
prefix = varprefix + '_' + ident
html.open_tr(id_="%s_row" % prefix, class_=cls)
if self._delete_style == "filter":
render_content()
render_del()
else:
render_del()
render_content()
html.close_tr()
html.close_table()
html.br()
choices = [('', '')] + [ (ident, vs.title()) for ident, vs in self._choices ]
html.dropdown(varprefix + '_choice', choices,
style="width: %dex" % self._size if self._size is not None else None,
class_="vlof_filter" if self._delete_style == "filter" else None)
html.javascript('vs_listofmultiple_init(\'%s\');' % varprefix)
html.jsbutton(varprefix + '_add', self._add_label, "vs_listofmultiple_add('%s')" % varprefix)
def canonical_value(self):
return {}
def value_to_text(self, value):
table_content = HTML()
        for ident, val in value.items():
vs = self._choice_dict[ident]
# TODO: This is a workaround for a bug. This function needs to return str objects right now.
table_content += html.render_tr(html.render_td(vs.title())\
+ html.render_td( HTML(vs.value_to_text(val)) ))
return "%s" % html.render_table(table_content)
def from_html_vars(self, varprefix):
value = {}
active = html.var('%s_active' % varprefix).strip()
if not active:
return value
for ident in active.split(';'):
vs = self._choice_dict[ident]
value[ident] = vs.from_html_vars(varprefix + '_' + ident)
return value
def validate_datatype(self, value, varprefix):
if type(value) != dict:
raise MKUserError(varprefix, _("The type must be dict, but is %s") % type_name(value))
for ident, val in value.items():
self._choice_dict[ident].validate_datatype(val, varprefix + '_' + ident)
def validate_value(self, value, varprefix):
for ident, val in value.items():
self._choice_dict[ident].validate_value(val, varprefix + '_' + ident)
ValueSpec.custom_validate(self, value, varprefix)
# Same as Integer, but for floating point values
class Float(Integer):
def __init__(self, **kwargs):
Integer.__init__(self, **kwargs)
self._decimal_separator = kwargs.get("decimal_separator", ".")
self._display_format = kwargs.get("display_format", "%.2f")
self._allow_int = kwargs.get("allow_int", False)
def render_input(self, varprefix, value):
self.classtype_info()
Integer.render_input(self, varprefix, value, convfunc = utils.savefloat)
def canonical_value(self):
return float(Integer.canonical_value(self))
def value_to_text(self, value):
return Integer.value_to_text(self, value).replace(".", self._decimal_separator)
def from_html_vars(self, varprefix):
try:
return float(html.var(varprefix))
except:
raise MKUserError(varprefix,
_("The text <b><tt>%s</tt></b> is not a valid floating point number.") % html.var(varprefix))
def validate_datatype(self, value, varprefix):
if type(value) == float:
return
if type(value) in [ int, long ] and self._allow_int:
return
raise MKUserError(varprefix, _("The value %r has type %s, but must be of type float%s") %
(value, type_name(value), _(" or int") if self._allow_int else ''))
class Percentage(Float):
def __init__(self, **kwargs):
Float.__init__(self, **kwargs)
if "minvalue" not in kwargs:
self._minvalue = 0.0
if "maxvalue" not in kwargs:
self._maxvalue = 101.0
if "unit" not in kwargs:
self._unit = "%"
if "display_format" not in kwargs:
self._display_format = "%.1f"
self._allow_int = kwargs.get("allow_int", False)
def value_to_text(self, value):
return (self._display_format + "%%") % value
def validate_datatype(self, value, varprefix):
if self._allow_int:
if type(value) not in [ int, float ]:
raise MKUserError(varprefix, _("The value %r has type %s, but must be either float or int")
% (value, type_name(value)))
else:
Float.validate_datatype(self, value, varprefix)
class Checkbox(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._label = kwargs.get("label")
self._true_label = kwargs.get("true_label", _("on"))
self._false_label = kwargs.get("false_label", _("off"))
self._onclick = kwargs.get("onclick")
def canonical_value(self):
return False
def render_input(self, varprefix, value):
self.classtype_info()
html.checkbox(varprefix, value, label=self._label, onclick=self._onclick)
def value_to_text(self, value):
return self._true_label if value else self._false_label
def from_html_vars(self, varprefix):
if html.var(varprefix):
return True
else:
return False
def validate_datatype(self, value, varprefix):
if type(value) != bool:
raise MKUserError(varprefix, _("The value %r has type %s, but must be of type bool") %
(value, type_name(value)))
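# Illustrative usage sketch for Checkbox (comment only):
#
#   vs = Checkbox(title=_("Enabled"), true_label=_("yes"), false_label=_("no"))
#   vs.value_to_text(True)    # -> "yes"
#   vs.from_html_vars("opt")  # True if the HTML variable "opt" is set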
# A type-safe dropdown choice. Parameters:
# help_separator: if you set this to a character, e.g. "-", then
# value_to_text will omit texts from the character up to the end of
# a choice's name.
# Note: The list of choices may contain 2-tuples or 3-tuples.
# The format is (value, text {, icon} )
# choices may also be a function that returns - when called
# without arguments - such a tuple list. That way the choices
# can be computed dynamically.
class DropdownChoice(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._choices = kwargs["choices"]
self._help_separator = kwargs.get("help_separator")
self._label = kwargs.get("label")
self._prefix_values = kwargs.get("prefix_values", False)
self._sorted = kwargs.get("sorted", False)
self._empty_text = kwargs.get("empty_text", _("There are no elements defined for this selection yet."))
self._invalid_choice = kwargs.get("invalid_choice", "complain") # also possible: "replace"
self._invalid_choice_title = kwargs.get("invalid_choice_title", _("Element '%r' does not exist anymore"))
        self._invalid_choice_error = kwargs.get("invalid_choice_error",
            _("The selected element is no longer available. Please select something else."))
self._no_preselect = kwargs.get("no_preselect", False)
self._no_preselect_value = kwargs.get("no_preselect_value", None)
self._no_preselect_title = kwargs.get("no_preselect_title", "") # if not preselected
self._no_preselect_error = kwargs.get("no_preselect_error", _("Please make a selection"))
self._on_change = kwargs.get("on_change")
self._read_only = kwargs.get("read_only", False)
def choices(self):
result = []
if type(self._choices) == list:
result = self._choices
elif type(self._choices) == dict:
result = ListChoice.dict_choices(self._choices)
else:
result = self._choices()
if self._no_preselect:
return [(self._no_preselect_value, self._no_preselect_title)] + result
else:
return result
def canonical_value(self):
choices = self.choices()
if len(choices) > 0:
return choices[0][0]
else:
return None
def render_input(self, varprefix, value):
self.classtype_info()
if self._label:
html.write("%s " % self._label)
choices = self.choices()
defval = choices[0][0] if choices else None
options = []
for entry in self.choices():
if self._prefix_values:
entry = (entry[0], "%s - %s" % entry)
options.append(entry)
if entry[0] == value:
defval = entry[0]
# In complain mode: Use the value received from the HTML variable
if self._invalid_choice == "complain" and value != None and self._value_is_invalid(value):
defval = value
options.append((defval, self._get_invalid_choice_title(value)))
if value == None and not options:
html.write(self._empty_text)
return
if len(options) == 0:
html.write(self._empty_text)
elif len(options[0]) == 3:
html.icon_dropdown(varprefix, self._options_for_html(options),
deflt=self.option_id(defval))
else:
html.dropdown(varprefix, self._options_for_html(options),
deflt=self.option_id(defval),
onchange=self._on_change,
sorted=self._sorted,
read_only=self._read_only)
def _get_invalid_choice_title(self, value):
if "%s" in self._invalid_choice_title or "%r" in self._invalid_choice_title:
return self._invalid_choice_title % (value,)
else:
return self._invalid_choice_title
def value_to_text(self, value):
for entry in self.choices():
val, title = entry[:2]
if value == val:
if self._help_separator:
return html.attrencode(title.split(self._help_separator, 1)[0].strip())
else:
return html.attrencode(title)
return html.attrencode(self._get_invalid_choice_title(value))
def from_html_vars(self, varprefix):
choices = self.choices()
for n, entry in enumerate(choices):
val, title = entry[:2]
if self._is_selected_option_from_html(varprefix, val):
return val
if self._invalid_choice == "replace":
return self.default_value() # garbled URL or len(choices) == 0
else:
raise MKUserError(varprefix, self._invalid_choice_error)
def _is_selected_option_from_html(self, varprefix, val):
selected_value = html.var(varprefix)
return selected_value == self.option_id(val)
def _options_for_html(self, orig_options):
options = []
for val, title in orig_options:
options.append((self.option_id(val), title))
return options
@staticmethod
def option_id(val):
return "%s" % hashlib.sha256(repr(val)).hexdigest()
def validate_value(self, value, varprefix):
if self._no_preselect and value == self._no_preselect_value:
raise MKUserError(varprefix, self._no_preselect_error)
if self._invalid_choice == "complain" and self._value_is_invalid(value):
if value != None:
raise MKUserError(varprefix, self._invalid_choice_error)
else:
raise MKUserError(varprefix, _("There is no element available to choose from."))
ValueSpec.custom_validate(self, value, varprefix)
def validate_datatype(self, value, varprefix):
choices = self.choices()
if not choices and value == None:
return
for val, title in choices:
if val == value:
return
#raise MKUserError(varprefix, _("Invalid value %s, must be in %s") %
# (value, ", ".join([v for (v,t) in choices])))
def _value_is_invalid(self, value):
for entry in self.choices():
if entry[0] == value:
return False
return True
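# Illustrative usage sketch for DropdownChoice (comment only; the choices
# are made-up examples):
#
#   vs = DropdownChoice(choices=[(1, _("one")), (2, _("two"))], default_value=2)
#   vs.value_to_text(1)  # -> "one"
#   vs.value_to_text(3)  # -> the invalid_choice_title, e.g. "Element '3' does not exist anymore"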
# Special convenience variant for monitoring states
# TODO: Rename to ServiceState() or something like this
class MonitoringState(DropdownChoice):
def __init__(self, **kwargs):
choices = [ ( 0, _("OK")),
( 1, _("WARN")),
( 2, _("CRIT")),
( 3, _("UNKNOWN")) ]
kwargs.setdefault("default_value", 0)
DropdownChoice.__init__(self, choices=choices, **kwargs)
class HostState(DropdownChoice):
def __init__(self, **kwargs):
choices = [
( 0, _("UP")),
( 1, _("DOWN")),
( 2, _("UNREACHABLE")),
]
kwargs.setdefault("default_value", 0)
DropdownChoice.__init__(self, choices=choices, **kwargs)
# A Dropdown choice where the elements are ValueSpecs.
# The currently selected ValueSpec will be displayed.
# The text representations of the ValueSpecs will be used as texts.
# A ValueSpec of None is also allowed and will return
# the value None. It is also allowed to leave out the
# value spec for some of the choices (which is the same as
# using None).
# The resulting value is either a single value (if no
# value spec is defined for the selected entry) or a pair
# of (x, y) where x is the value of the selected entry and
# y is the value of the valuespec assigned to that entry.
# choices is a list of triples: [ ( value, title, vs ), ... ]
class CascadingDropdown(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
if type(kwargs["choices"]) == list:
self._choices = self.normalize_choices(kwargs["choices"])
else:
self._choices = kwargs["choices"] # function, store for later
self._separator = kwargs.get("separator", ", ")
self._sorted = kwargs.get("sorted", True)
self._orientation = kwargs.get("orientation", "vertical") # or horizontal
if kwargs.get("encoding", "tuple") == "list":
self._encoding_type = list
else:
self._encoding_type = tuple
self._no_elements_text = kwargs.get("no_elements_text",
_("There are no elements defined for this selection"))
self._no_preselect = kwargs.get("no_preselect", False)
self._no_preselect_value = kwargs.get("no_preselect_value", None)
self._no_preselect_title = kwargs.get("no_preselect_title", "") # if not preselected
self._no_preselect_error = kwargs.get("no_preselect_error", _("Please make a selection"))
def normalize_choices(self, choices):
new_choices = []
for entry in choices:
if len(entry) == 2: # plain entry with no sub-valuespec
                entry = entry + (None,) # normalize to three entries
new_choices.append(entry)
return new_choices
def choices(self):
if type(self._choices) == list:
result = self._choices
else:
result = self.normalize_choices(self._choices())
if self._no_preselect:
result = [(self._no_preselect_value, self._no_preselect_title, None)] \
+ result
return result
def canonical_value(self):
choices = self.choices()
if not choices:
return None
if choices[0][2]:
return self._encoding_type((choices[0][0], choices[0][2].canonical_value()))
else:
return choices[0][0]
def default_value(self):
try:
return self._default_value
except:
choices = self.choices()
if not choices:
return None
if choices[0][2]:
return self._encoding_type((choices[0][0], choices[0][2].default_value()))
else:
return choices[0][0]
def render_input(self, varprefix, value):
self.classtype_info()
def_val = '0'
options = []
choices = self.choices()
if not choices:
html.write(self._no_elements_text)
return
for nr, (val, title, vs) in enumerate(choices):
options.append((str(nr), title))
            # Determine the default value for the select, so that
            # the dropdown pre-selects the line corresponding with value.
            # Note: html.dropdown() will automatically show the modified
            # selection if the HTML variable varprefix_sel already
            # exists.
if value == val or (
type(value) == self._encoding_type and value[0] == val):
def_val = str(nr)
vp = varprefix + "_sel"
onchange="valuespec_cascading_change(this, '%s', %d);" % (varprefix, len(choices))
html.dropdown(vp, options, deflt=def_val, onchange=onchange, sorted=self._sorted)
# make sure, that the visibility is done correctly, in both
# cases:
# 1. Form painted for the first time (no submission yet, vp missing in URL)
# 2. Form already submitted -> honor URL variable vp for visibility
cur_val = html.var(vp)
if self._orientation == "vertical":
html.br()
else:
html.nbsp()
for nr, (val, title, vs) in enumerate(choices):
if vs:
vp = varprefix + "_%d" % nr
# Form already submitted once (and probably in complain state)
if cur_val != None:
try:
def_val_2 = vs.from_html_vars(vp)
except MKUserError:
def_val_2 = vs.default_value()
if cur_val == str(nr):
disp = ""
else:
disp = "none"
else: # form painted the first time
if value == val \
or (type(value) == self._encoding_type and value[0] == val):
if type(value) == self._encoding_type:
def_val_2 = value[1]
else:
def_val_2 = vs.default_value()
disp = ""
else:
def_val_2 = vs.default_value()
disp = "none"
html.open_span(id_="%s_%s_sub" % (varprefix, nr), style="display:%s;" % disp)
html.help(vs.help())
vs.render_input(vp, def_val_2)
html.close_span()
def value_to_text(self, value):
choices = self.choices()
for val, title, vs in choices:
if (vs and value and value[0] == val) or \
(value == val):
if not vs:
return title
else:
return title + self._separator + \
vs.value_to_text(value[1])
return "" # Nothing selected? Should never happen
def from_html_vars(self, varprefix):
choices = self.choices()
# No choices and "no elements text" is shown: The html var is
# not present and no choice can be made. So fallback to default
# value and let the validation methods lead to an error message.
if not choices:
return self.default_value()
try:
sel = int(html.var(varprefix + "_sel"))
except:
sel = 0
val, title, vs = choices[sel]
if vs:
val = self._encoding_type((val, vs.from_html_vars(varprefix + "_%d" % sel)))
return val
def validate_datatype(self, value, varprefix):
choices = self.choices()
for nr, (val, title, vs) in enumerate(choices):
if value == val or (
type(value) == self._encoding_type and value[0] == val):
if vs:
if type(value) != self._encoding_type or len(value) != 2:
raise MKUserError(varprefix + "_sel",
_("Value must be a %s with two elements.") % self._encoding_type.__name__)
vs.validate_datatype(value[1], varprefix + "_%d" % nr)
return
raise MKUserError(varprefix, _("Value %r is not allowed here.") % value)
def validate_value(self, value, varprefix):
if self._no_preselect and value == self._no_preselect_value:
raise MKUserError(varprefix, self._no_preselect_error)
choices = self.choices()
for nr, (val, title, vs) in enumerate(choices):
if value == val or (
type(value) == self._encoding_type and value[0] == val):
if vs:
vs.validate_value(value[1], varprefix + "_%d" % nr)
ValueSpec.custom_validate(self, value, varprefix)
return
raise MKUserError(varprefix, _("Value %r is not allowed here.") % (value, ))
# The same logic as the dropdown choice, but rendered
# as a group of radio buttons.
# columns == None or unset -> entries are separated with a non-breaking space
class RadioChoice(DropdownChoice):
def __init__(self, **kwargs):
DropdownChoice.__init__(self, **kwargs)
self._columns = kwargs.get("columns")
# Allow orientation as corner cases of columns
orientation = kwargs.get("orientation")
if orientation == "vertical":
self._columns = 1
elif orientation == "horizontal":
self._columns = 9999999
def render_input(self, varprefix, value):
self.classtype_info()
html.begin_radio_group()
if self._columns != None:
html.open_table(class_=["radiochoice"])
html.open_tr()
if self._sorted:
choices = self._choices[:]
choices.sort(cmp=lambda a,b: cmp(a[1], b[1]))
else:
choices = self._choices
for index, entry in enumerate(choices):
if self._columns != None:
html.open_td()
if len(entry) > 2 and entry[2] != None: # icon!
label = html.render_icon(entry[2], entry[1])
else:
label = entry[1]
html.radiobutton(varprefix, self.option_id(entry[0]), value == entry[0], label)
if len(entry) > 3 and entry[3]:
html.open_p()
html.write(entry[3])
html.close_p()
if self._columns != None:
html.close_td()
if (index+1) % self._columns == 0 and (index+1) < len(self._choices):
html.tr('')
else:
html.nbsp()
if self._columns != None:
mod = len(self._choices) % self._columns
if mod:
for td_counter in range(self._columns - mod - 1):
html.td('')
html.close_tr()
html.close_table()
html.end_radio_group()
# A list of checkboxes representing a list of values
class ListChoice(ValueSpec):
@staticmethod
def dict_choices(types):
return [ ("%s" % type_id, "%d - %s" % (type_id, type_name))
for (type_id, type_name) in sorted(types.items()) ]
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._choices = kwargs.get("choices")
self._columns = kwargs.get("columns", 1)
self._allow_empty = kwargs.get("allow_empty", True)
self._empty_text = kwargs.get("empty_text", _("(nothing selected)"))
self._loaded_at = None
self._render_function = kwargs.get("render_function",
lambda id, val: val)
self._toggle_all = kwargs.get("toggle_all", False)
self._render_orientation = kwargs.get("render_orientation", "horizontal") # other: vertical
self._no_elements_text = kwargs.get("no_elements_text",
_("There are no elements defined for this selection"))
    # Load the elements. Subclasses with dynamic elements overload get_elements()
def load_elements(self):
if self._choices != None:
if type(self._choices) == list:
self._elements = self._choices
elif type(self._choices) == dict:
self._elements = ListChoice.dict_choices(self._choices)
else:
self._elements = self._choices()
return
if self._loaded_at != id(html):
self._elements = self.get_elements()
self._loaded_at = id(html) # unique for each query!
def get_elements(self):
raise NotImplementedError()
def canonical_value(self):
return []
def _draw_listchoice(self, varprefix, value, elements, columns, toggle_all):
if self._toggle_all:
html.a(_("Check / Uncheck all"), href="javascript:vs_list_choice_toggle_all('%s')" % varprefix)
html.open_table(id_="%s_tbl" % varprefix, class_=["listchoice"])
for nr, (key, title) in enumerate(elements):
if nr % self._columns == 0:
if nr > 0:
html.close_tr()
html.open_tr()
html.open_td()
html.checkbox("%s_%d" % (varprefix, nr), key in value, label=title)
html.close_td()
html.close_tr()
html.close_table()
def render_input(self, varprefix, value):
self.classtype_info()
self.load_elements()
if not self._elements:
html.write(self._no_elements_text)
return
self._draw_listchoice(varprefix, value, self._elements, self._columns, self._toggle_all)
# Make sure that at least one variable with the prefix is present
html.hidden_field(varprefix, "1", add_var=True)
def value_to_text(self, value):
if not value:
return self._empty_text
self.load_elements()
d = dict(self._elements)
texts = [ self._render_function(v, d.get(v,v)) for v in value ]
if self._render_orientation == "horizontal":
return ", ".join(texts)
else:
# TODO: This is a workaround for a bug. This function needs to return str objects right now.
return "%s" % html.render_table(html.render_tr(html.render_td(html.render_br().join( map(lambda x: HTML(x), texts) ))))
def from_html_vars(self, varprefix):
self.load_elements()
value = []
for nr, (key, title) in enumerate(self._elements):
if html.get_checkbox("%s_%d" % (varprefix, nr)):
value.append(key)
return value
def validate_datatype(self, value, varprefix):
self.load_elements()
if type(value) != list:
raise MKUserError(varprefix, _("The datatype must be list, but is %s") % type_name(value))
for v in value:
if self._value_is_invalid(v):
raise MKUserError(varprefix, _("%s is not an allowed value") % v)
def validate_value(self, value, varprefix):
if not self._allow_empty and not value:
raise MKUserError(varprefix, _('You have to select at least one element.'))
ValueSpec.custom_validate(self, value, varprefix)
def _value_is_invalid(self, value):
d = dict(self._elements)
return value not in d
# An alternative way of editing list choices
class MultiSelect(ListChoice):
def __init__(self, **kwargs):
ListChoice.__init__(self, **kwargs)
def _render_field(self, name, choices, selected=None):
if selected is None:
selected = []
html.open_select(multiple="", name=name)
for key, title in choices:
html.option(title, value=key, selected='' if key in selected else None)
html.close_select()
def render_input(self, varprefix, value):
self.classtype_info()
self.load_elements()
self._render_field(varprefix, self._elements, value)
def from_html_vars(self, varprefix):
self.load_elements()
value = []
hv = html.list_var(varprefix)
for key, title in self._elements:
if key in hv:
value.append(key)
return value
# Implements a choice of items which is realized with
# two ListChoice select fields. One contains all available
# items and one contains all selected items.
# Optionally the user can influence the order of
# the entries by simply clicking them in a certain order.
# If that feature is not being used, then the original order
# of the elements is always kept.
# TODO: Beware: the keys in this choice are not type safe.
# They can only be strings. They must not contain | or other
# dangerous characters. We should fix this and make it
# compatible to DropdownChoice()
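# Illustrative sketch (hypothetical values):
#
#   vs = DualListChoice(choices=[("a", "Alpha"), ("b", "Beta")],
#                       custom_order=True, rows=10)
#
# The current selection is transported in a hidden field as "a|b", which is
# why the keys must not contain the | character (see TODO above).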
class DualListChoice(ListChoice):
def __init__(self, **kwargs):
super(DualListChoice, self).__init__(**kwargs)
self._autoheight = kwargs.get("autoheight", False)
self._custom_order = kwargs.get("custom_order", False)
self._instant_add = kwargs.get("instant_add", False)
self._enlarge_active = kwargs.get("enlarge_active", False)
if "rows" in kwargs:
self._rows = kwargs.get("rows", 5)
self._autoheight = False
else:
self._rows = 5
self._size = kwargs.get("size") # Total width in ex
def render_input(self, varprefix, value):
self.classtype_info()
self.load_elements()
if not self._elements:
html.write_text(_("There are no elements for selection."))
return
# Use values from HTTP request in complain mode
if value is None:
value = self.from_html_vars(varprefix)
selected = []
unselected = []
if self._custom_order:
edict = dict(self._elements)
allowed_keys = edict.keys()
for v in value:
if v in allowed_keys:
selected.append((v, edict[v]))
for v, name in self._elements:
if v not in value:
unselected.append((v, edict[v]))
else:
for e in self._elements:
if e[0] in value:
selected.append(e)
else:
unselected.append(e)
select_func = 'vs_duallist_switch(\'unselected\', \'%s\', %d);' % (varprefix, 1 if self._custom_order else 0)
unselect_func = 'vs_duallist_switch(\'selected\', \'%s\', 1);' % varprefix
html.open_table(class_=["vs_duallist"], style = "width: %dpx;" % (self._size * 6.4) if self._size else None)
html.open_tr()
html.open_td(class_="head")
html.write_text(_('Available'))
if not self._instant_add:
html.a(">", href="javascript:%s;" % select_func, class_=["control", "add"])
html.close_td()
html.open_td(class_="head")
html.write_text(_('Selected'))
if not self._instant_add:
html.a("<", href="javascript:%s;" % unselect_func, class_=["control", "del"])
html.close_td()
html.close_tr()
onchange_unselected = select_func if self._instant_add else ''
onchange_selected = unselect_func if self._instant_add else ''
if self._enlarge_active:
onchange_selected = 'vs_duallist_enlarge(\'selected\', \'%s\');' % varprefix
onchange_unselected = 'vs_duallist_enlarge(\'unselected\', \'%s\');' % varprefix
attrs = {
'multiple' : 'multiple',
'style' : 'height:auto' if self._autoheight else "height: %dpx" % (self._rows * 16),
'ondblclick' : select_func if not self._instant_add else '',
}
html.open_tr()
html.open_td()
attrs["onchange"] = onchange_unselected
html.multi_select(varprefix + '_unselected', unselected, deflt='', sorted=self._custom_order, **attrs)
html.close_td()
html.open_td()
attrs["onchange"] = onchange_selected
html.multi_select(varprefix + '_selected', selected, deflt='', sorted=self._custom_order, **attrs)
html.close_td()
html.close_tr()
html.close_table()
html.hidden_field(varprefix, '|'.join([k for k, v in selected]), id = varprefix, add_var = True)
def validate_value(self, value, varprefix):
try:
ListChoice.validate_value(self, value, varprefix)
except MKUserError, e:
raise MKUserError(e.varname + "_selected", e.message)
def from_html_vars(self, varprefix):
self.load_elements()
selected = html.var(varprefix, '').split('|')
value = []
if self._custom_order:
edict = dict(self._elements)
allowed_keys = edict.keys()
for v in selected:
if v in allowed_keys:
value.append(v)
else:
for key, title in self._elements:
if key in selected:
value.append(key)
return value
# A type-safe dropdown choice with one extra field that
# opens a further value spec for entering an alternative
# value.
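# Illustrative sketch (hypothetical values):
#
#   vs = OptionalDropdownChoice(choices=[(10, "ten"), (20, "twenty")],
#                               explicit=Integer(),
#                               otherlabel=_("other value"))
#   vs.value_to_text(20)    # -> "twenty"
#   vs.value_to_text(42)    # not in choices -> Integer().value_to_text(42)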
class OptionalDropdownChoice(DropdownChoice):
def __init__(self, **kwargs):
DropdownChoice.__init__(self, **kwargs)
self._explicit = kwargs["explicit"]
self._otherlabel = kwargs.get("otherlabel", _("Other"))
def canonical_value(self):
return self._explicit.canonical_value()
def value_is_explicit(self, value):
return value not in [ c[0] for c in self.choices() ]
def render_input(self, varprefix, value):
self.classtype_info()
defval = "other"
options = []
for n, (val, title) in enumerate(self.choices()):
options.append((str(n), title))
if val == value:
defval = str(n)
if self._sorted:
options.sort(cmp = lambda a,b: cmp(a[1], b[1]))
options.append(("other", self._otherlabel))
html.dropdown(varprefix, options, deflt=defval, # style="float:left;",
onchange="valuespec_toggle_dropdown(this, '%s_ex');" % varprefix)
if html.has_var(varprefix):
div_is_open = html.var(varprefix) == "other"
else:
div_is_open = self.value_is_explicit(value)
html.open_span(id_="%s_ex" % varprefix, style=["white-space: nowrap;", None if div_is_open else "display:none;"])
html.nbsp()
if defval == "other":
input_value = value
else:
input_value = self._explicit.default_value()
html.help(self._explicit.help())
self._explicit.render_input(varprefix + "_ex", input_value)
html.close_span()
def value_to_text(self, value):
for val, title in self.choices():
if val == value:
return title
return self._explicit.value_to_text(value)
def from_html_vars(self, varprefix):
choices = self.choices()
sel = html.var(varprefix)
if sel == "other":
return self._explicit.from_html_vars(varprefix + "_ex")
for n, (val, title) in enumerate(choices):
if sel == str(n):
return val
return choices[0][0] # can only happen if user garbled URL
def validate_value(self, value, varprefix):
if self.value_is_explicit(value):
self._explicit.validate_value(value, varprefix)
        # else validate_datatype has already done the job
ValueSpec.custom_validate(self, value, varprefix)
def validate_datatype(self, value, varprefix):
for val, title in self.choices():
if val == value:
return
self._explicit.validate_datatype(value, varprefix + "_ex")
# Input of date with optimization for nearby dates
# in the future. Useful for example for alarms. The
# date is represented by a UNIX timestamp where the
# seconds are silently ignored.
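# Illustrative (assuming seconds_per_day == 86400): round_date() truncates an
# epoch timestamp to a multiple of one day, e.g.
#
#   round_date(1500000123)   # -> 1499990400, i.e. 00:00 UTC of that day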
def round_date(t):
return int(t) / seconds_per_day * seconds_per_day
def today():
return round_date(time.time())
class Weekday(DropdownChoice):
def __init__(self, **kwargs):
kwargs['choices'] = sorted(defines.weekdays().items())
DropdownChoice.__init__(self, **kwargs)
class RelativeDate(OptionalDropdownChoice):
def __init__(self, **kwargs):
choices = [
(0, _("today")),
(1, _("tomorrow"))
]
weekday = time.localtime(today()).tm_wday
for w in range(2, 7):
wd = (weekday + w) % 7
choices.append((w, defines.weekday_name(wd)))
for w in range(0, 7):
wd = (weekday + w) % 7
if w < 2:
title = _(" next week")
else:
title = _(" in %d days") % (w + 7)
choices.append((w + 7, defines.weekday_name(wd) + title))
kwargs['choices'] = choices
kwargs['explicit'] = Integer()
kwargs['otherlabel'] = _("in ... days")
OptionalDropdownChoice.__init__(self, **kwargs)
if "default_days" in kwargs:
self._default_value = kwargs["default_days"] * seconds_per_day + today()
else:
self._default_value = today()
def canonical_value(self):
return self._default_value
def render_input(self, varprefix, value):
self.classtype_info()
reldays = (round_date(value) - today()) / seconds_per_day
OptionalDropdownChoice.render_input(self, varprefix, reldays)
def value_to_text(self, value):
reldays = (round_date(value) - today()) / seconds_per_day
if reldays == -1:
return _("yesterday")
elif reldays == -2:
return _("two days ago")
elif reldays < 0:
return _("%d days ago") % -reldays
elif reldays < len(self._choices):
return self._choices[reldays][1]
else:
return _("in %d days") % reldays
def from_html_vars(self, varprefix):
reldays = OptionalDropdownChoice.from_html_vars(self, varprefix)
return today() + reldays * seconds_per_day
def validate_datatype(self, value, varprefix):
if type(value) not in [ float, int ]:
raise MKUserError(varprefix, _("Date must be a number value"))
def validate_value(self, value, varprefix):
ValueSpec.custom_validate(self, value, varprefix)
# A ValueSpec for editing a date. The date is
# represented as a UNIX timestamp x where x % seconds_per_day
# is zero (or will be ignored if non-zero), as long as
# include_time is not set to True
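# Illustrative sketch (hypothetical values; the output depends on the local
# timezone and the configured format):
#
#   vs = AbsoluteDate(include_time=True)
#   vs.value_to_text(86400)   # -> e.g. "1970-01-02 00:00:00" (in UTC)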
class AbsoluteDate(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._show_titles = kwargs.get("show_titles", True)
self._label = kwargs.get("label")
self._include_time = kwargs.get("include_time", False)
self._format = kwargs.get("format", "%F %T" if self._include_time else "%F")
self._default_value = kwargs.get("default_value", None)
self._allow_empty = kwargs.get('allow_empty', False)
# The default is that "None" means show current date/time in the
# input fields. This option changes the input fields to be empty by default
        # and allows the value to be None when no time is set.
# FIXME: Shouldn't this be the default?
self._none_means_empty = kwargs.get("none_means_empty", False)
def default_value(self):
if self._default_value != None:
return self._default_value
else:
if self._allow_empty:
return None
if self._include_time:
return time.time()
else:
return today()
def canonical_value(self):
return self.default_value()
def split_date(self, value):
if self._none_means_empty and value == None:
return (None,) * 6
lt = time.localtime(value)
return lt.tm_year, lt.tm_mon, lt.tm_mday, \
lt.tm_hour, lt.tm_min, lt.tm_sec
def render_input(self, varprefix, value):
self.classtype_info()
if self._label:
html.write("%s" % self._label)
html.nbsp()
year, month, day, hour, mmin, sec = self.split_date(value)
values = [ ("_year", year, 4),
("_month", month, 2),
("_day", day, 2)]
if self._include_time:
values += [ None,
("_hour", hour, 2),
("_min", mmin, 2),
("_sec", sec, 2)]
        if self._show_titles:
titles = [_("Year"), _("Month"), _("Day")]
if self._include_time:
titles += ['', _("Hour"), _("Minute"), _("Sec.")]
html.open_table(class_=["vs_date"])
html.open_tr()
map(html.th, titles)
html.close_tr()
html.open_tr()
for val in values:
html.open_td()
html.nbsp() if val is None else\
html.number_input(varprefix + val[0], val[1], size=val[2])
html.close_td()
html.close_tr()
html.close_table()
else:
for count, val in enumerate(values):
if count > 0:
html.write_text(" ")
html.nbsp() if val is None else\
html.number_input(varprefix + val[0], val[1], size=val[2])
def set_focus(self, varprefix):
html.set_focus(varprefix + "_year")
def value_to_text(self, value):
return time.strftime(self._format, time.localtime(value))
def from_html_vars(self, varprefix):
parts = []
entries = [
("year", _("year"), 1970, 2038),
("month", _("month"), 1, 12),
("day", _("day"), 1, 31)
]
if self._include_time:
entries += [
("hour", _("hour"), 0, 23),
("min", _("min"), 0, 59),
("sec", _("sec"), 0, 59),
]
for what, title, mmin, mmax in entries:
try:
varname = varprefix + "_" + what
part = int(html.var(varname))
except:
if self._allow_empty:
return None
else:
raise MKUserError(varname, _("Please enter a valid number"))
if part < mmin or part > mmax:
raise MKUserError(varname, _("The value for %s must be between %d and %d") %
(title, mmin, mmax))
parts.append(part)
# Construct broken time from input fields. Assume no-dst
parts += [0] * (3 if self._include_time else 6)
# Convert to epoch
epoch = time.mktime(tuple(parts))
# Convert back to localtime in order to know DST setting
localtime = time.localtime(epoch)
# Enter DST setting of that time
parts[-1] = localtime.tm_isdst
# Convert to epoch again
return time.mktime(tuple(parts))
def validate_datatype(self, value, varprefix):
if value == None and self._allow_empty:
return
if type(value) not in [ int, float ]:
raise MKUserError(varprefix, _("The type of the timestamp must be int or float, but is %s") %
type_name(value))
def validate_value(self, value, varprefix):
        if value == None:
            if not self._allow_empty:
                raise MKUserError(varprefix, _("%s is not a valid UNIX timestamp") % value)
        elif value < 0 or int(value) > (2**31-1):
            raise MKUserError(varprefix, _("%s is not a valid UNIX timestamp") % value)
ValueSpec.custom_validate(self, value, varprefix)
# Valuespec for entering times like 00:35 or 16:17. Currently
# no seconds are supported. But this could easily be added.
# The value itself is stored as a pair of integers, e.g.
# (0, 35) or (16, 17). If the user does not enter a time
# the vs will return None.
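# Illustrative: user input "16:17" parses to (16, 17), a bare "16" to (16, 0),
# and an empty field to None (if allow_empty is set):
#
#   Timeofday().value_to_text((0, 35))   # -> "00:35"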
class Timeofday(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._allow_24_00 = kwargs.get("allow_24_00", False)
self._allow_empty = kwargs.get("allow_empty", True)
def canonical_value(self):
if self._allow_empty:
return None
else:
return (0, 0)
def render_input(self, varprefix, value):
self.classtype_info()
text = ("%02d:%02d" % value) if value else ''
html.text_input(varprefix, text, size = 5)
def value_to_text(self, value):
if value == None:
return ""
else:
return "%02d:%02d" % value
def from_html_vars(self, varprefix):
# Fully specified
text = html.var(varprefix, "").strip()
if not text:
return None
if re.match("^(24|[0-1][0-9]|2[0-3]):[0-5][0-9]$", text):
return tuple(map(int, text.split(":")))
# only hours
try:
b = int(text)
return (b, 0)
except:
raise MKUserError(varprefix,
_("Invalid time format '<tt>%s</tt>', please use <tt>24:00</tt> format.") % text)
def validate_datatype(self, value, varprefix):
if self._allow_empty and value == None:
return
if type(value) != tuple:
raise MKUserError(varprefix, _("The datatype must be tuple, but ist %s") % type_name(value))
if len(value) != 2:
raise MKUserError(varprefix, _("The tuple must contain two elements, but you have %d") % len(value))
for x in value:
if type(x) != int:
raise MKUserError(varprefix, _("All elements of the tuple must be of type int, you have %s") % type_name(x))
def validate_value(self, value, varprefix):
if not self._allow_empty and value == None:
raise MKUserError(varprefix, _("Please enter a time."))
if self._allow_24_00:
max_value = (24, 0)
else:
max_value = (23, 59)
if value > max_value:
raise MKUserError(varprefix, _("The time must not be greater than %02d:%02d.") % max_value)
elif value[0] < 0 or value[1] < 0 or value[0] > 24 or value[1] > 59:
raise MKUserError(varprefix, _("Hours/Minutes out of range"))
ValueSpec.custom_validate(self, value, varprefix)
# Range like 00:15 - 18:30
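# Illustrative: the value is a pair of Timeofday tuples, e.g.
#
#   ((0, 15), (18, 30))   # shown as "00:15-18:30" by value_to_text()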
class TimeofdayRange(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._allow_empty = kwargs.get("allow_empty", True)
self._bounds = (
Timeofday(allow_empty = self._allow_empty,
allow_24_00 = True),
Timeofday(allow_empty = self._allow_empty,
allow_24_00 = True),
)
def canonical_value(self):
if self._allow_empty:
return None
else:
return (0, 0), (24, 0)
def render_input(self, varprefix, value):
self.classtype_info()
if value == None:
value = (None, None)
self._bounds[0].render_input(varprefix + "_from", value[0])
html.nbsp()
html.write_text("-")
html.nbsp()
self._bounds[1].render_input(varprefix + "_until", value[1])
def value_to_text(self, value):
if value == None:
return ""
else:
return self._bounds[0].value_to_text(value[0]) + "-" + \
self._bounds[1].value_to_text(value[1])
def from_html_vars(self, varprefix):
from_value = self._bounds[0].from_html_vars(varprefix + "_from")
until_value = self._bounds[1].from_html_vars(varprefix + "_until")
if (from_value == None) != (until_value == None):
raise MKUserError(varprefix + "_from", _("Please leave either both from and until empty or enter two times."))
if from_value == None:
return None
else:
return (from_value, until_value)
def validate_datatype(self, value, varprefix):
if self._allow_empty and value == None:
return
if type(value) != tuple:
raise MKUserError(varprefix, _("The datatype must be tuple, but ist %s") % type_name(value))
if len(value) != 2:
raise MKUserError(varprefix, _("The tuple must contain two elements, but you have %d") % len(value))
self._bounds[0].validate_datatype(value[0], varprefix + "_from")
self._bounds[1].validate_datatype(value[1], varprefix + "_until")
def validate_value(self, value, varprefix):
        if value == None:
            if self._allow_empty:
                return
            raise MKUserError(varprefix + "_from", _("Please enter a valid time of day range"))
self._bounds[0].validate_value(value[0], varprefix + "_from")
self._bounds[1].validate_value(value[1], varprefix + "_until")
if value[0] > value[1]:
raise MKUserError(varprefix + "_until", _("The <i>from</i> time must not be later then the <i>until</i> time."))
ValueSpec.custom_validate(self, value, varprefix)
# TODO: Move to cmklib
month_names = [
_("January"), _("February"), _("March"), _("April"),
_("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December")
]
class TimeHelper(object):
@staticmethod
def round(timestamp, unit):
time_s = list(time.localtime(timestamp))
time_s[3] = time_s[4] = time_s[5] = 0
time_s[8] = -1
if unit == 'd':
return time.mktime(time_s)
elif unit == 'w':
days = time_s[6] # 0 based
elif unit == 'm':
days = time_s[2] - 1 # 1 based
elif unit == 'y':
days = time_s[7] - 1 # 1 based
else:
raise MKGeneralException("invalid time unit %s" % unit)
return TimeHelper.round(time.mktime(time_s) - days * 86400 + 3600, 'd')
@staticmethod
def add(timestamp, count, unit):
if unit == 'h':
return timestamp + 3600 * count
elif unit == 'd':
return timestamp + 86400 * count
elif unit == 'w':
return timestamp + (7 * 86400) * count
elif unit == 'm':
time_s = list(time.localtime(timestamp))
years, months = divmod(abs(count), 12)
if count < 0:
years *= -1
months *= -1
time_s[0] += years
time_s[1] += months
if time_s[1] <= 0:
time_s[0] -= 1
                time_s[1] = 12 + time_s[1] # wrap into the previous year, e.g. 0 -> December
time_s[8] = -1
return time.mktime(time_s)
elif unit == 'y':
time_s = list(time.localtime(timestamp))
time_s[0] += count
return time.mktime(time_s)
else:
MKGeneralException("invalid time unit %s" % unit)
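# Illustrative sketch of the TimeHelper arithmetic above (hypothetical values):
#
#   now = time.time()
#   start_of_day   = TimeHelper.round(now, 'd')
#   start_of_month = TimeHelper.round(now, 'm')
#   one_week_ago   = TimeHelper.add(now, -1, 'w')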
class Timerange(CascadingDropdown):
def __init__(self, **kwargs):
self._title = _('Time range')
self._allow_empty = kwargs.get("allow_empty", False)
self._include_time = kwargs.get("include_time", False)
self._fixed_choices = kwargs.get("choices", [])
kwargs['choices'] = self._prepare_choices
CascadingDropdown.__init__(self, **kwargs)
def _prepare_choices(self):
choices = list(self._fixed_choices)
if self._allow_empty:
choices += [ (None, '') ]
choices += self._get_graph_timeranges() + [
( "d0", _("Today") ),
( "d1", _("Yesterday") ),
( "w0", _("This week") ),
( "w1", _("Last week") ),
( "m0", _("This month") ),
( "m1", _("Last month") ),
( "y0", _("This year") ),
( "y1", _("Last year") ),
( "age", _("The last..."), Age() ),
( "date", _("Date range"),
Tuple(
orientation = "horizontal",
title_br = False,
elements = [
AbsoluteDate(title = _("From:")),
AbsoluteDate(title = _("To:")),
])),
]
if self._include_time:
choices += [
( "time", _("Date & time range"),
Tuple(
orientation = "horizontal",
title_br = False,
elements = [
AbsoluteDate(
title = _("From:"),
include_time = True,
),
AbsoluteDate(
title = _("To:"),
include_time = True,
),
],
),
)
]
return choices
def _get_graph_timeranges(self):
try:
import config # FIXME
return [ (('age', timerange_attrs["duration"]), timerange_attrs['title'])
for timerange_attrs in config.graph_timeranges ]
except AttributeError: # only available in cee
return [ ( "4h", _("The last 4 hours")),
( "25h", _("The last 25 hours")),
( "8d", _("The last 8 days")),
( "35d", _("The last 35 days")),
( "400d", _("The last 400 days")), ]
def compute_range(self, rangespec):
if rangespec == None:
rangespec = "4h"
# Compatibility with previous versions
elif rangespec[0] == "pnp_view":
rangespec = {
1: "4h",
2: "25h",
3: "8d",
4: "35d",
5: "400d"
}.get(rangespec[1], "4h")
now = time.time()
if rangespec[0] == 'age':
from_time = now - rangespec[1]
until_time = now
title = _("The last ") + Age().value_to_text(rangespec[1])
return (from_time, until_time), title
elif rangespec[0] in [ 'date', 'time' ]:
from_time, until_time = rangespec[1]
if from_time > until_time:
raise MKUserError("avo_rangespec_9_0_year", _("The end date must be after the start date"))
if rangespec[0] == 'date':
# add 25 hours, then round to 00:00 of that day. This accounts for
# daylight-saving time
until_time = TimeHelper.round(TimeHelper.add(until_time, 25, 'h'), 'd')
title = AbsoluteDate().value_to_text(from_time) + " ... " + \
AbsoluteDate().value_to_text(until_time)
return (from_time, until_time), title
else:
until_time = now
if rangespec[0].isdigit(): # 4h, 400d
count = int(rangespec[:-1])
from_time = TimeHelper.add(now, count * -1, rangespec[-1])
unit_name = {
'd': "days",
'h': "hours"
}[rangespec[-1]]
title = _("Last %d %s") % (count, unit_name)
return (from_time, now), title
year, month = time.localtime(now)[:2]
# base time is current time rounded down to the nearest unit (day, week, ...)
from_time = TimeHelper.round(now, rangespec[0])
        # derive the titles (current / previous period) from the unit
titles = {
'd': (_("Today"), _("Yesterday")),
'w': (_("This week"), _("Last week")),
'y': (str(year), str(year - 1)),
'm': ("%s %d" % (month_names[month - 1], year),
"%s %d" % (month_names[(month + 10) % 12], year - int(month == 1))),
}[rangespec[0]]
if rangespec[1] == '0':
return (from_time, now), titles[0]
else: # last (previous)
prev_time = TimeHelper.add(from_time, -1, rangespec[0])
# add one hour to the calculated time so that if dst started in that period,
# we don't round down a whole day
prev_time = TimeHelper.round(prev_time + 3600, 'd')
return (prev_time, from_time), titles[1]
# A selection of various date formats
def DateFormat(**args):
args.setdefault("title", _("Date format"))
args.setdefault("default_value", "%Y-%m-%d")
args["choices"] = [
("%Y-%m-%d", "1970-12-18"),
("%d.%m.%Y", "18.12.1970"),
("%m/%d/%Y", "12/18/1970"),
("%d.%m.", "18.12."),
("%m/%d", "12/18"),
]
return DropdownChoice(**args)
def TimeFormat(**args):
args.setdefault("title", _("Time format"))
args.setdefault("default_value", "%H:%M:%S")
args["choices"] = [
("%H:%M:%S", "18:27:36"),
("%l:%M:%S %p", "12:27:36 PM"),
("%H:%M", "18:27"),
("%l:%M %p", "6:27 PM"),
("%H", "18"),
("%l %p", "6 PM"),
]
return DropdownChoice(**args)
# Make a configuration value optional, i.e. it may be None.
# The user has a checkbox for activating the option. Example:
# debug_log: it is either None or set to a filename.
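# Illustrative sketch (hypothetical values):
#
#   vs = Optional(TextAscii(title=_("Log file")), none_label=_("(disabled)"))
#   vs.value_to_text(None)               # -> "(disabled)"
#   vs.value_to_text("/tmp/debug.log")   # -> rendered by the inner TextAscii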
class Optional(ValueSpec):
def __init__(self, valuespec, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._valuespec = valuespec
self._label = kwargs.get("label")
self._negate = kwargs.get("negate", False)
self._none_label = kwargs.get("none_label", _("(unset)"))
self._none_value = kwargs.get("none_value", None)
self._sameline = kwargs.get("sameline", False)
self._indent = kwargs.get("indent", True)
def canonical_value(self):
return self._none_value
def render_input(self, varprefix, value):
self.classtype_info()
div_id = "option_" + varprefix
checked = html.get_checkbox(varprefix + "_use")
if checked == None:
if self._negate:
checked = value == self._none_value
else:
checked = value != self._none_value
html.open_span()
if self._label is not None:
label = self._label
elif self.title():
label = self.title()
elif self._negate:
label = _(" Ignore this option")
else:
label = _(" Activate this option")
html.checkbox("%s_use" % varprefix, checked, label=label,
onclick="valuespec_toggle_option(this, %r, %r)" %
(div_id, 1 if self._negate else 0))
if self._sameline:
html.nbsp()
else:
html.br()
html.close_span()
if self._indent:
indent = 40
else:
indent = 0
html.open_span(id_=div_id, style=["margin-left: %dpx;" % indent,
"display:none;" if checked == self._negate else None])
if value == self._none_value:
value = self._valuespec.default_value()
if self._valuespec.title():
html.write(self._valuespec.title() + " ")
self._valuespec.render_input(varprefix + "_value", value)
html.close_span()
def value_to_text(self, value):
if value == self._none_value:
return self._none_label
else:
return self._valuespec.value_to_text(value)
def from_html_vars(self, varprefix):
checkbox_checked = html.get_checkbox(varprefix + "_use") == True # not None or False
if checkbox_checked != self._negate:
return self._valuespec.from_html_vars(varprefix + "_value")
else:
return self._none_value
def validate_datatype(self, value, varprefix):
if value != self._none_value:
self._valuespec.validate_datatype(value, varprefix + "_value")
def validate_value(self, value, varprefix):
if value != self._none_value:
self._valuespec.validate_value(value, varprefix + "_value")
ValueSpec.custom_validate(self, value, varprefix)
# Makes a configuration value optional, while displaying the current
# value as text with a checkbox in front of it. When the checkbox is
# checked, the text is hidden and the encapsulated valuespec is shown.
class OptionalEdit(Optional):
def __init__(self, valuespec, **kwargs):
Optional.__init__(self, valuespec, **kwargs)
self._label = ''
def render_input(self, varprefix, value):
self.classtype_info()
div_id = "option_" + varprefix
checked = html.get_checkbox(varprefix + "_use")
if checked == None:
if self._negate:
checked = True
else:
checked = False
html.open_span()
if self._label is not None:
label = self._label
elif self.title():
label = self.title()
elif self._negate:
label = _(" Ignore this option")
else:
label = _(" Activate this option")
html.checkbox("%s_use" % varprefix, checked, label=label,
onclick="valuespec_toggle_option(this, %r, %r);valuespec_toggle_option(this, %r, %r)" %
(div_id + '_on', 1 if self._negate else 0,
div_id + '_off', 0 if self._negate else 1))
html.nbsp()
html.close_span()
if value == None:
value = self._valuespec.default_value()
html.open_span(id_="%s_off" % div_id, style="display:none;" if checked != self._negate else None)
html.write(value)
html.close_span()
html.open_span(id_="%s_on" % div_id, style="display:none;" if checked == self._negate else None)
if self._valuespec.title():
html.write(self._valuespec.title() + " ")
self._valuespec.render_input(varprefix + "_value", value)
html.close_span()
def from_html_vars(self, varprefix):
return self._valuespec.from_html_vars(varprefix + "_value")
# Handle case when there are several possible allowed formats
# for the value (e.g. strings, 4-tuple or 6-tuple like in SNMP-Communities)
# The different alternatives must have different data types that can
# be distinguished with validate_datatype.
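# Illustrative sketch (hypothetical values):
#
#   vs = Alternative(elements=[Integer(title=_("Port number")),
#                              TextAscii(title=_("Service name"))])
#
# validate_datatype() of each element decides which alternative matches a
# given value: 80 -> Integer, "http" -> TextAscii.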
class Alternative(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._elements = kwargs["elements"]
self._match = kwargs.get("match") # custom match function, returns index in elements
self._style = kwargs.get("style", "radio") # alternative: "dropdown"
self._show_alternative_title = kwargs.get("show_alternative_title")
self._on_change = kwargs.get("on_change") # currently only working for style="dropdown"
self._orientation = kwargs.get("orientation", "vertical") # or horizontal: for style="dropdown"
# Return the alternative (i.e. valuespec)
# that matches the datatype of a given value. We assume
# that always one matches. No error handling here.
    # This may also transform the input value in case it gets
# "decorated" in the from_html_vars function
def matching_alternative(self, value):
if self._match:
return self._elements[self._match(value)], value
for vs in self._elements:
try:
vs.validate_datatype(value, "")
return vs, value
except:
pass
return None, value
def render_input(self, varprefix, value):
self.classtype_info()
if self._style == "radio":
self.render_input_radio(varprefix, value)
else:
self.render_input_dropdown(varprefix, value)
def render_input_dropdown(self, varprefix, value):
mvs, value = self.matching_alternative(value)
options = []
sel_option = html.var(varprefix + "_use")
for nr, vs in enumerate(self._elements):
if not sel_option and vs == mvs:
sel_option = str(nr)
options.append((str(nr), vs.title()))
onchange="valuespec_cascading_change(this, '%s', %d);" % (varprefix, len(options))
if self._on_change:
onchange += self._on_change
if self._orientation == "horizontal":
html.open_table()
html.open_tr()
html.open_td()
html.dropdown(varprefix + "_use", options, deflt=sel_option, onchange=onchange)
if self._orientation == "vertical":
html.br()
html.br()
for nr, vs in enumerate(self._elements):
if str(nr) == sel_option:
disp = ""
cur_val = value
else:
disp = "none"
cur_val = vs.default_value()
if self._orientation == "horizontal":
html.close_td()
html.open_td()
html.open_span(id_="%s_%s_sub" % (varprefix, nr), style="display:%s" % disp)
html.help(vs.help())
vs.render_input(varprefix + "_%d" % nr, cur_val)
html.close_span()
if self._orientation == "horizontal":
html.close_td()
html.close_tr()
html.close_table()
def render_input_radio(self, varprefix, value):
mvs, value = self.matching_alternative(value)
for nr, vs in enumerate(self._elements):
if html.has_var(varprefix + "_use"):
checked = html.var(varprefix + "_use") == str(nr)
else:
checked = vs == mvs
html.help(vs.help())
title = vs.title()
if not title and nr:
html.nbsp()
html.nbsp()
html.radiobutton(varprefix + "_use", str(nr), checked, title)
if title:
html.open_ul()
if vs == mvs:
val = value
else:
val = vs.default_value()
vs.render_input(varprefix + "_%d" % nr, val)
if title:
html.close_ul()
def set_focus(self, varprefix):
# TODO: Set focus to currently active option
pass
def canonical_value(self):
return self._elements[0].canonical_value()
def default_value(self):
try:
if type(self._default_value) == type(lambda:True):
return self._default_value()
else:
return self._default_value
except:
return self._elements[0].default_value()
def value_to_text(self, value):
vs, value = self.matching_alternative(value)
if vs:
output = ""
if self._show_alternative_title and vs.title():
output = "%s<br>" % vs.title()
return output + vs.value_to_text(value)
else:
return _("invalid:") + " " + html.attrencode(str(value))
def from_html_vars(self, varprefix):
nr = int(html.var(varprefix + "_use"))
vs = self._elements[nr]
return vs.from_html_vars(varprefix + "_%d" % nr)
def validate_datatype(self, value, varprefix):
for vs in self._elements:
try:
vs.validate_datatype(value, "")
return
except:
pass
raise MKUserError(varprefix,
_("The data type of the value does not match any of the "
"allowed alternatives."))
def validate_value(self, value, varprefix):
vs, value = self.matching_alternative(value)
for nr, v in enumerate(self._elements):
if vs == v:
vs.validate_value(value, varprefix + "_%d" % nr)
ValueSpec.custom_validate(self, value, varprefix)
# Edit an n-tuple (with fixed size) of values
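# Illustrative sketch (hypothetical values):
#
#   vs = Tuple(elements=[Integer(title=_("Warning")),
#                        Integer(title=_("Critical"))])
#   vs.value_to_text((80, 90))   # -> "80, 90"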
class Tuple(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._elements = kwargs["elements"]
self._show_titles = kwargs.get("show_titles", True)
self._orientation = kwargs.get("orientation", "vertical") # also: horizontal, float
self._separator = kwargs.get("separator", " ") # in case of float
self._title_br = kwargs.get("title_br", True)
def canonical_value(self):
return tuple([x.canonical_value() for x in self._elements])
def default_value(self):
return tuple([x.default_value() for x in self._elements])
def render_input(self, varprefix, value):
self.classtype_info()
if self._orientation != "float":
html.open_table(class_=["valuespec_tuple"])
if self._orientation == "horizontal":
html.open_tr()
for no, element in enumerate(self._elements):
try:
val = value[no]
except:
val = element.default_value()
vp = varprefix + "_" + str(no)
if self._orientation == "vertical":
html.open_tr()
elif self._orientation == "float":
html.write(self._separator)
if self._show_titles:
elem_title = element.title()
if elem_title:
title = element.title()[0].upper() + element.title()[1:]
else:
title = ""
if self._orientation == "vertical":
html.open_td(class_="tuple_left")
html.write(title)
html.help(element.help())
html.close_td()
elif self._orientation == "horizontal":
html.open_td(class_="tuple_td")
html.open_span(class_=["title"])
html.write(title)
html.help(element.help())
html.close_span()
if self._title_br:
html.br()
else:
html.write_text(" ")
else:
html.write_text(" ")
html.help(element.help())
if self._orientation == "vertical":
html.open_td(class_="tuple_right")
element.render_input(vp, val)
if self._orientation != "float":
html.close_td()
if self._orientation == "vertical":
html.close_tr()
if self._orientation == "horizontal":
html.close_tr()
if self._orientation != "float":
html.close_table()
def set_focus(self, varprefix):
self._elements[0].set_focus(varprefix + "_0")
def value_to_text(self, value):
return "" + ", ".join([ element.value_to_text(val)
for (element, val)
in zip(self._elements, value)]) + ""
def from_html_vars(self, varprefix):
value = []
for no, element in enumerate(self._elements):
vp = varprefix + "_" + str(no)
value.append(element.from_html_vars(vp))
return tuple(value)
def validate_value(self, value, varprefix):
for no, (element, val) in enumerate(zip(self._elements, value)):
vp = varprefix + "_" + str(no)
element.validate_value(val, vp)
ValueSpec.custom_validate(self, value, varprefix)
def validate_datatype(self, value, varprefix):
if type(value) != tuple:
raise MKUserError(varprefix,
_("The datatype must be a tuple, but is %s") % type_name(value))
if len(value) != len(self._elements):
raise MKUserError(varprefix,
_("The number of elements in the tuple must be exactly %d.") % len(self._elements))
for no, (element, val) in enumerate(zip(self._elements, value)):
vp = varprefix + "_" + str(no)
element.validate_datatype(val, vp)
class Dictionary(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._elements = kwargs["elements"]
self._empty_text = kwargs.get("empty_text", _("(no parameters)"))
# Optionally a text can be specified to be shown by value_to_text()
        # when the value equals the default value of the value spec. Normally
# the default values are shown.
self._default_text = kwargs.get("default_text", None)
self._required_keys = kwargs.get("required_keys", [])
self._ignored_keys = kwargs.get("ignored_keys", [])
self._default_keys = kwargs.get("default_keys", []) # keys present in default value
if "optional_keys" in kwargs:
ok = kwargs["optional_keys"]
if type(ok) == list:
self._required_keys = \
[ e[0] for e in self._get_elements() if e[0] not in ok ]
self._optional_keys = True
elif ok:
self._optional_keys = True
else:
self._optional_keys = False
else:
self._optional_keys = True
if "hidden_keys" in kwargs:
self._hidden_keys = kwargs["hidden_keys"]
else:
self._hidden_keys = []
self._columns = kwargs.get("columns", 1) # possible: 1 or 2
self._render = kwargs.get("render", "normal") # also: "form" -> use forms.section()
self._form_narrow = kwargs.get("form_narrow", False) # used if render == "form"
self._form_isopen = kwargs.get("form_isopen", True) # used if render == "form"
self._headers = kwargs.get("headers") # "sup" -> small headers in oneline mode
self._migrate = kwargs.get("migrate") # value migration from old tuple version
self._indent = kwargs.get("indent", True)
def migrate(self, value):
if self._migrate:
return self._migrate(value)
else:
return value
def _get_elements(self):
if type(self._elements) == type(lambda: None) or isinstance(self._elements, types.MethodType):
return self._elements()
elif type(self._elements) == list:
return self._elements
else:
return []
    # The additional "form" argument allows specifying the rendering
    # style at call time
def render_input(self, varprefix, value, form=None):
self.classtype_info()
value = self.migrate(value)
if not isinstance(value, (dict, DictMixin)):
value = {} # makes code simpler in complain phase
if form is True:
self.render_input_form(varprefix, value)
elif self._render == "form" and form is None:
self.render_input_form(varprefix, value)
elif self._render == "form_part" and form is None:
self.render_input_form(varprefix, value, as_part=True)
else:
self.render_input_normal(varprefix, value, self._render == "oneline")
def render_input_normal(self, varprefix, value, oneline = False):
headers_sup = oneline and self._headers == "sup"
if headers_sup or not oneline:
html.open_table(class_=["dictionary"])
if headers_sup:
html.open_tr()
for param, vs in self._get_elements():
if param in self._hidden_keys:
continue
if not oneline:
html.open_tr()
html.open_td(class_="dictleft")
div_id = varprefix + "_d_" + param
vp = varprefix + "_p_" + param
colon_printed = False
if self._optional_keys and param not in self._required_keys:
visible = html.get_checkbox(vp + "_USE")
if visible == None:
visible = param in value
label = vs.title()
if self._columns == 2:
label += ":"
colon_printed = True
html.checkbox("%s_USE" % vp, visible, label=label,
onclick="valuespec_toggle_option(this, %r)" % div_id)
else:
visible = True
if vs.title():
if headers_sup:
html.open_td()
html.open_b(class_=["header"])
html.write(" %s" % vs.title())
if oneline:
if self._headers == "sup":
html.close_b()
html.br()
else:
html.write_text(": ")
if self._columns == 2:
if vs.title() and not colon_printed:
html.write_text(':')
html.help(vs.help())
if not oneline:
html.close_td()
html.open_td(class_="dictright")
else:
if not oneline:
html.br()
html.open_div(id_= div_id,
class_=["dictelement", "indent" if (self._indent and self._columns == 1) else None],
style= "display:none;" if not visible else
("display:inline-block;" if oneline else None))
if self._columns == 1:
html.help(vs.help())
# Remember: in complain mode we do not render 'value' (the default value),
# but re-display the values from the HTML variables. We must not use 'value'
# in that case.
if type(value) == dict:
vs.render_input(vp, value.get(param, vs.default_value()))
else:
vs.render_input(vp, None)
html.close_div()
if not oneline:
html.close_td()
html.close_tr()
elif headers_sup:
html.close_td()
if not oneline:
html.close_table()
elif oneline and self._headers == "sup":
html.close_tr()
html.close_table()
def render_input_form(self, varprefix, value, as_part=False):
if self._headers:
for header, sections in self._headers:
self.render_input_form_header(varprefix, value, header, sections, as_part)
else:
self.render_input_form_header(varprefix, value, self.title() or _("Properties"), None, as_part)
if not as_part:
forms.end()
def render_input_form_header(self, varprefix, value, title, sections, as_part):
if not as_part:
forms.header(title, isopen=self._form_isopen, narrow=self._form_narrow)
for param, vs in self._get_elements():
if param in self._hidden_keys:
continue
if sections and param not in sections:
continue
div_id = varprefix + "_d_" + param
vp = varprefix + "_p_" + param
if self._optional_keys and param not in self._required_keys:
visible = html.get_checkbox(vp + "_USE")
if visible == None:
visible = param in value
checkbox_code = html.render_checkbox(vp + "_USE", deflt=visible,
onclick="valuespec_toggle_option(this, %r)" % div_id)
forms.section(vs.title(), checkbox=checkbox_code)
else:
visible = True
forms.section(vs.title())
html.open_div(id_=div_id, style="display:none;" if not visible else None)
html.help(vs.help())
vs.render_input(vp, value.get(param, vs.default_value()))
html.close_div()
def set_focus(self, varprefix, key=None):
elements = self._get_elements()
if elements:
if key == None:
elements[0][1].set_focus(varprefix + "_p_" + elements[0][0])
else:
for element_key, element_vs in elements:
if element_key == key:
element_vs.set_focus(varprefix + "_p_" + key)
def canonical_value(self):
return dict([
(name, vs.canonical_value())
for (name, vs)
in self._get_elements()
if name in self._required_keys or not self._optional_keys])
def default_value(self):
def_val = {}
for name, vs in self._get_elements():
if name in self._required_keys or not self._optional_keys or name in self._default_keys:
def_val[name] = vs.default_value()
return def_val
def value_to_text(self, value):
value = self.migrate(value)
oneline = self._render == "oneline"
if not value:
return self._empty_text
if self._default_text and value == self.default_value():
return self._default_text
elem = self._get_elements()
s = '' if oneline else HTML()
for param, vs in elem:
if param in value:
# TODO: This is a workaround for a bug. This function needs to return str objects right now.
text = HTML(vs.value_to_text(value[param]))
if oneline:
                    if s: # not the first rendered entry; avoids a leading comma
s += ", "
s += "%s: %s" % (vs.title(), text)
else:
s += html.render_tr(html.render_td("%s: " % vs.title(), class_="title") + html.render_td(text))
if not oneline:
s = html.render_table(s)
return "%s" % s
def from_html_vars(self, varprefix):
value = {}
for param, vs in self._get_elements():
vp = varprefix + "_p_" + param
if not self._optional_keys \
or param in self._required_keys \
or html.get_checkbox(vp + "_USE"):
value[param] = vs.from_html_vars(vp)
return value
def validate_datatype(self, value, varprefix):
value = self.migrate(value)
if type(value) != dict:
raise MKUserError(varprefix, _("The type must be a dictionary, but it is a %s") % type_name(value))
for param, vs in self._get_elements():
if param in value:
vp = varprefix + "_p_" + param
try:
vs.validate_datatype(value[param], vp)
except MKUserError, e:
raise MKUserError(e.varname, _("%s: %s") % (vs.title(), e))
elif not self._optional_keys or param in self._required_keys:
raise MKUserError(varprefix, _("The entry %s is missing") % vs.title())
# Check for exceeding keys
allowed_keys = [ p for (p,v) in self._get_elements() ]
if self._ignored_keys:
allowed_keys += self._ignored_keys
for param in value.keys():
if param not in allowed_keys:
raise MKUserError(varprefix, _("Undefined key '%s' in the dictionary. Allowed are %s.") %
(param, ", ".join(allowed_keys)))
def validate_value(self, value, varprefix):
value = self.migrate(value)
for param, vs in self._get_elements():
if param in value:
vp = varprefix + "_p_" + param
vs.validate_value(value[param], vp)
elif not self._optional_keys or param in self._required_keys:
raise MKUserError(varprefix, _("The entry %s is missing") % vs.title())
ValueSpec.custom_validate(self, value, varprefix)
# Base class for selection of a Nagios element out
# of a given list that must be loaded from a file.
# Example: GroupSelection. Child class must define
# a function get_elements() that returns a dictionary
# from element keys to element titles.
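# Illustrative subclass sketch (hypothetical elements):
#
#   class GroupSelection(ElementSelection):
#       def get_elements(self):
#           return {"linux": "Linux servers", "windows": "Windows servers"}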
class ElementSelection(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._loaded_at = None
self._label = kwargs.get("label")
self._empty_text = kwargs.get("empty_text", _("There are no elements defined for this selection yet."))
def load_elements(self):
if self._loaded_at != id(html):
self._elements = self.get_elements()
self._loaded_at = id(html) # unique for each query!
def get_elements(self):
raise NotImplementedError()
def canonical_value(self):
self.load_elements()
if len(self._elements) > 0:
return self._elements.keys()[0]
def render_input(self, varprefix, value):
self.classtype_info()
self.load_elements()
if len(self._elements) == 0:
html.write(self._empty_text)
else:
if self._label:
html.write("%s" % self._label)
html.nbsp()
html.dropdown(varprefix, self._elements.items(), deflt=value, sorted=True)
def value_to_text(self, value):
self.load_elements()
return html.attrencode(self._elements.get(value, value))
def from_html_vars(self, varprefix):
return html.var(varprefix)
def validate_value(self, value, varprefix):
self.load_elements()
if len(self._elements) == 0:
raise MKUserError(varprefix, _("You cannot save this rule.") + ' ' + self._empty_text)
if value not in self._elements:
raise MKUserError(varprefix, _("%s is not an existing element in this selection.") % (value,))
ValueSpec.custom_validate(self, value, varprefix)
def validate_datatype(self, value, varprefix):
self.load_elements()
        # When no elements exist the default value is None and e.g. in wato.mode_edit_rule()
# handed over to validate_datatype() before rendering the input form. Disable the
# validation in this case to prevent validation errors. A helpful message is shown
# during render_input()
if len(self._elements) == 0 and value == None:
return
if type(value) != str:
raise MKUserError(varprefix, _("The datatype must be str (string), but is %s") % type_name(value))
class AutoTimestamp(FixedValue):
def __init__(self, **kwargs):
FixedValue.__init__(self, **kwargs)
def canonical_value(self):
return time.time()
def from_html_vars(self, varprefix):
return time.time()
def value_to_text(self, value):
return time.strftime("%F %T", time.localtime(value))
def validate_datatype(self, value, varprefix):
if type(value) not in [ int, float ]:
            raise MKUserError(varprefix, _("Invalid datatype of timestamp: must be int or float."))
# Fully transparent VS encapsulating a vs in a foldable
# container.
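# Illustrative sketch (hypothetical values):
#
#   vs = Foldable(TextAscii(title=_("Comment")), open=False,
#                 title_function=lambda value: value or _("(empty)"))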
class Foldable(ValueSpec):
def __init__(self, valuespec, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._valuespec = valuespec
self._open = kwargs.get("open", False)
self._title_function = kwargs.get("title_function", None)
def render_input(self, varprefix, value):
self.classtype_info()
try:
title_value = value
if html.form_submitted():
try:
title_value = self._valuespec.from_html_vars(varprefix)
except:
pass
title = self._title_function(title_value)
except:
title = self._valuespec.title()
if not title:
title = _("(no title)")
html.begin_foldable_container("valuespec_foldable", varprefix, self._open,
title, False)
html.help(self._valuespec.help())
self._valuespec.render_input(varprefix, value)
html.end_foldable_container()
def set_focus(self, varprefix):
self._valuespec.set_focus(varprefix)
def canonical_value(self):
return self._valuespec.canonical_value()
def default_value(self):
return self._valuespec.default_value()
def value_to_text(self, value):
return self._valuespec.value_to_text(value)
def from_html_vars(self, varprefix):
return self._valuespec.from_html_vars(varprefix)
def validate_datatype(self, value, varprefix):
self._valuespec.validate_datatype(value, varprefix)
def validate_value(self, value, varprefix):
self._valuespec.validate_value(value, varprefix)
ValueSpec.custom_validate(self, value, varprefix)
# Transforms the value from one representation to
# another while being completely transparent to the user.
# forth: function that converts a value into the representation
# needed by the encapsulated vs
# back: function that converts a value created by the encapsulated
# vs back to the outer representation
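# Illustrative sketch: store megabytes externally while the encapsulated
# Integer edits bytes (hypothetical values):
#
#   vs = Transform(Integer(title=_("Size in bytes")),
#                  forth=lambda mb: mb * 1024 * 1024,   # outer -> inner
#                  back=lambda b: b // (1024 * 1024))   # inner -> outer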
class Transform(ValueSpec):
def __init__(self, valuespec, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._valuespec = valuespec
self._back = kwargs.get("back")
self._forth = kwargs.get("forth")
def forth(self, value):
if self._forth:
return self._forth(value)
else:
return value
def back(self, value):
if self._back:
return self._back(value)
else:
return value
def title(self):
if self._title:
return self._title
else:
return self._valuespec.title()
def help(self):
if self._help:
return self._help
else:
return self._valuespec.help()
def render_input(self, varprefix, value, **kwargs):
self.classtype_info()
self._valuespec.render_input(varprefix, self.forth(value), **kwargs)
def set_focus(self, *args):
self._valuespec.set_focus(*args)
def canonical_value(self):
return self.back(self._valuespec.canonical_value())
def default_value(self):
return self.back(self._valuespec.default_value())
def value_to_text(self, value):
return self._valuespec.value_to_text(self.forth(value))
def from_html_vars(self, varprefix):
return self.back(self._valuespec.from_html_vars(varprefix))
def validate_datatype(self, value, varprefix):
self._valuespec.validate_datatype(self.forth(value), varprefix)
def validate_value(self, value, varprefix):
self._valuespec.validate_value(self.forth(value), varprefix)
ValueSpec.custom_validate(self, value, varprefix)
class LDAPDistinguishedName(TextUnicode):
def __init__(self, **kwargs):
TextUnicode.__init__(self, **kwargs)
self.enforce_suffix = kwargs.get('enforce_suffix')
def validate_value(self, value, varprefix):
TextAscii.validate_value(self, value, varprefix)
# Check whether or not the given DN is below a base DN
if self.enforce_suffix and value and not value.lower().endswith(self.enforce_suffix.lower()):
            raise MKUserError(varprefix, _('Does not end with "%s".') % self.enforce_suffix)
ValueSpec.custom_validate(self, value, varprefix)
class Password(TextAscii):
def __init__(self, **kwargs):
self._is_stored_plain = kwargs.get("is_stored_plain", True)
kwargs.setdefault("autocomplete", False)
if self._is_stored_plain:
plain_help = _("The password entered here is stored in plain text within the "
"monitoring site. This usually needed because the monitoring "
"process needs to have access to the unencrypted password "
"because it needs to submit it to authenticate with remote systems. ")
if "help" in kwargs:
kwargs["help"] += "<br><br>" + plain_help
else:
kwargs["help"] = plain_help
TextAscii.__init__(self, attrencode = True, **kwargs)
def render_input(self, varprefix, value):
self.classtype_info()
if value == None:
value = ""
if self._label:
html.write(self._label)
html.nbsp()
kwargs = {
"size": self._size,
}
if self._autocomplete == False:
kwargs["autocomplete"] = "new-password"
html.password_input(varprefix, str(value), **kwargs)
def password_plaintext_warning(self):
if self._is_stored_plain:
html.span(_("<br>Please note that Check_MK needs this password in clear"
"<br>text during normal operation and thus stores it unencrypted"
"<br>on the Check_MK server."))
def value_to_text(self, value):
if value == None:
return _("none")
else:
return '******'
class PasswordSpec(Password):
def __init__(self, **kwargs):
self._hidden = kwargs.get('hidden', False)
if self._hidden:
kwargs["type"] = "password"
Password.__init__(self, **kwargs)
def render_input(self, varprefix, value):
self.classtype_info()
TextAscii.render_input(self, varprefix, value, hidden=self._hidden)
if not value:
html.icon_button("#", _(u"Randomize password"), "random",
onclick="vs_passwordspec_randomize(this);")
if self._hidden:
html.icon_button("#", _(u"Show/Hide password"), "showhide",
onclick="vs_toggle_hidden(this);")
self.password_plaintext_warning()
class PasswordFromStore(CascadingDropdown):
def __init__(self, *args, **kwargs):
kwargs["choices"] = [
("password", _("Password"), Password(
allow_empty = kwargs.get("allow_empty", True),
)),
("store", _("Stored password"), DropdownChoice(
choices = self._password_choices,
sorted = True,
invalid_choice = "complain",
                invalid_choice_title = _("Password does not exist or its use is not permitted"),
                invalid_choice_error = _("The configured password has either been removed or you "
                                         "are not permitted to use this password. Please choose "
                                         "another one."),
)),
]
kwargs["orientation"] = "horizontal"
CascadingDropdown.__init__(self, *args, **kwargs)
def _password_choices(self):
import wato
return [ (ident, pw["title"]) for ident, pw
in wato.PasswordStore().usable_passwords().items() ]
def IndividualOrStoredPassword(*args, **kwargs):
return Transform(
PasswordFromStore(*args, **kwargs),
forth = lambda v: ("password", v) if type(v) != tuple else v,
)
class FileUpload(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._allow_empty = kwargs.get('allow_empty', True)
self._allowed_extensions = kwargs.get('allowed_extensions')
self._allow_empty_content = kwargs.get('allow_empty_content', True)
def canonical_value(self):
if self._allow_empty:
return None
else:
return ''
def validate_value(self, value, varprefix):
file_name, mime_type, content = value
if not self._allow_empty and (content == '' or file_name == ''):
raise MKUserError(varprefix, _('Please select a file.'))
if not self._allow_empty_content and len(content) == 0:
raise MKUserError(varprefix, _('The selected file is empty. Please select a non-empty file.')
)
if self._allowed_extensions != None:
matched = False
for extension in self._allowed_extensions:
if file_name.endswith(extension):
matched = True
break
if not matched:
raise MKUserError(varprefix, _("Invalid file name extension. Allowed are: %s")
% ", ".join(self._allowed_extensions))
self.custom_validate(value, varprefix)
def render_input(self, varprefix, value):
self.classtype_info()
html.upload_file(varprefix)
def from_html_vars(self, varprefix):
# returns a triple of (filename, mime-type, content)
return html.uploaded_file(varprefix)
class ImageUpload(FileUpload):
def __init__(self, max_size=None, show_current_image=False, *args, **kwargs):
self._max_size = max_size
self._show_current_image = show_current_image
FileUpload.__init__(self, *args, **kwargs)
def render_input(self, varprefix, value):
self.classtype_info()
if self._show_current_image and value:
html.open_table()
html.open_tr()
html.td(_("Current image:"))
html.td(html.render_img("data:image/png;base64,%s" % base64.b64encode(value)))
html.close_tr()
html.open_tr()
html.td(_("Upload new:"))
html.open_td()
super(ImageUpload, self).render_input(varprefix, value)
html.close_td()
html.close_tr()
html.close_table()
else:
super(ImageUpload, self).render_input(varprefix, value)
def validate_value(self, value, varprefix):
from PIL import Image
from StringIO import StringIO
file_name, mime_type, content = value
if file_name[-4:] != '.png' \
or mime_type != 'image/png' \
or not content.startswith('\x89PNG'):
raise MKUserError(varprefix, _('Please choose a PNG image.'))
try:
im = Image.open(StringIO(content))
except IOError:
raise MKUserError(varprefix, _('Please choose a valid PNG image.'))
if self._max_size:
w, h = im.size
max_w, max_h = self._max_size
if w > max_w or h > max_h:
raise MKUserError(varprefix, _('Maximum image size: %dx%dpx') % (max_w, max_h))
ValueSpec.custom_validate(self, value, varprefix)
class UploadOrPasteTextFile(Alternative):
def __init__(self, **kwargs):
file_title = kwargs.get("file_title", _("File"))
allow_empty = kwargs.get("allow_empty", True)
kwargs["elements"] = [
FileUpload(
title = _("Upload %s") % file_title,
allow_empty = allow_empty),
TextAreaUnicode(
title = _("Content of %s") % file_title,
allow_empty = allow_empty,
cols = 80,
rows = "auto"),
]
if kwargs.get("default_mode", "text") == "upload":
kwargs["match"] = lambda *args: 0
else:
kwargs["match"] = lambda *args: 1
kwargs.setdefault("style", "dropdown")
Alternative.__init__(self, **kwargs)
def from_html_vars(self, varprefix):
value = Alternative.from_html_vars(self, varprefix)
# Convert textarea value to format of upload field
if type(value) != tuple:
value = (None, None, value)
return value
class TextOrRegExp(Alternative):
_text_valuespec_class = TextAscii
_regex_valuespec_class = RegExp
def __init__(self, **kwargs):
allow_empty = kwargs.pop("allow_empty", True)
if "text_valuespec" in kwargs:
vs_text = kwargs.pop("text_valuespec")
else:
vs_text = self._text_valuespec_class(
title = _("Explicit match"),
allow_empty = allow_empty,
)
vs_regex = self._regex_valuespec_class(
mode = RegExp.prefix,
title = _("Regular expression match"),
allow_empty = allow_empty,
)
kwargs.update({
"elements": [
vs_text,
Transform(
vs_regex,
forth = lambda v: v[1:], # strip of "~"
back = lambda v: "~" + v, # add "~"
),
],
# Use RegExp field when value is prefixed with "~"
"match" : lambda v: 1 if v and v[0] == "~" else 0,
"style" : "dropdown",
"orientation" : "horizontal",
})
super(TextOrRegExp, self).__init__(**kwargs)
class TextOrRegExpUnicode(TextOrRegExp):
    _text_valuespec_class = TextUnicode
_regex_valuespec_class = RegExpUnicode
class IconSelector(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
self._allow_empty = kwargs.get('allow_empty', True)
        # Accept the historical misspelling 'emtpy_img' for backwards compatibility
        self._empty_img = kwargs.get('empty_img', kwargs.get('emtpy_img', 'empty'))
self._exclude = [
'trans',
'empty',
]
@classmethod
def categories(cls):
import config# FIXME: Clean this up. But how?
return config.wato_icon_categories
@classmethod
def category_alias(cls, category_name):
return dict(cls.categories()).get(category_name, category_name)
    # All icons within the images/icons directory have the ident of a category
    # written in the PNG meta data. For the builtin images this has been done
    # by a script. During upload of user specific icons, the meta data is
    # added to the images.
def available_icons(self, only_local=False):
dirs = [
os.path.join(cmk.paths.omd_root, "local/share/check_mk/web/htdocs/images/icons"),
]
if not only_local:
dirs.append(os.path.join(cmk.paths.omd_root, "share/check_mk/web/htdocs/images/icons"))
valid_categories = dict(self.categories()).keys()
from PIL import Image
#
# Read all icons from the icon directories
#
icons = {}
        for directory in dirs:
            try:
                files = os.listdir(directory)
            except OSError:
                continue
            for file_name in files:
                file_path = directory + "/" + file_name
if file_name[-4:] == '.png' and os.path.isfile(file_path):
# extract the category from the meta data
try:
im = Image.open(file_path)
except IOError, e:
if "%s" % e == "cannot identify image file":
continue # Silently skip invalid files
else:
raise
category = im.info.get('Comment')
if category not in valid_categories:
category = 'misc'
icon_name = file_name[:-4]
icons[icon_name] = category
for exclude in self._exclude:
try:
del icons[exclude]
except KeyError:
pass
return icons
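    # Illustrative shape of the dict returned above (icon names are examples):
    #   {"server": "hardware", "printer": "hardware", "cloud": "misc", ...}
    # i.e. icon name (file name without ".png") mapped to the category ident
    # read from the PNG "Comment" meta data field.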
def available_icons_by_category(self, icons):
by_cat = {}
for icon_name, category_name in icons.items():
by_cat.setdefault(category_name, [])
by_cat[category_name].append(icon_name)
icon_categories = []
for category_name, category_alias in self.categories():
if category_name in by_cat:
icon_categories.append((category_name, category_alias, by_cat[category_name]))
return icon_categories
def render_icon(self, icon_name, onclick = '', title = '', id = ''):
if not icon_name:
icon_name = self._empty_img
icon = html.render_icon(icon_name, help=title, middle=True, id=id)
if onclick:
icon = html.render_a(icon, href="javascript:void(0)", onclick=onclick)
return icon
def render_input(self, varprefix, value):
# Handle complain phase with validation errors correctly and get the value
# from the HTML vars
if value is None:
value = html.var(varprefix + "_value")
self.classtype_info()
if not value:
value = self._empty_img
html.hidden_field(varprefix + "_value", value or '', varprefix + "_value", add_var = True)
if value:
content = self.render_icon(value, '', _('Choose another Icon'), id = varprefix + '_img')
else:
content = _('Select an Icon')
html.popup_trigger(
content, varprefix + '_icon_selector', 'icon_selector',
url_vars=[
('value', value),
('varprefix', varprefix),
('allow_empty', '1' if self._allow_empty else '0'),
('back', html.makeuri([])),
],
resizable=True,
)
def render_popup_input(self, varprefix, value):
html.open_div(class_="icons", id_="%s_icons" % varprefix)
icons = self.available_icons()
available_icons = self.available_icons_by_category(icons)
active_category = icons.get(value, available_icons[0][0])
# Render tab navigation
html.open_ul()
for category_name, category_alias, icons in available_icons:
html.open_li(class_="active" if active_category == category_name else None)
# TODO: TEST
html.a(category_alias, href="javascript:vs_iconselector_toggle(\'%s\', \'%s\')" % (varprefix, category_name),
id_="%s_%s_nav" % (varprefix, category_name), class_="%s_nav" % varprefix)
html.close_li()
html.close_ul()
# Now render the icons grouped by category
empty = ['empty'] if self._allow_empty else []
for category_name, category_alias, icons in available_icons:
html.open_div(id_="%s_%s_container" % (varprefix, category_name),
class_=["icon_container", "%s_container" % varprefix],
style="display:none;" if active_category != category_name else None)
for icon in empty + sorted(icons):
html.open_a(
href=None,
class_="icon",
onclick='vs_iconselector_select(event, \'%s\', \'%s\')' % (varprefix, icon),
title=icon,
)
html.write_html(self.render_icon(icon, id=varprefix + '_i_' + icon, title=icon))
html.span(icon)
html.close_a()
html.close_div()
html.open_div(class_="buttons")
html.jsbutton("_toggle_names", _("Toggle names"),
onclick="vs_iconselector_toggle_names(event, %s)" % json.dumps(varprefix))
        import config  # FIXME: Clean this up. But how?
if config.user.may('wato.icons'):
back_param = '&back='+html.urlencode(html.var('back')) if html.has_var('back') else ''
html.buttonlink('wato.py?mode=icons' + back_param, _('Manage'))
html.close_div()
html.close_div()
def from_html_vars(self, varprefix):
icon = html.var(varprefix + '_value')
if icon == 'empty':
return None
else:
return icon
def value_to_text(self, value):
# TODO: This is a workaround for a bug. This function needs to return str objects right now.
return "%s" % self.render_icon(value)
def validate_datatype(self, value, varprefix):
if value is not None and type(value) != str:
raise MKUserError(varprefix, _("The type is %s, but should be str") % type(value))
def validate_value(self, value, varprefix):
if not self._allow_empty and not value:
raise MKUserError(varprefix, _("You need to select an icon."))
if value and value not in self.available_icons():
raise MKUserError(varprefix, _("The selected icon image does not exist."))
class TimeofdayRanges(Transform):
def __init__(self, **args):
self._count = args.get("count", 3)
Transform.__init__(
self,
Tuple(
elements = [ TimeofdayRange(allow_empty=True, allow_24_00=True) for x in range(self._count) ],
orientation = "float",
separator = " ",
),
            forth = lambda outer: tuple((outer + [None] * self._count)[0:self._count]),
            back = lambda inner: [x for x in inner if x is not None],
**args
)
class Fontsize(Float):
def __init__(self, **kwargs):
kwargs.setdefault("title", _("Font size"))
kwargs.setdefault("default_value", 10)
kwargs["size"] = 5
kwargs["unit"] = _("pt")
super(Fontsize, self).__init__(**kwargs)
class Color(ValueSpec):
def __init__(self, **kwargs):
kwargs["regex"] = "#[0-9]{3,6}"
kwargs["regex_error"] = _("The color needs to be given in hex format.")
ValueSpec.__init__(self, **kwargs)
self._on_change = kwargs.get("on_change")
self._allow_empty = kwargs.get("allow_empty", True)
def render_input(self, varprefix, value):
self.classtype_info()
if not value:
value = "#FFFFFF"
html.javascript_file("js/colorpicker.js")
# Holds the actual value for form submission
html.hidden_field(varprefix + "_value", value or '', varprefix + "_value", add_var = True)
indicator = html.render_div('', id_="%s_preview" % varprefix,
class_="cp-preview",
style="background-color:%s" % value)
# TODO(rh): Please take a look at this hard coded HTML
# FIXME: Rendering with HTML class causes bug in html popup_trigger function.
# Reason is HTML class and the escaping.
menu_content = "<div id=\"%s_picker\" class=\"cp-small\"></div>" % varprefix
menu_content += "<div class=\"cp-input\">" \
"%s" \
"<input id=\"%s_input\" type=\"text\"></input></div>" % \
(_("Hex color:"), varprefix)
menu_content += "<script language=\"javascript\">" \
"vs_color_pickers[\"%s\"] = ColorPicker(document.getElementById(\"%s_picker\")," \
" function(hex, hsv, rgb) { vs_update_color_picker(\"%s\", hex, false); }" \
");" \
"document.getElementById(\"%s_input\").oninput = function() { " \
" vs_update_color_picker(\"%s\", this.value, true); " \
"};" \
"vs_update_color_picker(\"%s\", \"%s\", true);" \
"</script>" % \
(varprefix, varprefix, varprefix, varprefix, varprefix, varprefix, value)
html.popup_trigger(indicator, varprefix + '_popup',
menu_content=menu_content,
cssclass="colorpicker",
onclose=self._on_change)
def from_html_vars(self, varprefix):
color = html.var(varprefix + '_value')
if color == '':
return None
else:
return color
def value_to_text(self, value):
return value
def validate_datatype(self, value, varprefix):
if value is not None and type(value) != str:
raise MKUserError(varprefix, _("The type is %s, but should be str") % type(value))
def validate_value(self, value, varprefix):
if not self._allow_empty and not value:
raise MKUserError(varprefix, _("You need to select a color."))
class SSHKeyPair(ValueSpec):
def __init__(self, **kwargs):
ValueSpec.__init__(self, **kwargs)
def render_input(self, varprefix, value):
self.classtype_info()
if value:
html.write(_("Fingerprint: %s") % self.value_to_text(value))
html.hidden_field(varprefix, self._encode_key_for_url(value), add_var=True)
else:
html.write(_("Key pair will be generated when you save."))
def value_to_text(self, value):
return self._get_key_fingerprint(value)
def from_html_vars(self, varprefix):
if html.has_var(varprefix):
return self._decode_key_from_url(html.var(varprefix))
else:
return self._generate_ssh_key(varprefix)
@staticmethod
def _encode_key_for_url(value):
return "|".join(value)
@staticmethod
def _decode_key_from_url(text):
return text.split("|")
@classmethod
def _generate_ssh_key(cls, varprefix):
key = RSA.generate(4096)
private_key = key.exportKey('PEM')
pubkey = key.publickey()
public_key = pubkey.exportKey('OpenSSH')
return (private_key, public_key)
@classmethod
def _get_key_fingerprint(cls, value):
private_key, public_key = value
key = base64.b64decode(public_key.strip().split()[1].encode('ascii'))
fp_plain = hashlib.md5(key).hexdigest()
return ':'.join(a+b for a,b in zip(fp_plain[::2], fp_plain[1::2]))
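# Illustrative example (not in the original source): for a public key line
# "ssh-rsa AAAAB3Nza... comment", the method above takes the MD5 hex digest of
# the base64-decoded key blob and joins it as colon-separated byte pairs,
# e.g. "43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8".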
class SchedulePeriod(CascadingDropdown):
def __init__(self, from_end=True, **kwargs):
if from_end:
from_end_choice = [
("month_end", _("At the end of every month at day"),
Integer(minvalue=1, maxvalue=28, unit=_("from the end"))),
]
else:
from_end_choice = []
CascadingDropdown.__init__(self,
title = _("Period"),
orientation = "horizontal",
choices = [
( "day", _("Every day"), ),
( "week", _("Every week on..."),
Weekday(title = _("Day of the week"))),
( "month_begin", _("At the beginning of every month at day"),
Integer(minvalue=1, maxvalue=28)),
] + from_end_choice
)
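# Illustrative values produced by SchedulePeriod (not in the original source;
# the exact weekday encoding of Weekday is an assumption):
#   "day"              -> every day, no extra parameter
#   ("week", 1)        -> every week on the selected weekday
#   ("month_begin", 5) -> on the 5th day of every month
#   ("month_end", 3)   -> 3 days before the end of every month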
class CAorCAChain(UploadOrPasteTextFile):
def __init__(self, **args):
args.setdefault("title", _("Certificate Chain (Root / Intermediate Certificate)"))
args.setdefault("file_title", _("CRT/PEM File"))
UploadOrPasteTextFile.__init__(self, **args)
def from_html_vars(self, varprefix):
value = Alternative.from_html_vars(self, varprefix)
if type(value) == tuple:
value = value[2] # FileUpload sends (filename, mime-type, content)
return value
def validate_value(self, value, varprefix):
try:
self.analyse_cert(value)
except Exception, e:
# FIXME TODO: Cleanup this general exception catcher
raise MKUserError(varprefix, _("Invalid certificate file"))
def analyse_cert(self, value):
from OpenSSL import crypto
cert = crypto.load_certificate(crypto.FILETYPE_PEM, value)
titles = {
"C" : _("Country"),
"ST" : _("State or Province Name"),
"L" : _("Locality Name"),
"O" : _("Organization Name"),
"CN" : _("Common Name"),
}
cert_info = {}
for what, x509, title in [
( "issuer", cert.get_issuer(), _("Issuer") ),
( "subject", cert.get_subject(), _("Subject") ),
]:
cert_info[what] = {}
for key, val in x509.get_components():
if key in titles:
cert_info[what][titles[key]] = val.decode("utf8")
return cert_info
def value_to_text(self, value):
cert_info = self.analyse_cert(value)
text = "<table>"
for what, title in [
( "issuer", _("Issuer") ),
( "subject", _("Subject") ),
]:
text += "<tr><td>%s:</td><td>" % title
for title, value in sorted(cert_info[what].items()):
text += "%s: %s<br>" % (title, value)
text += "</tr>"
text += "</table>"
return text
def ListOfCAs(**args):
args.setdefault("title", _("CAs to accept"))
args.setdefault("help", _("Only accepting HTTPS connections with a server which certificate "
"is signed with one of the CAs that are listed here. That way it is guaranteed "
"that it is communicating only with the authentic update server. "
"If you use self signed certificates for you server then enter that certificate "
"here."))
args.setdefault("add_label", _("Add new CA certificate or chain"))
args.setdefault("empty_text", _("You need to enter at least one CA. Otherwise no SSL connection can be made."))
args.setdefault("allow_empty", False)
return ListOf(
CAorCAChain(),
movable = False,
**args
)
class SiteChoice(DropdownChoice):
def __init__(self, **kwargs):
kwargs.setdefault("title", _("Site"))
kwargs.setdefault("help", _("Specify the site of your choice"))
kwargs.setdefault("default_value", self._site_default_value)
kwargs.setdefault("invalid_choice_error", _("The configured site is not known to this site."))
kwargs.update({
"choices": self._site_choices,
"invalid_choice": "complain",
"invalid_choice_title": _("Unknown site (%s)"),
})
super(SiteChoice, self).__init__(**kwargs)
def _site_default_value(self):
import watolib
if watolib.is_wato_slave_site():
return False
import config
default_value = config.site_attribute_default_value()
if default_value:
return default_value
else:
return self.canonical_value()
def _site_choices(self):
import config
return config.site_attribute_choices()
class TimeperiodSelection(DropdownChoice):
def __init__(self, **kwargs):
kwargs.setdefault("no_preselect", True)
kwargs.setdefault("no_preselect_title", _("Select a timeperiod"))
DropdownChoice.__init__(self, choices=self._get_choices, **kwargs)
def _get_choices(self):
import watolib
timeperiods = watolib.load_timeperiods()
elements = [
(name, "%s - %s" % (name, tp["alias"])) for (name, tp) in timeperiods.items()
]
always = ("24X7", _("Always"))
if always not in elements:
elements.insert(0, always)
return elements
class TimeperiodValuespec(ValueSpec):
tp_toggle_var = "tp_toggle" # Used by GUI switch
tp_current_mode = "tp_active" # The actual set mode
# "0" - no timespecific settings
# "1" - timespecific settings active
tp_default_value_key = "tp_default_value" # Used in valuespec
tp_values_key = "tp_values" # Used in valuespec
def __init__(self, valuespec):
super(TimeperiodValuespec, self).__init__(
title = valuespec.title(),
help = valuespec.help()
)
self._enclosed_valuespec = valuespec
def default_value(self):
# If nothing is configured, simply return the default value of the enclosed valuespec
return self._enclosed_valuespec.default_value()
def render_input(self, varprefix, value):
# The display mode differs when the valuespec is activated
vars_copy = html.vars.copy()
# The timeperiod mode can be set by either the GUI switch or by the value itself
# GUI switch overrules the information stored in the value
if html.has_var(self.tp_toggle_var):
is_active = self._is_switched_on()
else:
is_active = self._is_active(value)
# Set the actual used mode
html.hidden_field(self.tp_current_mode, "%d" % is_active)
mode = _("Disable") if is_active else _("Enable")
vars_copy[self.tp_toggle_var] = "%d" % (not is_active)
toggle_url = html.makeuri(vars_copy.items())
html.buttonlink(toggle_url, _("%s timespecific parameters") % mode, style=["position: absolute", "right: 18px;"])
if is_active:
value = self._get_timeperiod_value(value)
self._get_timeperiod_valuespec().render_input(varprefix, value)
else:
value = self._get_timeless_value(value)
return self._enclosed_valuespec.render_input(varprefix, value)
def value_to_text(self, value):
text = ""
if self._is_active(value):
# TODO/Phantasm: highlight currently active timewindow
text += self._get_timeperiod_valuespec().value_to_text(value)
else:
text += self._enclosed_valuespec.value_to_text(value)
return text
def from_html_vars(self, varprefix):
if html.var(self.tp_current_mode) == "1":
# Fetch the timespecific settings
parameters = self._get_timeperiod_valuespec().from_html_vars(varprefix)
if parameters[self.tp_values_key]:
return parameters
else:
# Fall back to enclosed valuespec data when no timeperiod is set
return parameters[self.tp_default_value_key]
else:
# Fetch the data from the enclosed valuespec
return self._enclosed_valuespec.from_html_vars(varprefix)
def canonical_value(self):
return self._enclosed_valuespec.canonical_value()
def validate_datatype(self, value, varprefix):
if self._is_active(value):
self._get_timeperiod_valuespec().validate_datatype(value, varprefix)
else:
self._enclosed_valuespec.validate_datatype(value, varprefix)
def validate_value(self, value, varprefix):
if self._is_active(value):
self._get_timeperiod_valuespec().validate_value(value, varprefix)
else:
self._enclosed_valuespec.validate_value(value, varprefix)
def _get_timeperiod_valuespec(self):
return Dictionary(
elements = [
(self.tp_values_key,
ListOf(
Tuple(
elements = [
TimeperiodSelection(
title = _("Match only during timeperiod"),
help = _("Match this rule only during times where the "
"selected timeperiod from the monitoring "
"system is active."),
),
self._enclosed_valuespec
]
),
title = _("Configured timeperiod parameters"),
)
),
(self.tp_default_value_key,
Transform(
self._enclosed_valuespec,
title = _("Default parameters when no timeperiod matches")
)
),
],
optional_keys = False,
)
# Checks whether the tp-mode is switched on through the gui
def _is_switched_on(self):
return html.var(self.tp_toggle_var) == "1"
    # Checks whether the value itself already uses the tp-mode
    def _is_active(self, value):
        return isinstance(value, dict) and self.tp_default_value_key in value
# Returns simply the value or converts a plain value to a tp-value
def _get_timeperiod_value(self, value):
if isinstance(value, dict) and self.tp_default_value_key in value:
return value
else:
return {self.tp_values_key: [], self.tp_default_value_key: value}
# Returns simply the value or converts tp-value back to a plain value
def _get_timeless_value(self, value):
if isinstance(value, dict) and self.tp_default_value_key in value:
return value.get(self.tp_default_value_key)
else:
return value
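# Illustrative example (not in the original source) of the two value shapes
# converted above, for an enclosed valuespec storing {"levels": (80, 90)} and
# a hypothetical timeperiod named "workhours":
#   plain value:        {"levels": (80, 90)}
#   timespecific value: {"tp_values": [("workhours", {"levels": (95, 99)})],
#                        "tp_default_value": {"levels": (80, 90)}}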
| huiyiqun/check_mk | web/htdocs/valuespec.py | Python | gpl-2.0 | 177,705 |
#!/usr/bin/python3
import os
from gi.repository import Gtk, Gdk
import pageutils
class ModulePage(pageutils.BaseListView):
def __init__(self, parent):
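        # ListStore columns: 0 status, 1 type, 2 name, 3 description, 4 uuid,
        # 5 folder, 6 url, 7 error (bool), 8 error_message (used as tooltip)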
store = Gtk.ListStore(str, str, str, str, str, str, str, bool, str)
pageutils.BaseListView.__init__(self, store)
self.parent = parent
self.selected_path = None
self.create_text_column(0, "Status")
self.create_text_column(1, "Type")
self.create_text_column(2, "Name")
self.create_text_column(3, "Description")
self.get_updates()
parent.lg_proxy.connect("ExtensionListUpdate", self.get_updates)
parent.lg_proxy.add_status_change_callback(self.on_status_change)
self.tree_view.set_tooltip_column(8)
self.popup = Gtk.Menu()
self.view_source = Gtk.MenuItem('View Source')
self.view_source.connect("activate", self.on_view_source)
self.popup.append(self.view_source)
reload_code = Gtk.MenuItem('Reload Code')
reload_code.connect("activate", self.on_reload_code)
self.popup.append(reload_code)
self.view_web_page = Gtk.MenuItem('View Web Page')
self.view_web_page.connect("activate", self.on_view_web_page)
self.popup.append(self.view_web_page)
self.popup.show_all()
self.tree_view.connect("button-press-event", self.on_button_press_event)
def on_view_source(self, menu_item):
tree_iter = self.store.get_iter(self.selected_path)
folder = self.store.get_value(tree_iter, 5)
os.system("xdg-open \"" + folder + "\" &")
def on_reload_code(self, menu_item):
tree_iter = self.store.get_iter(self.selected_path)
uuid = self.store.get_value(tree_iter, 4)
xlet_type = self.store.get_value(tree_iter, 1)
self.parent.lg_proxy.ReloadExtension(uuid, xlet_type.upper())
def on_view_web_page(self, menu_item):
tree_iter = self.store.get_iter(self.selected_path)
url = self.store.get_value(tree_iter, 6)
os.system("xdg-open \"" + url + "\" &")
def on_button_press_event(self, treeview, event):
x = int(event.x)
y = int(event.y)
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
self.selected_path = path
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
tree_iter = self.store.get_iter(self.selected_path)
if event.button == 3:
if pthinfo is not None:
uuid = self.store.get_value(tree_iter, 4)
url = self.store.get_value(tree_iter, 6)
self.view_web_page.set_sensitive(url != "")
self.view_source.set_label(uuid + " (View Source)")
self.popup.popup(None, None, None, None, event.button, event.time)
return True
elif event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS:
if pthinfo is not None:
error = self.store.get_value(tree_iter, 7)
if error:
self.parent.activate_page("log")
def on_status_change(self, online):
if online:
self.get_updates()
def get_updates(self):
success, data = self.parent.lg_proxy.GetExtensionList()
if success:
self.store.clear()
for item in data:
self.store.append([item["status"],
item["type"],
item["name"],
item["description"],
item["uuid"],
item["folder"],
item["url"],
item["error"] == "true",
item["error_message"]])
| Fantu/Cinnamon | files/usr/share/cinnamon/cinnamon-looking-glass/page_extensions.py | Python | gpl-2.0 | 3,877 |
# -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Library for quoting text, email style """
__revision__ = "$Id$"
import cgi
from invenio.htmlutils import HTMLWasher
from HTMLParser import HTMLParseError
def email_quoted_txt2html(text,
tabs_before=0,
indent_txt='>>',
linebreak_txt="\n",
indent_html=('<div class="commentbox">', "</div>"),
linebreak_html='<br/>',
indent_block=True,
wash_p=True):
"""
Takes a typical mail quoted text, e.g.::
hello,
you told me:
>> Your mother was a hamster and your father smelt of elderberries
I must tell you that I'm not convinced. Then in this discussion:
>>>> Is there someone else up there we could talk to?
>> No. Now, go away, or I shall taunt you a second time-a!
I think we're not going to be friends!
and returns HTML formatted output, e.g.::
hello,<br/>
you told me:<br/>
<div>
Your mother was a hamster and your father smelt of elderberries
</div>
I must tell you that I'm not convinced. Then in this discussion:
<div>
<div>
Is there someone else up there we could talk to?
</div>
No. Now, go away, or I shall taunt you a second time-a!
</div>
I think we're not going to be friends!
    The behaviour is different when C{indent_block} is C{True} or C{False}.
    When C{True}, the C{indent_html} tags are only added at each change of
    indentation level, while they are added for each line when C{False}.
For eg::
>> a
>> b
>>>> c
would result in (if C{True})::
<div class="commentbox">
a<br/>
b<br/>
<div class="commentbox">
c<br/>
</div>
</div>
or would be (if C{False})::
<div class="commentbox"> a</div><br/>
<div class="commentbox"> b</div><br/>
<div class="commentbox"><div class="commentbox"> c</div></div><br/>
@param text: the text in quoted format
@param tabs_before: number of tabulations before each line
@param indent_txt: quote separator in email (default:'>>')
@param linebreak_txt: line separator in email
@param indent_html: tuple of (opening, closing) html tags.
default: ('<div class="commentbox">', "</div>")
@param linebreak_html: line separator in html (default: '<br/>')
@param indent_block: if indentation should be done per 'block'
i.e. only at changes of indentation level
(+1, -1) or at each line.
@param wash_p: if each line should be washed or simply escaped.
@return: string containing html formatted output
"""
# If needed, instantiate the HTMLWasher for later
if wash_p:
washer = HTMLWasher()
# Some initial values
out = ""
nb_indent = 0
(indent_html_open, indent_html_close) = indent_html
# Clean off any newlines from around the input
text = text.strip('\n')
# Iterate over the lines in our input
lines = text.split(linebreak_txt)
for line in lines:
# Calculate how indented this line is
new_nb_indent = 0
while True:
if line.startswith(indent_txt):
new_nb_indent += 1
line = line[len(indent_txt):]
else:
break
# In this case we are indenting the entire block
if indent_block:
# This line is more indented than the previous one,
# therefore, open some indentation.
if (new_nb_indent > nb_indent):
for dummy in range(nb_indent, new_nb_indent):
out += tabs_before*"\t" + indent_html_open + "\n"
tabs_before += 1
# This line is less indented than the previous one,
# therefore, close some indentation.
elif (new_nb_indent < nb_indent):
for dummy in range(new_nb_indent, nb_indent):
tabs_before -= 1
out += (tabs_before)*"\t" + indent_html_close + "\n"
# This line is as indented as the previous one,
# therefore, only add the needed tabs.
else:
out += (tabs_before)*"\t"
# And in this case we are indenting each line separately
else:
out += tabs_before*"\t" + new_nb_indent * indent_html_open
# We can wash this line...
if wash_p:
try:
line = washer.wash(line)
except HTMLParseError:
# Line contained something like "foo<bar"
line = cgi.escape(line)
# ...or simply escape it as it is.
else:
line = cgi.escape(line)
# Add the needed tabs for the nicer visual formatting
if indent_block:
out += tabs_before*"\t"
# Add the current line to the output
out += line
# In case we are indenting each line separately,
# close all previously opened indentation.
if not indent_block:
out += new_nb_indent * indent_html_close
# Add the line break to the output after each line
out += linebreak_html + "\n"
# Reset the current line's indentation level
nb_indent = new_nb_indent
# In case we are indenting the entire block,
# close all previously opened indentation.
if indent_block:
for dummy in range(0, nb_indent):
tabs_before -= 1
out += (tabs_before)*"\t" + indent_html_close + "\n"
# Return the output
return out
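# Example (illustrative; assumes HTMLWasher leaves plain text unchanged):
#   email_quoted_txt2html("hi\n>>quoted\nbye")
# returns:
#   'hi<br/>\n<div class="commentbox">\n\tquoted<br/>\n</div>\nbye<br/>\n'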
def email_quote_txt(text,
indent_txt='>>',
linebreak_input="\n",
linebreak_output="\n",
escape_p=False):
"""
Takes a text and returns it in a typical mail quoted format, e.g.::
C'est un lapin, lapin de bois.
>>Quoi?
Un cadeau.
>>What?
A present.
>>Oh, un cadeau.
will return::
>>C'est un lapin, lapin de bois.
>>>>Quoi?
>>Un cadeau.
>>>>What?
>>A present.
>>>>Oh, un cadeau.
@param text: the string to quote
@param indent_txt: the string used for quoting (default: '>>')
@param linebreak_input: in the text param, string used for linebreaks
@param linebreak_output: linebreak used for output
@param escape_p: if True, escape the text before returning it
@return: the text as a quoted string
"""
out= ""
if text:
lines = text.split(linebreak_input)
for line in lines:
out += indent_txt + line + linebreak_output
if escape_p:
out = cgi.escape(out)
return out
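# Example: email_quote_txt("a\nb") returns ">>a\n>>b\n".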
def escape_email_quoted_text(
text,
indent_txt='>>',
linebreak_txt='\n',
wash_p=True):
"""
Escape text using an email-like indenting rule.
As an example, this text::
>>Brave Sir Robin ran away...
<img src="malicious_script />*No!*
>>bravely ran away away...
I didn't!*<script>malicious code</script>
>>When danger reared its ugly head, he bravely turned his tail and fled.
<form onload="malicious"></form>*I never did!*
will be escaped like this::
>>Brave Sir Robin ran away...
<img src="malicious_script />*No!*
>>bravely ran away away...
I didn't!*<script>malicious code</script>
>>When danger reared its ugly head, he bravely turned his tail and fled.
<form onload="malicious"></form>*I never did!*
@param text: the string to escape
@param indent_txt: the string used for quoting
@param linebreak_txt: in the text param, string used for linebreaks
@param wash_p: if each line should be washed or simply escaped.
"""
if wash_p:
washer = HTMLWasher()
lines = text.split(linebreak_txt)
output = ''
for line in lines:
line = line.strip()
nb_indent = 0
while True:
if line.startswith(indent_txt):
nb_indent += 1
line = line[len(indent_txt):]
else:
break
if wash_p:
output += (nb_indent * indent_txt) + washer.wash(line, render_unallowed_tags=True) + linebreak_txt
else:
output += (nb_indent * indent_txt) + cgi.escape(line) + linebreak_txt
nb_indent = 0
return output[:-1]
| CERNDocumentServer/invenio | modules/webmessage/lib/webmessage_mailutils.py | Python | gpl-2.0 | 9,479 |
# -*- coding: utf-8 -*-
"""
Global tables and re-usable fields
"""
# =============================================================================
# Import S3 meta fields into global namespace
#
exec("from applications.%s.modules.s3.s3fields import *" % request.application)
# Faster for Production (where app-name won't change):
#from applications.eden.modules.s3.s3fields import *
# =============================================================================
# Representations for Auth Users & Groups
def s3_user_represent(id):
""" Represent a User as their email address """
table = db.auth_user
user = db(table.id == id).select(table.email,
limitby=(0, 1),
cache=(cache.ram, 10)).first()
if user:
return user.email
return None
def s3_avatar_represent(id, tablename="auth_user", _class="avatar"):
""" Represent a User as their profile picture or Gravatar """
table = db[tablename]
email = None
image = None
if tablename == "auth_user":
user = db(table.id == id).select(table.email,
table.image,
limitby=(0, 1),
cache=(cache.ram, 10)).first()
if user:
email = user.email.strip().lower()
image = user.image
elif tablename == "pr_person":
user = db(table.id == id).select(table.pe_id,
table.picture,
limitby=(0, 1),
cache=(cache.ram, 10)).first()
if user:
image = user.picture
ctable = db.pr_contact
            query = (ctable.pe_id == user.pe_id) & (ctable.contact_method == "EMAIL")
email = db(query).select(ctable.value,
limitby=(0, 1),
cache=(cache.ram, 10)).first()
if email:
email = email.value
if image:
url = URL(c="default", f="download",
args=image)
elif email:
# If no Image uploaded, try Gravatar, which also provides a nice fallback identicon
hash = md5.new(email).hexdigest()
url = "http://www.gravatar.com/avatar/%s?s=50&d=identicon" % hash
else:
url = "http://www.gravatar.com/avatar/00000000000000000000000000000000?d=mm"
return IMG(_src=url,
_class=_class,
_height=50, _width=50)
def s3_role_represent(id):
""" Represent a Role by Name """
table = db.auth_group
role = db(table.id == id).select(table.role,
limitby=(0, 1),
cache=(cache.ram, 10)).first()
if role:
return role.role
return None
# =============================================================================
# Record authorship meta-fields
# Author of a record
s3_meta_created_by = S3ReusableField("created_by", db.auth_user,
readable=False,
writable=False,
requires=None,
default=session.auth.user.id
if auth.is_logged_in()
else None,
represent=s3_user_represent,
ondelete="RESTRICT")
# Last author of a record
s3_meta_modified_by = S3ReusableField("modified_by", db.auth_user,
readable=False,
writable=False,
requires=None,
default=session.auth.user.id
if auth.is_logged_in()
else None,
update=session.auth.user.id
if auth.is_logged_in()
else None,
represent=s3_user_represent,
ondelete="RESTRICT")
def s3_authorstamp():
return (s3_meta_created_by(),
s3_meta_modified_by())
# =============================================================================
# Record ownership meta-fields
# Individual user who owns the record
s3_meta_owned_by_user = S3ReusableField("owned_by_user", db.auth_user,
readable=False,
writable=False,
requires=None,
default=session.auth.user.id
if auth.is_logged_in()
else None,
represent=lambda id: \
id and s3_user_represent(id) or UNKNOWN_OPT,
ondelete="RESTRICT")
# Role of users who collectively own the record
s3_meta_owned_by_role = S3ReusableField("owned_by_role", "integer",
readable=False,
writable=False,
requires=None,
default=None,
represent=s3_role_represent)
# Role of the Organisation the record belongs to
s3_meta_owned_by_organisation = S3ReusableField("owned_by_organisation", "integer",
readable=False,
writable=False,
requires=None,
default=None,
represent=s3_role_represent)
# Role of the Facility the record belongs to
s3_meta_owned_by_facility = S3ReusableField("owned_by_facility", "integer",
readable=False,
writable=False,
requires=None,
default=None,
represent=s3_role_represent)
def s3_ownerstamp():
return (s3_meta_owned_by_user(),
s3_meta_owned_by_role(),
s3_meta_owned_by_organisation(),
s3_meta_owned_by_facility())
# =============================================================================
# Common meta-fields
def s3_meta_fields():
fields = (s3_meta_uuid(),
s3_meta_mci(),
s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_created_on(),
s3_meta_modified_on(),
s3_meta_created_by(),
s3_meta_modified_by(),
s3_meta_owned_by_user(),
s3_meta_owned_by_role(),
s3_meta_owned_by_organisation(),
s3_meta_owned_by_facility())
return fields
# =============================================================================
response.s3.all_meta_field_names = [field.name for field in
[s3_meta_uuid(),
s3_meta_mci(),
s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_created_on(),
s3_meta_modified_on(),
s3_meta_created_by(),
s3_meta_modified_by(),
s3_meta_owned_by_user(),
s3_meta_owned_by_role()
]]
# =============================================================================
# Reusable field for scheduler task links
#
scheduler_task_id = S3ReusableField("scheduler_task_id",
"reference %s" % s3base.S3Task.TASK_TABLENAME,
ondelete="CASCADE")
# =============================================================================
# Reusable roles fields for map layer permissions management (GIS)
role_required = S3ReusableField("role_required", db.auth_group,
sortby="role",
requires = IS_NULL_OR(IS_ONE_OF(db,
"auth_group.id",
"%(role)s",
zero=T("Public"))),
widget = S3AutocompleteWidget(
"auth",
"group",
fieldname="role"),
represent = s3_role_represent,
label = T("Role Required"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Role Required"),
T("If this record should be restricted then select which role is required to access the record here."))),
ondelete = "RESTRICT")
roles_permitted = S3ReusableField("roles_permitted", db.auth_group,
sortby="role",
requires = IS_NULL_OR(IS_ONE_OF(db,
"auth_group.id",
"%(role)s",
multiple=True)),
# @ToDo
#widget = S3CheckboxesWidget(lookup_table_name = "auth_group",
# lookup_field_name = "role",
# multiple = True),
represent = s3_role_represent,
label = T("Roles Permitted"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Roles Permitted"),
T("If this record should be restricted then select which role(s) are permitted to access the record here."))),
ondelete = "RESTRICT")
# =============================================================================
# Other reusable fields
# -----------------------------------------------------------------------------
# Reusable name field to include in other table definitions
name_field = S3ReusableField("name", length=64,
                             label=T("Name"), requires=IS_NOT_EMPTY())
# -----------------------------------------------------------------------------
# Reusable comments field to include in other table definitions
s3_comments = S3ReusableField("comments", "text",
label = T("Comments"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Comments"),
T("Please use this field to record any additional information, including a history of the record if it is updated."))))
# -----------------------------------------------------------------------------
# Reusable currency field to include in other table definitions
#
# @ToDo: Move to a Finance module
#
currency_type_opts = deployment_settings.get_fin_currencies()
default_currency = deployment_settings.get_fin_currency_default()
currency_type = S3ReusableField("currency_type", "string",
length = 3,
#notnull=True,
requires = IS_IN_SET(currency_type_opts.keys(),
zero=None),
default = default_currency,
label = T("Currency"),
#represent = lambda opt: \
# currency_type_opts.get(opt, UNKNOWN_OPT),
writable = deployment_settings.get_fin_currency_writable())
# =============================================================================
# Addresses
#
# These fields are populated onaccept from location_id
#
# Labels that need gis_config data are set by gis.set_config() calling
# gis.update_gis_config_dependent_options()
#
address_building_name = S3ReusableField("building_name",
label=T("Building Name"),
readable=False,
writable=False)
address_address = S3ReusableField("address",
label=T("Address"),
readable=False,
writable=False)
address_postcode = S3ReusableField("postcode",
label=deployment_settings.get_ui_label_postcode(),
readable=False,
writable=False)
address_L4 = S3ReusableField("L4",
#label=gis.get_location_hierarchy("L4"),
readable=False,
writable=False)
address_L3 = S3ReusableField("L3",
#label=gis.get_location_hierarchy("L3"),
readable=False,
writable=False)
address_L2 = S3ReusableField("L2",
#label=gis.get_location_hierarchy("L2"),
readable=False,
writable=False)
address_L1 = S3ReusableField("L1",
#label=gis.get_location_hierarchy("L1"),
readable=False,
writable=False)
address_L0 = S3ReusableField("L0",
label=T("Country"), # L0 Location Name never varies except with a Translation
readable=False,
writable=False)
def address_fields():
# return multiple reusable fields
fields = (
address_building_name(),
address_address(),
address_postcode(),
address_L4(),
address_L3(),
address_L2(),
address_L1(),
address_L0(),
)
return fields
# Hide Address fields in Create forms
# inc list_create (list_fields over-rides)
def address_hide(table):
table.building_name.readable = False
table.address.readable = False
table.L4.readable = False
table.L3.readable = False
table.L2.readable = False
table.L1.readable = False
table.L0.readable = False
table.postcode.readable = False
return
def address_onvalidation(form):
"""
Write the Postcode & Street Address fields from the Location
- used by pr_address, org_office & cr_shelter
@ToDo: Allow the reverse operation.
If these fields are populated then create/update the location
"""
if "location_id" in form.vars:
table = db.gis_location
# Read Postcode & Street Address
query = (table.id == form.vars.location_id)
location = db(query).select(table.addr_street,
table.addr_postcode,
table.name,
table.level,
table.parent,
table.path,
limitby=(0, 1)).first()
if location:
form.vars.address = location.addr_street
form.vars.postcode = location.addr_postcode
if location.level == "L0":
form.vars.L0 = location.name
elif location.level == "L1":
form.vars.L1 = location.name
if location.parent:
query = (table.id == location.parent)
country = db(query).select(table.name,
limitby=(0, 1)).first()
if country:
form.vars.L0 = country.name
else:
if location.level is None:
form.vars.building_name = location.name
# Get Names of ancestors at each level
gis.get_parent_per_level(form.vars,
form.vars.location_id,
feature=location,
ids=False,
names=True)
def address_update(table, record_id):
"""
Write the Postcode & Street Address fields from the Location
- used by asset_asset
@ToDo: Allow the reverse operation.
If these fields are populated then create/update the location
"""
if "location_id" in table:
locations = db.gis_location
# Read Postcode & Street Address
query = (table.id == record_id) & \
(locations.id == table.location_id)
location = db(query).select(locations.addr_street,
locations.addr_postcode,
locations.name,
locations.level,
locations.parent,
locations.path,
limitby=(0, 1)).first()
if location:
vars = Storage()
vars.address = location.addr_street
vars.postcode = location.addr_postcode
if location.level == "L0":
vars.L0 = location.name
elif location.level == "L1":
vars.L1 = location.name
if location.parent:
query = (locations.id == location.parent)
country = db(query).select(locations.name,
limitby=(0, 1)).first()
if country:
vars.L0 = country.name
else:
if location.level is None:
vars.building_name = location.name
# Get Names of ancestors at each level
gis.get_parent_per_level(vars,
vars.location_id,
feature=location,
ids=False,
names=True)
# Update record
db(table.id == record_id).update(**vars)
# =============================================================================
# Default CRUD strings
ADD_RECORD = T("Add Record")
LIST_RECORDS = T("List Records")
s3.crud_strings = Storage(
title_create = ADD_RECORD,
title_display = T("Record Details"),
title_list = LIST_RECORDS,
title_update = T("Edit Record"),
title_search = T("Search Records"),
subtitle_create = T("Add New Record"),
subtitle_list = T("Available Records"),
label_list_button = LIST_RECORDS,
label_create_button = ADD_RECORD,
label_delete_button = T("Delete Record"),
msg_record_created = T("Record added"),
msg_record_modified = T("Record updated"),
msg_record_deleted = T("Record deleted"),
msg_list_empty = T("No Records currently available"),
msg_match = T("Matching Records"),
msg_no_match = T("No Matching Records"))
# =============================================================================
# Common tables
# Import Files
# @ToDo: Replace with Importer UI which is accessible to non-Admins
import_type_opts = {
"asset_asset": T("Assets"),
"hrm_person": T("Human Resources"),
"inv_inv_item": T("Inventory Items"),
"supply_item_category": T("Supply Item Categories"),
"inv_warehouse": T("Warehouses")
}
tablename = "admin_import_file"
table = db.define_table(tablename,
Field("type", label = T("Type"),
comment = A(T("Download Template"),
_id="dl_template",
_class="hidden"),
requires = IS_IN_SET(import_type_opts),
represent = lambda opt: import_type_opts.get(opt,
NONE)),
Field("filename",
readable=False, # Just shows up in List View
writable=False,
label = T("File name")),
Field("file", "upload", autodelete=True,
requires = IS_UPLOAD_FILENAME(extension="csv"),
uploadfolder = os.path.join(request.folder,
"uploads",
"imports"),
comment = DIV( _class="tooltip",
_title="%s|%s" % (T("Import File"),
T("Upload a CSV file formatted according to the Template."))),
label = T("Import File")),
s3_comments(),
*s3_timestamp())
# -----------------------------------------------------------------------------
# Theme
# @ToDo: Fix or remove completely
#tablename = "admin_theme"
#table = db.define_table(tablename,
# Field("name",
# requires = [IS_NOT_EMPTY(),
# IS_NOT_ONE_OF(db,
# "%s.name" % tablename)]),
# Field("logo"),
# Field("header_background"),
# Field("col_background", requires = IS_HTML_COLOUR()),
# Field("col_txt", requires = IS_HTML_COLOUR()),
# Field("col_txt_background", requires = IS_HTML_COLOUR()),
# Field("col_txt_border", requires = IS_HTML_COLOUR()),
# Field("col_txt_underline", requires = IS_HTML_COLOUR()),
# Field("col_menu", requires = IS_HTML_COLOUR()),
# Field("col_highlight", requires = IS_HTML_COLOUR()),
# Field("col_input", requires = IS_HTML_COLOUR()),
# Field("col_border_btn_out", requires = IS_HTML_COLOUR()),
# Field("col_border_btn_in", requires = IS_HTML_COLOUR()),
# Field("col_btn_hover", requires = IS_HTML_COLOUR()),
# )
# -----------------------------------------------------------------------------
# Settings - systemwide
# @ToDo: Move these to deployment_settings
#tablename = "s3_setting"
#table = db.define_table(tablename,
# #Field("admin_name"),
# #Field("admin_email"),
# #Field("admin_tel"),
# Field("theme", db.admin_theme,
# requires = IS_IN_DB(db, "admin_theme.id",
# "admin_theme.name",
# zero=None),
# represent = lambda name: \
# db(db.admin_theme.id == name).select(db.admin_theme.name,
# limitby=(0, 1)).first().name),
# *s3_timestamp())
# Define CRUD strings (NB These apply to all Modules' "settings" too)
ADD_SETTING = T("Add Setting")
LIST_SETTINGS = T("List Settings")
s3.crud_strings["setting"] = Storage(
title_create = ADD_SETTING,
title_display = T("Setting Details"),
title_list = LIST_SETTINGS,
title_update = T("Edit Setting"),
title_search = T("Search Settings"),
subtitle_create = T("Add New Setting"),
subtitle_list = T("Settings"),
label_list_button = LIST_SETTINGS,
label_create_button = ADD_SETTING,
msg_record_created = T("Setting added"),
msg_record_modified = T("Setting updated"),
msg_record_deleted = T("Setting deleted"),
msg_list_empty = T("No Settings currently defined"))
# =============================================================================
| flavour/porto | models/00_tables.py | Python | mit | 25,033 |
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import (
Categorical,
Timedelta,
Timestamp,
)
import pandas._testing as tm
def test_cast_1d_array_like_from_scalar_categorical():
# see gh-19565
#
# Categorical result from scalar did not maintain
# categories and ordering of the passed dtype.
cats = ["a", "b", "c"]
cat_type = CategoricalDtype(categories=cats, ordered=False)
expected = Categorical(["a", "a"], categories=cats)
result = construct_1d_arraylike_from_scalar("a", len(expected), cat_type)
tm.assert_categorical_equal(result, expected)
def test_cast_1d_array_like_from_timestamp():
    # check we don't lose nanoseconds
ts = Timestamp.now() + Timedelta(1)
res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]"))
assert res[0] == ts
def test_cast_1d_array_like_from_timedelta():
    # check we don't lose nanoseconds
td = Timedelta(1)
res = construct_1d_arraylike_from_scalar(td, 2, np.dtype("m8[ns]"))
assert res[0] == td
def test_cast_1d_array_like_mismatched_datetimelike():
td = np.timedelta64("NaT", "ns")
dt = np.datetime64("NaT", "ns")
with pytest.raises(TypeError, match="Cannot cast"):
construct_1d_arraylike_from_scalar(td, 2, dt.dtype)
with pytest.raises(TypeError, match="Cannot cast"):
construct_1d_arraylike_from_scalar(np.timedelta64(4, "ns"), 2, dt.dtype)
with pytest.raises(TypeError, match="Cannot cast"):
construct_1d_arraylike_from_scalar(dt, 2, td.dtype)
with pytest.raises(TypeError, match="Cannot cast"):
construct_1d_arraylike_from_scalar(np.datetime64(4, "ns"), 2, td.dtype)
| rs2/pandas | pandas/tests/dtypes/cast/test_construct_from_scalar.py | Python | bsd-3-clause | 1,786 |
#!/usr/bin/env python
import os
import SocketServer
import threading
import random
import time
from Crypto.Cipher.AES import AESCipher
key = 'XXXXXXXXXXXXXXXX' # obviously, this is not the real key.
secret_data = 'This is not the real secret data'
class threadedserver(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
def pkcs7_pad(s):
l = len(s)
needed = 16 - (l % 16)
return s + (chr(needed) * needed)
def pkcs7_unpad(s):
# this is not actually used, but we wanted to put it in so you could see how to implement it.
assert len(s) % 16 == 0
assert len(s) >= 16
last_char = s[-1]
count = ord(last_char)
assert 0 < count <= 16
assert s[-count:] == last_char * count
return s[:-count]
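# Illustrative examples (not part of the original handout):
#   pkcs7_pad("AB")     == "AB" + "\x0e" * 14        (padded up to 16 bytes)
#   pkcs7_pad("A" * 16) == "A" * 16 + "\x10" * 16    (full extra block)
# so pkcs7_unpad(pkcs7_pad(s)) == s for any string s.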
def oracle(s):
# so, this is simulated. In reality we'd have to run javascript on a target web browser
# and capture the traffic. That's pretty hard to do in a way that scales, though, so we
# simulate it instead.
# This uses ECB mode.
return AESCipher(key).encrypt(pkcs7_pad('GET /' + s.decode('hex') + secret_data))
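# Illustrative note (not in the original handout): since ECB encrypts equal
# plaintext blocks to equal ciphertext blocks and the attacker-chosen path is
# prepended to secret_data, this oracle permits classic byte-at-a-time ECB
# decryption of secret_data by aligning one unknown byte per block boundary.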
class incoming(SocketServer.StreamRequestHandler):
def handle(self):
self.request.send("Please send the path you'd like them to visit, hex-encoded.\n")
data = self.request.recv(4096).strip('\n')
self.request.send(oracle(data).encode('hex') + '\n')
self.request.close()
class ReusableTCPServer(SocketServer.ForkingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
if __name__ == "__main__":
HOST = '0.0.0.0'
PORT = 65414
SocketServer.TCPServer.allow_reuse_address = True
server = ReusableTCPServer((HOST, PORT), incoming)
server.serve_forever()
| Oksisane/PicoCTF-2014-Write-ups | cryptography/server.py | Python | gpl-3.0 | 1,639 |
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils
from hep_ml.losses import BinFlatnessLossFunction
from hep_ml.gradientboosting import UGradientBoostingClassifier
from sklearn.preprocessing import StandardScaler
trainingFilePath = 'training.csv'
testFilePath = 'test.csv'
def get_training_data():
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits', 'IP', 'IPSig', 'isolationc']
f = open(trainingFilePath)
data = []
y = []
ids = []
for i, l in enumerate(f):
if i == 0:
labels = l.rstrip().split(',')
label_indices = dict((l, i) for i, l in enumerate(labels))
continue
values = l.rstrip().split(',')
filtered = []
for v, l in zip(values, labels):
if l not in filter_out:
filtered.append(float(v))
label = values[label_indices['signal']]
ID = values[0]
data.append(filtered)
y.append(float(label))
ids.append(ID)
return ids, np.array(data), np.array(y)
def get_test_data():
filter_out = ['id', 'min_ANNmuon', 'production', 'mass', 'signal', 'SPDhits', 'IP', 'IPSig', 'isolationc']
f = open(testFilePath)
data = []
ids = []
for i, l in enumerate(f):
if i == 0:
labels = l.rstrip().split(',')
continue
values = l.rstrip().split(',')
filtered = []
for v, l in zip(values, labels):
if l not in filter_out:
filtered.append(float(v))
ID = values[0]
data.append(filtered)
ids.append(ID)
return ids, np.array(data)
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
# get training data
ids, X, y = get_training_data()
print('Data shape:', X.shape)
# shuffle the data
np.random.seed(248)
np.random.shuffle(X)
np.random.seed(248)
np.random.shuffle(y)
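# Re-seeding with the same value before each shuffle yields the identical
# permutation, so the rows of X stay aligned with their labels in y.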
print('Signal ratio:', np.sum(y) / y.shape[0])
# preprocess the data
X, scaler = preprocess_data(X)
y = np_utils.to_categorical(y)
# split into training / evaluation data
nb_train_sample = int(len(y) * 0.97)
X_train = X[:nb_train_sample]
X_eval = X[nb_train_sample:]
y_train = y[:nb_train_sample]
y_eval = y[nb_train_sample:]
print('Train on:', X_train.shape[0])
print('Eval on:', X_eval.shape[0])
# deep pyramidal MLP, narrowing with depth
model = Sequential()
model.add(Dropout(0.15))
model.add(Dense(X_train.shape[1],200))
model.add(PReLU((200,)))
model.add(Dropout(0.13))
model.add(Dense(200, 150))
model.add(PReLU((150,)))
model.add(Dropout(0.12))
model.add(Dense(150,100))
model.add(PReLU((100,)))
model.add(Dropout(0.11))
model.add(Dense(100, 50))
model.add(PReLU((50,)))
model.add(Dropout(0.09))
model.add(Dense(50, 30))
model.add(PReLU((30,)))
model.add(Dropout(0.07))
model.add(Dense(30, 25))
model.add(PReLU((25,)))
model.add(Dense(25, 2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# train model
model.fit(X_train, y_train, batch_size=128, nb_epoch=75, validation_data=(X_eval, y_eval), verbose=2, show_accuracy=True)
# generate submission
ids, X = get_test_data()
print('Data shape:', X.shape)
X, scaler = preprocess_data(X, scaler)
predskeras = model.predict(X, batch_size=256)[:, 1]
print("Load the training/test data using pandas")
train = pd.read_csv(trainingFilePath)
test = pd.read_csv(testFilePath)
print("Eliminate SPDhits, which makes the agreement check fail")
features = list(train.columns[1:-5])
print("Train a UGradientBoostingClassifier")
loss = BinFlatnessLossFunction(['mass'], n_bins=15, uniform_label=0)
clf = UGradientBoostingClassifier(loss=loss, n_estimators=50, subsample=0.1,
max_depth=6, min_samples_leaf=10,
learning_rate=0.1, train_features=features, random_state=11)
clf.fit(train[features + ['mass']], train['signal'])
fb_preds = clf.predict_proba(test[features])[:,1]
print("Train a Random Forest model")
rf = RandomForestClassifier(n_estimators=250, n_jobs=-1, criterion="entropy", random_state=1)
rf.fit(train[features], train["signal"])
print("Train a XGBoost model")
params = {"objective": "binary:logistic",
"eta": 0.2,
"max_depth": 10,
"min_child_weight": 1,
"silent": 1,
"colsample_bytree": 0.7,
"seed": 1}
num_trees=300
gbm = xgb.train(params, xgb.DMatrix(train[features], train["signal"]), num_trees)
print("Make predictions on the test set")
test_probs = (0.30*rf.predict_proba(test[features])[:,1]) + (0.30*gbm.predict(xgb.DMatrix(test[features])))+(0.30*predskeras) + (0.10*fb_preds)
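# Blend weights sum to 1.00 (0.30 + 0.30 + 0.30 + 0.10): equal weight for the
# RF, XGBoost and Keras models plus 0.10 for the flatness-constrained booster,
# which is presumably kept small to preserve the mass-uniformity checks.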
submission = pd.DataFrame({"id": test["id"], "prediction": test_probs})
submission.to_csv("rf_xgboost_keras.csv", index=False)
| korbonits/kaggle-tau-to-three-muons | ensemble_model.py | Python | mit | 5,133 |
# -*- coding: utf-8 -*-
"""Script to compact all Brython scripts in a single one."""
import datetime
import os
import re
import sys
import tarfile
import zipfile
import javascript_minifier
if sys.version_info[0] != 3:
raise ValueError("This script only works with Python 3")
# path of parent directory
pdir = os.path.dirname(os.getcwd())
# version info
version = [3, 3, 0, "alpha", 0]
implementation = [3, 2, 5, 'alpha', 0]
# version name
vname = '.'.join(str(x) for x in implementation[:3])
if implementation[3] == 'rc':
vname += 'rc%s' % implementation[4]
abs_path = lambda _pth: os.path.join(os.path.dirname(os.getcwd()), 'www', 'src', _pth)
now = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
# update version number
with open(abs_path('version_info.js'), 'w') as vinfo_file_out:
# implementation[2] = now
vinfo_file_out.write('__BRYTHON__.implementation = %s\n' % implementation)
vinfo_file_out.write('__BRYTHON__.__MAGIC__ = "%s"\n' %
'.'.join(['%s' % _i for _i in implementation[:3]]))
vinfo_file_out.write('__BRYTHON__.version_info = %s\n' % str(version))
vinfo_file_out.write('__BRYTHON__.compiled_date = "%s"\n' % str(datetime.datetime.now()))
# builtin module names = list of scripts in src/libs
vinfo_file_out.write('__BRYTHON__.builtin_module_names = ["posix","sys","errno", "time",')
_modules=['"%s"' % fname.split('.')[0]
for fname in os.listdir(abs_path('libs')) if fname.endswith('.js')]
_modules.sort() #sort modules so that git diff's don't change between runs
vinfo_file_out.write(',\n '.join(_modules))
# add Python scripts in Lib that start with _ and arent found in CPython Lib
# using sys.executable to find stdlib dir doesn't work under linux.
stdlib_path = os.path.dirname(os.__file__)
# stdlib_path = os.path.join(os.path.dirname(sys.executable),'Lib')
stdlib_mods = [f for f in os.listdir(stdlib_path) if f.startswith('_')]
stdlib_mods.sort()
brython_mods = [f for f in os.listdir(abs_path('Lib'))
if f.startswith('_') and f != '__pycache__']
brython_py_builtins = [os.path.splitext(x)[0]
for x in brython_mods if x not in stdlib_mods]
brython_py_builtins.sort()
vinfo_file_out.write(',\n ' + ',\n '.join(
['"%s"' % f for f in brython_py_builtins]))
vinfo_file_out.write(']\n')
#log.info("Finished Writing file: " + abs_path('version_info.js'))
import make_stdlib_static
# build brython.js from base Javascript files
sources = [
'brython_builtins', 'version_info', 'py2js',
'py_object', 'py_type', 'py_utils', 'py_builtin_functions',
'py_exceptions', 'py_range_slice', 'py_bytes', 'js_objects',
'stdlib_paths', 'py_import', 'py_float', 'py_int', 'py_long_int',
'py_complex', 'py_list', 'py_string', 'py_dict', 'py_set', 'py_dom',
'py_generator', 'builtin_modules', 'py_import_hooks', 'async'
]
res = '// brython.js brython.info\n'
res += '// version %s\n' % version
res += '// implementation %s\n' % implementation
res += '// version compiled from commented, indented source files '
res += 'at github.com/brython-dev/brython\n'
src_size = 0
for fname in sources:
src = open(abs_path(fname)+'.js').read() + '\n'
src_size += len(src)
res += javascript_minifier.minify(src)
res = res.replace('context', 'C')
with open(abs_path('brython.js'), 'w') as the_brythonjs_file_output:
the_brythonjs_file_output.write(res)
print('size : originals %s compact %s gain %.2f' %
      (src_size, len(res), 100 * (src_size - len(res)) / src_size))
sys.path.append("scripts")
try:
import make_VFS # isort:skip
except ImportError:
print("Cannot find make_VFS, so we won't make py_VFS.js")
make_VFS = None
sys.exit()
make_VFS.process(os.path.join(pdir, 'www', 'src', 'py_VFS.js'))
# make distribution with core + libraries
with open(os.path.join(pdir, 'www', 'src', 'brython_dist.js'), 'w') as distrib_file:
distrib_file.write(open(os.path.join(pdir, 'www', 'src', 'brython.js')).read())
distrib_file.write(open(os.path.join(pdir, 'www', 'src', 'py_VFS.js')).read())
| harukaeru/Brython-Django | static/brython/scripts/make_dist.py | Python | mit | 4,186 |
"""
Example 6
Just another example
9/1/2016
"""
###Variables
x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"
y = "Those who know %s and those who %s." % (binary, do_not)
###Main
print x
print y
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print joke_evaluation % hilarious
w = "This is the left side of..."
e = "a string with a right side."
print w + e | chrisortman/CIS-121 | k0765042/lpthy/ex6.py | Python | mit | 411 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Newspipe - A web news aggregator.
# Copyright (C) 2010-2021 Cédric Bonhomme - https://www.cedricbonhomme.org
#
# For more information: https://sr.ht/~cedric/newspipe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 1.10 $"
__date__ = "$Date: 2010/12/07 $"
__revision__ = "$Date: 2016/11/22 $"
__copyright__ = "Copyright (c) Cedric Bonhomme"
__license__ = "AGPLv3"
import glob
import logging
import operator
import os
import re
import subprocess
import sys
import urllib
from collections import Counter
from contextlib import contextmanager
import sqlalchemy
from flask import request
from newspipe.bootstrap import application
from newspipe.controllers import ArticleController
from newspipe.lib.utils import clear_string
try:
    from urlparse import urlparse, parse_qs, urlunparse, urljoin
except ImportError:
from urllib.parse import urlparse, parse_qs, urlunparse, urljoin
logger = logging.getLogger(__name__)
ALLOWED_EXTENSIONS = set(["xml", "opml", "json"])
def is_safe_url(target):
"""
Ensures that a redirect target will lead to the same server.
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc
def get_redirect_target():
"""
Looks at various hints to find the redirect target.
"""
for target in request.args.get("next"), request.referrer:
if not target:
continue
if is_safe_url(target):
return target
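# A minimal sketch (not part of the original module) of how is_safe_url()
# behaves; the host names below are hypothetical. A request context is needed
# because is_safe_url() reads request.host_url.
def _demo_is_safe_url():
    from flask import Flask
    app = Flask(__name__)
    with app.test_request_context(base_url="http://example.com"):
        assert is_safe_url("http://example.com/feeds")       # same server
        assert is_safe_url("/relative/path")                 # joined to host_url
        assert not is_safe_url("http://evil.example.org/")   # different netloc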
def allowed_file(filename):
"""
Check if the uploaded file is allowed.
"""
return "." in filename and filename.rsplit(".", 1)[1] in ALLOWED_EXTENSIONS
@contextmanager
def opened_w_error(filename, mode="r"):
try:
f = open(filename, mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
def fetch(id, feed_id=None):
"""
    Fetch the feeds in a new process.
The default crawler ("asyncio") is launched with the manager.
"""
env = os.environ.copy()
env["FLASK_APP"] = "runserver.py"
cmd = [
sys.exec_prefix + "/bin/flask",
"fetch_asyncio",
"--user-id",
str(id),
]
if feed_id:
cmd.extend(["--feed-id", str(feed_id)])
return subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
def history(user_id, year=None, month=None):
"""
Sort articles by year and month.
"""
articles_counter = Counter()
articles = ArticleController(user_id).read()
    if year is not None:
articles = articles.filter(sqlalchemy.extract("year", "Article.date") == year)
    if month is not None:
articles = articles.filter(
sqlalchemy.extract("month", "Article.date") == month
)
for article in articles.all():
        if year is not None:
articles_counter[article.date.month] += 1
else:
articles_counter[article.date.year] += 1
return articles_counter, articles
def clean_url(url):
"""
Remove utm_* parameters
"""
parsed_url = urlparse(url)
qd = parse_qs(parsed_url.query, keep_blank_values=True)
filtered = dict((k, v) for k, v in qd.items() if not k.startswith("utm_"))
return urlunparse(
[
parsed_url.scheme,
parsed_url.netloc,
urllib.parse.quote(urllib.parse.unquote(parsed_url.path)),
parsed_url.params,
urllib.parse.urlencode(filtered, doseq=True),
parsed_url.fragment,
]
).rstrip("=")
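# Illustrative only (not part of the original module): clean_url() drops the
# utm_* tracking parameters and keeps the rest of the query string intact.
def _demo_clean_url():
    url = "https://example.org/post?utm_source=rss&utm_medium=feed&id=42"
    assert clean_url(url) == "https://example.org/post?id=42"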
def load_stop_words():
"""
Load the stop words and return them in a list.
"""
stop_words_lists = glob.glob(
os.path.join(application.config["BASE_DIR"], "web/var/stop_words/*.txt")
)
stop_words = []
    for stop_words_list in stop_words_lists:
        with opened_w_error(stop_words_list, "r") as (stop_words_file, err):
            if err:
                stop_words = []
            else:
                stop_words += stop_words_file.read().split(";")
return stop_words
def top_words(articles, n=10, size=5):
"""
Return the n most frequent words in a list.
"""
stop_words = load_stop_words()
words = Counter()
wordre = re.compile(r"\b\w{%s,}\b" % size, re.I)
for article in articles:
for word in [
elem.lower()
for elem in wordre.findall(clear_string(article.content))
if elem.lower() not in stop_words
]:
words[word] += 1
return words.most_common(n)
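# A small sketch (hypothetical data, not part of the original module) of what
# top_words() returns: (word, count) pairs for words of at least `size`
# letters, most frequent first, minus any configured stop words.
def _demo_top_words():
    class FakeArticle(object):
        def __init__(self, content):
            self.content = content
    articles = [FakeArticle("python python feeds"), FakeArticle("python feeds")]
    return top_words(articles, n=2, size=5)  # e.g. [('python', 3), ('feeds', 2)]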
def tag_cloud(tags):
"""
Generates a tags cloud.
"""
tags.sort(key=operator.itemgetter(0))
max_tag = max([tag[1] for tag in tags])
return "\n".join(
[
("<font size=%d>%s</font>" % (min(1 + count * 7 / max_tag, 7), word))
for (word, count) in tags
]
)
| JARR-aggregator/JARR | newspipe/lib/misc_utils.py | Python | agpl-3.0 | 5,626 |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
# def deleteDuplicates(self, head):
# """
# :type head: ListNode
# :rtype: ListNode
# """
# if head is None:
# return None
# temp = ListNode(2147483647)
# temp.next = head
# pos = head
# head = temp
# last = head
# while pos is not None:
# if last.val == pos.val:
# last.next = pos.next
# else:
# last = pos
# pos = pos.next
# return head.next
def deleteDuplicates(self, head):
if head is None:
return None
pos = head
while pos is not None and pos.next is not None:
if pos.val == pos.next.val:
pos.next = pos.next.next
else:
pos = pos.next
return head
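# Quick self-check (not part of the original solution): build 1->1->2->3->3
# and confirm consecutive duplicates are removed in place.
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None
    head = ListNode(1)
    head.next = ListNode(1)
    head.next.next = ListNode(2)
    head.next.next.next = ListNode(3)
    head.next.next.next.next = ListNode(3)
    node = Solution().deleteDuplicates(head)
    out = []
    while node:
        out.append(node.val)
        node = node.next
    assert out == [1, 2, 3]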
| qiyuangong/leetcode | python/083_Remove_Duplicates_from_Sorted_List.py | Python | mit | 1,004 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import coverage # pylint: disable=import-error
coverage.process_startup()
# pylint: disable=bare-except
except: # nopep8
pass
| JosuaKrause/quick_server | test/cov.py | Python | mit | 191 |
#
# From https://github.com/rguthrie3/BiLSTM-CRF/blob/master/model.py
#
import dynet
import numpy as np
class CRF():
def __init__(self, model, id_to_tag):
self.id_to_tag = id_to_tag
self.tag_to_id = {tag: id for id, tag in id_to_tag.items()}
self.n_tags = len(self.id_to_tag)
self.b_id = len(self.tag_to_id)
self.e_id = len(self.tag_to_id) + 1
self.transitions = model.add_lookup_parameters((self.n_tags+2,
self.n_tags+2),
name="transitions")
def score_sentence(self, observations, tags):
assert len(observations) == len(tags)
score_seq = [0]
score = dynet.scalarInput(0)
tags = [self.b_id] + tags
for i, obs in enumerate(observations):
# print self.b_id
# print self.e_id
# print obs.value()
# print tags
# print self.transitions
# print self.transitions[tags[i+1]].value()
score = score \
+ dynet.pick(self.transitions[tags[i + 1]], tags[i])\
+ dynet.pick(obs, tags[i + 1])
score_seq.append(score.value())
score = score + dynet.pick(self.transitions[self.e_id], tags[-1])
return score
def viterbi_loss(self, observations, tags):
observations = [dynet.concatenate([obs, dynet.inputVector([-1e10, -1e10])], d=0) for obs in
observations]
viterbi_tags, viterbi_score = self.viterbi_decoding(observations)
if viterbi_tags != tags:
gold_score = self.score_sentence(observations, tags)
return (viterbi_score - gold_score), viterbi_tags
else:
return dynet.scalarInput(0), viterbi_tags
def neg_log_loss(self, observations, tags):
observations = [dynet.concatenate([obs, dynet.inputVector([-1e10, -1e10])], d=0) for obs in observations]
gold_score = self.score_sentence(observations, tags)
forward_score = self.forward(observations)
return forward_score - gold_score
def forward(self, observations):
def log_sum_exp(scores):
npval = scores.npvalue()
argmax_score = np.argmax(npval)
max_score_expr = dynet.pick(scores, argmax_score)
max_score_expr_broadcast = dynet.concatenate([max_score_expr] * (self.n_tags+2))
return max_score_expr + dynet.log(
dynet.sum_dims(dynet.transpose(dynet.exp(scores - max_score_expr_broadcast)), [1]))
init_alphas = [-1e10] * (self.n_tags + 2)
init_alphas[self.b_id] = 0
for_expr = dynet.inputVector(init_alphas)
for idx, obs in enumerate(observations):
# print "obs: ", obs.value()
alphas_t = []
for next_tag in range(self.n_tags+2):
obs_broadcast = dynet.concatenate([dynet.pick(obs, next_tag)] * (self.n_tags + 2))
# print "for_expr: ", for_expr.value()
# print "transitions next_tag: ", self.transitions[next_tag].value()
# print "obs_broadcast: ", obs_broadcast.value()
next_tag_expr = for_expr + self.transitions[next_tag] + obs_broadcast
alphas_t.append(log_sum_exp(next_tag_expr))
for_expr = dynet.concatenate(alphas_t)
terminal_expr = for_expr + self.transitions[self.e_id]
alpha = log_sum_exp(terminal_expr)
return alpha
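    # Why log_sum_exp above subtracts the max before exponentiating: a tiny
    # numpy-only sketch (illustrative, not part of the original model).
    @staticmethod
    def _demo_log_sum_exp_stability():
        scores = np.array([1000.0, 999.0])
        naive = np.log(np.sum(np.exp(scores)))  # exp(1000) overflows -> inf
        m = scores.max()
        stable = m + np.log(np.sum(np.exp(scores - m)))  # ~1000.3133, finite
        return naive, stable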
def viterbi_decoding(self, observations):
backpointers = []
init_vvars = [-1e10] * (self.n_tags + 2)
init_vvars[self.b_id] = 0 # <Start> has all the probability
for_expr = dynet.inputVector(init_vvars)
trans_exprs = [self.transitions[idx] for idx in range(self.n_tags + 2)]
for obs in observations:
bptrs_t = []
vvars_t = []
for next_tag in range(self.n_tags + 2):
next_tag_expr = for_expr + trans_exprs[next_tag]
next_tag_arr = next_tag_expr.npvalue()
best_tag_id = np.argmax(next_tag_arr)
bptrs_t.append(best_tag_id)
vvars_t.append(dynet.pick(next_tag_expr, best_tag_id))
for_expr = dynet.concatenate(vvars_t) + obs
backpointers.append(bptrs_t)
# Perform final transition to terminal
terminal_expr = for_expr + trans_exprs[self.e_id]
terminal_arr = terminal_expr.npvalue()
best_tag_id = np.argmax(terminal_arr)
path_score = dynet.pick(terminal_expr, best_tag_id)
# Reverse over the backpointers to get the best path
best_path = [best_tag_id] # Start with the tag that was best for terminal
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
start = best_path.pop() # Remove the start symbol
best_path.reverse()
assert start == self.b_id
# Return best path and best path's score
return best_path, path_score | onurgu/ner-tagger-tensorflow | crf.py | Python | mit | 5,119 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-8-31 1:29 PM
# @Author : Tom.Lee
# @File : config_parser.py
# @Product : PyCharm
# @Docs :
# @Source :
from oslo_config import cfg
from oslo_config import types
class ConfigManager(object):
PortType = types.Integer(1, 65535)
default_opts = [
cfg.StrOpt(
'bind_host',
default='0.0.0.0',
help='IP address to listen on.'),
cfg.Opt(
        'bind_port',  # only a plain cfg.Opt accepts a custom type such as PortType
type=PortType,
default=9292,
help='Port number to listen on.')
]
default_opt = cfg.ListOpt(
'enabled_api',
default=['ec2', 'api_compute'],
help='List of APIs to enable by default.')
cli_opts = [
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output'),
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output'),
]
rabbit_group = cfg.OptGroup(
name='RABBIT',
title='RABBIT options'
)
rabbit_opt = cfg.BoolOpt(
'ssl',
default=False,
help='use ssl for connection')
rabbit_opts = [
cfg.StrOpt(
'host',
default='localhost',
help='IP/hostname to listen on.'),
cfg.IntOpt(
'port',
default=5672,
help='Port number to listen on.')
]
def __init__(self):
self.conf = cfg.CONF
self._register_opts()
def _register_opts(self):
# default
self.conf.register_opt(self.default_opt)
self.conf.register_opts(self.default_opts)
# rabbit
self.conf.register_group(self.rabbit_group)
self.conf.register_opts(self.rabbit_opts, self.rabbit_group)
self.conf.register_opt(self.rabbit_opt, self.rabbit_group)
# cli
self.conf.register_cli_opts(self.cli_opts)
self.conf(default_config_files=['config.conf'])
@property
def bind_port(self):
return getattr(self.conf, 'bind_port', None)
@property
def bind_host(self):
return getattr(self.conf, 'bind_host', None)
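# ConfigManager above reads a config.conf next to the script. A minimal
# example of what that file could look like (all values hypothetical):
#
#   [DEFAULT]
#   bind_host = 127.0.0.1
#   bind_port = 8080
#   enabled_api = ec2,api_compute
#
#   [RABBIT]
#   host = rabbit.example.com
#   port = 5672
#   ssl = true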
config_manager = ConfigManager()
if __name__ == '__main__':
print config_manager.bind_port
print config_manager.bind_host
| amlyj/pythonStudy | OpenStack/oslo_/config/config_parser.py | Python | mit | 2,480 |
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
SEED = 42
GRID_PRECISION = 100
rng = np.random.RandomState(SEED)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = (0, 1, 2)
# define four outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, GRID_PRECISION),
np.linspace(-7, 7, GRID_PRECISION))
n_outliers = int(outliers_fraction * n_samples)
n_inliers = n_samples - n_outliers
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for offset in clusters_separation:
np.random.seed(SEED)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.concatenate([X1, X2], axis=0)
# Add outliers
X = np.concatenate([X, np.random.uniform(low=-6, high=6,
size=(n_outliers, 2))], axis=0)
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white',
s=20, edgecolor='k')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black',
s=20, edgecolor='k')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| herilalaina/scikit-learn | examples/covariance/plot_outlier_detection.py | Python | bsd-3-clause | 5,242 |
#!/usr/bin/python3
import argparse
import sys
from usersync import config, lp, sync
PROVIDERS = {
'lp': lp,
}
def _configured_users(args, cfg):
provider = PROVIDERS[args.provider]
users = {}
for team in cfg.get('team', []):
users.update(provider.get_people(team))
for user in cfg.get('users', []):
users.update({user: provider.get_ssh_keys(user)})
return users
def _get_args():
parser = argparse.ArgumentParser(
description='''Syncronize user accounts defined somewhere like
like launchpad.net into a local group. Much easier to use
and administer than something like ldap''')
parser.add_argument('--provider', default='lp',
choices=PROVIDERS.keys(),
help='Team/sshkey provider. default=%(default)s')
parser.add_argument('--config', default=config.DEFAULT,
help='Config file to use. default=%(default)s')
parser.add_argument('--backupdir', default='/root/disabled-users',
help='''Directory to back up deleted user\'s home
directories to. default=%(default)s''')
parser.add_argument('--dryrun', action='store_true',
help='Make no changes, just display actions')
return parser.parse_args()
def main():
args = _get_args()
sync.DRY_RUN = args.dryrun
cfg = config.load(args.config)
group = cfg.get('local_group', ['users'])[0]
local = sync.get_local_users(group)
users = _configured_users(args, cfg)
sync.delete_users(local, users, args.backupdir)
sync.add_users(local, users, group)
sync.update_users(local, users)
#TODO sudo
if __name__ == '__main__':
sys.exit(main())
| doanac/user-sync | usersync/main.py | Python | gpl-3.0 | 1,786 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class RuntimeRawExtension(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, raw=None):
"""
RuntimeRawExtension - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'raw': 'str'
}
self.attribute_map = {
'raw': 'Raw'
}
self._raw = raw
@property
def raw(self):
"""
Gets the raw of this RuntimeRawExtension.
Raw is the underlying serialization of this object.
:return: The raw of this RuntimeRawExtension.
:rtype: str
"""
return self._raw
@raw.setter
def raw(self, raw):
"""
Sets the raw of this RuntimeRawExtension.
Raw is the underlying serialization of this object.
:param raw: The raw of this RuntimeRawExtension.
:type: str
"""
if raw is None:
raise ValueError("Invalid value for `raw`, must not be `None`")
self._raw = raw
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, RuntimeRawExtension):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
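# Hypothetical usage sketch (not part of the generated code): round-trip a
# raw extension through the model's dict serialization.
if __name__ == "__main__":
    ext = RuntimeRawExtension(raw='{"kind": "Example"}')
    assert ext.to_dict() == {'raw': '{"kind": "Example"}'}
    print(ext.to_str())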
| djkonro/client-python | kubernetes/client/models/runtime_raw_extension.py | Python | apache-2.0 | 3,127 |
# -*- coding: utf-8 -*-
from setuptools import find_packages
from setuptools import setup
import os
version = '1.3.1'
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('README.txt')
+ '\n' +
read('js', 'jquery_maskedinput', 'test_jquery.maskedinput.txt')
+ '\n' +
read('CHANGES.txt'))
setup(
name='js.jquery_maskedinput',
version=version,
description="Fanstatic packaging of jquery.maskedinput",
long_description=long_description,
classifiers=[],
keywords='',
author='Andreas Kaiser',
author_email='disko@binary-punks.com',
url='https://github.com/disko/js.jquery_maskedinput',
license='BSD',
packages=find_packages(),
namespace_packages=['js'],
include_package_data=True,
zip_safe=False,
setup_requires=[],
install_requires=[
'fanstatic',
'js.jquery',
'setuptools',
],
entry_points={
'fanstatic.libraries': [
'jquery.maskedinput = js.jquery_maskedinput:library',
],
},
)
| tonthon/js.jquery_maskedinput | setup.py | Python | bsd-3-clause | 1,111 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
JSON to Fdt dumper
See JSONDeviceTree.md for format
@author: Neil 'superna' Armstrong <superna9999@gmail.com>
"""
import argparse
from pyfdt.pyfdt import FdtJsonParse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Device Tree Blob JSON to DTB')
    parser.add_argument('--format', dest='format', help="output format (dts, dtb or json); defaults to dts", default="dts")
parser.add_argument('in_filename', help="input filename")
parser.add_argument('out_filename', help="output filename")
args = parser.parse_args()
if args.format not in ('dts', 'dtb', 'json'):
raise Exception('Invalid Output Format')
with open(args.in_filename) as infile:
fdt = FdtJsonParse(infile.read())
if args.format == "dts":
with open(args.out_filename, 'wb') as outfile:
outfile.write(fdt.to_dts())
elif args.format == "dtb":
with open(args.out_filename, 'wb') as outfile:
outfile.write(fdt.to_dtb())
elif args.format == "json":
with open(args.out_filename, 'wb') as outfile:
outfile.write(fdt.to_json())
| superna9999/pyfdt | jsonfdtdump.py | Python | apache-2.0 | 1,186 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library functions for ContextRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
# The negative value used in padding the invalid weights.
_NEGATIVE_PADDING_VALUE = -100000
def filter_weight_value(weights, values, valid_mask):
"""Filters weights and values based on valid_mask.
_NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to
avoid their contribution in softmax. 0 will be set for the invalid elements in
the values.
Args:
weights: A float Tensor of shape [batch_size, input_size, context_size].
values: A float Tensor of shape [batch_size, context_size,
projected_dimension].
valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means
valid and False means invalid.
Returns:
weights: A float Tensor of shape [batch_size, input_size, context_size].
values: A float Tensor of shape [batch_size, context_size,
projected_dimension].
Raises:
    ValueError: If the shapes of the inputs don't match.
"""
w_batch_size, _, w_context_size = weights.shape
v_batch_size, v_context_size, _ = values.shape
m_batch_size, m_context_size = valid_mask.shape
if w_batch_size != v_batch_size or v_batch_size != m_batch_size:
raise ValueError("Please make sure the first dimension of the input"
" tensors are the same.")
if w_context_size != v_context_size:
raise ValueError("Please make sure the third dimension of weights matches"
" the second dimension of values.")
if w_context_size != m_context_size:
raise ValueError("Please make sure the third dimension of the weights"
" matches the second dimension of the valid_mask.")
valid_mask = valid_mask[..., tf.newaxis]
# Force the invalid weights to be very negative so it won't contribute to
# the softmax.
weights += tf.transpose(
tf.cast(tf.math.logical_not(valid_mask), weights.dtype) *
_NEGATIVE_PADDING_VALUE,
perm=[0, 2, 1])
# Force the invalid values to be 0.
values *= tf.cast(valid_mask, values.dtype)
return weights, values
def compute_valid_mask(num_valid_elements, num_elements):
"""Computes mask of valid entries within padded context feature.
Args:
num_valid_elements: A int32 Tensor of shape [batch_size].
num_elements: An int32 Tensor.
Returns:
A boolean Tensor of the shape [batch_size, num_elements]. True means
valid and False means invalid.
"""
batch_size = num_valid_elements.shape[0]
element_idxs = tf.range(num_elements, dtype=tf.int32)
batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1])
num_valid_elements = num_valid_elements[..., tf.newaxis]
valid_mask = tf.less(batch_element_idxs, num_valid_elements)
return valid_mask
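# A small sketch (not part of the original library) of what
# compute_valid_mask produces: one boolean row per batch element, True for
# the first num_valid_elements positions. With tf.compat.v1 this builds a
# graph tensor; the commented values are what it evaluates to.
def _demo_compute_valid_mask():
  num_valid = tf.constant([1, 3], dtype=tf.int32)  # batch_size = 2
  mask = compute_valid_mask(num_valid, num_elements=4)
  # -> [[True, False, False, False],
  #     [True, True,  True,  False]]
  return mask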
def project_features(features, projection_dimension, is_training, normalize):
"""Projects features to another feature space.
Args:
features: A float Tensor of shape [batch_size, features_size,
num_features].
projection_dimension: A int32 Tensor.
is_training: A boolean Tensor (affecting batch normalization).
normalize: A boolean Tensor. If true, the output features will be l2
normalized on the last dimension.
Returns:
A float Tensor of shape [batch, features_size, projection_dimension].
"""
# TODO(guanhangwu) Figure out a better way of specifying the batch norm
# params.
batch_norm_params = {
"is_training": is_training,
"decay": 0.97,
"epsilon": 0.001,
"center": True,
"scale": True
}
batch_size, _, num_features = features.shape
features = tf.reshape(features, [-1, num_features])
projected_features = slim.fully_connected(
features,
num_outputs=projection_dimension,
activation_fn=tf.nn.relu6,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params)
projected_features = tf.reshape(projected_features,
[batch_size, -1, projection_dimension])
if normalize:
projected_features = tf.math.l2_normalize(projected_features, axis=-1)
return projected_features
def attention_block(input_features, context_features, bottleneck_dimension,
output_dimension, attention_temperature, valid_mask,
is_training):
"""Generic attention block.
Args:
input_features: A float Tensor of shape [batch_size, input_size,
num_input_features].
context_features: A float Tensor of shape [batch_size, context_size,
num_context_features].
bottleneck_dimension: A int32 Tensor representing the bottleneck dimension
for intermediate projections.
output_dimension: A int32 Tensor representing the last dimension of the
output feature.
attention_temperature: A float Tensor. It controls the temperature of the
softmax for weights calculation. The formula for calculation as follows:
weights = exp(weights / temperature) / sum(exp(weights / temperature))
valid_mask: A boolean Tensor of shape [batch_size, context_size].
is_training: A boolean Tensor (affecting batch normalization).
Returns:
A float Tensor of shape [batch_size, input_size, output_dimension].
"""
with tf.variable_scope("AttentionBlock"):
queries = project_features(
input_features, bottleneck_dimension, is_training, normalize=True)
keys = project_features(
context_features, bottleneck_dimension, is_training, normalize=True)
values = project_features(
context_features, bottleneck_dimension, is_training, normalize=True)
weights = tf.matmul(queries, keys, transpose_b=True)
weights, values = filter_weight_value(weights, values, valid_mask)
weights = tf.nn.softmax(weights / attention_temperature)
features = tf.matmul(weights, values)
output_features = project_features(
features, output_dimension, is_training, normalize=False)
return output_features
def compute_box_context_attention(box_features, context_features,
valid_context_size, bottleneck_dimension,
attention_temperature, is_training):
"""Computes the attention feature from the context given a batch of box.
Args:
box_features: A float Tensor of shape [batch_size, max_num_proposals,
height, width, channels]. It is pooled features from first stage
proposals.
context_features: A float Tensor of shape [batch_size, context_size,
num_context_features].
valid_context_size: A int32 Tensor of shape [batch_size].
bottleneck_dimension: A int32 Tensor representing the bottleneck dimension
for intermediate projections.
attention_temperature: A float Tensor. It controls the temperature of the
softmax for weights calculation. The formula for calculation as follows:
weights = exp(weights / temperature) / sum(exp(weights / temperature))
is_training: A boolean Tensor (affecting batch normalization).
Returns:
A float Tensor of shape [batch_size, max_num_proposals, 1, 1, channels].
"""
_, context_size, _ = context_features.shape
valid_mask = compute_valid_mask(valid_context_size, context_size)
channels = box_features.shape[-1]
# Average pools over height and width dimension so that the shape of
# box_features becomes [batch_size, max_num_proposals, channels].
box_features = tf.reduce_mean(box_features, [2, 3])
output_features = attention_block(box_features, context_features,
bottleneck_dimension, channels.value,
attention_temperature, valid_mask,
is_training)
# Expands the dimension back to match with the original feature map.
output_features = output_features[:, :, tf.newaxis, tf.newaxis, :]
return output_features
| tombstone/models | research/object_detection/meta_architectures/context_rcnn_lib.py | Python | apache-2.0 | 8,672 |
import signal
import time
import wiringpi as wp
from rasp.allgo_utils import PCA9685
from rasp.allgo_utils import ultrasonic as uls
LOW = 0
HIGH = 1
OUTPUT = wp.OUTPUT
INPUT = wp.INPUT
CAR_DIR_FW = 0
CAR_DIR_BK = 1
CAR_DIR_LF = 2
CAR_DIR_RF = 3
CAR_DIR_ST = 4
DIR_DISTANCE_ALERT = 20
preMillis = 0
keepRunning = 1
OUT = [5, 0, 1, 2, 3] # 5:front_left_led, 0:front_right_led, 1:rear_right_led, 2:rear_left_led, 3:ultra_trig
IN = [21, 22, 26, 23] # 21:left_IR, 22:center_IR, 26:right_IR, 23:ultra_echo
ULTRASONIC_TRIG = 3 # TRIG port is to use as output signal
ULTRASONIC_ECHO = 23 # ECHO port is to use as input signal
# An instance of the motor & buzzer
pca9685 = PCA9685()
#Ultrasonic ultra; # An instance of the ultrasonic sensor
ultra = uls(ULTRASONIC_TRIG,ULTRASONIC_ECHO)
# distance range: 2cm ~ 5m
# angular range: 15deg
# resolution: 3mm
"""
void setup();
void loop();
void checkUltra();
void intHandler(int dummy);
"""
def setup():
wp.wiringPiSetup() # Initialize wiringPi to load Raspbarry Pi PIN numbering scheme
"""
for(i=0; i<sizeof(OUT); i++){
pinMode(OUT[i], OUTPUT); // Set the pin as output mode
wp.digitalWrite(OUT[i], LOW); // Transmit HIGH or LOW value to the pin(5V ~ 0V)
}"""
for i in range(len(OUT)):
wp.pinMode(OUT[i],OUTPUT)
wp.digitalWrite(OUT[i], LOW)
for i in range(len(IN)):
wp.pinMode(IN[i],INPUT)
def check_ultra():
disValue=ultra.distance()
print("Distance:%.2f\t"%disValue)
def action(menu):
    global keepRunning  # menu 11 assigns this flag, so it must be declared global
if menu==0:
pca9685.go_forward();
time.sleep(20);
pca9685.stop();
elif menu== 1:
pca9685.go_back();
time.sleep(20);
pca9685.stop();
elif menu== 2:
        # front left
wp.digitalWrite(OUT[0], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[0], LOW);
time.sleep(20);
wp.digitalWrite(OUT[0], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[0], LOW);
elif menu== 3:
        # front right
wp.digitalWrite(OUT[1], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[1], LOW);
time.sleep(20);
wp.digitalWrite(OUT[1], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[1], LOW);
elif menu== 4:
        # rear left
wp.digitalWrite(OUT[3], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[3], LOW);
time.sleep(20);
wp.digitalWrite(OUT[3], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[3], LOW);
elif menu== 5:
# rear right
wp.digitalWrite(OUT[2], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[2], LOW);
time.sleep(20);
wp.digitalWrite(OUT[2], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[2], LOW);
elif menu ==6:
#ultrasonic
check_ultra();
elif menu== 9:
pca9685.go_right();
time.sleep(5);
pca9685.stop();
elif menu== 10:
pca9685.go_left();
time.sleep(5);
pca9685.stop();
elif menu== 8:
print("Beeping for 2 seconds\n");
pca9685.on_buzz();
time.sleep(2);
pca9685.off_buzz();
elif menu== 11:
print("EXIT\n");
keepRunning = 0;
else:
print("Check the list again\n")
print("\n")
menu = -1
def loop():
    """// returns the current time (elapsed time since your Arduino started)
    in milliseconds (1/1000 second)"""
llinevalue = 0
clinevalue = 0
rlinevalue = 0
    print('This is a diagnostic program for your mobile robot.\n')
    print('0: go forward\n1: go backward\n2: front left led\n3: front right led\n4: rear left led\n5: rear right led\n6: ultrasonic\n7: IR\n8: buzzer\n9: go right\n10: go left\n11: Exit the program\n')
print('Please select one of them: ')
menu = int(input())
action(menu)
    menu = -1
"""// obstacle detection and move in another direction (original C code):
void checkUltra(){
    float disValue = ultra.ReadDistanceCentimeter();
    printf("ultrasonic: %f\n", disValue);
}
"""
def signal_handler(dummy):
    print("SIGNAL INTERRUPT", dummy)
    global keepRunning
    keepRunning = 0
    time.sleep(1)  # the Arduino original likely used delay(1000), i.e. one second
def main(**kwargs):
setup()
signal.signal(signal.SIGINT, signal_handler)
while keepRunning:
loop()
return 0
main()
| IsmoilovMuhriddin/allgo | rasp/diagnosis.py | Python | mit | 3,928 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Contains the logic for `aq update cluster systemlist`. """
from aquilon.exceptions_ import NotFoundException
from aquilon.aqdb.model import SystemList
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.update_resource import CommandUpdateResource
from aquilon.worker.dbwrappers.host import hostname_to_host
class CommandUpdateClusterSystemList(CommandUpdateResource):
required_parameters = ["cluster"]
resource_class = SystemList
def update_resource(self, dbresource, session, logger, member, priority, **_):
if member is not None:
dbhost = hostname_to_host(session, member)
try:
entry = dbresource.entries[dbhost]
except KeyError:
raise NotFoundException("{0} does not have a SystemList entry."
.format(dbhost))
if priority is not None:
entry.priority = priority
def render(self, **kwargs):
super(CommandUpdateClusterSystemList, self).render(hostname=None,
metacluster=None,
comments=None,
**kwargs)
| guillaume-philippon/aquilon | lib/aquilon/worker/commands/update_cluster_systemlist.py | Python | apache-2.0 | 1,990 |
from flask import Flask
from flask import request
from subprocess import call
import git, json, os, sys
newname = "gitlistener"
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary('libc.so.6')  # load the C standard library
buff = create_string_buffer(len(newname)+1) #Note: One larger than the name (man prctl says that)
buff.value = newname #Null terminated string as it should be
libc.prctl(15, byref(buff), 0, 0, 0)  # 15 is PR_SET_NAME; see the #define in /usr/include/linux/prctl.h. args[3..5] are zero as the man page says.
app = Flask(__name__)
@app.route("/", methods=['POST'])
def index():
if request.method == 'POST':
repo = git.Repo('/var/www/lunch_app')
print repo.git.status()
print repo.git.pull()
f = open("keyfile.txt")
pw = f.read()
os.popen("sudo service apache2 reload", "w").write(pw)
else:
print "Wrong"
return "Ran"
if __name__ == "__main__":
app.run(host='0.0.0.0',port=5001)
| WilliamMarti/gitlistener | gitlistener.py | Python | mit | 1,001 |
# -*- coding: utf-8 -*-
"""Coregistration between different coordinate frames."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD-3-Clause
import configparser
import fnmatch
from glob import glob, iglob
import os
import os.path as op
import stat
import sys
import re
import shutil
from functools import reduce
import numpy as np
from .io import read_fiducials, write_fiducials, read_info
from .io.constants import FIFF
from .io.meas_info import Info
from .io._digitization import _get_data_as_dict_from_dig
# keep get_mni_fiducials for backward compat (no burden to keep in this
# namespace, too)
from ._freesurfer import (_read_mri_info, get_mni_fiducials, # noqa: F401
estimate_head_mri_t) # noqa: F401
from .label import read_label, Label
from .source_space import (add_source_space_distances, read_source_spaces, # noqa: E501,F401
write_source_spaces)
from .surface import (read_surface, write_surface, _normalize_vectors,
complete_surface_info, decimate_surface,
_DistanceQuery)
from .bem import read_bem_surfaces, write_bem_surfaces
from .transforms import (rotation, rotation3d, scaling, translation, Transform,
_read_fs_xfm, _write_fs_xfm, invert_transform,
combine_transforms, _quat_to_euler,
_fit_matched_points, apply_trans,
rot_to_quat, _angle_between_quats)
from .utils import (get_config, get_subjects_dir, logger, pformat, verbose,
warn, has_nibabel, fill_doc, _validate_type,
_check_subject, _check_option)
from .viz._3d import _fiducial_coords
# some path templates
trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
subject_dirname = os.path.join('{subjects_dir}', '{subject}')
bem_dirname = os.path.join(subject_dirname, 'bem')
mri_dirname = os.path.join(subject_dirname, 'mri')
mri_transforms_dirname = os.path.join(subject_dirname, 'mri', 'transforms')
surf_dirname = os.path.join(subject_dirname, 'surf')
bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
head_bem_fname = pformat(bem_fname, name='head')
fid_fname = pformat(bem_fname, name='fiducials')
fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
_head_fnames = (os.path.join(bem_dirname, 'outer_skin.surf'),
head_bem_fname)
_high_res_head_fnames = (os.path.join(bem_dirname, '{subject}-head-dense.fif'),
os.path.join(surf_dirname, 'lh.seghead'),
os.path.join(surf_dirname, 'lh.smseghead'))
def _make_writable(fname):
"""Make a file writable."""
os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write
def _make_writable_recursive(path):
"""Recursively set writable."""
if sys.platform.startswith('win'):
return # can't safely set perms
for root, dirs, files in os.walk(path, topdown=False):
for f in dirs + files:
_make_writable(os.path.join(root, f))
def _find_head_bem(subject, subjects_dir, high_res=False):
"""Find a high resolution head."""
# XXX this should be refactored with mne.surface.get_head_surf ...
fnames = _high_res_head_fnames if high_res else _head_fnames
for fname in fnames:
path = fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
return path
@fill_doc
def coregister_fiducials(info, fiducials, tol=0.01):
"""Create a head-MRI transform by aligning 3 fiducial points.
Parameters
----------
%(info_not_none)s
fiducials : str | list of dict
Fiducials in MRI coordinate space (either path to a ``*-fiducials.fif``
file or list of fiducials as returned by :func:`read_fiducials`.
Returns
-------
trans : Transform
The device-MRI transform.
.. note:: The :class:`mne.Info` object fiducials must be in the
head coordinate space.
"""
if isinstance(info, str):
info = read_info(info)
if isinstance(fiducials, str):
fiducials, coord_frame_to = read_fiducials(fiducials)
else:
coord_frame_to = FIFF.FIFFV_COORD_MRI
frames_from = {d['coord_frame'] for d in info['dig']}
if len(frames_from) > 1:
raise ValueError("info contains fiducials from different coordinate "
"frames")
else:
coord_frame_from = frames_from.pop()
coords_from = _fiducial_coords(info['dig'])
coords_to = _fiducial_coords(fiducials, coord_frame_to)
trans = fit_matched_points(coords_from, coords_to, tol=tol)
return Transform(coord_frame_from, coord_frame_to, trans)
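# A hedged usage sketch (file names hypothetical, not shipped with MNE):
#
#     info = read_info('sample_raw.fif')
#     trans = coregister_fiducials(info, 'sample-fiducials.fif', tol=0.02)
#
# The returned head->MRI Transform can then be passed wherever a trans is
# expected, e.g. mne.make_forward_solution(..., trans=trans, ...).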
@verbose
def create_default_subject(fs_home=None, update=False, subjects_dir=None,
verbose=None):
"""Create an average brain subject for subjects without structural MRI.
Create a copy of fsaverage from the Freesurfer directory in subjects_dir
and add auxiliary files from the mne package.
Parameters
----------
fs_home : None | str
The freesurfer home directory (only needed if FREESURFER_HOME is not
specified as environment variable).
update : bool
In cases where a copy of the fsaverage brain already exists in the
subjects_dir, this option allows to only copy files that don't already
exist in the fsaverage directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(os.environ['SUBJECTS_DIR']) as destination for the new subject.
%(verbose)s
Notes
-----
When no structural MRI is available for a subject, an average brain can be
substituted. Freesurfer comes with such an average brain model, and MNE
comes with some auxiliary files which make coregistration easier.
:py:func:`create_default_subject` copies the relevant
files from Freesurfer into the current subjects_dir, and also adds the
auxiliary files provided by MNE.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if fs_home is None:
fs_home = get_config('FREESURFER_HOME', fs_home)
if fs_home is None:
raise ValueError(
"FREESURFER_HOME environment variable not found. Please "
"specify the fs_home parameter in your call to "
"create_default_subject().")
# make sure freesurfer files exist
fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
if not os.path.exists(fs_src):
raise IOError('fsaverage not found at %r. Is fs_home specified '
'correctly?' % fs_src)
for name in ('label', 'mri', 'surf'):
dirname = os.path.join(fs_src, name)
if not os.path.isdir(dirname):
raise IOError("Freesurfer fsaverage seems to be incomplete: No "
"directory named %s found in %s" % (name, fs_src))
# make sure destination does not already exist
dest = os.path.join(subjects_dir, 'fsaverage')
if dest == fs_src:
raise IOError(
"Your subjects_dir points to the freesurfer subjects_dir (%r). "
"The default subject can not be created in the freesurfer "
"installation directory; please specify a different "
"subjects_dir." % subjects_dir)
elif (not update) and os.path.exists(dest):
raise IOError(
"Can not create fsaverage because %r already exists in "
"subjects_dir %r. Delete or rename the existing fsaverage "
"subject folder." % ('fsaverage', subjects_dir))
# copy fsaverage from freesurfer
logger.info("Copying fsaverage subject from freesurfer directory...")
if (not update) or not os.path.exists(dest):
shutil.copytree(fs_src, dest)
_make_writable_recursive(dest)
# copy files from mne
source_fname = os.path.join(os.path.dirname(__file__), 'data', 'fsaverage',
'fsaverage-%s.fif')
dest_bem = os.path.join(dest, 'bem')
if not os.path.exists(dest_bem):
os.mkdir(dest_bem)
logger.info("Copying auxiliary fsaverage files from mne...")
dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
_make_writable_recursive(dest_bem)
for name in ('fiducials', 'head', 'inner_skull-bem', 'trans'):
if not os.path.exists(dest_fname % name):
shutil.copy(source_fname % name, dest_bem)
def _decimate_points(pts, res=10):
"""Decimate the number of points using a voxel grid.
Create a voxel grid with a specified resolution and retain at most one
point per voxel. For each voxel, the point closest to its center is
retained.
Parameters
----------
pts : array, shape (n_points, 3)
The points making up the head shape.
res : scalar
The resolution of the voxel space (side length of each voxel).
Returns
-------
pts : array, shape = (n_points, 3)
The decimated points.
"""
from scipy.spatial.distance import cdist
pts = np.asarray(pts)
# find the bin edges for the voxel space
xmin, ymin, zmin = pts.min(0) - res / 2.
xmax, ymax, zmax = pts.max(0) + res
xax = np.arange(xmin, xmax, res)
yax = np.arange(ymin, ymax, res)
zax = np.arange(zmin, zmax, res)
# find voxels containing one or more point
H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)
X, Y, Z = pts.T
xbins, ybins, zbins = np.nonzero(H)
x = xax[xbins]
y = yax[ybins]
z = zax[zbins]
mids = np.c_[x, y, z] + res / 2.
# each point belongs to at most one voxel center, so figure those out
# (cKDTree faster than BallTree for these small problems)
tree = _DistanceQuery(mids, method='cKDTree')
_, mid_idx = tree.query(pts)
# then figure out which to actually use based on proximity
# (take advantage of sorting the mid_idx to get our mapping of
# pts to nearest voxel midpoint)
sort_idx = np.argsort(mid_idx)
bounds = np.cumsum(
np.concatenate([[0], np.bincount(mid_idx, minlength=len(mids))]))
assert len(bounds) == len(mids) + 1
out = list()
for mi, mid in enumerate(mids):
# Now we do this:
#
# use_pts = pts[mid_idx == mi]
#
# But it's faster for many points than making a big boolean indexer
# over and over (esp. since each point can only belong to a single
# voxel).
use_pts = pts[sort_idx[bounds[mi]:bounds[mi + 1]]]
if not len(use_pts):
out.append([np.inf] * 3)
else:
out.append(
use_pts[np.argmin(cdist(use_pts, mid[np.newaxis])[:, 0])])
out = np.array(out, float).reshape(-1, 3)
out = out[np.abs(out - mids).max(axis=1) < res / 2.]
return out
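def _demo_decimate_points():
    """Illustrative only (not part of the public API): points falling in the
    same 10 mm voxel collapse to the single point nearest that voxel's
    center, so the decimated cloud is never larger than the input."""
    rng = np.random.RandomState(0)
    pts = rng.uniform(0, 100, (1000, 3))
    dec = _decimate_points(pts, res=10)
    assert len(dec) <= len(pts)
    return dec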
def _trans_from_params(param_info, params):
"""Convert transformation parameters into a transformation matrix.
Parameters
----------
param_info : tuple, len = 3
Tuple describing the parameters in x (do_translate, do_rotate,
do_scale).
params : tuple
The transformation parameters.
Returns
-------
trans : array, shape = (4, 4)
Transformation matrix.
"""
do_rotate, do_translate, do_scale = param_info
i = 0
trans = []
if do_rotate:
x, y, z = params[:3]
trans.append(rotation(x, y, z))
i += 3
if do_translate:
x, y, z = params[i:i + 3]
trans.insert(0, translation(x, y, z))
i += 3
if do_scale == 1:
s = params[i]
trans.append(scaling(s, s, s))
elif do_scale == 3:
x, y, z = params[i:i + 3]
trans.append(scaling(x, y, z))
trans = reduce(np.dot, trans)
return trans
_ALLOW_ANALITICAL = True
# XXX this function should be moved out of coreg as used elsewhere
def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
scale=False, tol=None, x0=None, out='trans',
weights=None):
"""Find a transform between matched sets of points.
This minimizes the squared distance between two matching sets of points.
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (n, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : bool
Number of scaling parameters. With False, points are not scaled. With
True, points are scaled by the same factor along all axes.
tol : scalar | None
The error tolerance. If the distance between any of the matched points
exceeds this value in the solution, a RuntimeError is raised. With
None, no error check is performed.
x0 : None | tuple
Initial values for the fit parameters.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
trans : array, shape (4, 4)
Transformation that, if applied to src_pts, minimizes the squared
distance to tgt_pts. Only returned if out=='trans'.
params : array, shape (n_params, )
A single tuple containing the rotation, translation, and scaling
parameters in that order (as applicable).
"""
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
if src_pts.shape != tgt_pts.shape:
raise ValueError("src_pts and tgt_pts must have same shape (got "
"{}, {})".format(src_pts.shape, tgt_pts.shape))
if weights is not None:
weights = np.asarray(weights, src_pts.dtype)
if weights.ndim != 1 or weights.size not in (src_pts.shape[0], 1):
raise ValueError("weights (shape=%s) must be None or have shape "
"(%s,)" % (weights.shape, src_pts.shape[0],))
weights = weights[:, np.newaxis]
param_info = (bool(rotate), bool(translate), int(scale))
del rotate, translate, scale
# very common use case, rigid transformation (maybe with one scale factor,
# with or without weighted errors)
if param_info in ((True, True, 0), (True, True, 1)) and _ALLOW_ANALITICAL:
src_pts = np.asarray(src_pts, float)
tgt_pts = np.asarray(tgt_pts, float)
if weights is not None:
weights = np.asarray(weights, float)
x, s = _fit_matched_points(
src_pts, tgt_pts, weights, bool(param_info[2]))
x[:3] = _quat_to_euler(x[:3])
x = np.concatenate((x, [s])) if param_info[2] else x
else:
x = _generic_fit(src_pts, tgt_pts, param_info, weights, x0)
# re-create the final transformation matrix
if (tol is not None) or (out == 'trans'):
trans = _trans_from_params(param_info, x)
# assess the error of the solution
if tol is not None:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
est_pts = np.dot(src_pts, trans.T)[:, :3]
err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
if np.any(err > tol):
raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
if out == 'params':
return x
elif out == 'trans':
return trans
else:
raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
def _generic_fit(src_pts, tgt_pts, param_info, weights, x0):
from scipy.optimize import leastsq
if param_info[1]: # translate
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
if param_info == (True, False, 0):
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = np.dot(src_pts, trans.T)
d = tgt_pts - est
if weights is not None:
d *= weights
return d.ravel()
if x0 is None:
x0 = (0, 0, 0)
elif param_info == (True, True, 0):
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = np.dot(src_pts, trans.T)[:, :3]
d = tgt_pts - est
if weights is not None:
d *= weights
return d.ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0)
elif param_info == (True, True, 1):
def error(x):
rx, ry, rz, tx, ty, tz, s = x
trans = reduce(np.dot, (translation(tx, ty, tz),
rotation(rx, ry, rz),
scaling(s, s, s)))
est = np.dot(src_pts, trans.T)[:, :3]
d = tgt_pts - est
if weights is not None:
d *= weights
return d.ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0, 1)
elif param_info == (True, True, 3):
def error(x):
rx, ry, rz, tx, ty, tz, sx, sy, sz = x
trans = reduce(np.dot, (translation(tx, ty, tz),
rotation(rx, ry, rz),
scaling(sx, sy, sz)))
est = np.dot(src_pts, trans.T)[:, :3]
d = tgt_pts - est
if weights is not None:
d *= weights
return d.ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0, 1, 1, 1)
else:
raise NotImplementedError(
"The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
x, _, _, _, _ = leastsq(error, x0, full_output=True)
return x
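def _demo_fit_matched_points():
    """A minimal sketch (not part of the original module): recover a known
    rigid transform from noiseless matched point sets; the analytic
    quaternion fit should reproduce it to numerical precision."""
    rng = np.random.RandomState(0)
    src = rng.randn(10, 3)
    trans_true = np.dot(translation(1., 2., 3.), rotation(.1, .2, .3))
    tgt = apply_trans(trans_true, src)
    trans_est = fit_matched_points(src, tgt, out='trans')
    assert np.allclose(trans_est, trans_true, atol=1e-6)
    return trans_est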
def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
"""Find paths to label files in a subject's label directory.
Parameters
----------
subject : str
Name of the mri subject.
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "aparc/*.label" will find all labels
in the "subject/label/aparc" directory). With None, find all labels.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
-------
paths : list
List of paths relative to the subject's label directory
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = os.path.join(subjects_dir, subject)
lbl_dir = os.path.join(subject_dir, 'label')
if pattern is None:
paths = []
for dirpath, _, filenames in os.walk(lbl_dir):
rel_dir = os.path.relpath(dirpath, lbl_dir)
for filename in fnmatch.filter(filenames, '*.label'):
path = os.path.join(rel_dir, filename)
paths.append(path)
else:
paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
return paths
def _find_mri_paths(subject, skip_fiducials, subjects_dir):
"""Find all files of an mri relevant for source transformation.
Parameters
----------
subject : str
Name of the mri subject.
skip_fiducials : bool
Do not scale the MRI fiducials. If False, an IOError will be raised
if no fiducials file can be found.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
-------
paths : dict
Dictionary whose keys are relevant file type names (str), and whose
values are lists of paths.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = {}
# directories to create
paths['dirs'] = [bem_dirname, surf_dirname]
# surf/ files
paths['surf'] = []
surf_fname = os.path.join(surf_dirname, '{name}')
surf_names = ('inflated', 'white', 'orig', 'orig_avg', 'inflated_avg',
'inflated_pre', 'pial', 'pial_avg', 'smoothwm', 'white_avg',
'seghead', 'smseghead')
if os.getenv('_MNE_FEW_SURFACES', '') == 'true': # for testing
surf_names = surf_names[:4]
for surf_name in surf_names:
for hemi in ('lh.', 'rh.'):
name = hemi + surf_name
path = surf_fname.format(subjects_dir=subjects_dir,
subject=subject, name=name)
if os.path.exists(path):
paths['surf'].append(pformat(surf_fname, name=name))
surf_fname = os.path.join(bem_dirname, '{name}')
surf_names = ('inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf')
for surf_name in surf_names:
path = surf_fname.format(subjects_dir=subjects_dir,
subject=subject, name=surf_name)
if os.path.exists(path):
paths['surf'].append(pformat(surf_fname, name=surf_name))
del surf_names, surf_name, path, hemi
# BEM files
paths['bem'] = bem = []
path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
bem.append('head')
bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
subject=subject, name='*-bem')
re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
name='(.+)').replace('\\', '\\\\')
for path in iglob(bem_pattern):
match = re.match(re_pattern, path)
name = match.group(1)
bem.append(name)
del bem, path, bem_pattern, re_pattern
# fiducials
if skip_fiducials:
paths['fid'] = []
else:
paths['fid'] = _find_fiducials_files(subject, subjects_dir)
# check that we found at least one
if len(paths['fid']) == 0:
raise IOError("No fiducials file found for %s. The fiducials "
"file should be named "
"{subject}/bem/{subject}-fiducials.fif. In "
"order to scale an MRI without fiducials set "
"skip_fiducials=True." % subject)
# duplicate files (curvature and some surfaces)
paths['duplicate'] = []
path = os.path.join(surf_dirname, '{name}')
surf_fname = os.path.join(surf_dirname, '{name}')
surf_dup_names = ('curv', 'sphere', 'sphere.reg', 'sphere.reg.avg')
for surf_dup_name in surf_dup_names:
for hemi in ('lh.', 'rh.'):
name = hemi + surf_dup_name
path = surf_fname.format(subjects_dir=subjects_dir,
subject=subject, name=name)
if os.path.exists(path):
paths['duplicate'].append(pformat(surf_fname, name=name))
del surf_dup_name, name, path, hemi
# transform files (talairach)
paths['transforms'] = []
transform_fname = os.path.join(mri_transforms_dirname, 'talairach.xfm')
path = transform_fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
paths['transforms'].append(transform_fname)
del transform_fname, path
# find source space files
paths['src'] = src = []
bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)
fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')
prefix = subject + '-'
for fname in fnames:
if fname.startswith(prefix):
fname = "{subject}-%s" % fname[len(prefix):]
path = os.path.join(bem_dirname, fname)
src.append(path)
# find MRIs
mri_dir = mri_dirname.format(subjects_dir=subjects_dir, subject=subject)
fnames = fnmatch.filter(os.listdir(mri_dir), '*.mgz')
paths['mri'] = [os.path.join(mri_dir, f) for f in fnames]
return paths
def _find_fiducials_files(subject, subjects_dir):
"""Find fiducial files."""
fid = []
# standard fiducials
if os.path.exists(fid_fname.format(subjects_dir=subjects_dir,
subject=subject)):
fid.append(fid_fname)
# fiducials with subject name
pattern = pformat(fid_fname_general, subjects_dir=subjects_dir,
subject=subject, head='*')
regex = pformat(fid_fname_general, subjects_dir=subjects_dir,
subject=subject, head='(.+)').replace('\\', '\\\\')
for path in iglob(pattern):
match = re.match(regex, path)
head = match.group(1).replace(subject, '{subject}')
fid.append(pformat(fid_fname_general, head=head))
return fid
def _is_mri_subject(subject, subjects_dir=None):
"""Check whether a directory in subjects_dir is an mri subject directory.
Parameters
----------
subject : str
Name of the potential subject/directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
is_mri_subject : bool
Whether ``subject`` is an mri subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
return bool(_find_head_bem(subject, subjects_dir) or
_find_head_bem(subject, subjects_dir, high_res=True))
def _is_scaled_mri_subject(subject, subjects_dir=None):
"""Check whether a directory in subjects_dir is a scaled mri subject.
Parameters
----------
subject : str
Name of the potential subject/directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
is_scaled_mri_subject : bool
Whether ``subject`` is a scaled mri subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if not _is_mri_subject(subject, subjects_dir):
return False
fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
return os.path.exists(fname)
def _mri_subject_has_bem(subject, subjects_dir=None):
"""Check whether an mri subject has a file matching the bem pattern.
Parameters
----------
subject : str
Name of the subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
has_bem_file : bool
Whether ``subject`` has a bem file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
name='*-bem')
fnames = glob(pattern)
return bool(len(fnames))
def read_mri_cfg(subject, subjects_dir=None):
"""Read information from the cfg file of a scaled MRI brain.
Parameters
----------
subject : str
Name of the scaled MRI subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
cfg : dict
Dictionary with entries from the MRI's cfg file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
if not os.path.exists(fname):
raise IOError("%r does not seem to be a scaled mri subject: %r does "
"not exist." % (subject, fname))
logger.info("Reading MRI cfg file %s" % fname)
config = configparser.RawConfigParser()
config.read(fname)
n_params = config.getint("MRI Scaling", 'n_params')
if n_params == 1:
scale = config.getfloat("MRI Scaling", 'scale')
elif n_params == 3:
scale_str = config.get("MRI Scaling", 'scale')
scale = np.array([float(s) for s in scale_str.split()])
else:
raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
out = {'subject_from': config.get("MRI Scaling", 'subject_from'),
'n_params': n_params, 'scale': scale}
return out
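# Editorial usage sketch (not part of the original module; the subject name
# and subjects_dir below are hypothetical and must point at a subject
# previously created with scale_mri):
def _example_read_mri_cfg():
    """Read back the scaling parameters of a scaled subject."""
    cfg = read_mri_cfg('fsaverage_scaled', subjects_dir='/data/subjects')
    # cfg carries 'subject_from', 'n_params' and 'scale'
    print(cfg['subject_from'], cfg['n_params'], cfg['scale'])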
def _write_mri_config(fname, subject_from, subject_to, scale):
"""Write the cfg file describing a scaled MRI subject.
Parameters
----------
fname : str
Target file.
subject_from : str
Name of the source MRI subject.
subject_to : str
Name of the scaled MRI subject.
scale : float | array_like, shape = (3,)
The scaling parameter.
"""
scale = np.asarray(scale)
if np.isscalar(scale) or scale.shape == ():
n_params = 1
else:
n_params = 3
config = configparser.RawConfigParser()
config.add_section("MRI Scaling")
config.set("MRI Scaling", 'subject_from', subject_from)
config.set("MRI Scaling", 'subject_to', subject_to)
config.set("MRI Scaling", 'n_params', str(n_params))
if n_params == 1:
config.set("MRI Scaling", 'scale', str(scale))
else:
config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
config.set("MRI Scaling", 'version', '1')
with open(fname, 'w') as fid:
config.write(fid)
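# For reference, a uniform scaling of 0.95 written by _write_mri_config
# produces a cfg file along these lines (sketch; exact whitespace is up to
# RawConfigParser and the subject names are hypothetical):
#
#   [MRI Scaling]
#   subject_from = fsaverage
#   subject_to = small_head
#   n_params = 1
#   scale = 0.95
#   version = 1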
def _scale_params(subject_to, subject_from, scale, subjects_dir):
"""Assemble parameters for scaling.
Returns
-------
subjects_dir : str
Subjects directory.
subject_from : str
Name of the source subject.
scale : array
Scaling factor, either shape=() for uniform scaling or shape=(3,) for
non-uniform scaling.
uniform : bool
Whether scaling is uniform.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if (subject_from is None) != (scale is None):
raise TypeError("Need to provide either both subject_from and scale "
"parameters, or neither.")
if subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
subject_from = cfg['subject_from']
n_params = cfg['n_params']
assert n_params in (1, 3)
scale = cfg['scale']
scale = np.atleast_1d(scale)
if scale.ndim != 1 or scale.shape[0] not in (1, 3):
raise ValueError("Invalid shape for scale parameer. Need scalar "
"or array of length 3. Got shape %s."
% (scale.shape,))
n_params = len(scale)
return subjects_dir, subject_from, scale, n_params == 1
@verbose
def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
subjects_dir=None, verbose=None):
"""Scale a bem file.
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
bem_name : str
Name of the bem file. For example, to scale
``fsaverage-inner_skull-bem.fif``, the bem_name would be
"inner_skull-bem".
subject_from : None | str
        The subject from which to read the BEM surfaces. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
        Scaling factor. Has to be specified if subject_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
%(verbose)s
"""
subjects_dir, subject_from, scale, uniform = \
_scale_params(subject_to, subject_from, scale, subjects_dir)
src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
name=bem_name)
dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
name=bem_name)
if os.path.exists(dst):
raise IOError("File already exists: %s" % dst)
surfs = read_bem_surfaces(src)
for surf in surfs:
surf['rr'] *= scale
if not uniform:
assert len(surf['nn']) > 0
surf['nn'] /= scale
_normalize_vectors(surf['nn'])
write_bem_surfaces(dst, surfs)
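# Editorial usage sketch (not part of the original module; the names are
# hypothetical and assume 'fsaverage_scaled' was created by scale_mri):
def _example_scale_bem():
    """Scale the inner skull BEM of a previously scaled subject."""
    scale_bem('fsaverage_scaled', 'inner_skull-bem',
              subjects_dir='/data/subjects')
    # subject_from and scale are read from fsaverage_scaled's cfg file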
def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
scale=None, subjects_dir=None):
r"""Scale labels to match a brain that was previously created by scaling.
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination brain).
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "lh.BA3a.label" will scale
"fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels
in the "fsaverage/label/aparc" directory). With None, scale all labels.
overwrite : bool
Overwrite any label file that already exists for subject_to (otherwise
existing labels are skipped).
subject_from : None | str
Name of the original MRI subject (the brain that was scaled to create
subject_to). If None, the value is read from subject_to's cfg file.
scale : None | float | array_like, shape = (3,)
Scaling parameter. If None, the value is read from subject_to's cfg
file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
subjects_dir, subject_from, scale, _ = _scale_params(
subject_to, subject_from, scale, subjects_dir)
# find labels
paths = _find_label_paths(subject_from, pattern, subjects_dir)
if not paths:
return
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_root = os.path.join(subjects_dir, subject_from, 'label')
dst_root = os.path.join(subjects_dir, subject_to, 'label')
# scale labels
for fname in paths:
dst = os.path.join(dst_root, fname)
if not overwrite and os.path.exists(dst):
continue
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
src = os.path.join(src_root, fname)
l_old = read_label(src)
pos = l_old.pos * scale
l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi,
l_old.comment, subject=subject_to)
l_new.save(dst)
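# Editorial usage sketch (not part of the original module; the subject name
# and pattern are hypothetical):
def _example_scale_labels():
    """Scale all labels in the aparc directory of the source subject."""
    scale_labels('fsaverage_scaled', pattern='aparc/*.label')
    # subject_from and scale are read from fsaverage_scaled's cfg file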
@verbose
def scale_mri(subject_from, subject_to, scale, overwrite=False,
subjects_dir=None, skip_fiducials=False, labels=True,
annot=False, verbose=None):
"""Create a scaled copy of an MRI subject.
Parameters
----------
subject_from : str
Name of the subject providing the MRI.
subject_to : str
New subject name for which to save the scaled MRI.
scale : float | array_like, shape = (3,)
The scaling factor (one or 3 parameters).
overwrite : bool
If an MRI already exists for subject_to, overwrite it.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
skip_fiducials : bool
Do not scale the MRI fiducials. If False (default), an IOError will be
raised if no fiducials file can be found.
labels : bool
Also scale all labels (default True).
annot : bool
Copy ``*.annot`` files to the new location (default False).
%(verbose)s
See Also
--------
scale_bem : Add a scaled BEM to a scaled MRI.
scale_labels : Add labels to a scaled MRI.
scale_source_space : Add a source space to a scaled MRI.
Notes
-----
This function will automatically call :func:`scale_bem`,
:func:`scale_labels`, and :func:`scale_source_space` based on expected
filename patterns in the subject directory.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = _find_mri_paths(subject_from, skip_fiducials, subjects_dir)
scale = np.atleast_1d(scale)
if scale.shape == (3,):
if np.isclose(scale[1], scale[0]) and np.isclose(scale[2], scale[0]):
scale = scale[0] # speed up scaling conditionals using a singleton
elif scale.shape != (1,):
raise ValueError('scale must have shape (3,) or (1,), got %s'
% (scale.shape,))
# make sure we have an empty target directory
dest = subject_dirname.format(subject=subject_to,
subjects_dir=subjects_dir)
if os.path.exists(dest):
if not overwrite:
raise IOError("Subject directory for %s already exists: %r"
% (subject_to, dest))
shutil.rmtree(dest)
logger.debug('create empty directory structure')
for dirname in paths['dirs']:
dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
os.makedirs(dir_)
logger.debug('save MRI scaling parameters')
fname = os.path.join(dest, 'MRI scaling parameters.cfg')
_write_mri_config(fname, subject_from, subject_to, scale)
logger.debug('surf files [in mm]')
for fname in paths['surf']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
pts, tri = read_surface(src)
write_surface(dest, pts * scale, tri)
logger.debug('BEM files [in m]')
for bem_name in paths['bem']:
scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir,
verbose=False)
logger.debug('fiducials [in m]')
for fname in paths['fid']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
pts, cframe = read_fiducials(src, verbose=False)
for pt in pts:
pt['r'] = pt['r'] * scale
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
write_fiducials(dest, pts, cframe, verbose=False)
logger.debug('MRIs [nibabel]')
os.mkdir(mri_dirname.format(subjects_dir=subjects_dir,
subject=subject_to))
for fname in paths['mri']:
mri_name = os.path.basename(fname)
_scale_mri(subject_to, mri_name, subject_from, scale, subjects_dir)
logger.debug('Transforms')
for mri_name in paths['mri']:
if mri_name.endswith('T1.mgz'):
os.mkdir(mri_transforms_dirname.format(subjects_dir=subjects_dir,
subject=subject_to))
for fname in paths['transforms']:
xfm_name = os.path.basename(fname)
_scale_xfm(subject_to, xfm_name, mri_name,
subject_from, scale, subjects_dir)
break
logger.debug('duplicate files')
for fname in paths['duplicate']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
shutil.copyfile(src, dest)
logger.debug('source spaces')
for fname in paths['src']:
src_name = os.path.basename(fname)
scale_source_space(subject_to, src_name, subject_from, scale,
subjects_dir, verbose=False)
logger.debug('labels [in m]')
os.mkdir(os.path.join(subjects_dir, subject_to, 'label'))
if labels:
scale_labels(subject_to, subject_from=subject_from, scale=scale,
subjects_dir=subjects_dir)
logger.debug('copy *.annot files')
# they don't contain scale-dependent information
if annot:
src_pattern = os.path.join(subjects_dir, subject_from, 'label',
'*.annot')
dst_dir = os.path.join(subjects_dir, subject_to, 'label')
for src_file in iglob(src_pattern):
shutil.copy(src_file, dst_dir)
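# Editorial end-to-end sketch (not part of the original module; the subject
# names, scale factor and subjects_dir are hypothetical):
def _example_scale_mri():
    """Create a uniformly scaled copy of fsaverage."""
    scale_mri('fsaverage', 'fsaverage_small', scale=0.9,
              subjects_dir='/data/subjects', skip_fiducials=True)
    # scale_bem, scale_labels and scale_source_space are invoked
    # automatically for files matching the expected name patterns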
@verbose
def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
subjects_dir=None, n_jobs=1, verbose=None):
"""Scale a source space for an mri created with scale_mri().
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
src_name : str
Source space name. Can be a spacing parameter (e.g., ``'7'``,
``'ico4'``, ``'oct6'``) or a file name of a source space file relative
to the bem directory; if the file name contains the subject name, it
should be indicated as "{subject}" in ``src_name`` (e.g.,
``"{subject}-my_source_space-src.fif"``).
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
        Scaling factor. Has to be specified if subject_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
n_jobs : int
Number of jobs to run in parallel if recomputing distances (only
applies if scale is an array of length 3, and will not use more cores
than there are source spaces).
%(verbose)s
Notes
-----
When scaling volume source spaces, the source (vertex) locations are
scaled, but the reference to the MRI volume is left unchanged. Transforms
are updated so that source estimates can be plotted on the original MRI
volume.
"""
subjects_dir, subject_from, scale, uniform = \
_scale_params(subject_to, subject_from, scale, subjects_dir)
# if n_params==1 scale is a scalar; if n_params==3 scale is a (3,) array
# find the source space file names
if src_name.isdigit():
spacing = src_name # spacing in mm
src_pattern = src_fname
else:
match = re.match(r"(oct|ico|vol)-?(\d+)$", src_name)
if match:
spacing = '-'.join(match.groups())
src_pattern = src_fname
else:
spacing = None
src_pattern = os.path.join(bem_dirname, src_name)
src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,
spacing=spacing)
dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,
spacing=spacing)
# read and scale the source space [in m]
sss = read_source_spaces(src)
logger.info("scaling source space %s: %s -> %s", spacing, subject_from,
subject_to)
logger.info("Scale factor: %s", scale)
add_dist = False
for ss in sss:
ss['subject_his_id'] = subject_to
ss['rr'] *= scale
# additional tags for volume source spaces
for key in ('vox_mri_t', 'src_mri_t'):
# maintain transform to original MRI volume ss['mri_volume_name']
if key in ss:
ss[key]['trans'][:3] *= scale[:, np.newaxis]
# distances and patch info
if uniform:
if ss['dist'] is not None:
ss['dist'] *= scale[0]
# Sometimes this is read-only due to how it's read
ss['nearest_dist'] = ss['nearest_dist'] * scale
ss['dist_limit'] = ss['dist_limit'] * scale
else: # non-uniform scaling
ss['nn'] /= scale
_normalize_vectors(ss['nn'])
if ss['dist'] is not None:
add_dist = True
dist_limit = float(np.abs(sss[0]['dist_limit']))
elif ss['nearest'] is not None:
add_dist = True
dist_limit = 0
if add_dist:
logger.info("Recomputing distances, this might take a while")
add_source_space_distances(sss, dist_limit, n_jobs)
write_source_spaces(dst, sss)
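# Editorial usage sketch (not part of the original module; both calls are
# hypothetical): src_name may be a spacing parameter or a file name
# relative to the bem directory.
def _example_scale_source_space():
    """Scale source spaces for a subject created with scale_mri."""
    scale_source_space('fsaverage_scaled', 'oct6')
    scale_source_space('fsaverage_scaled', '{subject}-my_source_space-src.fif')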
def _scale_mri(subject_to, mri_fname, subject_from, scale, subjects_dir):
"""Scale an MRI by setting its affine."""
subjects_dir, subject_from, scale, _ = _scale_params(
subject_to, subject_from, scale, subjects_dir)
if not has_nibabel():
        warn('Skipping MRI scaling for %s, please install nibabel'
             % (subject_to,))
return
import nibabel
fname_from = op.join(mri_dirname.format(
subjects_dir=subjects_dir, subject=subject_from), mri_fname)
fname_to = op.join(mri_dirname.format(
subjects_dir=subjects_dir, subject=subject_to), mri_fname)
img = nibabel.load(fname_from)
zooms = np.array(img.header.get_zooms())
zooms[[0, 2, 1]] *= scale
img.header.set_zooms(zooms)
# Hack to fix nibabel problems, see
# https://github.com/nipy/nibabel/issues/619
img._affine = img.header.get_affine() # or could use None
nibabel.save(img, fname_to)
def _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale,
subjects_dir):
"""Scale a transform."""
subjects_dir, subject_from, scale, _ = _scale_params(
subject_to, subject_from, scale, subjects_dir)
# The nibabel warning should already be there in MRI step, if applicable,
# as we only get here if T1.mgz is present (and thus a scaling was
# attempted) so we can silently return here.
if not has_nibabel():
return
fname_from = os.path.join(
mri_transforms_dirname.format(
subjects_dir=subjects_dir, subject=subject_from), xfm_fname)
fname_to = op.join(
mri_transforms_dirname.format(
subjects_dir=subjects_dir, subject=subject_to), xfm_fname)
assert op.isfile(fname_from), fname_from
assert op.isdir(op.dirname(fname_to)), op.dirname(fname_to)
# The "talairach.xfm" file stores the ras_mni transform.
#
# For "from" subj F, "to" subj T, F->T scaling S, some equivalent vertex
# positions F_x and T_x in MRI (Freesurfer RAS) coords, knowing that
# we have T_x = S @ F_x, we want to have the same MNI coords computed
# for these vertices:
#
# T_mri_mni @ T_x = F_mri_mni @ F_x
#
# We need to find the correct T_ras_mni (talaraich.xfm file) that yields
# this. So we derive (where † indicates inversion):
#
# T_mri_mni @ S @ F_x = F_mri_mni @ F_x
# T_mri_mni @ S = F_mri_mni
# T_ras_mni @ T_mri_ras @ S = F_ras_mni @ F_mri_ras
# T_ras_mni @ T_mri_ras = F_ras_mni @ F_mri_ras @ S⁻¹
# T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri
#
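    # Editorial sanity check (assumption, not part of the original
    # derivation): for uniform scaling, S = diag(s, s, s, 1) and
    # S⁻¹ = diag(1/s, 1/s, 1/s, 1), so the new talairach transform simply
    # divides MRI coordinates by s before applying the original subject's
    # RAS -> MNI mapping.
    #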
# prepare the scale (S) transform
scale = np.atleast_1d(scale)
scale = np.tile(scale, 3) if len(scale) == 1 else scale
S = Transform('mri', 'mri', scaling(*scale)) # F_mri->T_mri
#
# Get the necessary transforms of the "from" subject
#
xfm, kind = _read_fs_xfm(fname_from)
assert kind == 'MNI Transform File', kind
_, _, F_mri_ras, _, _ = _read_mri_info(mri_name, units='mm')
F_ras_mni = Transform('ras', 'mni_tal', xfm)
del xfm
#
# Get the necessary transforms of the "to" subject
#
mri_name = op.join(mri_dirname.format(
subjects_dir=subjects_dir, subject=subject_to), op.basename(mri_name))
_, _, T_mri_ras, _, _ = _read_mri_info(mri_name, units='mm')
T_ras_mri = invert_transform(T_mri_ras)
del mri_name, T_mri_ras
# Finally we construct as above:
#
# T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri
#
# By moving right to left through the equation.
T_ras_mni = \
combine_transforms(
combine_transforms(
combine_transforms(
T_ras_mri, invert_transform(S), 'ras', 'mri'),
F_mri_ras, 'ras', 'ras'),
F_ras_mni, 'ras', 'mni_tal')
_write_fs_xfm(fname_to, T_ras_mni['trans'], kind)
def _read_surface(filename):
bem = dict()
if filename is not None and op.exists(filename):
if filename.endswith('.fif'):
bem = read_bem_surfaces(filename, verbose=False)[0]
else:
try:
bem = read_surface(filename, return_dict=True)[2]
bem['rr'] *= 1e-3
complete_surface_info(bem, copy=False)
except Exception:
raise ValueError(
"Error loading surface from %s (see "
"Terminal for details)." % filename)
return bem
@fill_doc
class Coregistration(object):
"""Class for MRI<->head coregistration.
Parameters
----------
info : instance of Info | None
The measurement info.
%(subject)s
%(subjects_dir)s
fiducials : list | dict | str
The fiducials given in the MRI (surface RAS) coordinate
system. If a dict is provided it must be a dict with 3 entries
with keys 'lpa', 'rpa' and 'nasion' with as values coordinates in m.
If a list it must be a list of DigPoint instances as returned
by the read_fiducials function.
If set to 'estimated', the fiducials are initialized
automatically using fiducials defined in MNI space on fsaverage
template. If set to 'auto', one tries to find the fiducials
in a file with the canonical name (``bem/{subject}-fiducials.fif``)
        and if absent one falls back to 'estimated'. Defaults to 'auto'.
Attributes
----------
trans : instance of Transform
MRI<->Head coordinate transformation.
See Also
--------
mne.scale_mri
Notes
-----
    Internal computation parameters are in the following units:
    - rotation: radians
    - translation: m
    - scale: proportion
    If using a scale mode, :func:`~mne.scale_mri` should be used
    to create a surrogate MRI subject with the proper scale factors.
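    Examples
    --------
    A hedged usage sketch (editorial addition; ``raw`` and the subject name
    are hypothetical, and ``raw.info`` must contain digitization points):
    ::
        coreg = Coregistration(raw.info, 'sample', subjects_dir=subjects_dir)
        coreg.fit_fiducials()
        coreg.fit_icp(n_iterations=10)
        trans = coreg.trans  # the resulting head<->MRI transform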
"""
def __init__(self, info, subject, subjects_dir=None, fiducials='auto'):
_validate_type(info, (Info, None), 'info')
self._info = info
self._subject = _check_subject(subject, subject)
self._subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
self._scale_mode = None
self._rot_trans = None
self._default_parameters = \
np.array([0., 0., 0., 0., 0., 0., 1., 1., 1.])
self._rotation = self._default_parameters[:3]
self._translation = self._default_parameters[3:6]
self._scale = self._default_parameters[6:9]
self._icp_iterations = 20
self._icp_angle = 0.2
self._icp_distance = 0.2
self._icp_scale = 0.2
self._icp_fid_matches = ('nearest', 'matched')
self._icp_fid_match = self._icp_fid_matches[0]
self._lpa_weight = 1.
self._nasion_weight = 10.
self._rpa_weight = 1.
self._hsp_weight = 1.
self._eeg_weight = 1.
self._hpi_weight = 1.
self._extra_points_filter = None
self._setup_digs()
self._setup_bem()
self._setup_fiducials(fiducials)
self.reset()
def _setup_digs(self):
if self._info is None:
self._dig_dict = dict(
hpi=np.zeros((1, 3)),
dig_ch_pos_location=np.zeros((1, 3)),
hsp=np.zeros((1, 3)),
rpa=np.zeros((1, 3)),
nasion=np.zeros((1, 3)),
lpa=np.zeros((1, 3)),
)
else:
self._dig_dict = _get_data_as_dict_from_dig(
dig=self._info['dig'],
exclude_ref_channel=False
)
# adjustments:
# set weights to 0 for None input
# convert fids to float arrays
for k, w_atr in zip(['nasion', 'lpa', 'rpa', 'hsp', 'hpi'],
['_nasion_weight', '_lpa_weight',
'_rpa_weight', '_hsp_weight', '_hpi_weight']):
if self._dig_dict[k] is None:
self._dig_dict[k] = np.zeros((0, 3))
setattr(self, w_atr, 0)
elif k in ['rpa', 'nasion', 'lpa']:
self._dig_dict[k] = np.array([self._dig_dict[k]], float)
def _setup_bem(self):
# find high-res head model (if possible)
high_res_path = _find_head_bem(self._subject, self._subjects_dir,
high_res=True)
low_res_path = _find_head_bem(self._subject, self._subjects_dir,
high_res=False)
if high_res_path is None and low_res_path is None:
raise RuntimeError("No standard head model was "
f"found for subject {self._subject}")
if high_res_path is not None:
self._bem_high_res = _read_surface(high_res_path)
logger.info(f'Using high resolution head model in {high_res_path}')
else:
self._bem_high_res = _read_surface(low_res_path)
logger.info(f'Using low resolution head model in {low_res_path}')
if low_res_path is None:
# This should be very rare!
            warn('No low-resolution head found, decimating high resolution '
                 'mesh (%d vertices): %s' % (len(self._bem_high_res['rr']),
                                             high_res_path,))
            # Create one from the high res one, which we know we have
            rr, tris = decimate_surface(self._bem_high_res['rr'],
                                        self._bem_high_res['tris'],
                                        n_triangles=5120)
# directly set the attributes of bem_low_res
self._bem_low_res = complete_surface_info(
dict(rr=rr, tris=tris), copy=False, verbose=False)
else:
self._bem_low_res = _read_surface(low_res_path)
def _setup_fiducials(self, fids):
_validate_type(fids, (str, dict, list))
# find fiducials file
if fids == 'auto':
fid_files = _find_fiducials_files(self._subject,
self._subjects_dir)
if len(fid_files) > 0:
# Read fiducials from disk
fid_filename = fid_files[0].format(
subjects_dir=self._subjects_dir, subject=self._subject)
logger.info(f'Using fiducials from: {fid_filename}.')
fids, _ = read_fiducials(fid_filename)
else:
fids = 'estimated'
if fids == 'estimated':
logger.info('Estimating fiducials from fsaverage.')
fids = get_mni_fiducials(self._subject, self._subjects_dir)
if isinstance(fids, list):
fid_coords = _fiducial_coords(fids)
else:
assert isinstance(fids, dict)
fid_coords = np.array([fids['lpa'], fids['nasion'], fids['rpa']],
dtype=float)
self._fid_points = fid_coords
# does not seem to happen by itself ... so hard code it:
self._reset_fiducials()
def _reset_fiducials(self): # noqa: D102
if self._fid_points is not None:
self._lpa = self._fid_points[0:1]
self._nasion = self._fid_points[1:2]
self._rpa = self._fid_points[2:3]
def _update_params(self, rot=None, tra=None, sca=None,
force_update_omitted=False):
if force_update_omitted:
tra = self._translation
rot_changed = False
if rot is not None:
rot_changed = True
self._last_rotation = self._rotation.copy()
self._rotation = rot
tra_changed = False
if rot_changed or tra is not None:
if tra is None:
tra = self._translation
tra_changed = True
self._last_translation = self._translation.copy()
self._translation = tra
self._head_mri_t = rotation(*self._rotation).T
self._head_mri_t[:3, 3] = \
-np.dot(self._head_mri_t[:3, :3], tra)
self._transformed_dig_hpi = \
apply_trans(self._head_mri_t, self._dig_dict['hpi'])
self._transformed_dig_eeg = \
apply_trans(
self._head_mri_t, self._dig_dict['dig_ch_pos_location'])
self._transformed_dig_extra = \
apply_trans(self._head_mri_t,
self._filtered_extra_points)
self._transformed_orig_dig_extra = \
apply_trans(self._head_mri_t, self._dig_dict['hsp'])
self._mri_head_t = rotation(*self._rotation)
self._mri_head_t[:3, 3] = np.array(tra)
if tra_changed or sca is not None:
if sca is None:
sca = self._scale
self._last_scale = self._scale.copy()
self._scale = sca
self._mri_trans = np.eye(4)
self._mri_trans[:, :3] *= sca
self._transformed_high_res_mri_points = \
apply_trans(self._mri_trans,
self._processed_high_res_mri_points)
self._update_nearest_calc()
if tra_changed:
self._nearest_transformed_high_res_mri_idx_orig_hsp = \
self._nearest_calc.query(self._transformed_orig_dig_extra)[1]
self._nearest_transformed_high_res_mri_idx_hpi = \
self._nearest_calc.query(self._transformed_dig_hpi)[1]
self._nearest_transformed_high_res_mri_idx_eeg = \
self._nearest_calc.query(self._transformed_dig_eeg)[1]
self._nearest_transformed_high_res_mri_idx_rpa = \
self._nearest_calc.query(
apply_trans(self._head_mri_t, self._dig_dict['rpa']))[1]
self._nearest_transformed_high_res_mri_idx_nasion = \
self._nearest_calc.query(
apply_trans(self._head_mri_t, self._dig_dict['nasion']))[1]
self._nearest_transformed_high_res_mri_idx_lpa = \
self._nearest_calc.query(
apply_trans(self._head_mri_t, self._dig_dict['lpa']))[1]
def set_scale_mode(self, scale_mode):
"""Select how to fit the scale parameters.
Parameters
----------
scale_mode : None | str
The scale mode can be 'uniform', '3-axis' or disabled.
Defaults to None.
* 'uniform': 1 scale factor is recovered.
* '3-axis': 3 scale factors are recovered.
* None: do not scale the MRI.
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
self._scale_mode = scale_mode
return self
def set_grow_hair(self, value):
"""Compensate for hair on the digitizer head shape.
Parameters
----------
value : float
Move the back of the MRI head outwards by ``value`` (mm).
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
self._grow_hair = value
self._update_params(self._rotation, self._translation, self._scale)
return self
def set_rotation(self, rot):
"""Set the rotation parameter.
Parameters
----------
rot : array, shape (3,)
The rotation parameter (in radians).
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
self._update_params(rot=np.array(rot))
return self
def set_translation(self, tra):
"""Set the translation parameter.
Parameters
----------
tra : array, shape (3,)
The translation parameter (in m.).
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
self._update_params(tra=np.array(tra))
return self
def set_scale(self, sca):
"""Set the scale parameter.
Parameters
----------
sca : array, shape (3,)
The scale parameter.
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
self._update_params(sca=np.array(sca))
return self
def _update_nearest_calc(self):
self._nearest_calc = _DistanceQuery(
self._processed_high_res_mri_points * self._scale)
@property
def _filtered_extra_points(self):
if self._extra_points_filter is None:
return self._dig_dict['hsp']
else:
return self._dig_dict['hsp'][self._extra_points_filter]
@property
def _parameters(self):
return np.concatenate((self._rotation, self._translation, self._scale))
@property
def _last_parameters(self):
return np.concatenate((self._last_rotation,
self._last_translation, self._last_scale))
@property
def _changes(self):
move = np.linalg.norm(self._last_translation - self._translation) * 1e3
angle = np.rad2deg(_angle_between_quats(
rot_to_quat(rotation(*self._rotation)[:3, :3]),
rot_to_quat(rotation(*self._last_rotation)[:3, :3])))
percs = 100 * (self._scale - self._last_scale) / self._last_scale
return move, angle, percs
@property
def _nearest_transformed_high_res_mri_idx_hsp(self):
return self._nearest_calc.query(
apply_trans(self._head_mri_t, self._filtered_extra_points))[1]
@property
def _has_hpi_data(self):
return (self._has_mri_data and
len(self._nearest_transformed_high_res_mri_idx_hpi) > 0)
@property
def _has_eeg_data(self):
return (self._has_mri_data and
len(self._nearest_transformed_high_res_mri_idx_eeg) > 0)
@property
def _has_lpa_data(self):
return (np.any(self._lpa) and np.any(self._dig_dict['lpa']))
@property
def _has_nasion_data(self):
        return (np.any(self._nasion) and np.any(self._dig_dict['nasion']))
@property
def _has_rpa_data(self):
return (np.any(self._rpa) and np.any(self._dig_dict['rpa']))
@property
def _processed_high_res_mri_points(self):
return self._get_processed_mri_points('high')
@property
def _processed_low_res_mri_points(self):
return self._get_processed_mri_points('low')
def _get_processed_mri_points(self, res):
bem = self._bem_low_res if res == 'low' else self._bem_high_res
points = bem['rr'].copy()
if self._grow_hair:
assert len(bem['nn']) # should be guaranteed by _read_surface
scaled_hair_dist = (1e-3 * self._grow_hair /
np.array(self._scale))
hair = points[:, 2] > points[:, 1]
points[hair] += bem['nn'][hair] * scaled_hair_dist
return points
@property
def _has_mri_data(self):
return len(self._transformed_high_res_mri_points) > 0
@property
def _has_dig_data(self):
return (self._has_mri_data and
len(self._nearest_transformed_high_res_mri_idx_hsp) > 0)
@property
def _orig_hsp_point_distance(self):
mri_points = self._transformed_high_res_mri_points[
self._nearest_transformed_high_res_mri_idx_orig_hsp]
hsp_points = self._transformed_orig_dig_extra
return np.linalg.norm(mri_points - hsp_points, axis=-1)
def _log_dig_mri_distance(self, prefix):
errs_nearest = self.compute_dig_mri_distances()
logger.info(f'{prefix} median distance: '
f'{np.median(errs_nearest * 1000):6.2f} mm')
@property
def scale(self):
"""Get the current scale factor.
Returns
-------
scale : ndarray, shape (3,)
The scale factors.
"""
return self._scale.copy()
@verbose
def fit_fiducials(self, lpa_weight=1., nasion_weight=10., rpa_weight=1.,
verbose=None):
"""Find rotation and translation to fit all 3 fiducials.
Parameters
----------
lpa_weight : float
Relative weight for LPA. The default value is 1.
nasion_weight : float
Relative weight for nasion. The default value is 10.
rpa_weight : float
Relative weight for RPA. The default value is 1.
%(verbose)s
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
logger.info('Aligning using fiducials')
self._log_dig_mri_distance('Start')
n_scale_params = self._n_scale_params
if n_scale_params == 3:
            # enforce 1 even for 3-axis here (3 points is not enough)
logger.info("Enforcing 1 scaling parameter for fit "
"with fiducials.")
n_scale_params = 1
self._lpa_weight = lpa_weight
self._nasion_weight = nasion_weight
self._rpa_weight = rpa_weight
head_pts = np.vstack((self._dig_dict['lpa'],
self._dig_dict['nasion'],
self._dig_dict['rpa']))
mri_pts = np.vstack((self._lpa, self._nasion, self._rpa))
weights = [lpa_weight, nasion_weight, rpa_weight]
if n_scale_params == 0:
mri_pts *= self._scale # not done in fit_matched_points
x0 = self._parameters
x0 = x0[:6 + n_scale_params]
est = fit_matched_points(mri_pts, head_pts, x0=x0, out='params',
scale=n_scale_params, weights=weights)
if n_scale_params == 0:
self._update_params(rot=est[:3], tra=est[3:6])
else:
assert est.size == 7
est = np.concatenate([est, [est[-1]] * 2])
assert est.size == 9
self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9])
self._log_dig_mri_distance('End ')
return self
def _setup_icp(self, n_scale_params):
head_pts = list()
mri_pts = list()
weights = list()
if self._has_dig_data and self._hsp_weight > 0: # should be true
head_pts.append(self._filtered_extra_points)
mri_pts.append(self._processed_high_res_mri_points[
self._nearest_transformed_high_res_mri_idx_hsp])
weights.append(np.full(len(head_pts[-1]), self._hsp_weight))
for key in ('lpa', 'nasion', 'rpa'):
if getattr(self, f'_has_{key}_data'):
head_pts.append(self._dig_dict[key])
if self._icp_fid_match == 'matched':
mri_pts.append(getattr(self, f'_{key}'))
else:
assert self._icp_fid_match == 'nearest'
mri_pts.append(self._processed_high_res_mri_points[
getattr(
self,
'_nearest_transformed_high_res_mri_idx_%s'
% (key,))])
weights.append(np.full(len(mri_pts[-1]),
getattr(self, '_%s_weight' % key)))
if self._has_eeg_data and self._eeg_weight > 0:
head_pts.append(self._dig_dict['dig_ch_pos_location'])
mri_pts.append(self._processed_high_res_mri_points[
self._nearest_transformed_high_res_mri_idx_eeg])
weights.append(np.full(len(mri_pts[-1]), self._eeg_weight))
if self._has_hpi_data and self._hpi_weight > 0:
head_pts.append(self._dig_dict['hpi'])
mri_pts.append(self._processed_high_res_mri_points[
self._nearest_transformed_high_res_mri_idx_hpi])
weights.append(np.full(len(mri_pts[-1]), self._hpi_weight))
head_pts = np.concatenate(head_pts)
mri_pts = np.concatenate(mri_pts)
weights = np.concatenate(weights)
if n_scale_params == 0:
mri_pts *= self._scale # not done in fit_matched_points
return head_pts, mri_pts, weights
def set_fid_match(self, match):
"""Set the strategy for fitting anatomical landmark (fiducial) points.
Parameters
----------
match : 'nearest' | 'matched'
Alignment strategy; ``'nearest'`` aligns anatomical landmarks to
any point on the head surface; ``'matched'`` aligns to the fiducial
points in the MRI.
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
_check_option('match', match, self._icp_fid_matches)
self._icp_fid_match = match
return self
@verbose
def fit_icp(self, n_iterations=20, lpa_weight=1., nasion_weight=10.,
rpa_weight=1., hsp_weight=1., eeg_weight=1., hpi_weight=1.,
callback=None, verbose=None):
"""Find MRI scaling, translation, and rotation to match HSP.
Parameters
----------
n_iterations : int
Maximum number of iterations.
lpa_weight : float
Relative weight for LPA. The default value is 1.
nasion_weight : float
Relative weight for nasion. The default value is 10.
rpa_weight : float
Relative weight for RPA. The default value is 1.
hsp_weight : float
Relative weight for HSP. The default value is 1.
eeg_weight : float
Relative weight for EEG. The default value is 1.
hpi_weight : float
Relative weight for HPI. The default value is 1.
callback : callable | None
A function to call on each iteration. Useful for status message
updates. It will be passed the keyword arguments ``iteration``
and ``n_iterations``.
%(verbose)s
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
logger.info('Aligning using ICP')
self._log_dig_mri_distance('Start ')
n_scale_params = self._n_scale_params
self._lpa_weight = lpa_weight
self._nasion_weight = nasion_weight
self._rpa_weight = rpa_weight
self._hsp_weight = hsp_weight
self._eeg_weight = eeg_weight
        self._hpi_weight = hpi_weight
# Initial guess (current state)
est = self._parameters
est = est[:[6, 7, None, 9][n_scale_params]]
# Do the fits, assigning and evaluating at each step
for iteration in range(n_iterations):
head_pts, mri_pts, weights = self._setup_icp(n_scale_params)
est = fit_matched_points(mri_pts, head_pts, scale=n_scale_params,
x0=est, out='params', weights=weights)
if n_scale_params == 0:
self._update_params(rot=est[:3], tra=est[3:6])
elif n_scale_params == 1:
est = np.array(list(est) + [est[-1]] * 2)
self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9])
else:
self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9])
            move, angle, scale = self._changes
self._log_dig_mri_distance(f' ICP {iteration + 1:2d} ')
if angle <= self._icp_angle and move <= self._icp_distance and \
all(scale <= self._icp_scale):
break
if callback is not None:
callback(iteration, n_iterations)
self._log_dig_mri_distance('End ')
return self
@property
def _n_scale_params(self):
if self._scale_mode is None:
n_scale_params = 0
elif self._scale_mode == 'uniform':
n_scale_params = 1
else:
n_scale_params = 3
return n_scale_params
def omit_head_shape_points(self, distance):
"""Exclude head shape points that are far away from the MRI head.
Parameters
----------
distance : float
Exclude all points that are further away from the MRI head than
this distance (in m.). A value of distance <= 0 excludes nothing.
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
distance = float(distance)
        if distance <= 0:
            return self
# find the new filter
mask = self._orig_hsp_point_distance <= distance
n_excluded = np.sum(~mask)
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# set the filter
self._extra_points_filter = mask
self._update_params(force_update_omitted=True)
return self
def compute_dig_mri_distances(self):
"""Compute distance between head shape points and MRI skin surface.
Returns
-------
dist : array, shape (n_points,)
The distance of the head shape points to the MRI skin surface.
See Also
--------
mne.dig_mri_distances
"""
# we don't use `dig_mri_distances` here because it should be much
# faster to use our already-determined nearest points
hsp_points, mri_points, _ = self._setup_icp(0)
hsp_points = apply_trans(self._head_mri_t, hsp_points)
return np.linalg.norm(mri_points - hsp_points, axis=-1)
@property
def trans(self):
"""Return the head-mri transform."""
return Transform('head', 'mri', self._head_mri_t)
def reset(self):
"""Reset all the parameters affecting the coregistration.
Returns
-------
self : Coregistration
The modified Coregistration object.
"""
self._grow_hair = 0.
self.set_rotation(self._default_parameters[:3])
self.set_translation(self._default_parameters[3:6])
self.set_scale(self._default_parameters[6:9])
self._extra_points_filter = None
self._update_nearest_calc()
return self
| bloyl/mne-python | mne/coreg.py | Python | bsd-3-clause | 74,115 |
# -*- coding: utf-8 -*-
"""test_replay."""
import os
import pytest
from cookiecutter import replay, main, exceptions
def test_get_replay_file_name():
"""Make sure that replay.get_file_name generates a valid json file path."""
exp_replay_file_name = os.path.join('foo', 'bar.json')
assert replay.get_file_name('foo', 'bar') == exp_replay_file_name
@pytest.mark.parametrize(
'invalid_kwargs', ({'no_input': True},
{'extra_context': {}},
{'no_input': True, 'extra_context': {}},)
)
def test_raise_on_invalid_mode(invalid_kwargs):
"""Test `cookiecutter` raise exception on unacceptable `replay` request."""
with pytest.raises(exceptions.InvalidModeException):
main.cookiecutter('foo', replay=True, **invalid_kwargs)
def test_main_does_not_invoke_dump_but_load(mocker):
"""Test `cookiecutter` calling correct functions on `replay`."""
mock_prompt = mocker.patch('cookiecutter.main.prompt_for_config')
mock_gen_context = mocker.patch('cookiecutter.main.generate_context')
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
mock_replay_dump = mocker.patch('cookiecutter.main.dump')
mock_replay_load = mocker.patch('cookiecutter.main.load')
main.cookiecutter('tests/fake-repo-tmpl/', replay=True)
assert not mock_prompt.called
assert not mock_gen_context.called
assert not mock_replay_dump.called
assert mock_replay_load.called
assert mock_gen_files.called
def test_main_does_not_invoke_load_but_dump(mocker):
"""Test `cookiecutter` calling correct functions on non-replay launch."""
mock_prompt = mocker.patch('cookiecutter.main.prompt_for_config')
mock_gen_context = mocker.patch('cookiecutter.main.generate_context')
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
mock_replay_dump = mocker.patch('cookiecutter.main.dump')
mock_replay_load = mocker.patch('cookiecutter.main.load')
main.cookiecutter('tests/fake-repo-tmpl/', replay=False)
assert mock_prompt.called
assert mock_gen_context.called
assert mock_replay_dump.called
assert not mock_replay_load.called
assert mock_gen_files.called
| luzfcb/cookiecutter | tests/replay/test_replay.py | Python | bsd-3-clause | 2,207 |
from PyQt5 import QtGui, QtCore, QtWidgets
from idaapi import PluginForm
from DIE.Lib import BpHandler
import DIE.UI.Die_Icons
class BreakpointView(PluginForm):
"""
    DIE Breakpoint View
"""
def __init__(self):
super(BreakpointView, self).__init__()
self.bp_handler = None
self.bp_tree_widget = None
self.die_icons = None
def Show(self):
return PluginForm.Show(self,
"Breakpoint View",
options=PluginForm.FORM_PERSIST)
def OnCreate(self, form):
"""
Called when the view is created
"""
self.bp_tree_widget = QtWidgets.QTreeWidget()
self.bp_handler = BpHandler.get_bp_handler()
self.die_icons = DIE.UI.Die_Icons.get_die_icons()
# Get parent widget
self.parent = self.FormToPyQtWidget(form)
self._add_parser_data()
toolbar = QtWidgets.QToolBar()
action_refresh = QtWidgets.QAction(self.die_icons.icon_refresh, "Refresh", toolbar)
action_refresh.triggered.connect(self.refresh)
toolbar.addAction(action_refresh)
layout = QtWidgets.QGridLayout()
layout.addWidget(toolbar)
layout.addWidget(self.bp_tree_widget)
self.parent.setLayout(layout)
def refresh(self):
"""
Reload the view with current values
"""
self._add_parser_data()
def _add_parser_data(self):
"""
Add data to the breakpoint widget model
"""
if self.bp_tree_widget is not None:
self.bp_tree_widget.clear()
else:
self.bp_tree_widget = QtWidgets.QTreeWidget()
root_item = self.bp_tree_widget.invisibleRootItem()
self.bp_tree_widget.setHeaderLabel("Breakpoints")
# Excluded Modules
module_item = QtWidgets.QTreeWidgetItem()
module_item.setText(0, "Excluded Modules")
module_item.setFlags(QtCore.Qt.ItemIsEnabled)
row = 0
for module in self.bp_handler.excluded_modules:
current_row_item = QtWidgets.QTreeWidgetItem()
current_row_item.setFlags(QtCore.Qt.ItemIsEnabled)
current_row_item.setText(0, module)
module_item.insertChild(row, current_row_item)
row += 1
# Excluded Functions
function_item = QtWidgets.QTreeWidgetItem()
function_item.setText(0, "Excluded Functions")
function_item.setFlags(QtCore.Qt.ItemIsEnabled)
row = 0
for function in self.bp_handler.excluded_funcNames:
current_row_item = QtWidgets.QTreeWidgetItem()
current_row_item.setFlags(QtCore.Qt.ItemIsEnabled)
current_row_item.setText(0, function)
function_item.insertChild(row, current_row_item)
row += 1
# Excluded Addresses
ea_item = QtWidgets.QTreeWidgetItem()
ea_item.setText(0, "Excluded Addresses")
ea_item.setFlags(QtCore.Qt.ItemIsEnabled)
row = 0
for ea in self.bp_handler.excluded_bp_ea:
current_row_item = QtWidgets.QTreeWidgetItem()
current_row_item.setFlags(QtCore.Qt.ItemIsEnabled)
current_row_item.setText(0, hex(ea))
ea_item.insertChild(row, current_row_item)
row += 1
current_row = 0
if module_item.childCount() > 0:
root_item.insertChild(current_row, module_item)
current_row += 1
if function_item.childCount() > 0:
root_item.insertChild(current_row, function_item)
current_row += 1
if ea_item.childCount() > 0:
root_item.insertChild(current_row, ea_item)
current_row += 1
_bp_view = None
def get_view():
return _bp_view
def initialize():
global _bp_view
_bp_view = BreakpointView()
| ynvb/DIE | DIE/UI/BPView.py | Python | mit | 3,987 |
import win32net
import win32netcon
# Describe the new share with a level-2 SHARE_INFO dictionary.
shinfo = {}
shinfo['netname'] = 'python test'            # name the share is published under
shinfo['type'] = win32netcon.STYPE_DISKTREE  # an ordinary disk share
shinfo['remark'] = 'data files'
shinfo['permissions'] = 0
shinfo['max_uses'] = -1                      # unlimited concurrent connections
shinfo['current_uses'] = 0
shinfo['path'] = 'c:\\my_data'               # local directory to share
shinfo['passwd'] = ''
server = 'servername'                        # placeholder server name
# Create the share at information level 2 (matches the dict layout above).
win32net.NetShareAdd(server, 2, shinfo)
| ActiveState/code | recipes/Python/303341_creating_share_windows/recipe-303341.py | Python | mit | 330 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Libhugin documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 30 15:21:12 2013.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
###################################################
# Trick to mock the dependencies for readthedocs  #
###################################################
# Copied from:
# http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-
# on-libraries-that-depend-on-c-modules
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock
def __getattr__(self, name):
return Mock
MOCK_MODULES = []
with open('../pip_requirements.txt', 'r') as handle:
for line in handle:
if line.startswith('#: '):
sys.modules[line[3:].strip()] = Mock()
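# pip_requirements.txt is expected to flag modules that need mocking with a
# leading "#: " marker; a (hypothetical) entry would look like:
#
#   #: PyQt5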
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
todo_include_todos = True
# General information about the project.
project = 'Libhugin'
copyright = '2013, Christoph Piechula'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_options = {
'index_logo': '_static/rabe.jpg',
'index_logo_height': '10px'
}
html_theme_path = ['_themes']
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'flask'
else:
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Libhugindoc'
# -- Options for LaTeX output -------------------------------------------------
# latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# }
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Libhugin.tex', 'Libhugin Documentation', 'Christoph Piechula',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libhugin', 'Libhugin Documentation',
['Christoph Piechula'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Libhugin', 'Libhugin Documentation', 'Christoph Piechula',
'Libhugin', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output --------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'Libhugin'
epub_author = 'Christoph Piechula'
epub_publisher = 'Christoph Piechula'
epub_copyright = '2013, Christoph Piechula'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# If 'no', URL addresses will not be shown.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/latest/', None),
'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
'yapsy': ('https://yapsy.readthedocs.org/en/latest/', None)
}
# Make __init__ and Class.__doc__ visible:
autoclass_content = 'init'
autodoc_member_order = 'bysource'
| qitta/libhugin | docs/conf.py | Python | gpl-3.0 | 11,701 |
import functools
import logging
import os
from lintreview.tools import Tool
from lintreview.tools import run_command
from lintreview.utils import in_path
from lintreview.utils import npm_exists
log = logging.getLogger(__name__)
class Sasslint(Tool):
name = 'sasslint'
def check_dependencies(self):
"""
See if sass-lint is on the system path.
"""
return in_path('sass-lint') or npm_exists('sass-lint')
def match_file(self, filename):
base = os.path.basename(filename)
name, ext = os.path.splitext(base)
return ext == '.sass' or ext == '.scss'
def process_files(self, files):
"""
Run code checks with sass-lint.
Only a single process is made for all files
to save resources.
"""
log.debug('Processing %s files with %s', files, self.name)
cmd = 'sass-lint'
if npm_exists('sass-lint'):
cmd = os.path.join(
os.getcwd(),
'node_modules',
'.bin',
'sass-lint')
command = [cmd, '-f', 'checkstyle', '-v']
command += files
if self.options.get('ignore'):
            command += ['--ignore', self.options.get('ignore')]
if self.options.get('config'):
command += ['--config', self.apply_base(self.options['config'])]
output = run_command(
command,
ignore_error=True)
filename_converter = functools.partial(
self._relativize_filename,
files)
self._process_checkstyle(output, filename_converter)
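# Illustrative sketch (not part of the upstream tool): this mirrors how
# process_files() assembles the sass-lint argv when a local npm install is
# present; the config path argument is a made-up example.
def _example_sasslint_command(files, config_path=None):
    """Return the argv process_files() would build, without executing it."""
    cmd = os.path.join(os.getcwd(), 'node_modules', '.bin', 'sass-lint')
    command = [cmd, '-f', 'checkstyle', '-v'] + list(files)
    if config_path:
        command += ['--config', config_path]
    return command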
| zoidbergwill/lint-review | lintreview/tools/sasslint.py | Python | mit | 1,616 |
import smtplib
# Sample code for sending email
# Make sure to check 'email' documentation to construct messages!
def prompt(prompt):
return raw_input(prompt).strip()
fromaddr = prompt("From: ")
toaddrs = prompt("To: ").split()
print "Enter message, end with ^D (Unix) or ^Z (Windows):"
# Add the From: and To: headers at the start!
msg = ("From: %s\r\nTo: %s\r\n\r\n"
% (fromaddr, ", ".join(toaddrs)))
while 1:
try:
line = raw_input()
except EOFError:
break
if not line:
break
msg = msg + line
print "Message length is " + repr(len(msg))
server = smtplib.SMTP('mail.luc.edu')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
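# Hedged aside (illustrative only, matching the note above): the stdlib
# 'email' package can build the headers instead of the manual formatting
# used here, e.g.:
#   from email.mime.text import MIMEText
#   mime = MIMEText(body_text)
#   mime['From'] = fromaddr
#   mime['To'] = ", ".join(toaddrs)
#   server.sendmail(fromaddr, toaddrs, mime.as_string())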
| aarestad/gradschool-stuff | xml-class/hw3/src/smtp.py | Python | gpl-2.0 | 714 |
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
"""
Publish and subscribe to MQTT messages.
Additional information at http://mqtt.org and
http://ibmstreams.github.io/streamsx.messaging
"""
from future.builtins import *
from streamsx.topology.topology import *
from streamsx.topology import schema
import streamsx.topology.functions
class MqttStreams(object):
"""
A simple connector to a MQTT broker for publishing
string tuples to MQTT topics, and
subscribing to MQTT topics and creating streams.
A connector is for a specific MQTT Broker as specified in
    the configuration object config. Any number of publish() and subscribe()
connections may be created from a single mqtt_streams connector.
Sample use:
::
topo = Topology("An MQTT application")
# define configuration information
config = {}
config['clientID'] = "test_MQTTpublishClient"
config['qos'] = int("1") #(needs to be int vs long)
    config['keepAliveInterval'] = int(20) #(needs to be int vs long)
    config['commandTimeout'] = 30000 #(needs to be int vs long)
    config['period'] = 5000 #(needs to be int vs long)
    config['messageQueueSize'] = 10 #(needs to be int vs long)
config['reconnectionBound'] = int(20)
config['retain'] = True
config['password'] = "foobar"
config['trustStore'] = "/tmp/no-such-trustStore"
config['trustStorePassword'] = "woohoo"
config['keyStore'] = "/tmp/no-such-keyStore"
config['keyStorePassword'] = "woohoo"
# create the connector's configuration property map
config['serverURI'] = "tcp://localhost:1883"
config['userID'] = "user1id"
    config['password'] = "user1passwrd"
# create the connector
mqstream = MqttStreams(topo, config)
# publish a python source stream to the topic "python.topic1"
topic = "python.topic1"
src = topo.source(test_functions.mqtt_publish)
mqs = mqstream.publish(src, topic)
# subscribe to the topic "python.topic1"
topic = ["python.topic1", ]
mqs = mqstream.subscribe(topic)
mqs.print()
Configuration properties apply to publish and
subscribe unless stated otherwise.
serverURI
Required String. URI to the MQTT server, either
        tcp://<hostid>[:<port>]
        or ssl://<hostid>[:<port>].
The port defaults to 1883 for "tcp:" and 8883 for "ssl:" URIs.
clientID
Optional String. A unique identifier for a connection
to the MQTT server.
        The MQTT broker only allows a single
        connection for a particular clientID.
By default a unique client ID is automatically
generated for each use of publish() and subscribe().
The specified clientID is used for the first
publish() or subscribe() use and
        a suffix is added for each subsequent use.
keepAliveInterval
Optional Integer. Automatically generate a MQTT
ping message to the server if a message or ping hasn't been
        sent or received in the last keepAliveInterval seconds.
Enables the client to detect if the server is no longer available
without having to wait for the TCP/IP timeout.
A value of 0 disables keepalive processing.
The default is 60.
commandTimeout
Optional Long. The maximum time in milliseconds
to wait for a MQTT connect or publish action to complete.
A value of 0 causes the client to wait indefinitely.
The default is 0.
period
Optional Long. The time in milliseconds before
attempting to reconnect to the server following a connection failure.
The default is 60000.
userID
Optional String. The identifier to use when authenticating
with a server configured to require that form of authentication.
password
Optional String. The identifier to use when authenticating
        with a server configured to require that form of authentication.
trustStore
Optional String. The pathname to a file containing the
public certificate of trusted MQTT servers. If a relative path
is specified, the path is relative to the application directory.
Required when connecting to a MQTT server with an
ssl:/... serverURI.
trustStorePassword
Required String when trustStore is used.
The password needed to access the encrypted trustStore file.
keyStore
Optional String. The pathname to a file containing the
MQTT client's public private key certificates.
If a relative path is specified, the path is relative to the
application directory.
Required when an MQTT server is configured to use SSL client authentication.
keyStorePassword
Required String when keyStore is used.
The password needed to access the encrypted keyStore file.
messageQueueSize
[subscribe] Optional Integer. The size, in number
of messages, of the subscriber's internal receive buffer. Received
messages are added to the buffer prior to being converted to a
stream tuple. The receiver blocks when the buffer is full.
The default is 50.
retain
[publish] Optional Boolean. Indicates if messages should be
retained on the MQTT server. Default is false.
qos
Optional Integer. The default
MQTT quality of service used for message handling.
The default is 0.
"""
def __init__(self, topology, config):
self.topology = topology
self.config = config.copy()
self.opCnt = 0
def publish(self, pub_stream, topic):
parms = self.config.copy()
parms['topic'] = topic
parms['dataAttributeName'] = "string"
        self.opCnt += 1
        if (self.opCnt > 1):
# each op requires its own clientID
clientId = parms['clientID']
if (clientId is not None and len(clientId) > 0):
parms['clientID'] = clientId + "-" + str(id(self)) + "-" + str(self.opCnt)
# convert pub_stream outputport schema from spl po to spl rstring type
forOp = pub_stream._map(streamsx.topology.functions.identity, schema.CommonSchema.String)
op = self.topology.graph.addOperator(kind="com.ibm.streamsx.messaging.mqtt::MQTTSink")
op.addInputPort(outputPort=forOp.oport)
op.setParameters(parms)
return None
def subscribe(self, topic):
parms = self.config.copy()
        if (parms.get('retain') is not None):
del parms['retain']
parms['topics'] = topic
parms['topicOutAttrName'] = "topic"
parms['dataAttributeName'] = "string"
        self.opCnt += 1
        if (self.opCnt > 1):
# each op requires its own clientID
clientId = parms['clientID']
if (clientId is not None and len(clientId) > 0):
parms['clientID'] = clientId + "-" + str(id(self)) + "-" + str(self.opCnt)
op = self.topology.graph.addOperator(kind="com.ibm.streamsx.messaging.mqtt::MQTTSource")
oport = op.addOutputPort(schema=schema.StreamSchema("tuple<rstring topic, rstring string>"))
op.setParameters(parms)
pop = self.topology.graph.addPassThruOperator()
pop.addInputPort(outputPort=oport)
pOport = pop.addOutputPort(schema=schema.CommonSchema.String)
return Stream(self.topology, pOport)
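# Note added for clarity (not from the original module): publish() and
# subscribe() bump opCnt before each use because the broker only allows one
# connection per clientID; with config['clientID'] = "client", a second call
# on the same connector connects as "client-<id(self)>-2" rather than "client".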
| wmarshall484/streamsx.topology | com.ibm.streamsx.topology/opt/python/packages/streamsx/topology/mqtt.py | Python | apache-2.0 | 7,558 |
# -*- coding: utf-8 -*-
"""Page model for Cloud Intel / Reports / Reports"""
from cached_property import cached_property
from navmazing import NavigateToAttribute, NavigateToSibling
from cfme.utils.wait import wait_for
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
from cfme.utils.timeutil import parsetime
from cfme.utils.appliance import Navigatable
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from . import CloudIntelReportsView, ReportsMultiBoxSelect
from widgetastic.utils import ParametrizedLocator
from widgetastic.widget import Text, Checkbox, View, ParametrizedView, Table as VanillaTable
from widgetastic.exceptions import NoSuchElementException
from widgetastic_manageiq import PaginationPane, Table
from widgetastic_patternfly import Button, Input, BootstrapSelect, Tab, CandidateNotFound
from cfme.web_ui.expression_editor_widgetastic import ExpressionEditor
class CustomReportFormCommon(CloudIntelReportsView):
report_title = Text("#explorer_title_text")
menu_name = Input("name")
title = Input("title")
base_report_on = BootstrapSelect("chosen_model")
report_fields = ReportsMultiBoxSelect(
move_into="Move selected fields down",
move_from="Move selected fields up",
available_items="available_fields",
chosen_items="selected_fields"
)
cancel_after = BootstrapSelect("chosen_queue_timeout")
@View.nested
class consolidation(Tab): # noqa
column1 = BootstrapSelect("chosen_pivot1")
column2 = BootstrapSelect("chosen_pivot2")
column3 = BootstrapSelect("chosen_pivot3")
@View.nested
class formatting(Tab): # noqa
page_size = BootstrapSelect("pdf_page_size")
@View.nested
class styling(Tab): # noqa
pass
@View.nested
class filter(Tab): # noqa
filter_show_costs = BootstrapSelect("cb_show_typ")
filter_owner = BootstrapSelect("cb_owner_id")
filter_tag_cat = BootstrapSelect("cb_tag_cat")
filter_tag_value = BootstrapSelect("cb_tag_value")
interval_end = BootstrapSelect("cb_end_interval_offset")
primary_filter = ExpressionEditor()
secondary_filter = ExpressionEditor()
@View.nested
class summary(Tab): # noqa
sort_by = BootstrapSelect("chosen_sort1")
sort_order = BootstrapSelect("sort_order")
show_breaks = BootstrapSelect("sort_group")
sort_by_2 = BootstrapSelect("chosen_sort2")
row_limit = BootstrapSelect("row_limit")
@View.nested
class charts(Tab): # noqa
chart_type = BootstrapSelect("chosen_graph")
chart_mode = BootstrapSelect("chart_mode")
values_to_show = BootstrapSelect("chosen_count")
sum_other_values = Checkbox("chosen_other")
@View.nested
class timeline(Tab): # noqa
based_on = BootstrapSelect("chosen_tl")
position = BootstrapSelect("chosen_position")
cancel_button = Button("Cancel")
class NewCustomReportView(CustomReportFormCommon):
add_button = Button("Add")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.reports.is_opened and
self.report_title.text == "Adding a new Report" and
self.reports.tree.currently_selected == ["All Reports"]
)
class EditCustomReportView(CustomReportFormCommon):
save_button = Button("Save")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.reports.is_opened and
self.reports.tree.currently_selected == [
"All Reports",
"My Company (All EVM Groups)",
"Custom",
self.context["object"].menu_name
] and
self.report_title.text == 'Editing Report "{}"'.format(self.context["object"].menu_name)
)
class CustomReportDetailsView(CloudIntelReportsView):
title = Text("#explorer_title_text")
reload_button = Button(title="Reload current display")
paginator = PaginationPane()
@View.nested
class report_info(Tab): # noqa
TAB_NAME = "Report Info"
queue_button = Button("Queue")
@View.nested
class saved_reports(Tab): # noqa
TAB_NAME = "Saved Reports"
table = Table(".//div[@id='records_div']/table")
paginator = PaginationPane()
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.reports.is_opened and
self.report_info.is_active() and
self.reports.tree.currently_selected == [
"All Reports",
"My Company (All EVM Groups)",
"Custom",
self.context["object"].menu_name
] and
self.title.text == 'Report "{}"'.format(self.context["object"].menu_name)
)
class AllReportsView(CloudIntelReportsView):
title = Text("#explorer_title_text")
reports_table = VanillaTable(".//div[@id='report_list_div']/table")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.reports.is_opened and
self.reports.tree.currently_selected == ["All Reports"] and
self.title.text == "All Reports" and
self.reports_table.is_displayed
)
class AllCustomReportsView(CloudIntelReportsView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.reports.is_opened and
self.reports.tree.currently_selected == [
"All Reports",
"My Company (All EVM Groups)",
"Custom"
] and
self.title.text == "Custom Reports"
)
class CustomReport(Updateable, Navigatable):
_default_dict = {
"menu_name": None,
"title": None,
"base_report_on": None,
"report_fields": None,
"cancel_after": None,
"consolidation": None,
"formatting": None,
"styling": None,
"filter": None,
"summary": None,
"charts": None,
"timeline": None
}
def __init__(self, appliance=None, **values):
Navigatable.__init__(self, appliance=appliance)
# We will override the original dict
self.__dict__ = dict(self._default_dict)
self.__dict__.update(values)
# We need to pass the knowledge whether it is a candu report
try:
self.is_candu
except AttributeError:
self.is_candu = False
def create(self, cancel=False):
view = navigate_to(self, "Add")
view.fill(self.__dict__)
view.add_button.click()
view = self.create_view(AllReportsView)
assert view.is_displayed
view.flash.assert_no_error()
view.flash.assert_message('Report "{}" was added'.format(self.menu_name))
def update(self, updates):
view = navigate_to(self, "Edit")
changed = view.fill(updates)
if changed:
view.save_button.click()
else:
view.cancel_button.click()
for attr, value in updates.items():
setattr(self, attr, value)
view = self.create_view(CustomReportDetailsView)
assert view.is_displayed
view.flash.assert_no_error()
if changed:
view.flash.assert_message(
'Report "{}" was saved'.format(self.menu_name))
else:
view.flash.assert_message(
'Edit of Report "{}" was cancelled by the user'.format(self.menu_name))
def delete(self, cancel=False):
view = navigate_to(self, "Details")
node = view.reports.tree.expand_path("All Reports", "My Company (All EVM Groups)", "Custom")
custom_reports_number = len(view.reports.tree.child_items(node))
view.configuration.item_select("Delete this Report from the Database",
handle_alert=not cancel)
if cancel:
assert view.is_displayed
view.flash.assert_no_error()
else:
            # This check is needed because after deleting the last custom report
            # the whole "My Company (All EVM Groups)" branch in the tree will be removed.
if custom_reports_number > 1:
view = self.create_view(AllCustomReportsView)
assert view.is_displayed
view.flash.assert_no_error()
view.flash.assert_message(
'Report "{}": Delete successful'.format(self.menu_name))
def get_saved_reports(self):
view = navigate_to(self, "Details")
results = []
try:
for _ in view.saved_reports.paginator.pages():
for row in view.saved_reports.table.rows():
results.append(
CustomSavedReport(self, row.run_at.text.encode("utf-8"),
row.queued_at.text.encode("utf-8"), self.is_candu)
)
except NoSuchElementException:
pass
return results
def queue(self, wait_for_finish=False):
view = navigate_to(self, "Details")
view.report_info.queue_button.click()
view.flash.assert_no_error()
if wait_for_finish:
# Get the queued_at value to always target the correct row
queued_at = view.saved_reports.table[0]["Queued At"].text
def _get_state():
row = view.saved_reports.table.row(queued_at=queued_at)
status = row.status.text.strip().lower()
assert status != "error"
return status == "complete"
wait_for(
_get_state,
delay=1,
message="wait for report generation finished",
fail_func=view.reload_button.click,
num_sec=300,
)
class CustomSavedReportDetailsView(CloudIntelReportsView):
title = Text("#explorer_title_text")
table = VanillaTable(".//div[@id='report_html_div']/table")
paginator = PaginationPane()
@ParametrizedView.nested
class download(ParametrizedView): # noqa
PARAMETERS = ("format", )
ALL_LINKS = ".//a[starts-with(@name, 'download_choice__render_report_')]"
download_button = Button(title="Download")
link = Text(ParametrizedLocator(".//a[normalize-space()={format|quote}]"))
def __init__(self, *args, **kwargs):
ParametrizedView.__init__(self, *args, **kwargs)
self.download_button.click()
self.link.click()
@classmethod
def all(cls, browser):
return [(browser.text(e), ) for e in browser.elements(cls.ALL_LINKS)]
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.reports.is_opened and
self.reports.tree.currently_selected == [
"All Reports",
"My Company (All EVM Groups)",
"Custom",
self.context["object"].report.menu_name,
self.context["object"].datetime_in_tree
] and
self.title.text == 'Saved Report "{} - {}"'.format(
self.context["object"].report.title,
self.context["object"].queued_datetime_in_title
)
)
class CustomSavedReport(Updateable, Pretty, Navigatable):
"""Custom Saved Report. Enables us to retrieve data from the table.
Args:
report: Report that we have data from.
run_datetime: Datetime of "Run At" of the report. That's what :py:func:`queue` returns.
queued_datetime: Datetime of "Queued At" of the report.
candu: If it is a C&U report, in that case it uses a different table.
"""
pretty_attrs = ["report", "run_datetime", "queued_datetime"]
def __init__(self, report, run_datetime, queued_datetime, candu=False, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.report = report
self.run_datetime = run_datetime
self.queued_datetime_in_title = parsetime.from_american_with_utc(
queued_datetime).to_saved_report_title_format()
self.datetime_in_tree = parsetime.from_american_with_utc(
self.run_datetime).to_iso_with_utc()
self.candu = candu
@cached_property
def data(self):
"""Retrieves data from the saved report.
Returns: :py:class:`SavedReportData`.
"""
view = navigate_to(self, "Details")
view.paginator.set_items_per_page(1000)
try:
headers = tuple([hdr.encode("utf-8") for hdr in view.table.headers])
body = []
for _ in view.paginator.pages():
for row in view.table.rows():
if not all([c[1].is_displayed for c in row]):
# This is a temporary workaround for cases we have row span
                    # greater than 1 column (e.g. in case of "Totals: ddd" column).
# TODO: Support this functionality in widgetastic. Issue:
# https://github.com/RedHatQE/widgetastic.core/issues/26
continue
row_data = tuple([row[header].text.encode("utf-8") for header in headers])
body.append(row_data)
except NoSuchElementException:
# No data found
return SavedReportData([], [])
else:
return SavedReportData(headers, body)
def download(self, extension):
view = navigate_to(self, "Details")
extensions_mapping = {"txt": "Text", "csv": "CSV", "pdf": "PDF"}
try:
view.download("Download as {}".format(extensions_mapping[extension]))
except NoSuchElementException:
raise ValueError("Unknown extention. check the extentions_mapping")
class SavedReportData(Pretty):
"""This class stores data retrieved from saved report.
Args:
headers: Tuple with header columns.
body: List of tuples with body rows.
"""
pretty_attrs = ["headers", "body"]
def __init__(self, headers, body):
self.headers = headers
self.body = body
@property
def rows(self):
for row in self.body:
yield dict(zip(self.headers, row))
def find_row(self, column, value):
if column not in self.headers:
return None
for row in self.rows:
if row[column] == value:
return row
def find_cell(self, column, value, cell):
try:
return self.find_row(column, value)[cell]
except TypeError:
return None
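# Minimal usage sketch for SavedReportData (hypothetical values, not pulled
# from a real appliance): find_row and find_cell are plain linear lookups.
def _saved_report_data_example():
    data = SavedReportData(
        headers=("Name", "Status"),
        body=[("vm-1", "on"), ("vm-2", "off")],
    )
    assert data.find_row("Name", "vm-2") == {"Name": "vm-2", "Status": "off"}
    assert data.find_cell("Name", "vm-1", "Status") == "on"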
class CannedReportView(CustomReportDetailsView):
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.reports.is_opened and
self.report_info.is_active() and
self.reports.tree.currently_selected == (["All Reports"] +
self.context["object"].path) and
self.title.text == 'Report "{}"'.format(self.context["object"].path[-1])
)
class CannedSavedReportView(CustomSavedReportDetailsView):
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.reports.is_opened and
self.reports.tree.currently_selected == (
["All Reports"] + self.context["object"].path
) and
self.title.text == 'Saved Report "{} - {}"'.format(
self.context["object"].path[-1],
self.context["object"].queued_datetime_in_title
)
)
class CannedSavedReport(CustomSavedReport, Navigatable):
"""As we cannot create or edit canned reports, we don't know their titles and so, so we
need to change the navigation a little bit for it to work correctly.
Args:
path_to_report: Iterable with path to report.
datetime: Datetime of "Run At" of the report. That's what :py:func:`queue_canned_report`
returns.
"""
def __init__(self, path_to_report, run_datetime, queued_datetime, candu=False, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.path = path_to_report
self.datetime = run_datetime
self.candu = candu
self.queued_datetime_in_title = parsetime.from_american_with_utc(
queued_datetime).to_saved_report_title_format()
self.datetime_in_tree = parsetime.from_american_with_utc(self.datetime).to_iso_with_utc()
def navigate(self):
navigate_to(self, "Details")
@classmethod
def new(cls, path):
return cls(path, *cls.queue_canned_report(path))
@classmethod
def queue_canned_report(cls, path):
"""Queue report from selection of pre-prepared reports.
Args:
            path: Path in tree after All Reports
Returns: Value of Run At in the table so the run can be then checked.
"""
cls.path = path
view = navigate_to(cls, "Info")
assert view.is_displayed
view.report_info.queue_button.click()
view.flash.assert_no_error()
view.flash.assert_message("Report has been successfully queued to run")
queued_at = view.saved_reports.table[0]["Queued At"].text
def _get_state():
row = view.saved_reports.table.row(queued_at=queued_at)
status = row.status.text.strip().lower()
assert status != "error"
return status == "complete"
wait_for(
_get_state,
delay=1,
message="wait for report generation finished",
fail_func=view.reload_button.click,
num_sec=300,
)
first_row = view.saved_reports.table[0]
return first_row.run_at.text, first_row.queued_at.text
def get_saved_canned_reports(self, *path):
view = navigate_to(self, "Info")
results = []
try:
for _ in view.saved_reports.paginator.pages():
for row in view.saved_reports.table.rows():
if not all([c[1].is_displayed for c in row]):
# This is a temporary workaround for cases we have row span
                        # greater than 1 column (e.g. in case of "Totals: ddd" column).
# TODO: Support this functionality in widgetastic. Issue:
# https://github.com/RedHatQE/widgetastic.core/issues/26
continue
results.append(
CannedSavedReport(
path,
row.run_at.text.encode("utf-8"),
row.queued_at.text.encode("utf-8")
)
)
except NoSuchElementException:
pass
return results
def delete(self, cancel=False):
view = navigate_to(self, "Info")
cell = view.saved_reports.table.row(run_at=self.datetime)[0]
cell.check()
view.configuration.item_select(
"Delete this Saved Report from the Database",
handle_alert=not cancel
)
if cancel:
assert view.is_displayed
view.flash.assert_no_error()
else:
view.flash.assert_no_error()
# TODO Doesn't work due to this BZ https://bugzilla.redhat.com/show_bug.cgi?id=1489387
# view.flash.assert_message("Successfully deleted Saved Report from the CFME Database")
@property
def exists(self):
try:
navigate_to(self, 'Info')
return True
except CandidateNotFound:
return False
def delete_if_exists(self):
if self.exists:
self.delete()
@navigator.register(CustomReport, "Add")
class CustomReportNew(CFMENavigateStep):
VIEW = NewCustomReportView
prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports")
def step(self):
self.prerequisite_view.reports.tree.click_path("All Reports")
self.prerequisite_view.configuration.item_select("Add a new Report")
@navigator.register(CustomReport, "Edit")
class CustomReportEdit(CFMENavigateStep):
VIEW = EditCustomReportView
prerequisite = NavigateToSibling("Details")
def step(self):
self.prerequisite_view.configuration.item_select("Edit this Report")
@navigator.register(CustomReport, "Details")
class CustomReportDetails(CFMENavigateStep):
VIEW = CustomReportDetailsView
prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports")
def step(self):
self.prerequisite_view.reports.tree.click_path(
"All Reports",
"My Company (All EVM Groups)",
"Custom",
self.obj.menu_name
)
self.view.report_info.select()
@navigator.register(CustomSavedReport, "Details")
class CustomSavedReportDetails(CFMENavigateStep):
VIEW = CustomSavedReportDetailsView
prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports")
def step(self):
self.prerequisite_view.reports.tree.click_path(
"All Reports",
"My Company (All EVM Groups)",
"Custom",
self.obj.report.menu_name,
self.obj.datetime_in_tree
)
@navigator.register(CannedSavedReport, "Details")
class CannedSavedReportDetails(CFMENavigateStep):
VIEW = CannedSavedReportView
prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports")
def step(self):
path = self.obj.path + [self.obj.datetime_in_tree]
self.prerequisite_view.reports.tree.click_path("All Reports", *path)
@navigator.register(CannedSavedReport, "Info")
class CannedReportInfo(CFMENavigateStep):
VIEW = CannedReportView
prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports")
def step(self):
self.prerequisite_view.reports.tree.click_path("All Reports", *self.obj.path)
@navigator.register(CustomReport, "All")
class CustomReportAll(CFMENavigateStep):
VIEW = AllReportsView
prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports")
def step(self):
self.prerequisite_view.reports.tree.click_path("All Reports")
| okolisny/integration_tests | cfme/intelligence/reports/reports.py | Python | gpl-2.0 | 22,519 |
#! /usr/bin/env python3
# Script for comparing the md5sum of a posix file with the md5sums of a multi chunk swift object
#
# swhashcomp dirkpetersen / Feb 2015
#
import swiftclient, sys, os, argparse, functools, hashlib, json
class KeyboardInterruptError(Exception): pass
def main():
c=create_sw_conn()
md5all = hashlib.md5()
print (" comparing swift object %s/%s with %s..." % (args.container, args.obj, args.locfile))
#headers, objects = c.get_container(args.container,prefix=args.prefix,full_listing=True)
headers = c.head_object(args.container, args.obj)
if 'x-static-large-object' in headers:
#print(headers['x-static-large-object'])
headers, body = c.get_object(args.container, args.obj, query_string='multipart-manifest=get')
if not os.path.isfile(args.locfile):
if 'md5sum' in headers:
if args.locfile.strip() == headers['md5sum']:
print(' md5sum:%s' % headers['md5sum'])
is_valid = True
else:
is_valid = False
else:
is_valid = check_segments(body,args.locfile.strip(),c)
else:
with open(args.locfile, 'rb') as f:
is_valid = check_manifest(body, f, md5all)
else:
is_valid=False
if os.path.isfile(args.locfile):
with open(args.locfile, 'rb') as f:
            hasher = hashlib.md5(f.read()) # needed for compatibility between python3 and python2
if hasher.hexdigest() == headers['etag']:
print(' md5sum:%s' % headers['etag'])
is_valid = True
else:
if args.locfile.strip() == headers['etag']:
print(' md5sum:%s' % headers['etag'])
is_valid = True
if is_valid:
print ("object %s/%s and '%s' are identical!" % (args.container, args.obj, args.locfile))
return 0
else:
print ("*** WARNING ***: object %s/%s and '%s' are different!" % (args.container, args.obj, args.locfile))
return 1
def check_manifest(manifest, body, md5all):
"""
check if a body is the same object described by the manifest
:param manifest: the raw body of the manifest from swift
:param body: a file like object to check against the manfiest
"""
manifest = json.loads(manifest.decode())
for segment in manifest:
print (" testing chunk %s" % segment['name'])
chunk = body.read(segment['bytes'])
hasher = hashlib.md5(chunk)
md5all.update(chunk)
if hasher.hexdigest() != segment['hash']:
print (' %s != %s' % (hasher.hexdigest(), segment['hash']))
return False
print(" md5sum:%s" % md5all.hexdigest())
return True
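# For reference (illustrative values): the manifest fetched with
# 'multipart-manifest=get' decodes to a list of segment dicts; the keys used
# in check_manifest/check_segments are 'name', 'bytes' and 'hash', e.g.
#   [{"name": "/segments/file.dat/000", "bytes": 1048576,
#     "hash": "9e107d9d372bb6826bd81d3542a419d6"}]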
def check_segments(manifest,md5sum,c):
manifest = json.loads(manifest.decode())
digest = hashlib.md5()
for segment in manifest:
print (" please wait ... testing chunk %s" % segment['name'])
segment_container, segment_obj = parseSwiftUrl(segment['name'])
attributes, content = c.get_object(segment_container, segment_obj)
digest.update(content)
if digest.hexdigest() != md5sum:
print (' %s != %s' % (digest.hexdigest(), md5sum))
return False
return True
def create_sw_conn():
if args.authtoken and args.storageurl:
return swiftclient.Connection(preauthtoken=args.authtoken, preauthurl=args.storageurl)
else:
authtoken=os.environ.get("OS_AUTH_TOKEN")
storageurl=os.environ.get("OS_STORAGE_URL")
if authtoken and storageurl:
return swiftclient.Connection(preauthtoken=authtoken, preauthurl=storageurl)
else:
swift_auth=os.environ.get("ST_AUTH")
swift_user=os.environ.get("ST_USER")
swift_key=os.environ.get("ST_KEY")
if swift_auth and swift_user and swift_key:
return swiftclient.Connection(authurl=swift_auth,user=swift_user,key=swift_key)
def parseSwiftUrl(path):
    path = path.lstrip('/')
    components = path.split('/')
    container = components[0]
    obj = '/'.join(components[1:])
return container, obj
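# e.g. parseSwiftUrl('/backup/2015/data.tar') returns ('backup', '2015/data.tar')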
def parse_arguments():
"""
Gather command-line arguments.
"""
parser = argparse.ArgumentParser(prog='swhashcomp',
        description='compare the md5sum of a local file or hash with the hash '
                    'of a swift object folder after a data migration')
parser.add_argument( '--locfile', '-f', dest='locfile',
action='store',
help='a local or networked file to compare',
default='' )
parser.add_argument( '--container', '-c', dest='container',
action='store',
help='a container in the swift object store',
default='' )
parser.add_argument( '--obj', '-o', dest='obj',
action='store',
help='an object in a swift container',
default=None)
parser.add_argument( '--authtoken', '-a', dest='authtoken',
action='store',
help='a swift authentication token (required when storage-url is used)',
default=None)
parser.add_argument( '--storage-url', '-s', dest='storageurl',
action='store',
help='a swift storage url (required when authtoken is used)',
default=None)
args = parser.parse_args()
if not args.locfile:
parser.error('required option --locfile not given !')
if not args.container:
parser.error('required option --container not given !')
if not args.obj:
parser.error('required option --obj not given !')
return args
if __name__ == '__main__':
# Parse command-line arguments
args = parse_arguments()
sys.exit(main())
| trel/swift-commander | bin/swhashcomp.py | Python | apache-2.0 | 5,839 |
import os
import sys
import numpy as np
import re
from astropy.table import Table
from astropy.io import fits
## Import some helper functions, you can see their definitions by uncomenting the bash shell command
from desispec.io.util import parse_cameras, difference_camwords, validate_badamps
from desispec.workflow.exptable import summarize_exposure, default_obstypes_for_exptable, \
instantiate_exposure_table, get_exposure_table_column_defs, \
get_exposure_table_path, get_exposure_table_name, \
night_to_month
from desispec.workflow.utils import define_variable_from_environment, listpath, pathjoin, get_printable_banner
from desispec.workflow.tableio import write_table
def create_exposure_tables(nights=None, night_range=None, path_to_data=None, exp_table_path=None, obstypes=None, \
exp_filetype='csv', cameras=None, bad_cameras=None, badamps=None,
verbose=False, no_specprod=False, overwrite_files=False):
"""
    Generates exposure tables for the nights requested, based on the raw data found on disk.
Args:
nights: str, int, or comma separated list. The night(s) to generate procesing tables for.
night_range: str, comma separated pair of nights in form YYYYMMDD,YYYYMMDD for first_night,last_night
specifying the beginning and end of a range of nights to be generated.
last_night should be inclusive.
path_to_data: str. The path to the raw data and request*.json and manifest* files.
exp_table_path: str. Full path to where to exposure tables should be saved, WITHOUT the monthly directory included.
obstypes: str or comma separated list of strings. The exposure OBSTYPE's that you want to include in the exposure table.
exp_filetype: str. The file extension (without the '.') of the exposure tables.
verbose: boolean. Whether to give verbose output information or not. True prints more information.
no_specprod: boolean. Create exposure table in repository location rather than the SPECPROD location
overwrite_files: boolean. Whether to overwrite processing tables if they exist. True overwrites.
cameras: str. Explicitly define the cameras for which you want to reduce the data. Should be a comma separated
list. Only numbers assumes you want to reduce r, b, and z for that camera. Otherwise specify
separately [brz][0-9].
bad_cameras: str. Explicitly define the cameras that you don't want to reduce the data. Should be a comma
separated list. Only numbers assumes you want to reduce r, b, and z for that camera.
Otherwise specify separately [brz][0-9].
badamps: str. Define amplifiers that you know to be bad and should not be processed. Should be a list separated
by comma or semicolon. Saved list will converted to semicolons. Each entry should be of the
form {camera}{spectrograph}{amp}, i.e. [brz][0-9][A-D].
Returns: Nothing
"""
if nights is None and night_range is None:
raise ValueError("Must specify either nights or night_range")
elif nights is not None and night_range is not None:
raise ValueError("Must only specify either nights or night_range, not both")
if nights is None or nights=='all':
nights = list()
for n in listpath(os.getenv('DESI_SPECTRO_DATA')):
#- nights are 20YYMMDD
            if re.match(r'^20\d{6}$', n):
nights.append(n)
else:
nights = [ int(val.strip()) for val in nights.split(",") ]
nights = np.array(nights)
if night_range is not None:
if ',' not in night_range:
raise ValueError("night_range must be a comma separated pair of nights in form YYYYMMDD,YYYYMMDD")
nightpair = night_range.split(',')
if len(nightpair) != 2 or not nightpair[0].isnumeric() or not nightpair[1].isnumeric():
raise ValueError("night_range must be a comma separated pair of nights in form YYYYMMDD,YYYYMMDD")
first_night, last_night = nightpair
nights = nights[np.where(int(first_night)<=nights.astype(int))[0]]
nights = nights[np.where(int(last_night)>=nights.astype(int))[0]]
if obstypes is not None:
obstypes = [ val.strip('\t ') for val in obstypes.split(",") ]
else:
obstypes = default_obstypes_for_exptable()
print("Nights: ", nights)
print("Obs types: ", obstypes)
## Deal with cameras and amps, if given
camword = cameras
if camword != '':
camword = parse_cameras(camword)
badcamword = bad_cameras
if badcamword != '':
badcamword = parse_cameras(badcamword)
## Warn people if changing camword
finalcamword = 'a0123456789'
if camword is not None and badcamword is None:
badcamword = difference_camwords(finalcamword,camword)
finalcamword = camword
elif camword is not None and badcamword is not None:
finalcamword = difference_camwords(camword, badcamword)
badcamword = difference_camwords('a0123456789', finalcamword)
elif badcamword is not None:
finalcamword = difference_camwords(finalcamword,badcamword)
else:
badcamword = ''
if badcamword != '':
## Inform the user what will be done with it.
print(f"Modifying camword of data to be processed with badcamword: {badcamword}. " + \
f"Camword to be processed: {finalcamword}")
## Make sure badamps is formatted properly
if badamps is None:
badamps = ''
else:
badamps = validate_badamps(badamps)
## Define where to find the data
if path_to_data is None:
path_to_data = define_variable_from_environment(env_name='DESI_SPECTRO_DATA',
var_descr="The data path")
## Define where to save the data
usespecprod = (not no_specprod)
if exp_table_path is None:
exp_table_path = get_exposure_table_path(night=None,usespecprod=usespecprod)
## Make the save directory exists
os.makedirs(exp_table_path, exist_ok=True)
## Loop over nights
colnames, coltypes, coldefaults = get_exposure_table_column_defs(return_default_values=True)
nights_with_data = listpath(path_to_data)
for night in nights:
if str(night) not in nights_with_data:
print(f'Night: {night} not in data directory {path_to_data}. Skipping')
continue
print(get_printable_banner(input_str=night))
## Create an astropy exposure table for the night
nightly_tab = instantiate_exposure_table()
## Loop through all exposures on disk
for exp in listpath(path_to_data,str(night)):
rowdict = summarize_exposure(path_to_data, night=night, exp=exp, obstypes=obstypes, \
colnames=colnames, coldefaults=coldefaults, verbosely=verbose)
if rowdict is not None and type(rowdict) is not str:
rowdict['BADCAMWORD'] = badcamword
rowdict['BADAMPS'] = badamps
## Add the dictionary of column values as a new row
nightly_tab.add_row(rowdict)
if verbose:
print("Rowdict:\n",rowdict,"\n\n")
if len(nightly_tab) > 0:
month = night_to_month(night)
exptab_path = pathjoin(exp_table_path,month)
os.makedirs(exptab_path,exist_ok=True)
exptab_name = get_exposure_table_name(night, extension=exp_filetype)
exptab_name = pathjoin(exptab_path, exptab_name)
write_table(nightly_tab, exptab_name, overwrite=overwrite_files)
else:
print('No rows to write to a file.')
print("Exposure table generations complete")
## Flush the outputs
sys.stdout.flush()
sys.stderr.flush()
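## Illustrative invocation (the nights and obstypes are made-up examples):
##   create_exposure_tables(nights='20200314,20200315',
##                          obstypes='science,arc',
##                          exp_filetype='csv')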
| desihub/desispec | py/desispec/scripts/exposuretable.py | Python | bsd-3-clause | 8,118 |
import inspect
import logging
import os
from unittest.mock import Mock
import pytest
from ert_shared.plugins import workflow_config
@pytest.mark.parametrize(
"workflows, expected",
[
([], {}),
([("a", "/a/path")], {"a": "/a/path"}),
(
[("a", "/a/path"), ("b", "/another/path")],
{"a": "/a/path", "b": "/another/path"},
),
],
)
def test_workflow_configs(workflows, expected, monkeypatch):
def get_mock_config(name, config_path):
workflow_mock = Mock(spec=workflow_config.WorkflowConfig)
workflow_mock.name = name
workflow_mock.config_path = config_path
return workflow_mock
config = workflow_config.WorkflowConfigs()
workflows = [get_mock_config(name, config_path) for name, config_path in workflows]
monkeypatch.setattr(config, "_workflows", workflows)
assert config.get_workflows() == expected
def test_workflow_config_duplicate_log_message(caplog, monkeypatch):
def get_mock_config():
workflow_mock = Mock(spec=workflow_config.WorkflowConfig)
workflow_mock.name = "same_name"
workflow_mock.config_path = "/duplicate/path"
workflow_mock.function_dir = "func_dir"
return workflow_mock
config = workflow_config.WorkflowConfigs()
# Create duplicate workflows
workflows = [get_mock_config(), get_mock_config()]
monkeypatch.setattr(config, "_workflows", workflows)
with caplog.at_level(logging.INFO):
config.get_workflows()
assert "Duplicate workflow name: same_name, skipping func_dir" in caplog.text
@pytest.mark.parametrize(
"name, expected", [(None, "default_name"), ("some_name", "some_name")]
)
def test_workflow_config_init_name(tmpdir, monkeypatch, name, expected):
mock_func = Mock()
mock_func.__name__ = "default_name"
inspect_mock = Mock(return_value="/some/path")
monkeypatch.setattr(inspect, "getfile", inspect_mock)
workflow = workflow_config.WorkflowConfig(mock_func, tmpdir.strpath, name=name)
assert workflow.name == expected
def test_workflow_config_init_path(tmpdir, monkeypatch):
mock_func = Mock()
mock_func.__name__ = "default_name"
inspect_mock = Mock(return_value="/some/path")
monkeypatch.setattr(inspect, "getfile", inspect_mock)
workflow = workflow_config.WorkflowConfig(mock_func, tmpdir.strpath)
assert workflow.function_dir == "/some/path"
def test_workflow_config_write_workflow_config(tmpdir, monkeypatch):
expected_config = "INTERNAL True\nSCRIPT /some/path"
mock_func = Mock()
mock_func.__name__ = "default_name"
inspect_mock = Mock(return_value="/some/path")
monkeypatch.setattr(inspect, "getfile", inspect_mock)
workflow_config.WorkflowConfig(mock_func, tmpdir.strpath)
tmpdir.chdir()
assert os.path.isfile("DEFAULT_NAME")
with open("DEFAULT_NAME") as fin:
content = fin.read()
assert content == expected_config
| joakim-hove/ert | tests/ert_tests/all/plugins/test_workflow_config.py | Python | gpl-3.0 | 2,978 |
import unittest
from test import test_support
class LongExpText(unittest.TestCase):
def test_longexp(self):
REPS = 65580
l = eval("[" + "2," * REPS + "]")
self.assertEqual(len(l), REPS)
def test_main():
test_support.run_unittest(LongExpText)
if __name__=="__main__":
test_main()
| ktan2020/legacy-automation | win/Lib/test/test_longexp.py | Python | mit | 332 |
# -*- coding: utf-8 -*-
import numpy as np
import numpy.random as np_random
print("## square-root of an array:")
arr = np.arange(10)
print(np.sqrt(arr))
print("## Compare array:")
x = np_random.randn(8)
y = np_random.randn(8)
print(x)
print(y)
print(np.maximum(x, y))
print()
print("## Get fractional and integral part:")
arr = np_random.randn(7) * 5
print(arr)
print(np.modf(arr))
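# np.modf splits each element into (fractional, integral) parts, e.g.
# np.modf([2.5, -1.25]) -> (array([ 0.5 , -0.25]), array([ 2., -1.]))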
| lamontu/data-analysis | numpy/universal_functions.py | Python | gpl-3.0 | 386 |
"""
Django settings for SMes project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1suftunq)3ije0_7m$tu7efz^2gj$*gl2ehqye#frr-a&jif@x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'impianti',
'operatori',
'lavorazioni',
'ordini',
'registrazioni',
'programma',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SMes.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SMes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'it-it'
TIME_ZONE = 'Europe/Rome'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| Byx69/SMes | SMes/settings.py | Python | gpl-2.0 | 3,270 |
import warnings
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.fft._fftlog import fht, ifht, fhtoffset
from scipy.special import poch
def test_fht_agrees_with_fftlog():
# check that fht numerically agrees with the output from Fortran FFTLog,
# the results were generated with the provided `fftlogtest` program,
# after fixing how the k array is generated (divide range by n-1, not n)
# test function, analytical Hankel transform is of the same form
def f(r, mu):
return r**(mu+1)*np.exp(-r**2/2)
r = np.logspace(-4, 4, 16)
dln = np.log(r[1]/r[0])
mu = 0.3
offset = 0.0
bias = 0.0
a = f(r, mu)
# test 1: compute as given
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02,
-0.1949518286432330E-02, +0.3789220182554077E-02,
+0.5093959119952945E-03, +0.2785387803618774E-01,
+0.9944952700848897E-01, +0.4599202164586588E+00,
+0.3157462160881342E+00, -0.8201236844404755E-03,
-0.7834031308271878E-03, +0.3931444945110708E-03,
-0.2697710625194777E-03, +0.3568398050238820E-03,
-0.5554454827797206E-03, +0.8286331026468585E-03]
assert_allclose(ours, theirs)
# test 2: change to optimal offset
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05,
+0.3150140927838524E-03, +0.9149121960963704E-03,
+0.5808089753959363E-02, +0.2548065256377240E-01,
+0.1339477692089897E+00, +0.4821530509479356E+00,
+0.2659899781579785E+00, -0.1116475278448113E-01,
+0.1791441617592385E-02, -0.4181810476548056E-03,
+0.1314963536765343E-03, -0.5422057743066297E-04,
+0.3208681804170443E-04, -0.2696849476008234E-04]
assert_allclose(ours, theirs)
# test 3: positive bias
bias = 0.8
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [-7.3436673558316850E+00, +0.1710271207817100E+00,
+0.1065374386206564E+00, -0.5121739602708132E-01,
+0.2636649319269470E-01, +0.1697209218849693E-01,
+0.1250215614723183E+00, +0.4739583261486729E+00,
+0.2841149874912028E+00, -0.8312764741645729E-02,
+0.1024233505508988E-02, -0.1644902767389120E-03,
+0.3305775476926270E-04, -0.7786993194882709E-05,
+0.1962258449520547E-05, -0.8977895734909250E-06]
assert_allclose(ours, theirs)
# test 4: negative bias
bias = -0.8
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04,
+0.2123969254700955E-03, +0.1009558244834628E-02,
+0.5131386375222176E-02, +0.2461678673516286E-01,
+0.1235812845384476E+00, +0.4719570096404403E+00,
+0.2893487490631317E+00, -0.1686570611318716E-01,
+0.2231398155172505E-01, -0.1480742256379873E-01,
+0.1692387813500801E+00, +0.3097490354365797E+00,
+2.7593607182401860E+00, 10.5251075070045800E+00]
assert_allclose(ours, theirs)
@pytest.mark.parametrize('optimal', [True, False])
@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0])
@pytest.mark.parametrize('bias', [0, 0.1, -0.1])
@pytest.mark.parametrize('n', [64, 63])
def test_fht_identity(n, bias, offset, optimal):
rng = np.random.RandomState(3491349965)
a = rng.standard_normal(n)
dln = rng.uniform(-1, 1)
mu = rng.uniform(-2, 2)
if optimal:
offset = fhtoffset(dln, mu, initial=offset, bias=bias)
A = fht(a, dln, mu, offset=offset, bias=bias)
a_ = ifht(A, dln, mu, offset=offset, bias=bias)
assert_allclose(a, a_)
def test_fht_special_cases():
rng = np.random.RandomState(3491349965)
a = rng.standard_normal(64)
dln = rng.uniform(-1, 1)
# let xp = (mu+1+q)/2, xm = (mu+1-q)/2, M = {0, -1, -2, ...}
# case 1: xp in M, xm in M => well-defined transform
mu, bias = -4.0, 1.0
with warnings.catch_warnings(record=True) as record:
fht(a, dln, mu, bias=bias)
assert not record, 'fht warned about a well-defined transform'
# case 2: xp not in M, xm in M => well-defined transform
mu, bias = -2.5, 0.5
with warnings.catch_warnings(record=True) as record:
fht(a, dln, mu, bias=bias)
assert not record, 'fht warned about a well-defined transform'
# case 3: xp in M, xm not in M => singular transform
mu, bias = -3.5, 0.5
with pytest.warns(Warning) as record:
fht(a, dln, mu, bias=bias)
assert record, 'fht did not warn about a singular transform'
# case 4: xp not in M, xm in M => singular inverse transform
mu, bias = -2.5, 0.5
with pytest.warns(Warning) as record:
ifht(a, dln, mu, bias=bias)
assert record, 'ifht did not warn about a singular transform'
@pytest.mark.parametrize('n', [64, 63])
def test_fht_exact(n):
rng = np.random.RandomState(3491349965)
# for a(r) a power law r^\gamma, the fast Hankel transform produces the
# exact continuous Hankel transform if biased with q = \gamma
mu = rng.uniform(0, 3)
# convergence of HT: -1-mu < gamma < 1/2
gamma = rng.uniform(-1-mu, 1/2)
r = np.logspace(-2, 2, n)
a = r**gamma
dln = np.log(r[1]/r[0])
offset = fhtoffset(dln, mu, initial=0.0, bias=gamma)
A = fht(a, dln, mu, offset=offset, bias=gamma)
k = np.exp(offset)/r[::-1]
# analytical result
At = (2/k)**gamma * poch((mu+1-gamma)/2, gamma)
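    # Note on the closed form (added for clarity): with the FFTLog convention
    # A(k) = int_0^inf a(r) J_mu(k r) k dr and a(r) = r**gamma, substituting
    # x = k*r gives A(k) = k**-gamma * int_0^inf x**gamma J_mu(x) dx
    # = (2/k)**gamma * Gamma((mu+1+gamma)/2) / Gamma((mu+1-gamma)/2),
    # and poch((mu+1-gamma)/2, gamma) is exactly that ratio of Gammas.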
assert_allclose(A, At)
| ilayn/scipy | scipy/fft/tests/test_fftlog.py | Python | bsd-3-clause | 5,819 |
# Copyright (c) 2016 Dmitry Dolzhenko
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#-------------------------------------------------------------------------------
"""Packages manager
"""
#-------------------------------------------------------------------------------
__author__ = "Dmitry Dolzhenko"
__email__ = "d.dolzhenko@gmail.com"
#-------------------------------------------------------------------------------
import os, uuid, time
from jacis import core, utils, sync
log = core.get_logger(__name__)
#-------------------------------------------------------------------------------
import yaml
def __include(loader, node):
filename = loader.construct_scalar(node) + '.yml'
with open(filename, 'r') as f:
return yaml.load(f)
yaml.add_constructor('!include', __include)
#-------------------------------------------------------------------------------
def get_scope(db, upper_scope_variables, reserved_words):
if db is None:
db = {}
scope_variables = {k:v for k,v in db.items() if k not in reserved_words }
local_variables = upper_scope_variables.copy()
local_variables.update(scope_variables)
return local_variables
def walk_packages(names, db, upper_scope_variables, reserved_words):
if db is None:
db = {}
package_scope = get_scope(db, upper_scope_variables, reserved_words)
mask = '{}.{}'
for key, pdb in db.get('packages', {}).items():
yield from walk_packages(names+[key], pdb, package_scope, reserved_words)
for ver, vdb in db.get('versions', {}).items():
version_scope = get_scope(vdb, package_scope, reserved_words)
name = '{}=={}'.format('.'.join(names), ver)
yield (name, Package(names, ver, vdb, version_scope))
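# Sketch of the list.yml layout walk_packages() expects (all names and values
# below are hypothetical): 'packages' keys nest arbitrarily deep and a
# 'versions' key terminates a branch, so this layout yields the package id
# "boost.headers==1.60":
#   packages:
#     boost:
#       packages:
#         headers:
#           versions:
#             "1.60":
#               scripts:
#                 store: sync some_url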
class Package:
def __format_param(self, param):
if isinstance(param, list):
return [self.__format_param(x) for x in param]
elif isinstance(param, dict):
return { k : self.__format_param(v) for k, v in param.items()}
elif isinstance(param, str):
return param.format(**self._scope)
return param
def __init__(self, names, version, db, upper_scope_variables):
self._name = names
self._version = version
self._pid = '{}=={}'.format('.'.join(names), version)
self._scope = upper_scope_variables.copy()
self._scope['self'] = self
self._db = db
# format prams with dictionary recursively
self._scope = self.__format_param(self._scope)
@property
def version(self):
return self._version
@property
def version_underscored(self):
return self.version.replace('.', '_')
@property
def pid(self):
return self._pid
def __act(self, action, params, **kvargs):
params = list(map(lambda x: self._scope.get(x), params))
actions = {
'sync': sync.store
}
func = actions.get(action)
if not func:
raise Exception('unknown command: {}'.format(action))
func(*params, **kvargs)
def __execute(self, what, **kvargs):
script = self._scope['scripts'][what]
if isinstance(script, str):
script = [script]
for action_str in script:
action = action_str.split(' ')
self.__act(action[0], action[1:], **kvargs)
def store(self, path):
utils.mktree(path)
self.__execute('store', path=path)
return dict(name=self.pid, path=path, hash=utils.checksum(path))
class RepoPackageList:
def __init__(self, path):
base = 'list.yml'
self._reserved = set(['packages', 'versions'])
self._fullpath = os.path.join(path, base)
with utils.work_dir(path):
with open(base) as f:
db = yaml.load(f)
self._packages = dict(walk_packages([], db, {}, self._reserved))
def __contains__(self, pid):
return pid in self._packages
def __getitem__(self, pid):
return self._packages[pid]
def __str__(self):
return '\n'.join(self._packages.keys())
def names(self):
yield from self._packages.keys()
class LocalPackageList:
def __init__(self, path):
self._installed_dir = path
self._fullpath = os.path.join(path, 'list.yml')
self.reload()
def __file_modtime(self):
return time.ctime(os.path.getmtime(self._fullpath))
def __file_modified(self):
self._db_time < self.__file_modtime()
def reload(self):
with open(self._fullpath) as f:
db = yaml.load(f)
self._db = db if db else {}
self._db_time = self.__file_modtime()
def dump(self):
with open(self._fullpath, 'w') as f:
yaml.dump(self._db, f, indent=4)
self._db_time = self.__file_modtime()
def __contains__(self, pid):
return pid in self._db
def __getitem__(self, pid):
return self._db[pid]
def __str__(self):
return '\n'.join(self._db.keys())
def __store_dir(self, pid):
# store_dir = str(uuid.uuid5(uuid.NAMESPACE_DNS, package.pid))
return pid
def names(self):
yield from self._db.keys()
def install(self, package):
log.info('installing package: {} ...'.format(package.pid))
with utils.work_dir(self._installed_dir):
store_dir = self.__store_dir(package.pid)
info = package.store(store_dir)
log.debug('package {} stored in {}, result = {}'.format(package.pid, store_dir, info))
self._db[package.pid] = info
            self.dump()
log.info('done.')
def remove(self, pid):
        log.info('removing package: {} ...'.format(pid))
with utils.work_dir(self._installed_dir):
store_dir = self.__store_dir(pid)
log.debug('removing from list')
del self._db[pid]
            self.dump()
log.debug('removing dir: {}'.format(os.path.abspath(store_dir)))
utils.rmdir(store_dir)
log.info('done.')
class TestPackage(utils.TestCase):
def test_1(self):
plist = RepoPackageList('d:\\src\\repos\\package_info\\')
print(plist)
| ddolzhenko/jacis | jacis/packages.py | Python | mit | 7,250 |
from traceback import print_exc
from django.core.management.base import BaseCommand
from cthulhubot.models import Buildmaster
class Command(BaseCommand):
help = 'Restart all Buildmaster processes'
args = ""
def handle(self, *fixture_labels, **options):
verbosity = int(options.get('verbosity', 1))
commit = int(options.get('commit', 1))
if verbosity > 1:
print 'Restarting buildmasters...'
for b in Buildmaster.objects.all():
if verbosity > 1:
print 'Handling buildmaster %s for project %s' % (str(b.id), str(b.project.name))
try:
b.stop()
except:
print 'Failed to stop master'
print_exc()
try:
b.start()
except:
print 'Failed to start master'
print_exc()
| centrumholdings/cthulhubot | cthulhubot/management/commands/restart_masters.py | Python | bsd-3-clause | 918 |
from corrobo import _meta
__version__ = _meta.version
__version_info__ = _meta.version_info
| johnwlockwood/corrobo | corrobo/__init__.py | Python | apache-2.0 | 94 |
# #
# Copyright 2013-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
This describes the easyconfig parser
The parser is format version aware
:author: Stijn De Weirdt (Ghent University)
"""
import os
import re
from vsc.utils import fancylogger
from easybuild.framework.easyconfig.format.format import FORMAT_DEFAULT_VERSION
from easybuild.framework.easyconfig.format.format import get_format_version, get_format_version_classes
from easybuild.framework.easyconfig.format.yeb import FormatYeb, is_yeb_format
from easybuild.framework.easyconfig.types import PARAMETER_TYPES, check_type_of_param_value
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import read_file, write_file
# deprecated easyconfig parameters, and their replacements
DEPRECATED_PARAMETERS = {
# <old_param>: (<new_param>, <deprecation_version>),
}
# replaced easyconfig parameters, and their replacements
REPLACED_PARAMETERS = {
'license': 'license_file',
'makeopts': 'buildopts',
'premakeopts': 'prebuildopts',
}
_log = fancylogger.getLogger('easyconfig.parser', fname=False)
def fetch_parameters_from_easyconfig(rawtxt, params):
"""
Fetch (initial) parameter definition from the given easyconfig file contents.
:param rawtxt: contents of the easyconfig file
:param params: list of parameter names to fetch values for
"""
param_values = []
for param in params:
regex = re.compile(r"^\s*%s\s*(=|: )\s*(?P<param>\S.*?)\s*(#.*)?$" % param, re.M)
res = regex.search(rawtxt)
if res:
param_values.append(res.group('param').strip("'\""))
else:
param_values.append(None)
_log.debug("Obtained parameters value for %s: %s" % (params, param_values))
return param_values
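# A minimal usage sketch for the helper above (the easyconfig text is made up):
#   fetch_parameters_from_easyconfig('name = "GCC"\nversion = "4.8.2"\n',
#                                    ['name', 'version'])
#   # -> ['GCC', '4.8.2']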
class EasyConfigParser(object):
"""Read the easyconfig file, return a parsed config object
    Can contain references to multiple versions and toolchains/toolchain versions.
"""
def __init__(self, filename=None, format_version=None, rawcontent=None,
auto_convert_value_types=True):
"""
Initialise the EasyConfigParser class
:param filename: path to easyconfig file to parse (superseded by rawcontent, if specified)
:param format_version: version of easyconfig file format, used to determine how to parse supplied easyconfig
:param rawcontent: raw content of easyconfig file to parse (preferred over easyconfig file supplied via filename)
:param auto_convert_value_types: indicates whether types of easyconfig values should be automatically converted
in case they are wrong
"""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
self.rawcontent = None # the actual unparsed content
self.auto_convert = auto_convert_value_types
self.get_fn = None # read method and args
self.set_fn = None # write method and args
self.format_version = format_version
self._formatter = None
if rawcontent is not None:
self.rawcontent = rawcontent
self._set_formatter(filename)
elif filename is not None:
self._check_filename(filename)
self.process()
else:
raise EasyBuildError("Neither filename nor rawcontent provided to EasyConfigParser")
self._formatter.extract_comments(self.rawcontent)
def process(self, filename=None):
"""Create an instance"""
self._read(filename=filename)
self._set_formatter(filename)
def check_values_types(self, cfg):
"""
Check types of easyconfig parameter values.
:param cfg: dictionary with easyconfig parameter values (result of get_config_dict())
"""
wrong_type_msgs = []
for key in cfg:
type_ok, newval = check_type_of_param_value(key, cfg[key], self.auto_convert)
if not type_ok:
wrong_type_msgs.append("value for '%s' should be of type '%s'" % (key, PARAMETER_TYPES[key].__name__))
elif newval != cfg[key]:
self.log.warning("Value for '%s' easyconfig parameter was converted from %s (type: %s) to %s (type: %s)",
key, cfg[key], type(cfg[key]), newval, type(newval))
cfg[key] = newval
if wrong_type_msgs:
raise EasyBuildError("Type checking of easyconfig parameter values failed: %s", ', '.join(wrong_type_msgs))
else:
self.log.info("Type checking of easyconfig parameter values passed!")
def _check_filename(self, fn):
"""Perform sanity check on the filename, and set mechanism to set the content of the file"""
if os.path.isfile(fn):
self.get_fn = (read_file, (fn,))
self.set_fn = (write_file, (fn, self.rawcontent))
self.log.debug("Process filename %s with get function %s, set function %s" % (fn, self.get_fn, self.set_fn))
if self.get_fn is None:
raise EasyBuildError('Failed to determine get function for filename %s', fn)
if self.set_fn is None:
raise EasyBuildError('Failed to determine set function for filename %s', fn)
def _read(self, filename=None):
"""Read the easyconfig, dump content in self.rawcontent"""
if filename is not None:
self._check_filename(filename)
try:
self.rawcontent = self.get_fn[0](*self.get_fn[1])
except IOError, err:
raise EasyBuildError('Failed to obtain content with %s: %s', self.get_fn, err)
if not isinstance(self.rawcontent, basestring):
msg = 'rawcontent is not basestring: type %s, content %s' % (type(self.rawcontent), self.rawcontent)
raise EasyBuildError("Unexpected result for raw content: %s", msg)
def _det_format_version(self):
"""Extract the format version from the raw content"""
if self.format_version is None:
self.format_version = get_format_version(self.rawcontent)
if self.format_version is None:
self.format_version = FORMAT_DEFAULT_VERSION
self.log.debug('No version found, using default %s' % self.format_version)
def _get_format_version_class(self):
"""Locate the class matching the version"""
if self.format_version is None:
self._det_format_version()
found_classes = get_format_version_classes(version=self.format_version)
if len(found_classes) == 1:
return found_classes[0]
elif not found_classes:
raise EasyBuildError('No format classes found matching version %s', self.format_version)
else:
raise EasyBuildError("More than one format class found matching version %s in %s",
self.format_version, found_classes)
def _set_formatter(self, filename):
"""Obtain instance of the formatter"""
if self._formatter is None:
if is_yeb_format(filename, self.rawcontent):
self._formatter = FormatYeb()
else:
klass = self._get_format_version_class()
self._formatter = klass()
self._formatter.parse(self.rawcontent)
def set_format_text(self):
"""Create the text for the formatter instance"""
# TODO create the data in self.rawcontent
raise NotImplementedError
def write(self, filename=None):
"""Write the easyconfig format instance, using content in self.rawcontent."""
if filename is not None:
self._check_filename(filename)
try:
self.set_fn[0](*self.set_fn[1])
except IOError, err:
raise EasyBuildError("Failed to process content with %s: %s", self.set_fn, err)
def set_specifications(self, specs):
"""Set specifications."""
self._formatter.set_specifications(specs)
def get_config_dict(self, validate=True):
"""Return parsed easyconfig as a dict."""
# allows to bypass the validation step, typically for testing
if validate:
self._formatter.validate()
cfg = self._formatter.get_config_dict()
self.check_values_types(cfg)
return cfg
def dump(self, ecfg, default_values, templ_const, templ_val):
"""Dump easyconfig in format it was parsed from."""
return self._formatter.dump(ecfg, default_values, templ_const, templ_val)
| bartoldeman/easybuild-framework | easybuild/framework/easyconfig/parser.py | Python | gpl-2.0 | 9,527 |
import csv
import cgi
import unicodedata
from collections import defaultdict

# assumed dependencies: the nltk wordnet and stopwords corpora must be installed
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords as _stopwords
stopwords = set(_stopwords.words('english'))

emojis = defaultdict(list)
for r in csv.reader(open('./emoji.tsv'), delimiter='\t'):
emojis[r[1]].append(r[0])
def emoji_syn(word):
if word in emojis:
return emojis[word]
else:
return []
def unicode_len(word):
word = cgi.unescape(word)
return len(unicodedata.normalize('NFC', word))
def wn_syns(word, depth):
lemmas = wn.lemmas(word)
lemnames = list(i.synset().lemmas()[0].name() for i in lemmas)
if depth == 0:
return lemnames
else:
return [j for s in lemnames for j in wn_syns(s, depth-1)]
def shortest_syn(word, depth=0, emojionly=False):
if emojionly: depth=max(depth, 3)
synnames = wn_syns(word, depth)
shorter = set(i for i in synnames if len(i)<len(word))
shorter = sorted(shorter, key=len)
shorter = shorter[:10]
    es = set(i for syn in synnames for i in emoji_syn(syn)) # TODO Add emoji syn of synonyms
is_sw = word in stopwords
options = []
if shorter:
options.extend(shorter)
if es:
options.extend(es)
if is_sw:
options.append('�')
if emojionly:
options=list(es)
print word, '=>', options, (depth, emojionly)
if options:
return ' '.join(options)
else:
return None
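if __name__ == '__main__':
    # Hypothetical demo; assumes ./emoji.tsv plus the nltk wordnet and
    # stopwords corpora are available locally.
    print(shortest_syn('automobile'))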
| adamobeng/chirp | util.py | Python | mit | 1,272 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import validate_email_add
from frappe.model.document import Document
from email.utils import parseaddr
class EmailGroup(Document):
def onload(self):
singles = [d.name for d in frappe.db.get_all("DocType", "name", {"issingle": 1})]
self.get("__onload").import_types = [{"value": d.parent, "label": "{0} ({1})".format(d.parent, d.label)} \
for d in frappe.db.get_all("DocField", ("parent", "label"), {"options": "Email"})
if d.parent not in singles]
def import_from(self, doctype):
"""Extract email ids from given doctype and add them to the current list"""
meta = frappe.get_meta(doctype)
email_field = [d.fieldname for d in meta.fields
if d.fieldtype in ("Data", "Small Text", "Text", "Code") and d.options=="Email"][0]
unsubscribed_field = "unsubscribed" if meta.get_field("unsubscribed") else None
added = 0
for user in frappe.db.get_all(doctype, [email_field, unsubscribed_field or "name"]):
try:
email = parseaddr(user.get(email_field))[1]
if email:
frappe.get_doc({
"doctype": "Email Group Member",
"email_group": self.name,
"email": email,
"unsubscribed": user.get(unsubscribed_field) if unsubscribed_field else 0
}).insert(ignore_permissions=True)
added += 1
except frappe.UniqueValidationError:
pass
frappe.msgprint(_("{0} subscribers added").format(added))
return self.update_total_subscribers()
def update_total_subscribers(self):
self.total_subscribers = self.get_total_subscribers()
self.db_update()
return self.total_subscribers
def get_total_subscribers(self):
return frappe.db.sql("""select count(*) from `tabEmail Group Member`
where email_group=%s""", self.name)[0][0]
def on_trash(self):
for d in frappe.get_all("Email Group Member", "name", {"email_group": self.name}):
frappe.delete_doc("Email Group Member", d.name)
@frappe.whitelist()
def import_from(name, doctype):
nlist = frappe.get_doc("Email Group", name)
if nlist.has_permission("write"):
return nlist.import_from(doctype)
@frappe.whitelist()
def add_subscribers(name, email_list):
if not isinstance(email_list, (list, tuple)):
email_list = email_list.replace(",", "\n").split("\n")
count = 0
for email in email_list:
email = email.strip()
valid = validate_email_add(email, False)
if valid:
if not frappe.db.get_value("Email Group Member",
{"email_group": name, "email": email}):
frappe.get_doc({
"doctype": "Email Group Member",
"email_group": name,
"email": email
}).insert(ignore_permissions = frappe.flags.ignore_permissions)
count += 1
else:
frappe.msgprint(_("{0} is not a valid email id").format(email))
frappe.msgprint(_("{0} subscribers added").format(count))
return frappe.get_doc("Email Group", name).update_total_subscribers()
def restrict_email_group(doc, method):
from frappe.limits import get_limits
email_group_limit = get_limits().get('email_group')
if not email_group_limit:
return
email_group = frappe.get_doc("Email Group", doc.email_group)
if email_group.get_total_subscribers() >= email_group_limit:
frappe.throw(_("Please Upgrade to add more than {0} subscribers").format(email_group_limit))
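# Hypothetical usage sketch (e.g. from a bench console; the names are made up):
#   add_subscribers("Newsletter", "a@example.com\nb@example.com")
#   import_from("Newsletter", "Contact")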
| indautgrp/frappe | frappe/email/doctype/email_group/email_group.py | Python | mit | 3,434 |
import pandas as pd
import numpy as np
import scipy.stats as stats
from sklearn import preprocessing
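# Worked example of the nanmedian-imputation pattern used in main() below:
#   a = np.array([[1., np.nan], [3., 4.]])
#   med = stats.nanmedian(a, axis=0)   # column medians -> [2., 4.]
#   idx = np.where(np.isnan(a))        # -> (array([0]), array([1]))
#   a[idx] = np.take(med, idx[1])      # each NaN gets its column median, here 4.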
def main():
dat=pd.read_table('data/train_v2.csv',sep=',')
print "reading done, train"
loss=np.asarray(dat.loss)
dat=dat.drop(['loss','id'],1)
dat['new1']=dat['f528']-dat['f527'] #golden feature 1
dat['new2']=dat['f528']-dat['f274'] #golden feature 2
dat=np.asarray(dat.values, dtype=float)
col_med = stats.nanmedian(dat,axis=0)
print "calculated medians, train"
inds = np.where(np.isnan(dat))
dat[inds]=np.take(col_med,inds[1])
print "median imputation done, train"
scaler=preprocessing.Scaler().fit(dat)
dat=scaler.transform(dat)
print "scaling done, train"
labels=(loss>0).astype(int)
np.save('data/x_train.npy',dat)
np.save('data/y_train.npy',labels)
np.save('data/loss.npy',loss)
print "trainset done"
dat=pd.read_table('data/test_v2.csv',sep=',')
print "reading done, test"
ids=np.asarray(dat.id)
dat=dat.drop(['id'],1)
dat['new1']=dat['f528']-dat['f527'] #golden feature 1
dat['new2']=dat['f528']-dat['f274'] #golden feature 2
dat=np.asarray(dat.values,dtype=float)
col_med=stats.nanmedian(dat,axis=0)
print "calculated medians, test"
inds=np.where(np.isnan(dat))
dat[inds]=np.take(col_med,inds[1])
print "imputation done, test"
dat=scaler.transform(dat)
print "scaling done, test"
np.save('data/x_test.npy',dat)
np.save('data/ids.npy',ids)
print "testset done"
if __name__=="__main__":
main() | wallinm1/kaggle-loan-default | read.py | Python | mit | 1,565 |
"""Tests for functions provided by tools.py"""
from contextlib import contextmanager
import time
import tools
import models
from google.appengine.ext import ndb
from google.appengine.ext import testbed
@contextmanager
def ndb_context():
tb_obj = testbed.Testbed()
tb_obj.activate()
tb_obj.init_datastore_v3_stub()
tb_obj.init_memcache_stub()
# clear cache
ndb.get_context().clear_cache()
# yield the testbed
yield tb_obj
# cleanup
tb_obj.deactivate()
def get_msg():
"""Creates a false message for testing.
Args:
none
Returns:
dict - contains all the fields necessary for store_msg
"""
return {
'username': 'test_man',
'text': 'a test message',
'timestamp': '{}'.format(time.time())
}
def test_store_msg():
"""Ensures we can store a message"""
with ndb_context():
tools.store_msg(get_msg())
assert len(models.Message.query().fetch(10)) == 1
| PFCM/slack-ml | data/test_tools.py | Python | mit | 974 |
import os
def computador_escolhe_jogada(n,m):
    print('computador_escolhe_jogada\n')
    jogada_computador = 1
    while(jogada_computador < m):
        # winning move: remove just enough pieces to leave a multiple of (m + 1)
        if (checa_multiplos(n - jogada_computador, m)):
            print('O Computador tirou ', jogada_computador, ' peça\n')
            return jogada_computador
        jogada_computador = jogada_computador + 1
    print('O Computador tirou ', m, ' peça\n')
    return m
def usuario_escolhe_jogada(n,m):
print('usuario_escolhe_jogada')
retirar_pecas = False
while(retirar_pecas == False):
num_pecas = int(input('\nQuantas peças você vai tirar?: '))
if (num_pecas > m):
print('\nOops! Jogada inválida! Tente de novo.', 'Maximo por jogada: ', m,'\n')
        elif (num_pecas < 1 or num_pecas > n):
            # verify the requested move against the pieces left on the board
            print('\nOops! Jogada inválida! Tente de novo.\n')
        else:
            retirar_pecas = True
    print('Você tirou ', num_pecas, ' peça\n')
    return num_pecas
def checa_multiplos(n,m):
if (n % (m + 1) == 0):
return True
else:
return False
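# Example: with n = 13 pieces and a limit of m = 4 per turn, removing
# 13 % (4 + 1) = 3 pieces leaves 10 on the board, and checa_multiplos(10, 4)
# is True: the opponent is left in a losing position.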
def partida():
print('\n*** Voce escolheu Uma Partida Isolada ***\n')
#Config
qtd_pecas_max = int(input('Quantas peças?: '))
limite_pecas = int(input('\nLimite de peças por jogada?: '))
game_ativo = True
usuario_joga = True
qtd_pecas_atual = qtd_pecas_max
qtd_pecas = 0
multiplo = checa_multiplos(qtd_pecas_max, limite_pecas)
if (multiplo):
        usuario_joga = False
else:
usuario_joga = True
while(game_ativo):
if(usuario_joga):
qtd_pecas = usuario_escolhe_jogada(qtd_pecas_atual, limite_pecas)
usuario_joga = False
else:
qtd_pecas = computador_escolhe_jogada(qtd_pecas_atual, limite_pecas)
usuario_joga = True
qtd_pecas_atual = qtd_pecas_atual - qtd_pecas
if (qtd_pecas_atual > 0):
print('Agora resta apenas ',qtd_pecas_atual ,' peça(s) no tabuleiro.\n')
else:
print('Fim do jogo!\n')
if(usuario_joga):
print('\nO Computador Ganhou\n')
else:
print('\nVoce Ganhou!!!\n')
game_ativo = False
def campeonato():
print('\n*** Voce escolheu Campeonato ***\n')
print('**** Rodada 1 ****')
def main():
cls = lambda: os.system('cls')
cls()
print('\n'*5,'Bem vindo ao Game NIM! Escolha:\n \n'
'1 - Para Jogar Uma Partida Isolada \n'
'2 - Para Jogar Um Campeonato\n')
escolha = int(input('Sua escolha: '))
if(escolha == 1):
partida()
elif(escolha == 2):
campeonato()
else:
print('Entre com uma opcao valida: 1 ou 2')
def test_partida():
assert computador_escolhe_jogada(3, 1) == 1
assert computador_escolhe_jogada(14, 4) == 4
assert computador_escolhe_jogada(13, 4) == 3
assert computador_escolhe_jogada(11, 4) == 1
assert computador_escolhe_jogada(3, 5) == 3
assert computador_escolhe_jogada(1, 1) == 1
assert computador_escolhe_jogada(2, 2) == 2
#TEST_GAME
#test_partida()
#START_GAME
main()
| RodFernandes/Python_USP_Curso_Ciencias_da_Computacao_1 | jogo_nim_c.py | Python | apache-2.0 | 3,464 |
''' dfsr_query.py '''
from collections import defaultdict
import uuid
from wmi_client import WmiClient, ArgumentError
import dfsr_settings
def safe_guid(function):
''' A decorator to validate guids. '''
def _function(*args, **kwargs):
''' The internal function for the decorator '''
        try:
            uuid.UUID(args[1])
        except (ValueError, AttributeError, TypeError):
            raise ArgumentError('Invalid GUID', str(args[1]))
        # call outside the try so the wrapped function's own errors propagate
        return function(*args, **kwargs)
return _function
class DfsrQuery():
'''
Sets up the WMI connection through WmiClient and then handles
parameterized WQL queries through that connection.
DfsrQuery currently allows exceptions raised by WmiClient to
bubble up to the caller. There isn't much we can do to recover
from them, because they're going to be caused by problems with
the server, network connectivity, or improper configuration.
So we're going to want the caller to define how the errors get
display to the user via the web service or CLI.
'''
def __init__(self, server):
self.server = server
self.wmi = WmiClient(
name_space=dfsr_settings.DFSR_NAME_SPACE,
property_enums=dfsr_settings.DFSR_PROPERTY_ENUMS)
def get_dfsr_state(self, server_name=None):
'''
Returns the DfsrInfo.State property.
'''
if server_name:
self.server = server_name
wql = 'SELECT State FROM DfsrInfo'
query_results = self.wmi.make_query(self.server, wql)
if len(query_results) > 0:
return query_results[0].State
else:
#Dfsr won't return an error when offline.
return 'Service offline'
def get_replication_status_counts(self, server_name=None):
'''
Returns a dict with replication states as keys for lists of
ReplicationGroupNames with that status. I'm not using a Counter
object here to allow the caller to easily get the list of replication
groups with that status, not just the count.
'''
if server_name:
self.server = server_name
wql = ('SELECT State, ReplicationGroupName FROM ' +
'DfsrReplicatedFolderInfo')
results = self.wmi.make_query(self.server, wql)
states = set([x.State for x in results])
counts = defaultdict(list)
for state in states:
counts[state] = sorted(
[rep.ReplicationGroupName for
rep in results if (rep.State == state)])
return counts
def get_connector_status_counts(self, server_name=None):
'''
Returns a dict with connector status as keys for lists of
Connectors with that status.
'''
wql = ('SELECT State, MemberName, PartnerName, ' +
'Inbound, ReplicationGroupName FROM DfsrConnectionInfo')
if server_name:
self.server = server_name
results = self.wmi.make_query(self.server, wql)
if len(results) > 0:
states = set([x.State for x in results])
counts = defaultdict(list)
for state in states:
counts[state] = sorted([
[conn.ReplicationGroupName, conn.MemberName,
conn.PartnerName, conn.Inbound]
for conn in results if (conn.State == state)])
return counts
else:
return []
def get_all_replication_groups(self, server_name=None):
'''
Returns ReplicationGroupName, ReplicationGroupGuid, and DefaultSchedule
of all replication groups in named tuples.
'''
if server_name:
self.server = server_name
wql = ('SELECT ReplicationGroupName, ReplicationGroupGuid ' +
'FROM DfsrReplicationGroupConfig')
results = self.wmi.make_query(self.server, wql)
if len(results) > 0:
return results
else:
return []
@safe_guid
def get_replication_folder_details(self, guid, server_name=None):
'''
Returns the full details about a DfsrReplicatedFolder.
Requires the GUID for the Replication Folder.
'''
if server_name:
self.server = server_name
wql = ('SELECT * FROM DfsrReplicatedFolderInfo WHERE ' +
'ReplicationGroupGuid = "%s"') % guid
folders = self.wmi.make_query(self.server, wql)
if len(folders) > 0:
return folders
else:
return []
@safe_guid
def get_connectors(self, guid, server_name=None):
'''
Returns a list of connectors for a DfsrReplicatedFolder with
all details
'''
if server_name:
self.server = server_name
wql = ('SELECT * FROM DfsrConnectionInfo WHERE ' +
'ReplicationGroupGuid = "%s"') % guid
return self.wmi.make_query(self.server, wql)
@safe_guid
def get_sync_details(self, guid, server_name=None):
'''
Returns the the full details about a connector's sync.
Requires the GUID of the DFSR Connector.
'''
if server_name:
self.server = server_name
wql = ('SELECT ConnectionGuid, InitiationReason, StartTime, ' +
'EndTime, UpdatesTransferred, BytesTransferred, ' +
'UpdatesNotTransferred, UpdatesToBeTransferred, '+
'ConflictsGenerated, TombstonesGenerated, ' +
'ForceReplicationBandwidthLevel, LastErrorCode ' +
'FROM DfsrSyncInfo WHERE ConnectionGuid = "{0}"').format(guid)
return self.wmi.make_query(self.server, wql)
@safe_guid
def get_update_details(self, guid, server_name=None):
'''
Returns the the full details about a connector's update.
Requires the GUID of the DFSR Connector.
'''
if server_name:
self.server = server_name
wql = ('SELECT * FROM DfsrIdUpdateInfo WHERE ' +
'ConnectionGuid = "%s"') % guid
return self.wmi.make_query(self.server, wql)
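if __name__ == '__main__':
    # Hypothetical smoke test; 'fileserver01' is a made-up DFSR member name.
    query = DfsrQuery('fileserver01')
    print(query.get_dfsr_state())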
| tgross/gilamon | gilamon/dfsr_query.py | Python | bsd-3-clause | 6,331 |
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
import test
from taskcoachlib import gui, config
class PrinterTest(test.TestCase):
def setUp(self):
super(PrinterTest, self).setUp()
self.settings = config.Settings(load=False)
self.margins = dict(top=1, left=2, bottom=3, right=4)
self.printerSettings = gui.printer.PrinterSettings(self.settings)
self.pageSetupData = wx.PageSetupDialogData()
def tearDown(self):
super(PrinterTest, self).tearDown()
self.resetPrinterSettings()
def resetPrinterSettings(self):
gui.printer.PrinterSettings.deleteInstance() # pylint: disable-msg=E1101
def testInitialSettings(self):
printerSettings = self.printerSettings
self.assertEqual(wx.Point(0, 0), printerSettings.GetMarginTopLeft())
self.assertEqual(0, printerSettings.GetPaperId())
self.assertEqual(wx.PORTRAIT, printerSettings.GetOrientation())
def testSetMargin(self):
self.pageSetupData.SetMarginTopLeft(wx.Point(10, 1))
self.printerSettings.updatePageSetupData(self.pageSetupData)
self.assertEqual(wx.Point(10, 1),
self.printerSettings.GetMarginTopLeft())
def testDefaultMarginsFromSettings(self):
settings = self.settings
for margin in self.margins:
self.assertEqual(0, settings.getint('printer', 'margin_'+margin))
def testSetPaperId(self):
self.pageSetupData.SetPaperId(1)
self.printerSettings.updatePageSetupData(self.pageSetupData)
self.assertEqual(1, self.printerSettings.GetPaperId())
def testDefaultPaperIdFromSettings(self):
self.assertEqual(0, self.settings.getint('printer', 'paper_id'))
def testSetOrientation(self):
self.pageSetupData.GetPrintData().SetOrientation(wx.LANDSCAPE)
self.printerSettings.updatePageSetupData(self.pageSetupData)
self.assertEqual(wx.LANDSCAPE, self.printerSettings.GetOrientation())
def testDefaultOrientationFromSettings(self):
self.assertEqual(wx.PORTRAIT,
self.settings.getint('printer', 'orientation'))
def testUpdateMarginsInPageSetupDataUpdatesSettings(self):
self.pageSetupData.SetMarginTopLeft(wx.Point(self.margins['left'],
self.margins['top']))
self.pageSetupData.SetMarginBottomRight(wx.Point(self.margins['right'],
self.margins['bottom']))
self.printerSettings.updatePageSetupData(self.pageSetupData)
for margin in self.margins:
self.assertEqual(self.margins[margin],
self.settings.getint('printer', 'margin_'+margin))
def testUpdatePaperIdInPageSetupDataUpdatesSettings(self):
self.pageSetupData.SetPaperId(1)
self.printerSettings.updatePageSetupData(self.pageSetupData)
self.assertEqual(1, self.settings.getint('printer', 'paper_id'))
def testUpdateOrientationInPageSetupDataUpdatesSettings(self):
self.pageSetupData.GetPrintData().SetOrientation(wx.LANDSCAPE)
self.printerSettings.updatePageSetupData(self.pageSetupData)
self.assertEqual(wx.LANDSCAPE,
self.settings.getint('printer', 'orientation'))
def testMarginsInPageSetupDataAreUpdatedFromSettings(self):
self.resetPrinterSettings()
for margin in self.margins:
self.settings.set('printer', 'margin_'+margin,
str(self.margins[margin]))
printerSettings = gui.printer.PrinterSettings(self.settings)
self.assertEqual(wx.Point(2, 1), printerSettings.GetMarginTopLeft())
self.assertEqual(wx.Point(4, 3), printerSettings.GetMarginBottomRight())
def testPaperIdInPageSetupDataIsUpdatedFromSettings(self):
self.resetPrinterSettings()
self.settings.set('printer', 'paper_id', '1')
printerSettings = gui.printer.PrinterSettings(self.settings)
self.assertEqual(1, printerSettings.GetPaperId())
def testOrientationInPageSetupDataIsUpdatedFromSettings(self):
self.resetPrinterSettings()
self.settings.set('printer', 'orientation', str(wx.LANDSCAPE))
printerSettings = gui.printer.PrinterSettings(self.settings)
self.assertEqual(wx.LANDSCAPE, printerSettings.GetOrientation())
class HTMLPrintoutTest(test.TestCase):
def testCreate(self):
gui.printer.HTMLPrintout('<html></html>', config.Settings(load=False))
| wdmchaft/taskcoach | tests/unittests/guiTests/PrinterTest.py | Python | gpl-3.0 | 5,279 |
# -*- coding: iso-8859-1 -*-
# Natural Language Toolkit: York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE)
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Selina Dennis <selina@tranzfusion.net>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old
English Prose (YCOE), a 1.5 million word syntactically-annotated
corpus of Old English prose texts. The corpus is distributed by the
Oxford Text Archive: http://www.ota.ahds.ac.uk/ It is not included
with NLTK.
The YCOE corpus is divided into 100 files, each representing
an Old English prose text. Tags used within each text complies
to the YCOE standard: http://www-users.york.ac.uk/~lang22/YCOE/YcoeHome.htm
"""
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
from nltk.tokenize import RegexpTokenizer
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk.corpus.reader.tagged import TaggedCorpusReader
from string import split
import os, re
from nltk.internals import deprecated
class YCOECorpusReader(CorpusReader):
"""
Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old
English Prose (YCOE), a 1.5 million word syntactically-annotated
corpus of Old English prose texts.
"""
def __init__(self, root):
self._psd_reader = YCOEParseCorpusReader(
os.path.join(root, 'psd'), '.*', '.psd')
self._pos_reader = YCOETaggedCorpusReader(
os.path.join(root, 'pos'), '.*', '.pos')
# Make sure we have a consistent set of items:
documents = set(f[:-4] for f in self._psd_reader.files())
if set(f[:-4] for f in self._pos_reader.files()) != documents:
raise ValueError('Items in "psd" and "pos" '
'subdirectories do not match.')
files = sorted(['%s.psd' % doc for doc in documents] +
['%s.pos' % doc for doc in documents])
CorpusReader.__init__(self, root, files)
self._documents = tuple(sorted(documents))
def documents(self, files=None):
"""
Return a list of document identifiers for all documents in
this corpus, or for the documents with the given file(s) if
specified.
"""
if files is None:
return self._documents
if isinstance(files, basestring):
files = [files]
for f in files:
if f not in self._files:
raise KeyError('File id %s not found' % files)
# Strip off the '.pos' and '.psd' extensions.
return sorted(set(f[:-4] for f in files))
def files(self, documents=None):
"""
Return a list of file identifiers for the files that make up
this corpus, or that store the given document(s) if specified.
"""
if documents is None:
return self._files
elif isinstance(documents, basestring):
documents = [documents]
return sorted(set(['%s.pos' % doc for doc in documents] +
['%s.psd' % doc for doc in documents]))
def _getfiles(self, documents, subcorpus):
"""
Helper that selects the appropraite files for a given set of
documents from a given subcorpus (pos or psd).
"""
if documents is None:
documents = self._documents
else:
if isinstance(documents, basestring):
documents = [documents]
for document in documents:
if document not in self._documents:
if document[-4:] in ('.pos', '.psd'):
raise ValueError(
'Expected a document identifier, not a file '
'identifier. (Use corpus.documents() to get '
'a list of document identifiers.')
else:
raise ValueError('Document identifier %s not found'
% document)
return ['%s.%s' % (d, subcorpus) for d in documents]
# Delegate to one of our two sub-readers:
def words(self, documents=None):
return self._pos_reader.words(self._getfiles(documents, 'pos'))
def sents(self, documents=None):
return self._pos_reader.sents(self._getfiles(documents, 'pos'))
def paras(self, documents=None):
return self._pos_reader.paras(self._getfiles(documents, 'pos'))
def tagged_words(self, documents=None):
return self._pos_reader.tagged_words(self._getfiles(documents, 'pos'))
def tagged_sents(self, documents=None):
return self._pos_reader.tagged_sents(self._getfiles(documents, 'pos'))
def tagged_paras(self, documents=None):
return self._pos_reader.tagged_paras(self._getfiles(documents, 'pos'))
def parsed_sents(self, documents=None):
return self._psd_reader.parsed_sents(self._getfiles(documents, 'psd'))
#{ Deprecated since 0.8
@deprecated("Use .raw() or .words() or .tagged_words() or "
".parsed_sents() instead.")
def read(self, items=None, format='parsed'):
if format == 'parsed': return self.parsed_sents(items)
if format == 'raw': return self.raw(items)
if format == 'tokenized': return self.words(items)
if format == 'tagged': return self.tagged_words(items)
if format == 'chunked': raise ValueError('no longer supported')
raise ValueError('bad format %r' % format)
@deprecated("Use .parsed_sents() instead.")
def parsed(self, items=None):
return self.parsed_sents(items)
@deprecated("Use .words() instead.")
def tokenized(self, items=None):
return self.words(items)
@deprecated("Use .tagged_words() instead.")
def tagged(self, items=None):
return self.tagged_words(items)
@deprecated("Operation no longer supported.")
def chunked(self, items=None):
raise ValueError('format "chunked" no longer supported')
#}
class YCOEParseCorpusReader(BracketParseCorpusReader):
"""Specialized version of the standard bracket parse corpus reader
that strips out (CODE ...) and (ID ...) nodes."""
def _parse(self, t):
t = re.sub(r'(?u)\((CODE|ID)[^\)]*\)', '', t)
if re.match(r'\s*\(\s*\)\s*$', t): return None
return BracketParseCorpusReader._parse(self, t)
class YCOETaggedCorpusReader(TaggedCorpusReader):
def __init__(self, root, items):
        gaps_re = r'(?u)(?<=/\.)\s+|\s*\S*_CODE\s*|\s*\S*_ID\s*'
sent_tokenizer = RegexpTokenizer(gaps_re, gaps=True)
TaggedCorpusReader.__init__(self, root, items, sep='_',
sent_tokenizer=sent_tokenizer)
#: A list of all documents and their titles in ycoe.
documents = {
'coadrian.o34': 'Adrian and Ritheus',
'coaelhom.o3': 'Ælfric, Supplemental Homilies',
'coaelive.o3': 'Ælfric\'s Lives of Saints',
'coalcuin': 'Alcuin De virtutibus et vitiis',
'coalex.o23': 'Alexander\'s Letter to Aristotle',
'coapollo.o3': 'Apollonius of Tyre',
'coaugust': 'Augustine',
'cobede.o2': 'Bede\'s History of the English Church',
'cobenrul.o3': 'Benedictine Rule',
'coblick.o23': 'Blickling Homilies',
'coboeth.o2': 'Boethius\' Consolation of Philosophy',
'cobyrhtf.o3': 'Byrhtferth\'s Manual',
'cocanedgD': 'Canons of Edgar (D)',
'cocanedgX': 'Canons of Edgar (X)',
'cocathom1.o3': 'Ælfric\'s Catholic Homilies I',
'cocathom2.o3': 'Ælfric\'s Catholic Homilies II',
'cochad.o24': 'Saint Chad',
'cochdrul': 'Chrodegang of Metz, Rule',
'cochristoph': 'Saint Christopher',
'cochronA.o23': 'Anglo-Saxon Chronicle A',
'cochronC': 'Anglo-Saxon Chronicle C',
'cochronD': 'Anglo-Saxon Chronicle D',
'cochronE.o34': 'Anglo-Saxon Chronicle E',
'cocura.o2': 'Cura Pastoralis',
'cocuraC': 'Cura Pastoralis (Cotton)',
'codicts.o34': 'Dicts of Cato',
'codocu1.o1': 'Documents 1 (O1)',
'codocu2.o12': 'Documents 2 (O1/O2)',
'codocu2.o2': 'Documents 2 (O2)',
'codocu3.o23': 'Documents 3 (O2/O3)',
'codocu3.o3': 'Documents 3 (O3)',
'codocu4.o24': 'Documents 4 (O2/O4)',
'coeluc1': 'Honorius of Autun, Elucidarium 1',
'coeluc2': 'Honorius of Autun, Elucidarium 1',
'coepigen.o3': 'Ælfric\'s Epilogue to Genesis',
'coeuphr': 'Saint Euphrosyne',
'coeust': 'Saint Eustace and his companions',
'coexodusP': 'Exodus (P)',
'cogenesiC': 'Genesis (C)',
'cogregdC.o24': 'Gregory\'s Dialogues (C)',
'cogregdH.o23': 'Gregory\'s Dialogues (H)',
'coherbar': 'Pseudo-Apuleius, Herbarium',
'coinspolD.o34': 'Wulfstan\'s Institute of Polity (D)',
'coinspolX': 'Wulfstan\'s Institute of Polity (X)',
'cojames': 'Saint James',
'colacnu.o23': 'Lacnunga',
'colaece.o2': 'Leechdoms',
'colaw1cn.o3': 'Laws, Cnut I',
'colaw2cn.o3': 'Laws, Cnut II',
'colaw5atr.o3': 'Laws, Æthelred V',
'colaw6atr.o3': 'Laws, Æthelred VI',
'colawaf.o2': 'Laws, Alfred',
'colawafint.o2': 'Alfred\'s Introduction to Laws',
'colawger.o34': 'Laws, Gerefa',
'colawine.ox2': 'Laws, Ine',
'colawnorthu.o3': 'Northumbra Preosta Lagu',
'colawwllad.o4': 'Laws, William I, Lad',
'coleofri.o4': 'Leofric',
'colsigef.o3': 'Ælfric\'s Letter to Sigefyrth',
'colsigewB': 'Ælfric\'s Letter to Sigeweard (B)',
'colsigewZ.o34': 'Ælfric\'s Letter to Sigeweard (Z)',
'colwgeat': 'Ælfric\'s Letter to Wulfgeat',
'colwsigeT': 'Ælfric\'s Letter to Wulfsige (T)',
'colwsigeXa.o34': 'Ælfric\'s Letter to Wulfsige (Xa)',
'colwstan1.o3': 'Ælfric\'s Letter to Wulfstan I',
'colwstan2.o3': 'Ælfric\'s Letter to Wulfstan II',
'comargaC.o34': 'Saint Margaret (C)',
'comargaT': 'Saint Margaret (T)',
'comart1': 'Martyrology, I',
'comart2': 'Martyrology, II',
'comart3.o23': 'Martyrology, III',
'comarvel.o23': 'Marvels of the East',
'comary': 'Mary of Egypt',
'coneot': 'Saint Neot',
'conicodA': 'Gospel of Nicodemus (A)',
'conicodC': 'Gospel of Nicodemus (C)',
'conicodD': 'Gospel of Nicodemus (D)',
'conicodE': 'Gospel of Nicodemus (E)',
'coorosiu.o2': 'Orosius',
'cootest.o3': 'Heptateuch',
'coprefcath1.o3': 'Ælfric\'s Preface to Catholic Homilies I',
'coprefcath2.o3': 'Ælfric\'s Preface to Catholic Homilies II',
'coprefcura.o2': 'Preface to the Cura Pastoralis',
'coprefgen.o3': 'Ælfric\'s Preface to Genesis',
'copreflives.o3': 'Ælfric\'s Preface to Lives of Saints',
'coprefsolilo': 'Preface to Augustine\'s Soliloquies',
'coquadru.o23': 'Pseudo-Apuleius, Medicina de quadrupedibus',
'corood': 'History of the Holy Rood-Tree',
'cosevensl': 'Seven Sleepers',
'cosolilo': 'St. Augustine\'s Soliloquies',
'cosolsat1.o4': 'Solomon and Saturn I',
'cosolsat2': 'Solomon and Saturn II',
'cotempo.o3': 'Ælfric\'s De Temporibus Anni',
'coverhom': 'Vercelli Homilies',
'coverhomE': 'Vercelli Homilies (E)',
'coverhomL': 'Vercelli Homilies (L)',
'covinceB': 'Saint Vincent (Bodley 343)',
'covinsal': 'Vindicta Salvatoris',
'cowsgosp.o3': 'West-Saxon Gospels',
'cowulf.o34': 'Wulfstan\'s Homilies'
}
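# Hypothetical usage sketch, assuming a local copy of the OTA distribution:
#   ycoe = YCOECorpusReader('/usr/share/ycoe')
#   ycoe.documents()[:2]          # -> ('coadrian.o34', 'coaelhom.o3')
#   ycoe.words('cobede.o2')[:10]  # first ten word tokens of Bede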
| hectormartinez/rougexstem | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/corpus/reader/ycoe.py | Python | apache-2.0 | 11,296 |
# This test module covers support in various parts of the standard library
# for working with modules located inside zipfiles
# The tests are centralised in this fashion to make it easy to drop them
# if a platform doesn't support zipimport
import test.support
import os
import os.path
import sys
import textwrap
import zipfile
import zipimport
import doctest
import inspect
import linecache
import pdb
import unittest
from test.script_helper import (spawn_python, kill_python, assert_python_ok,
temp_dir, make_script, make_zip_script)
verbose = test.support.verbose
# Library modules covered by this test set
# pdb (Issue 4201)
# inspect (Issue 4223)
# doctest (Issue 4197)
# Other test modules with zipimport related tests
# test_zipimport (of course!)
# test_cmd_line_script (covers the zipimport support in runpy)
# Retrieve some helpers from other test cases
from test import test_doctest, sample_doctest
def _run_object_doctest(obj, module):
finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
runner = doctest.DocTestRunner(verbose=verbose)
# Use the object's fully qualified name if it has one
# Otherwise, use the module's name
try:
name = "%s.%s" % (obj.__module__, obj.__name__)
except AttributeError:
name = module.__name__
for example in finder.find(obj, name, module):
runner.run(example)
f, t = runner.failures, runner.tries
if f:
raise test.support.TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print ('doctest (%s) ... %d tests with zero failures' % (module.__name__, t))
return f, t
class ZipSupportTests(unittest.TestCase):
# This used to use the ImportHooksBaseTestCase to restore
# the state of the import related information
# in the sys module after each test. However, that restores
# *too much* information and breaks for the invocation of
# of test_doctest. So we do our own thing and leave
# sys.modules alone.
# We also clear the linecache and zipimport cache
# just to avoid any bogus errors due to name reuse in the tests
def setUp(self):
linecache.clearcache()
zipimport._zip_directory_cache.clear()
self.path = sys.path[:]
self.meta_path = sys.meta_path[:]
self.path_hooks = sys.path_hooks[:]
sys.path_importer_cache.clear()
def tearDown(self):
sys.path[:] = self.path
sys.meta_path[:] = self.meta_path
sys.path_hooks[:] = self.path_hooks
sys.path_importer_cache.clear()
def test_inspect_getsource_issue4223(self):
test_src = "def foo(): pass\n"
with temp_dir() as d:
init_name = make_script(d, '__init__', test_src)
name_in_zip = os.path.join('zip_pkg',
os.path.basename(init_name))
zip_name, run_name = make_zip_script(d, 'test_zip',
init_name, name_in_zip)
os.remove(init_name)
sys.path.insert(0, zip_name)
import zip_pkg
try:
self.assertEqual(inspect.getsource(zip_pkg.foo), test_src)
finally:
del sys.modules["zip_pkg"]
def test_doctest_issue4197(self):
# To avoid having to keep two copies of the doctest module's
# unit tests in sync, this test works by taking the source of
# test_doctest itself, rewriting it a bit to cope with a new
# location, and then throwing it in a zip file to make sure
# everything still works correctly
test_src = inspect.getsource(test_doctest)
test_src = test_src.replace(
"from test import test_doctest",
"import test_zipped_doctest as test_doctest")
test_src = test_src.replace("test.test_doctest",
"test_zipped_doctest")
test_src = test_src.replace("test.sample_doctest",
"sample_zipped_doctest")
sample_src = inspect.getsource(sample_doctest)
sample_src = sample_src.replace("test.test_doctest",
"test_zipped_doctest")
with temp_dir() as d:
script_name = make_script(d, 'test_zipped_doctest',
test_src)
zip_name, run_name = make_zip_script(d, 'test_zip',
script_name)
z = zipfile.ZipFile(zip_name, 'a')
z.writestr("sample_zipped_doctest.py", sample_src)
z.close()
if verbose:
zip_file = zipfile.ZipFile(zip_name, 'r')
print ('Contents of %r:' % zip_name)
zip_file.printdir()
zip_file.close()
os.remove(script_name)
sys.path.insert(0, zip_name)
import test_zipped_doctest
try:
# Some of the doc tests depend on the colocated text files
# which aren't available to the zipped version (the doctest
# module currently requires real filenames for non-embedded
# tests). So we're forced to be selective about which tests
# to run.
# doctest could really use some APIs which take a text
# string or a file object instead of a filename...
known_good_tests = [
test_zipped_doctest.SampleClass,
test_zipped_doctest.SampleClass.NestedClass,
test_zipped_doctest.SampleClass.NestedClass.__init__,
test_zipped_doctest.SampleClass.__init__,
test_zipped_doctest.SampleClass.a_classmethod,
test_zipped_doctest.SampleClass.a_property,
test_zipped_doctest.SampleClass.a_staticmethod,
test_zipped_doctest.SampleClass.double,
test_zipped_doctest.SampleClass.get,
test_zipped_doctest.SampleNewStyleClass,
test_zipped_doctest.SampleNewStyleClass.__init__,
test_zipped_doctest.SampleNewStyleClass.double,
test_zipped_doctest.SampleNewStyleClass.get,
test_zipped_doctest.sample_func,
test_zipped_doctest.test_DocTest,
test_zipped_doctest.test_DocTestParser,
test_zipped_doctest.test_DocTestRunner.basics,
test_zipped_doctest.test_DocTestRunner.exceptions,
test_zipped_doctest.test_DocTestRunner.option_directives,
test_zipped_doctest.test_DocTestRunner.optionflags,
test_zipped_doctest.test_DocTestRunner.verbose_flag,
test_zipped_doctest.test_Example,
test_zipped_doctest.test_debug,
test_zipped_doctest.test_testsource,
test_zipped_doctest.test_trailing_space_in_test,
test_zipped_doctest.test_DocTestSuite,
test_zipped_doctest.test_DocTestFinder,
]
# These tests are the ones which need access
# to the data files, so we don't run them
fail_due_to_missing_data_files = [
test_zipped_doctest.test_DocFileSuite,
test_zipped_doctest.test_testfile,
test_zipped_doctest.test_unittest_reportflags,
]
for obj in known_good_tests:
_run_object_doctest(obj, test_zipped_doctest)
finally:
del sys.modules["test_zipped_doctest"]
def test_doctest_main_issue4197(self):
test_src = textwrap.dedent("""\
class Test:
">>> 'line 2'"
pass
import doctest
doctest.testmod()
""")
pattern = 'File "%s", line 2, in %s'
with temp_dir() as d:
script_name = make_script(d, 'script', test_src)
rc, out, err = assert_python_ok(script_name)
expected = pattern % (script_name, "__main__.Test")
if verbose:
print ("Expected line", expected)
print ("Got stdout:")
print (ascii(out))
self.assertIn(expected.encode('utf-8'), out)
zip_name, run_name = make_zip_script(d, "test_zip",
script_name, '__main__.py')
rc, out, err = assert_python_ok(zip_name)
expected = pattern % (run_name, "__main__.Test")
if verbose:
print ("Expected line", expected)
print ("Got stdout:")
print (ascii(out))
self.assertIn(expected.encode('utf-8'), out)
def test_pdb_issue4201(self):
test_src = textwrap.dedent("""\
def f():
pass
import pdb
pdb.Pdb(nosigint=True).runcall(f)
""")
with temp_dir() as d:
script_name = make_script(d, 'script', test_src)
p = spawn_python(script_name)
p.stdin.write(b'l\n')
data = kill_python(p)
self.assertIn(script_name.encode('utf-8'), data)
zip_name, run_name = make_zip_script(d, "test_zip",
script_name, '__main__.py')
p = spawn_python(zip_name)
p.stdin.write(b'l\n')
data = kill_python(p)
self.assertIn(run_name.encode('utf-8'), data)
def test_main():
test.support.run_unittest(ZipSupportTests)
test.support.reap_children()
if __name__ == '__main__':
test_main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_zipimport_support.py | Python | mit | 9,949 |
from magetool.libraries.globalclass import GlobalClass
class Helper(GlobalClass):
"""Class which represents Mage helpers, i.e., PHP classes which go in
a module's Helper/ directory.
"""
@staticmethod
def help():
print """Usage: magetool [OPTION]... (create|register) helper [NAME]
Options:
-s, --superclass=SUPERCLASS Make the helper extend SUPERCLASS.
Default: Mage_Core_Helper_Abstract.
-o, --override Tell Mage that the helper overrides
its superclass (use in conjunction
with --superclass=SUPERCLASS.)
Examples:
magetool create helper Data
Define a PHP class in Helper/Data.php and update the module's
configuration accordingly.
magetool -s Mage_Checkout_Helper_Data create helper Data
Define a PHP class in Helper/Data.php which extends the class
Mage_Checkout_Helper_Data and update the module's configuration
file accordingly.
magetool -os Mage_Checkout_Helper_Data create helper Data
Define a PHP class in Helper/Data.php which extends and overrides
Mage_Checkout_Helper_Data.
magetool register helper
Update the module's configuration file to tell Mage that the module
has one or more helper classes."""
| jhckragh/magetool | magetool/commands/helper.py | Python | bsd-2-clause | 1,341 |
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^rosetta/', include('rosetta.urls')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^dbmail/', include('dbmail.urls')),
url('^browser_notification/$', "demo.views.browser_notification"),
url(r'^ckeditor/', include('ckeditor.urls')),
) + staticfiles_urlpatterns()
'''
from django import VERSION
if VERSION < (1, 7):
urlpatterns += patterns(
'', url(r'^tinymce/', include('tinymce.urls')),
)
'''
| ilstreltsov/django-db-mailer | demo/demo/urls.py | Python | gpl-2.0 | 702 |
# List all ant build.xml files in the sandbox that should be called by the
# build system.
import os
import sandbox
import ioutil
import sys
if __name__ == '__main__':
if 'True' in sys.argv:
quick = True
else:
quick = False
x = []
try:
sb = sandbox.current
deps = [l.name for l in sb.get_cached_components()]
x = []
for c in deps:
if 'code' in sb.get_component_aspects(c):
path = sb.get_code_root() + c + '/build.xml'
else:
path = sb.get_built_root() + c + '/build.xml'
if os.path.isfile(path):
if quick and path.find('built.') > -1:
continue
x.append(path)
    except Exception:
        # no sandbox available: fall through and print an empty list
        pass
print(','.join(x))
| perfectsearch/sandman | code/buildscripts/list_ant_builds.py | Python | mit | 792 |
from Child import Child
from Node import Node # noqa: I201
AVAILABILITY_NODES = [
# availability-spec-list -> availability-entry availability-spec-list?
Node('AvailabilitySpecList', kind='SyntaxCollection',
element='AvailabilityArgument'),
# Wrapper for all the different entries that may occur inside @available
# availability-entry -> '*' ','?
# | identifier ','?
# | availability-version-restriction ','?
# | availability-versioned-argument ','?
Node('AvailabilityArgument', kind='Syntax',
description='''
         A single argument to an `@available` attribute like `*`, `iOS 10.1`, \
or `message: "This has been deprecated"`.
''',
children=[
Child('Entry', kind='Syntax',
description='The actual argument',
node_choices=[
Child('Star', kind='SpacedBinaryOperatorToken',
text_choices=['*']),
Child('IdentifierRestriction',
kind='IdentifierToken'),
Child('AvailabilityVersionRestriction',
kind='AvailabilityVersionRestriction'),
Child('AvailabilityLabeledArgument',
kind='AvailabilityLabeledArgument'),
]),
Child('TrailingComma', kind='CommaToken', is_optional=True,
description='''
A trailing comma if the argument is followed by another \
argument
'''),
]),
# Representation of 'deprecated: 2.3', 'message: "Hello world"' etc.
# availability-versioned-argument -> identifier ':' version-tuple
Node('AvailabilityLabeledArgument', kind='Syntax',
description='''
         An argument to an `@available` attribute that consists of a label and \
a value, e.g. `message: "This has been deprecated"`.
''',
children=[
Child('Label', kind='IdentifierToken',
description='The label of the argument'),
Child('Colon', kind='ColonToken',
description='The colon separating label and value'),
Child('Value', kind='Syntax',
node_choices=[
Child('String', 'StringLiteralToken'),
Child('Version', 'VersionTuple'),
], description='The value of this labeled argument',),
]),
# Representation for 'iOS 10', 'swift 3.4' etc.
# availability-version-restriction -> identifier version-tuple
Node('AvailabilityVersionRestriction', kind='Syntax',
description='''
An argument to `@available` that restricts the availability on a \
certain platform to a version, e.g. `iOS 10` or `swift 3.4`.
''',
children=[
Child('Platform', kind='IdentifierToken', description='''
The name of the OS on which the availability should be \
restricted or 'swift' if the availability should be restricted \
based on a Swift version.
'''),
Child('Version', kind='VersionTuple'),
]),
# version-tuple -> integer-literal
# | float-literal
# | float-literal '.' integer-literal
Node('VersionTuple', kind='Syntax',
description='''
A version number of the form major.minor.patch in which the minor \
         and patch parts may be omitted.
''',
children=[
Child('MajorMinor', kind='Syntax',
node_choices=[
Child('Major', kind='IntegerLiteralToken'),
Child('MajorMinor', kind='FloatingLiteralToken')
], description='''
In case the version consists only of the major version, an \
integer literal that specifies the major version. In case \
the version consists of major and minor version number, a \
floating literal in which the decimal part is interpreted \
as the minor version.
'''),
Child('PatchPeriod', kind='PeriodToken', is_optional=True,
description='''
If the version contains a patch number, the period \
separating the minor from the patch number.
'''),
Child('PatchVersion', kind='IntegerLiteralToken',
is_optional=True, description='''
The patch version if specified.
'''),
]),
]
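# Example: in `@available(iOS 10.1.2, *)` the clause `iOS 10.1.2` is an
# AvailabilityVersionRestriction whose VersionTuple has the floating literal
# `10.1` as MajorMinor and `2` as PatchVersion; the bare `*` is an
# AvailabilityArgument with a star entry.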
| huonw/swift | utils/gyb_syntax_support/AvailabilityNodes.py | Python | apache-2.0 | 4,794 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='MaintenanceAlert',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=False)),
('text', models.TextField()),
],
options={
},
bases=(models.Model,),
),
]
| qedsoftware/commcare-hq | corehq/apps/hqwebapp/migrations/0001_initial.py | Python | bsd-3-clause | 755 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Automatic selection of arXiv papers for inclusion in Inspire.
Usage:
python arXiv.py mode record_file model_file
- mode: train or predict
- record_file:
JSON file holding records.
- model_file:
File into which is the model is saved (`mode == "train"`),
or loaded from (`mode == "predict"`).
Examples:
python arxiv.py train records.json model.pickle
python arxiv.py predict records.json model.pickle
"""
import numpy as np
import cPickle as pickle
from beard.utils import FuncTransformer
from beard.utils import Shaper
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer
from sklearn.svm import LinearSVC
def _get_title(r):
return r["title"] if r["title"] else ""
def _get_abstract(r):
return r["abstract"] if r["abstract"] else ""
def _get_title_abstract(r):
return _get_title(r) + " " + _get_abstract(r)
def _get_categories(r):
return " ".join(r["categories"])
def train(records):
"""Train a classifier on the given arXiv records.
:param records:
Records are expected as a list of dictionaries with
the following fields required: "title", "abstract", "categories"
and "decision". The decision field should be either "CORE", "Non-CORE"
or "Rejected".
Example:
records = [{u'decision': "CORE",
u'title': u'Effects of top compositeness',
u'abstract': u'We investigate the effects of (...)'
u'categories': [u'cond-mat.mes-hall',
u'cond-mat.mtrl-sci']},
{...}, ...]
:return: the trained pipeline
"""
records = np.array(records, dtype=np.object).reshape((-1, 1))
transformer = Pipeline([
("features", FeatureUnion([
("title_abstract", Pipeline([
("getter", FuncTransformer(func=_get_title_abstract)),
("shape", Shaper(newshape=(-1,))),
("tfidf", TfidfVectorizer(min_df=3, max_df=0.1, norm="l2",
ngram_range=(1, 1),
stop_words="english",
strip_accents="unicode",
dtype=np.float32,
decode_error="replace"))])),
("categories", Pipeline([
("getter", FuncTransformer(func=_get_categories)),
("shape", Shaper(newshape=(-1,))),
("tfidf", TfidfVectorizer(norm="l2", dtype=np.float32,
decode_error="replace"))])),
])),
("scaling", Normalizer())
])
X = transformer.fit_transform(records)
y = np.array([r[0]["decision"] for r in records])
grid = GridSearchCV(LinearSVC(),
param_grid={"C": np.linspace(start=0.1, stop=1.0,
num=100)},
scoring="accuracy", cv=5, verbose=3)
grid.fit(X, y)
return Pipeline([("transformer", transformer),
("classifier", grid.best_estimator_)])
def predict(pipeline, record):
"""Predict whether the given record is CORE/Non-CORE/Rejected.
:param record:
Record is expected as a dictionary with
the following fields required: "title", "abstract", "categories".
Example:
record = {u'title': u'Effects of top compositeness',
u'abstract': u'We investigate the effects of (...)'
u'categories': [u'cond-mat.mes-hall',
u'cond-mat.mtrl-sci']}
:return decision, scores:
decision: CORE, Non-CORE or Rejected, as the argmax of scores
scores: the decision scores
Example:
(u'Rejected', array([-1.25554232, -1.2591557, 1.17074973]))
"""
transformer = pipeline.steps[0][1]
classifier = pipeline.steps[1][1]
X = transformer.transform(np.array([[record]], dtype=np.object))
decision = classifier.predict(X)[0]
scores = classifier.decision_function(X)[0]
return decision, scores
if __name__ == "__main__":
import json
import sys
mode = sys.argv[1]
records = json.load(open(sys.argv[2], "r"))
if isinstance(records, dict):
records = records.values()
if mode == "train":
pipeline = train(records)
pickle.dump(pipeline, open(sys.argv[3], "w"))
elif mode == "predict":
pipeline = pickle.load(open(sys.argv[3]))
for r in records[:5]:
print r
print predict(pipeline, r)
print
| Dziolas/inspire-next | inspire/modules/predicter/arxiv.py | Python | gpl-2.0 | 5,673 |
import slippytiles
import xml.etree.cElementTree as ET
import os, uuid
if __name__ == "__main__":
fina = "mod.osm"
xml = ET.parse(fina)
root = xml.getroot()
#Check which tiles contain the nodes
for obj in root:
if obj.tag != "node":
continue
        print float(obj.attrib['lat']), float(obj.attrib['lon'])
| TimSC/gitgis | Upload.py | Python | bsd-2-clause | 320 |
# Scrapy settings for familynames project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'FontBakery'
SPIDER_MODULES = ['familynames.spiders']
NEWSPIDER_MODULE = 'familynames.spiders'
COMMANDS_MODULE = 'familynames.commands'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'familynames (+http://www.yourdomain.com)'
| googlefonts/namecheck | bakery/crawl/familynames/familynames/settings.py | Python | apache-2.0 | 523 |
import pyuaf
import unittest
from pyuaf.util.unittesting import parseArgs
ARGS = parseArgs()
def suite(args=None):
if args is not None:
global ARGS
ARGS = args
return unittest.TestLoader().loadTestsFromTestCase(NodeIdIdentifierTest)
class NodeIdIdentifierTest(unittest.TestCase):
def setUp(self):
self.n0 = pyuaf.util.NodeIdIdentifier("SomeStringIdentifier")
self.n0_ = pyuaf.util.NodeIdIdentifier("SomeStringIdentifier")
self.n1 = pyuaf.util.NodeIdIdentifier(42)
self.n2 = pyuaf.util.NodeIdIdentifier("SomeOtherStringIdentifier")
self.n3 = pyuaf.util.NodeIdIdentifier(pyuaf.util.Guid("{10000000-0000-0000-0000-000000000000}"))
self.n4 = pyuaf.util.NodeIdIdentifier(bytearray("abcd"))
def test_util_NodeIdIdentifier_type(self):
self.assertEqual( self.n1.type , pyuaf.util.nodeididentifiertypes.Identifier_Numeric )
self.assertEqual( self.n2.type , pyuaf.util.nodeididentifiertypes.Identifier_String )
self.assertEqual( self.n3.type , pyuaf.util.nodeididentifiertypes.Identifier_Guid )
self.assertEqual( self.n4.type , pyuaf.util.nodeididentifiertypes.Identifier_Opaque )
def test_util_NodeIdIdentifier_idString(self):
self.assertEqual( self.n0.idString , "SomeStringIdentifier" )
def test_util_NodeIdIdentifier_idNumeric(self):
self.assertEqual( self.n1.idNumeric , 42 )
def test_util_NodeIdIdentifier_idGuid(self):
self.assertEqual( self.n3.idGuid , pyuaf.util.Guid("{10000000-0000-0000-0000-000000000000}") )
def test_util_NodeIdIdentifier_idOpaque(self):
self.assertEqual( self.n4.idOpaque , bytearray("abcd") )
def test_util_NodeIdIdentifier___eq__(self):
self.assertTrue( self.n0 == self.n0_ )
def test_util_NodeIdIdentifier___ne__(self):
self.assertTrue( self.n0 != self.n1 )
self.assertTrue( self.n0 != self.n2 )
def test_util_NodeIdIdentifier___lt__(self):
self.assertTrue( self.n1 < self.n0 )
self.assertTrue( self.n2 < self.n0 )
def test_util_NodeIdIdentifier___gt__(self):
self.assertTrue( self.n0 > self.n1 )
self.assertTrue( self.n0 > self.n2 )
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity = ARGS.verbosity).run(suite())
| robi-wan/uaf | unittests/pyuaftests/util/nodeididentifier.py | Python | lgpl-3.0 | 2,359 |
#
# sabayon.py: Sabayon and Kogaion Linux Anaconda install method backend
#
#
# Copyright (C) 2010 Fabio Erculiani
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
# add Entropy module path to PYTHONPATH
sys.path.insert(0, '/usr/lib/entropy/libraries')
sys.path.insert(0, '/usr/lib/entropy/lib')
# Entropy Interface
from entropy.client.interfaces import Client
from entropy.output import nocolor
from entropy.fetchers import UrlFetcher
import entropy.tools
class Entropy(Client):
_progress = None
_oldcount = None
def init_singleton(self):
Client.init_singleton(self, xcache = False,
url_fetcher = InstallerUrlFetcher)
nocolor()
@classmethod
def connect_progress(cls, progress):
Entropy._progress = progress
@classmethod
def output(cls, text, header = "", footer = "", back = False,
importance = 0, level = "info", count = None, percent = False):
if not Entropy._progress:
return
if not Entropy._oldcount:
Entropy._oldcount = (0, 100)
progress_text = text
if len(progress_text) > 80:
progress_text = "%s..." % (progress_text[:80].rstrip(),)
if count:
try:
Entropy._oldcount = (int(count[0]), int(count[1]))
            except (ValueError, TypeError, IndexError):
Entropy._oldcount = (0, 100)
if not percent:
count_str = "(%s/%s) " % (str(count[0]),str(count[1]),)
progress_text = count_str+progress_text
percentage = float(Entropy._oldcount[0]) / Entropy._oldcount[1] * 100
percentage = round(percentage, 2)
Entropy._progress.set_fraction(percentage)
Entropy._progress.set_text(progress_text)
def is_installed(self, package_name):
match = self.installed_repository().atomMatch(package_name)
if match[0] != -1:
return True
return False
@staticmethod
def is_sabayon_mce():
with open("/proc/cmdline", "r") as cmd_f:
args = cmd_f.readline().strip().split()
for tstr in ("mceinstall", "sabayonmce"):
if tstr in args:
return True
return False
@staticmethod
def is_sabayon_steambox():
with open("/proc/cmdline", "r") as cmd_f:
args = cmd_f.readline().strip().split()
for tstr in ("steaminstall", "steambox"):
if tstr in args:
return True
return False
# in this way, any singleton class that tries to directly load Client
# gets Entropy in charge
Client.__singleton_class__ = Entropy
class InstallerUrlFetcher(UrlFetcher):
gui_last_avg = 0
def __init__(self, *args, **kwargs):
UrlFetcher.__init__(self, *args, **kwargs)
self.__average = 0
self.__downloadedsize = 0
self.__remotesize = 0
self.__datatransfer = 0
def handle_statistics(self, th_id, downloaded_size, total_size,
average, old_average, update_step, show_speed, data_transfer,
time_remaining, time_remaining_secs):
self.__average = average
self.__downloadedsize = downloaded_size
self.__remotesize = total_size
self.__datatransfer = data_transfer
def output(self):
""" backward compatibility """
return self.update()
def update(self):
myavg = abs(int(round(float(self.__average), 1)))
if abs((myavg - InstallerUrlFetcher.gui_last_avg)) < 1:
return
if (myavg > InstallerUrlFetcher.gui_last_avg) or (myavg < 2) or \
(myavg > 97):
cur_prog = float(self.__average)/100
cur_prog_str = str(int(self.__average))
human_dt = entropy.tools.bytes_into_human(self.__datatransfer)
prog_str = "%s/%s kB @ %s" % (
str(round(float(self.__downloadedsize)/1024, 1)),
str(round(self.__remotesize, 1)),
str(human_dt) + "/sec",
)
Entropy().output(prog_str)
InstallerUrlFetcher.gui_last_avg = myavg
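# Hedged usage sketch (not part of the original module): any object exposing
# set_fraction()/set_text() can act as the progress sink that Entropy.output()
# drives with a 0-100 percentage; this stand-in simply prints.
class _PrintProgress(object):
    def set_fraction(self, percentage):
        print "%.1f%%" % percentage
    def set_text(self, text):
        print text
# Entropy.connect_progress(_PrintProgress())
# Entropy().output("Installing packages", count=(3, 10))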
| Rogentos/rogentos-anaconda | sabayon/__init__.py | Python | gpl-2.0 | 4,742 |
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
"""externalsites.syncing.kaltura -- Sync subtitles to/from kaltura"""
from xml.dom import minidom
import requests
from externalsites.exceptions import SyncingError
from externalsites.syncing.kaltura_languages import KalturaLanguageMap
KALTURA_API_URL = 'http://www.kaltura.com/api_v3/'
SESSION_TYPE_USER = 0
SESSION_TYPE_ADMIN = 2
CAPTION_TYPE_DFXP = 2
CAPTION_TYPE_SRT = 1
CAPTION_TYPE_WEBVTT = 3
# partnerData value we set for subtitles that we've synced
PARTNER_DATA_TAG = 'synced-from-amara'
def _node_text(node):
return ''.join(child.nodeValue
for child in node.childNodes
if child.nodeType == child.TEXT_NODE)
def _find_child(node, tag_name):
return node.getElementsByTagName(tag_name)[0]
def _has_child(node, tag_name):
return len(node.getElementsByTagName(tag_name)) > 0
def _check_error(result):
"""Checks if we had an error result."""
if _has_child(result, 'error'):
error = _find_child(result, 'error')
code = _node_text(_find_child(error, 'code'))
message = _node_text(_find_child(error, 'message'))
raise SyncingError("%s: %s" % (code, message))
def _make_request(service, action, data):
params = { 'service': service, 'action': action, }
response = requests.post(KALTURA_API_URL, params=params, data=data)
dom = minidom.parseString(response.content)
try:
result = _find_child(dom, 'result')
except IndexError:
return None
_check_error(result)
return result
def _start_session(partner_id, secret):
result = _make_request('session', 'start', {
'secret': secret,
'partnerId': partner_id,
'type': SESSION_TYPE_ADMIN,
})
return _node_text(result)
def _end_session(ks):
_make_request('session', 'end', { 'ks': ks })
def _find_existing_captionset(ks, video_id, language_code):
language = KalturaLanguageMap.get_name(language_code)
result = _make_request('caption_captionasset', 'list', {
'ks': ks,
'filter:entryIdEqual': video_id,
})
objects = _find_child(result, 'objects')
for item in objects.getElementsByTagName('item'):
partner_data = _find_child(item, 'partnerData')
language_node = _find_child(item, 'language')
if (_node_text(partner_data) == PARTNER_DATA_TAG and
_node_text(language_node) == language):
return _node_text(_find_child(item, 'id'))
return None
def _add_captions(ks, video_id, language_code):
language = KalturaLanguageMap.get_name(language_code)
result = _make_request('caption_captionasset', 'add', {
'ks': ks,
'entryId': video_id,
'captionAsset:language': language,
'captionAsset:partnerData': PARTNER_DATA_TAG,
'captionAsset:format': CAPTION_TYPE_SRT,
'captionAsset:fileExt': 'srt',
})
return _node_text(_find_child(result, 'id'))
def _update_caption_content(ks, caption_id, sub_data):
_make_request('caption_captionasset', 'setcontent', {
'ks': ks,
'id': caption_id,
'contentResource:objectType': 'KalturaStringResource',
'contentResource:content': sub_data,
})
def _delete_captions(ks, caption_id):
_make_request('caption_captionasset', 'delete', {
'ks': ks,
'captionAssetId': caption_id,
})
def update_subtitles(partner_id, secret, video_id, language_code,
srt_data):
ks = _start_session(partner_id, secret)
try:
caption_id = _find_existing_captionset(ks, video_id, language_code)
if caption_id is None:
caption_id = _add_captions(ks, video_id, language_code)
_update_caption_content(ks, caption_id, srt_data)
finally:
_end_session(ks)
def delete_subtitles(partner_id, secret, video_id, language_code):
ks = _start_session(partner_id, secret)
try:
caption_id = _find_existing_captionset(ks, video_id, language_code)
if caption_id is not None:
_delete_captions(ks, caption_id)
finally:
_end_session(ks)
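# Hedged usage sketch (placeholder credentials and entry id, not real ones):
# def _example_sync():
#     srt = "1\n00:00:00,000 --> 00:00:02,000\nHello world\n\n"
#     update_subtitles(partner_id='000000', secret='admin-secret',
#                      video_id='0_abcdefgh', language_code='en',
#                      srt_data=srt)
#     delete_subtitles(partner_id='000000', secret='admin-secret',
#                      video_id='0_abcdefgh', language_code='en')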
| ofer43211/unisubs | apps/externalsites/syncing/kaltura.py | Python | agpl-3.0 | 4,831 |
# -*- coding: utf-8 -*-
"""
A collection of methods that can search for good parameters for the neural network
"""
__author__ = 'Sindre Nistad'
from Classifier.neural_network import ClassificationNet
from RegionOfInterest.regions_of_interest import RegionsOfInterest
folder = '../ASCII roi/'
normalized = 'normalizing/'
targets = ['master_r19_7_5_emissivity_sub', 'sb_r19_sub_sub_corrected', 'sb_r20_2011_rfl_sub',
'sb_r21_sub_sub_corrected', 'sb_r22_sub_sub_corrected_colored']
extension = '.txt'
def hidden_layer_to_input_output_layers(target_index, target_regions, target_ratios,
hidden_ratio_resolution, neighborhood_size):
"""
Method to see which parameter works best
:param target_index: The index of which regions of interest we are to use from the list 'targets'.
:param target_regions: The regions we are interested in, e.g. 'soil', or 'rock'.
:param target_ratios: The ratios of targets vs. background.
:param hidden_ratio_resolution: The ratio between hidden layers, and the sum of input and output layers.
    :param neighborhood_size: The size of the neighborhood.
:type target_index: int
:type target_regions: list of [str]
:type target_ratios: list of [float]
:type hidden_ratio_resolution: float
:type neighborhood_size: int
:return: The best neural network
:rtype: ClassificationNet
"""
path = folder + targets[target_index] + extension
normalized_path = folder + normalized + targets[target_index] + extension
rois = RegionsOfInterest(path, normalizing_path=normalized_path)
net = ClassificationNet(rois, neigborhood_size=neighborhood_size, hidden_layer=1,
targets=target_regions, targets_background_ration=target_ratios)
net.set_trainer()
net.train_network(max_epochs=15, verbose=True, continue_epochs=10, validation_proportion=0.25, force_split=True)
best = net
ratios = [n * hidden_ratio_resolution for n in range(1, int(1/hidden_ratio_resolution))]
for ratio in ratios:
net = ClassificationNet(rois, neigborhood_size=neighborhood_size, hidden_ratio=ratio,
targets=target_regions, targets_background_ration=target_ratios)
net.set_trainer()
net.train_network(max_epochs=15, verbose=True, continue_epochs=10, validation_proportion=0.25, force_split=True)
if net.is_better(best, best.data_set):
best = net
return best
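# Hedged usage sketch (illustrative arguments, not from the original module):
# best_net = hidden_layer_to_input_output_layers(
#     target_index=0,
#     target_regions=['soil', 'rock'],
#     target_ratios=[0.3, 0.3],
#     hidden_ratio_resolution=0.1,
#     neighborhood_size=3)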
| cLupus/neighborhood_classifier | src/Classifier/finding_parameters.py | Python | gpl-3.0 | 2,619 |
#!/usr/bin/env python
import sys
if len(sys.argv) < 4:
    print "Usage: Enter time needed for each portion of the code"
    print "    % overhead [<walltime>] <advance> <exchange> <regrid>"
    sys.exit()
if (len(sys.argv) == 4):
a = float(sys.argv[1])
e = float(sys.argv[2])
r = float(sys.argv[3])
o = r + e
t = r + e + a
print " "
print "%40s %6.1f%%" % ("ADVANCE as percentage of total",100.0*a/t)
print "%40s %6.1f%%" % ("EXCHANGE as percentage of total",100.0*e/t)
print "%40s %6.1f%%" % ("REGRID as percentage of total",100.0*r/t)
print " "
if o > a:
print "Exchange and regridding take %5.2f times as long as grid advances" % (o/a)
else:
print "Grid advances take %5.2f times as long as exchange and regridding" % (a/o)
print " "
else:
w = float(sys.argv[1])
a = float(sys.argv[2])
e = float(sys.argv[3])
r = float(sys.argv[4])
o = w - (a + e + r)
print " "
print "%40s %6.1f%%" % ("ADVANCE as percentage of wall time",100.0*a/w)
print "%40s %6.1f%%" % ("EXCHANGE as percentage of wall time",100.0*e/w)
print "%40s %6.1f%%" % ("REGRID as percentage of wall time",100.0*r/w)
print "%40s %6.1f%%" % ("OTHER as percentage of wall time",100.0*o/w)
print " "
| ForestClaw/forestclaw | scripts/overhead.py | Python | bsd-2-clause | 1,293 |
import re
import urllib
from repoze.bfg.traversal import find_model
from repoze.bfg.traversal import find_root
from repoze.bfg.traversal import model_path
from karl.content.interfaces import ICalendarCategory
from karl.content.interfaces import ICalendarEvent
from karl.content.interfaces import ICalendarLayer
from karl.models.interfaces import ICatalogSearch
from karl.models.subscribers import index_content
from karl.models.subscribers import reindex_content
from karl.utilities.randomid import unfriendly_random_id
def evolve(context):
root = find_root(context)
searcher = ICatalogSearch(root)
categories_and_layers_query = dict(
interfaces={'query':[ICalendarCategory, ICalendarLayer],
'operator':'or'},
)
total, docids, resolver = searcher(**categories_and_layers_query)
changed_categories = {}
for docid in docids:
ob = resolver(docid)
if ob is None:
# slash in path, likely
path = root.catalog.document_map.docid_to_address.get(docid)
if path is not None:
container_path, ob_name = path.rsplit('/', 1)
container = find_model(root, container_path)
name = urllib.unquote(ob_name)
ob = container.get(name)
if ob is not None:
ob_name = ob.__name__
if not (ob_name.startswith('_default_') or is_opaque_id(ob_name)):
old_path = model_path(ob)
container = ob.__parent__
new_name = generate_name(container)
del container[ob_name]
ob.__name__ = new_name # XXX required?
container.add(new_name, ob, send_events=False)
new_path = model_path(ob)
index_content(ob, None)
print 'path fixed: %s -> %s' % (old_path, new_path)
if ICalendarCategory.providedBy(ob):
changed_categories[old_path] = new_path
if changed_categories:
# fix layer.paths
layers_query = dict(interfaces=[ICalendarLayer])
total, docids, resolver = searcher(**layers_query)
for docid in docids:
layer = resolver(docid)
if layer is not None:
new_paths = []
changed = []
for path in layer.paths:
if path in changed_categories:
new_paths.append(changed_categories[path])
changed.append((path, changed_categories[path]))
else:
new_paths.append(path)
if changed:
layer.paths = new_paths
reindex_content(layer, None)
print 'layer fixed: %s, %s' % (
model_path(layer), [ '%s -> %s' % x for x in changed ])
# fix the category of events
events_query = dict(
interfaces=[ICalendarEvent],
)
total, docids, resolver = searcher(**events_query)
for docid in docids:
event = resolver(docid)
if event is not None:
category = event.calendar_category
if category in changed_categories:
old_category = event.calendar_category
new_category = changed_categories[category]
event.calendar_category = new_category
reindex_content(event, None)
print 'event fixed: %s, %s -> %s' % (
model_path(event),
old_category,
new_category)
def generate_name(context):
while True:
name = unfriendly_random_id()
if not (name in context):
return name
opaque_id = re.compile(r'^[A-Z0-9]{10}$')
is_opaque_id = opaque_id.match
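# Hedged sketch: is_opaque_id accepts exactly ten upper-case alphanumerics,
# i.e. the shape of ids this evolve step assigns, e.g.:
# assert is_opaque_id('A1B2C3D4E5')
# assert not is_opaque_id('_default_layer')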
| boothead/karl | karl/evolve/zodb/evolve4.py | Python | gpl-2.0 | 3,893 |
import planckStyle as s
g = s.getPlotter()
g.settings.lineM = ['-k', '-r', '-b', '-g', '-m', '--c']
labels = [s.planckTT, s.NoLowLE, s.planckTT + '+lensing', s.NoLowLE + '+lensing', s.NoLowLE + '+lensing+BAO',
s.planckTTlowTEB]
roots = [s.defdata_TTonly,
s.defdata_allNoLowE,
s.defdata_TTonly + '_lensing',
s.defdata_allNoLowE + '_lensing',
s.defdata_allNoLowE + '_lensing_post_BAO',
s.defdata,
]
roots = ['base_' + root for root in roots]
g.settings.legend_frac_subplot_margin = 0.05
g.plots_1d(roots, ['omegabh2', 'thetastar', 'A', 'tau', 'omegam', 'omegach2', 'ns', 'sigma8', 'zrei', 'H0'], nx=5,
legend_ncol=len(roots), legend_labels=labels, share_y=True)
g.export()
| ClaudioNahmad/Servicio-Social | Parametros/CosmoMC/CosmoMC-master/batch2/outputs/tau_compare.py | Python | gpl-3.0 | 753 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import inspect
import linecache
import sys
import textwrap
import tokenize
import warnings
from ast import PyCF_ONLY_AST as _AST_FLAG
from bisect import bisect_right
import py
import six
class Source(object):
""" an immutable object holding a source code fragment,
possibly deindenting it.
"""
_compilecounter = 0
def __init__(self, *parts, **kwargs):
self.lines = lines = []
de = kwargs.get("deindent", True)
for part in parts:
if not part:
partlines = []
elif isinstance(part, Source):
partlines = part.lines
elif isinstance(part, (tuple, list)):
partlines = [x.rstrip("\n") for x in part]
elif isinstance(part, six.string_types):
partlines = part.split("\n")
else:
partlines = getsource(part, deindent=de).lines
if de:
partlines = deindent(partlines)
lines.extend(partlines)
def __eq__(self, other):
try:
return self.lines == other.lines
except AttributeError:
if isinstance(other, str):
return str(self) == other
return False
__hash__ = None
def __getitem__(self, key):
if isinstance(key, int):
return self.lines[key]
else:
if key.step not in (None, 1):
raise IndexError("cannot slice a Source with a step")
newsource = Source()
newsource.lines = self.lines[key.start : key.stop]
return newsource
def __len__(self):
return len(self.lines)
def strip(self):
""" return new source object with trailing
and leading blank lines removed.
"""
start, end = 0, len(self)
while start < end and not self.lines[start].strip():
start += 1
while end > start and not self.lines[end - 1].strip():
end -= 1
source = Source()
source.lines[:] = self.lines[start:end]
return source
def putaround(self, before="", after="", indent=" " * 4):
""" return a copy of the source object with
'before' and 'after' wrapped around it.
"""
before = Source(before)
after = Source(after)
newsource = Source()
lines = [(indent + line) for line in self.lines]
newsource.lines = before.lines + lines + after.lines
return newsource
def indent(self, indent=" " * 4):
""" return a copy of the source object with
all lines indented by the given indent-string.
"""
newsource = Source()
newsource.lines = [(indent + line) for line in self.lines]
return newsource
def getstatement(self, lineno):
""" return Source statement which contains the
given linenumber (counted from 0).
"""
start, end = self.getstatementrange(lineno)
return self[start:end]
def getstatementrange(self, lineno):
""" return (start, end) tuple which spans the minimal
statement region which containing the given lineno.
"""
if not (0 <= lineno < len(self)):
raise IndexError("lineno out of range")
ast, start, end = getstatementrange_ast(lineno, self)
return start, end
def deindent(self):
"""return a new source object deindented."""
newsource = Source()
newsource.lines[:] = deindent(self.lines)
return newsource
def isparseable(self, deindent=True):
""" return True if source is parseable, heuristically
deindenting it by default.
"""
from parser import suite as syntax_checker
if deindent:
source = str(self.deindent())
else:
source = str(self)
try:
# compile(source+'\n', "x", "exec")
syntax_checker(source + "\n")
except KeyboardInterrupt:
raise
except Exception:
return False
else:
return True
def __str__(self):
return "\n".join(self.lines)
def compile(
self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None
):
""" return compiled code object. if filename is None
invent an artificial filename which displays
the source/line position of the caller frame.
"""
if not filename or py.path.local(filename).check(file=0):
if _genframe is None:
_genframe = sys._getframe(1) # the caller
fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno
base = "<%d-codegen " % self._compilecounter
self.__class__._compilecounter += 1
if not filename:
filename = base + "%s:%d>" % (fn, lineno)
else:
filename = base + "%r %s:%d>" % (filename, fn, lineno)
source = "\n".join(self.lines) + "\n"
try:
co = compile(source, filename, mode, flag)
except SyntaxError:
ex = sys.exc_info()[1]
# re-represent syntax errors from parsing python strings
msglines = self.lines[: ex.lineno]
if ex.offset:
msglines.append(" " * ex.offset + "^")
msglines.append("(code was compiled probably from here: %s)" % filename)
newex = SyntaxError("\n".join(msglines))
newex.offset = ex.offset
newex.lineno = ex.lineno
newex.text = ex.text
raise newex
else:
if flag & _AST_FLAG:
return co
lines = [(x + "\n") for x in self.lines]
linecache.cache[filename] = (1, None, lines, filename)
return co
#
# public API shortcut functions
#
def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0):
""" compile the given source to a raw code object,
and maintain an internal cache which allows later
retrieval of the source code for the code object
and any recursively created code objects.
"""
if isinstance(source, ast.AST):
# XXX should Source support having AST?
return compile(source, filename, mode, flags, dont_inherit)
_genframe = sys._getframe(1) # the caller
s = Source(source)
co = s.compile(filename, mode, flags, _genframe=_genframe)
return co
def getfslineno(obj):
""" Return source location (path, lineno) for the given object.
If the source cannot be determined return ("", -1)
"""
from .code import Code
try:
code = Code(obj)
except TypeError:
try:
fn = inspect.getsourcefile(obj) or inspect.getfile(obj)
except TypeError:
return "", -1
fspath = fn and py.path.local(fn) or None
lineno = -1
if fspath:
try:
_, lineno = findsource(obj)
except IOError:
pass
else:
fspath = code.path
lineno = code.firstlineno
assert isinstance(lineno, int)
return fspath, lineno
#
# helper functions
#
def findsource(obj):
try:
sourcelines, lineno = inspect.findsource(obj)
except py.builtin._sysex:
raise
except: # noqa
return None, -1
source = Source()
source.lines = [line.rstrip() for line in sourcelines]
return source, lineno
def getsource(obj, **kwargs):
from .code import getrawcode
obj = getrawcode(obj)
try:
strsrc = inspect.getsource(obj)
except IndentationError:
strsrc = '"Buggy python version consider upgrading, cannot get source"'
assert isinstance(strsrc, str)
return Source(strsrc, **kwargs)
def deindent(lines):
return textwrap.dedent("\n".join(lines)).splitlines()
def get_statement_startend2(lineno, node):
import ast
# flatten all statements and except handlers into one lineno-list
# AST's line numbers start indexing at 1
values = []
for x in ast.walk(node):
if isinstance(x, (ast.stmt, ast.ExceptHandler)):
values.append(x.lineno - 1)
for name in ("finalbody", "orelse"):
val = getattr(x, name, None)
if val:
# treat the finally/orelse part as its own statement
values.append(val[0].lineno - 1 - 1)
values.sort()
insert_index = bisect_right(values, lineno)
start = values[insert_index - 1]
if insert_index >= len(values):
end = None
else:
end = values[insert_index]
return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
if astnode is None:
content = str(source)
# See #4260:
# don't produce duplicate warnings when compiling source to find ast
with warnings.catch_warnings():
warnings.simplefilter("ignore")
astnode = compile(content, "source", "exec", _AST_FLAG)
start, end = get_statement_startend2(lineno, astnode)
# we need to correct the end:
# - ast-parsing strips comments
# - there might be empty lines
# - we might have lesser indented code blocks at the end
if end is None:
end = len(source.lines)
if end > start + 1:
# make sure we don't span differently indented code blocks
# by using the BlockFinder helper used which inspect.getsource() uses itself
block_finder = inspect.BlockFinder()
# if we start with an indented line, put blockfinder to "started" mode
block_finder.started = source.lines[start][0].isspace()
it = ((x + "\n") for x in source.lines[start:end])
try:
for tok in tokenize.generate_tokens(lambda: next(it)):
block_finder.tokeneater(*tok)
except (inspect.EndOfBlock, IndentationError):
end = block_finder.last + start
except Exception:
pass
# the end might still point to a comment or empty line, correct it
while end:
line = source.lines[end - 1].lstrip()
if line.startswith("#") or not line:
end -= 1
else:
break
return astnode, start, end
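# Hedged usage sketch of the helpers above (not part of the original module):
# source = Source("def f(x):\n    return x + 1")
# source.indent()              # same fragment, indented four spaces
# source.getstatement(1)       # statement containing line 1 (0-based)
# co = compile_(str(source))   # code object whose source stays retrievable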
| txomon/pytest | src/_pytest/_code/source.py | Python | mit | 10,430 |
import operator
import weakref
import pymongo
from bson import SON, DBRef, ObjectId
from mongoengine.base.common import UPDATE_OPERATORS
from mongoengine.base.datastructures import (
BaseDict,
BaseList,
EmbeddedDocumentList,
)
from mongoengine.common import _import_class
from mongoengine.errors import DeprecatedError, ValidationError
__all__ = ("BaseField", "ComplexBaseField", "ObjectIdField", "GeoJsonBaseField")
class BaseField:
"""A base class for fields in a MongoDB document. Instances of this class
may be added to subclasses of `Document` to define a document's schema.
"""
name = None # set in TopLevelDocumentMetaclass
_geo_index = False
_auto_gen = False # Call `generate` to generate a value
_auto_dereference = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that MongoEngine implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
def __init__(
self,
db_field=None,
required=False,
default=None,
unique=False,
unique_with=None,
primary_key=False,
validation=None,
choices=None,
null=False,
sparse=False,
**kwargs,
):
"""
:param db_field: The database field to store this field in
(defaults to the name of the field)
:param required: If the field is required. Whether it has to have a
value or not. Defaults to False.
:param default: (optional) The default value for this field if no value
has been set (or if the value has been unset). It can be a
callable.
:param unique: Is the field value unique or not. Defaults to False.
:param unique_with: (optional) The other field this field should be
unique with.
:param primary_key: Mark this field as the primary key. Defaults to False.
:param validation: (optional) A callable to validate the value of the
field. The callable takes the value as parameter and should raise
a ValidationError if validation fails
:param choices: (optional) The valid choices
:param null: (optional) If the field value can be null. If no and there is a default value
then the default value is set
:param sparse: (optional) `sparse=True` combined with `unique=True` and `required=False`
means that uniqueness won't be enforced for `None` values
:param **kwargs: (optional) Arbitrary indirection-free metadata for
this field can be supplied as additional keyword arguments and
accessed as attributes of the field. Must not conflict with any
existing attributes. Common metadata includes `verbose_name` and
`help_text`.
"""
self.db_field = db_field if not primary_key else "_id"
self.required = required or primary_key
self.default = default
self.unique = bool(unique or unique_with)
self.unique_with = unique_with
self.primary_key = primary_key
self.validation = validation
self.choices = choices
self.null = null
self.sparse = sparse
self._owner_document = None
# Make sure db_field is a string (if it's explicitly defined).
if self.db_field is not None and not isinstance(self.db_field, str):
raise TypeError("db_field should be a string.")
# Make sure db_field doesn't contain any forbidden characters.
if isinstance(self.db_field, str) and (
"." in self.db_field
or "\0" in self.db_field
or self.db_field.startswith("$")
):
raise ValueError(
'field names cannot contain dots (".") or null characters '
'("\\0"), and they must not start with a dollar sign ("$").'
)
# Detect and report conflicts between metadata and base properties.
conflicts = set(dir(self)) & set(kwargs)
if conflicts:
raise TypeError(
"%s already has attribute(s): %s"
% (self.__class__.__name__, ", ".join(conflicts))
)
# Assign metadata to the instance
# This efficient method is available because no __slots__ are defined.
self.__dict__.update(kwargs)
# Adjust the appropriate creation counter, and save our local copy.
if self.db_field == "_id":
self.creation_counter = BaseField.auto_creation_counter
BaseField.auto_creation_counter -= 1
else:
self.creation_counter = BaseField.creation_counter
BaseField.creation_counter += 1
def __get__(self, instance, owner):
"""Descriptor for retrieving a value from a field in a document."""
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
return instance._data.get(self.name)
def __set__(self, instance, value):
"""Descriptor for assigning a value to a field in a document."""
# If setting to None and there is a default value provided for this
# field, then set the value to the default value.
if value is None:
if self.null:
value = None
elif self.default is not None:
value = self.default
if callable(value):
value = value()
if instance._initialised:
try:
value_has_changed = (
self.name not in instance._data
or instance._data[self.name] != value
)
if value_has_changed:
instance._mark_as_changed(self.name)
except Exception:
# Some values can't be compared and throw an error when we
# attempt to do so (e.g. tz-naive and tz-aware datetimes).
# Mark the field as changed in such cases.
instance._mark_as_changed(self.name)
EmbeddedDocument = _import_class("EmbeddedDocument")
if isinstance(value, EmbeddedDocument):
value._instance = weakref.proxy(instance)
elif isinstance(value, (list, tuple)):
for v in value:
if isinstance(v, EmbeddedDocument):
v._instance = weakref.proxy(instance)
instance._data[self.name] = value
def error(self, message="", errors=None, field_name=None):
"""Raise a ValidationError."""
field_name = field_name if field_name else self.name
raise ValidationError(message, errors=errors, field_name=field_name)
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
return value
def to_mongo(self, value):
"""Convert a Python type to a MongoDB-compatible type."""
return self.to_python(value)
def _to_mongo_safe_call(self, value, use_db_field=True, fields=None):
"""Helper method to call to_mongo with proper inputs."""
f_inputs = self.to_mongo.__code__.co_varnames
ex_vars = {}
if "fields" in f_inputs:
ex_vars["fields"] = fields
if "use_db_field" in f_inputs:
ex_vars["use_db_field"] = use_db_field
return self.to_mongo(value, **ex_vars)
def prepare_query_value(self, op, value):
"""Prepare a value that is being used in a query for PyMongo."""
if op in UPDATE_OPERATORS:
self.validate(value)
return value
def validate(self, value, clean=True):
"""Perform validation on a value."""
pass
def _validate_choices(self, value):
Document = _import_class("Document")
EmbeddedDocument = _import_class("EmbeddedDocument")
choice_list = self.choices
if isinstance(next(iter(choice_list)), (list, tuple)):
# next(iter) is useful for sets
choice_list = [k for k, _ in choice_list]
# Choices which are other types of Documents
if isinstance(value, (Document, EmbeddedDocument)):
if not any(isinstance(value, c) for c in choice_list):
self.error("Value must be an instance of %s" % (choice_list))
# Choices which are types other than Documents
else:
values = value if isinstance(value, (list, tuple)) else [value]
if len(set(values) - set(choice_list)):
self.error("Value must be one of %s" % str(choice_list))
def _validate(self, value, **kwargs):
# Check the Choices Constraint
if self.choices:
self._validate_choices(value)
# check validation argument
if self.validation is not None:
if callable(self.validation):
try:
# breaking change of 0.18
# Get rid of True/False-type return for the validation method
# in favor of having validation raising a ValidationError
ret = self.validation(value)
if ret is not None:
raise DeprecatedError(
"validation argument for `%s` must not return anything, "
"it should raise a ValidationError if validation fails"
% self.name
)
except ValidationError as ex:
self.error(str(ex))
else:
raise ValueError(
'validation argument for `"%s"` must be a ' "callable." % self.name
)
self.validate(value, **kwargs)
@property
def owner_document(self):
return self._owner_document
def _set_owner_document(self, owner_document):
self._owner_document = owner_document
@owner_document.setter
def owner_document(self, owner_document):
self._set_owner_document(owner_document)
class ComplexBaseField(BaseField):
"""Handles complex fields, such as lists / dictionaries.
Allows for nesting of embedded documents inside complex types.
Handles the lazy dereferencing of a queryset by lazily dereferencing all
items in a list / dict rather than one at a time.
"""
def __init__(self, field=None, **kwargs):
self.field = field
super().__init__(**kwargs)
@staticmethod
def _lazy_load_refs(instance, name, ref_values, *, max_depth):
_dereference = _import_class("DeReference")()
documents = _dereference(
ref_values,
max_depth=max_depth,
instance=instance,
name=name,
)
return documents
def __get__(self, instance, owner):
"""Descriptor to automatically dereference references."""
if instance is None:
# Document class being used rather than a document object
return self
ReferenceField = _import_class("ReferenceField")
GenericReferenceField = _import_class("GenericReferenceField")
EmbeddedDocumentListField = _import_class("EmbeddedDocumentListField")
auto_dereference = instance._fields[self.name]._auto_dereference
dereference = auto_dereference and (
self.field is None
or isinstance(self.field, (GenericReferenceField, ReferenceField))
)
if (
instance._initialised
and dereference
and instance._data.get(self.name)
and not getattr(instance._data[self.name], "_dereferenced", False)
):
ref_values = instance._data.get(self.name)
instance._data[self.name] = self._lazy_load_refs(
ref_values=ref_values, instance=instance, name=self.name, max_depth=1
)
if hasattr(instance._data[self.name], "_dereferenced"):
instance._data[self.name]._dereferenced = True
value = super().__get__(instance, owner)
# Convert lists / values so we can watch for any changes on them
if isinstance(value, (list, tuple)):
if issubclass(type(self), EmbeddedDocumentListField) and not isinstance(
value, EmbeddedDocumentList
):
value = EmbeddedDocumentList(value, instance, self.name)
elif not isinstance(value, BaseList):
value = BaseList(value, instance, self.name)
instance._data[self.name] = value
elif isinstance(value, dict) and not isinstance(value, BaseDict):
value = BaseDict(value, instance, self.name)
instance._data[self.name] = value
if (
auto_dereference
and instance._initialised
and isinstance(value, (BaseList, BaseDict))
and not value._dereferenced
):
value = self._lazy_load_refs(
ref_values=value, instance=instance, name=self.name, max_depth=1
)
value._dereferenced = True
instance._data[self.name] = value
return value
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
if isinstance(value, str):
return value
if hasattr(value, "to_python"):
return value.to_python()
BaseDocument = _import_class("BaseDocument")
if isinstance(value, BaseDocument):
# Something is wrong, return the value as it is
return value
is_list = False
if not hasattr(value, "items"):
try:
is_list = True
value = {idx: v for idx, v in enumerate(value)}
except TypeError: # Not iterable return the value
return value
if self.field:
self.field._auto_dereference = self._auto_dereference
value_dict = {
key: self.field.to_python(item) for key, item in value.items()
}
else:
Document = _import_class("Document")
value_dict = {}
for k, v in value.items():
if isinstance(v, Document):
# We need the id from the saved object to create the DBRef
if v.pk is None:
self.error(
"You can only reference documents once they"
" have been saved to the database"
)
collection = v._get_collection_name()
value_dict[k] = DBRef(collection, v.pk)
elif hasattr(v, "to_python"):
value_dict[k] = v.to_python()
else:
value_dict[k] = self.to_python(v)
if is_list: # Convert back to a list
return [
v for _, v in sorted(value_dict.items(), key=operator.itemgetter(0))
]
return value_dict
def to_mongo(self, value, use_db_field=True, fields=None):
"""Convert a Python type to a MongoDB-compatible type."""
Document = _import_class("Document")
EmbeddedDocument = _import_class("EmbeddedDocument")
GenericReferenceField = _import_class("GenericReferenceField")
if isinstance(value, str):
return value
if hasattr(value, "to_mongo"):
if isinstance(value, Document):
return GenericReferenceField().to_mongo(value)
cls = value.__class__
val = value.to_mongo(use_db_field, fields)
# If it's a document that is not inherited add _cls
if isinstance(value, EmbeddedDocument):
val["_cls"] = cls.__name__
return val
is_list = False
if not hasattr(value, "items"):
try:
is_list = True
value = {k: v for k, v in enumerate(value)}
except TypeError: # Not iterable return the value
return value
if self.field:
value_dict = {
key: self.field._to_mongo_safe_call(item, use_db_field, fields)
for key, item in value.items()
}
else:
value_dict = {}
for k, v in value.items():
if isinstance(v, Document):
# We need the id from the saved object to create the DBRef
if v.pk is None:
self.error(
"You can only reference documents once they"
" have been saved to the database"
)
# If its a document that is not inheritable it won't have
# any _cls data so make it a generic reference allows
# us to dereference
meta = getattr(v, "_meta", {})
allow_inheritance = meta.get("allow_inheritance")
if not allow_inheritance and not self.field:
value_dict[k] = GenericReferenceField().to_mongo(v)
else:
collection = v._get_collection_name()
value_dict[k] = DBRef(collection, v.pk)
elif hasattr(v, "to_mongo"):
cls = v.__class__
val = v.to_mongo(use_db_field, fields)
# If it's a document that is not inherited add _cls
if isinstance(v, (Document, EmbeddedDocument)):
val["_cls"] = cls.__name__
value_dict[k] = val
else:
value_dict[k] = self.to_mongo(v, use_db_field, fields)
if is_list: # Convert back to a list
return [
v for _, v in sorted(value_dict.items(), key=operator.itemgetter(0))
]
return value_dict
def validate(self, value):
"""If field is provided ensure the value is valid."""
errors = {}
if self.field:
if hasattr(value, "items"):
sequence = value.items()
else:
sequence = enumerate(value)
for k, v in sequence:
try:
self.field._validate(v)
except ValidationError as error:
errors[k] = error.errors or error
except (ValueError, AssertionError) as error:
errors[k] = error
if errors:
field_class = self.field.__class__.__name__
self.error(f"Invalid {field_class} item ({value})", errors=errors)
# Don't allow empty values if required
if self.required and not value:
self.error("Field is required and cannot be empty")
def prepare_query_value(self, op, value):
return self.to_mongo(value)
def lookup_member(self, member_name):
if self.field:
return self.field.lookup_member(member_name)
return None
def _set_owner_document(self, owner_document):
if self.field:
self.field.owner_document = owner_document
self._owner_document = owner_document
class ObjectIdField(BaseField):
"""A field wrapper around MongoDB's ObjectIds."""
def to_python(self, value):
try:
if not isinstance(value, ObjectId):
value = ObjectId(value)
except Exception:
pass
return value
def to_mongo(self, value):
if not isinstance(value, ObjectId):
try:
return ObjectId(str(value))
except Exception as e:
self.error(str(e))
return value
def prepare_query_value(self, op, value):
return self.to_mongo(value)
def validate(self, value):
try:
ObjectId(str(value))
except Exception:
self.error("Invalid ObjectID")
class GeoJsonBaseField(BaseField):
"""A geo json field storing a geojson style object."""
_geo_index = pymongo.GEOSPHERE
_type = "GeoBase"
def __init__(self, auto_index=True, *args, **kwargs):
"""
:param bool auto_index: Automatically create a '2dsphere' index.\
Defaults to `True`.
"""
self._name = "%sField" % self._type
if not auto_index:
self._geo_index = False
super().__init__(*args, **kwargs)
def validate(self, value):
"""Validate the GeoJson object based on its type."""
if isinstance(value, dict):
if set(value.keys()) == {"type", "coordinates"}:
if value["type"] != self._type:
self.error(f'{self._name} type must be "{self._type}"')
return self.validate(value["coordinates"])
else:
self.error(
"%s can only accept a valid GeoJson dictionary"
" or lists of (x, y)" % self._name
)
return
elif not isinstance(value, (list, tuple)):
self.error("%s can only accept lists of [x, y]" % self._name)
return
validate = getattr(self, "_validate_%s" % self._type.lower())
error = validate(value)
if error:
self.error(error)
def _validate_polygon(self, value, top_level=True):
if not isinstance(value, (list, tuple)):
return "Polygons must contain list of linestrings"
# Quick and dirty validator
try:
value[0][0][0]
except (TypeError, IndexError):
return "Invalid Polygon must contain at least one valid linestring"
errors = []
for val in value:
error = self._validate_linestring(val, False)
if not error and val[0] != val[-1]:
error = "LineStrings must start and end at the same point"
if error and error not in errors:
errors.append(error)
if errors:
if top_level:
return "Invalid Polygon:\n%s" % ", ".join(errors)
else:
return "%s" % ", ".join(errors)
def _validate_linestring(self, value, top_level=True):
"""Validate a linestring."""
if not isinstance(value, (list, tuple)):
return "LineStrings must contain list of coordinate pairs"
# Quick and dirty validator
try:
value[0][0]
except (TypeError, IndexError):
return "Invalid LineString must contain at least one valid point"
errors = []
for val in value:
error = self._validate_point(val)
if error and error not in errors:
errors.append(error)
if errors:
if top_level:
return "Invalid LineString:\n%s" % ", ".join(errors)
else:
return "%s" % ", ".join(errors)
def _validate_point(self, value):
"""Validate each set of coords"""
if not isinstance(value, (list, tuple)):
return "Points must be a list of coordinate pairs"
elif not len(value) == 2:
return "Value (%s) must be a two-dimensional point" % repr(value)
elif not isinstance(value[0], (float, int)) or not isinstance(
value[1], (float, int)
):
return "Both values (%s) in point must be float or int" % repr(value)
def _validate_multipoint(self, value):
if not isinstance(value, (list, tuple)):
return "MultiPoint must be a list of Point"
# Quick and dirty validator
try:
value[0][0]
except (TypeError, IndexError):
return "Invalid MultiPoint must contain at least one valid point"
errors = []
for point in value:
error = self._validate_point(point)
if error and error not in errors:
errors.append(error)
if errors:
return "%s" % ", ".join(errors)
def _validate_multilinestring(self, value, top_level=True):
if not isinstance(value, (list, tuple)):
return "MultiLineString must be a list of LineString"
# Quick and dirty validator
try:
value[0][0][0]
except (TypeError, IndexError):
return "Invalid MultiLineString must contain at least one valid linestring"
errors = []
for linestring in value:
error = self._validate_linestring(linestring, False)
if error and error not in errors:
errors.append(error)
if errors:
if top_level:
return "Invalid MultiLineString:\n%s" % ", ".join(errors)
else:
return "%s" % ", ".join(errors)
def _validate_multipolygon(self, value):
if not isinstance(value, (list, tuple)):
return "MultiPolygon must be a list of Polygon"
# Quick and dirty validator
try:
value[0][0][0][0]
except (TypeError, IndexError):
return "Invalid MultiPolygon must contain at least one valid Polygon"
errors = []
for polygon in value:
error = self._validate_polygon(polygon, False)
if error and error not in errors:
errors.append(error)
if errors:
return "Invalid MultiPolygon:\n%s" % ", ".join(errors)
def to_mongo(self, value):
if isinstance(value, dict):
return value
return SON([("type", self._type), ("coordinates", value)])
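# Hedged sketch (not part of the library): a minimal custom field built on
# BaseField; the upper-casing rule is purely illustrative. to_mongo() falls
# back to to_python() via the BaseField default.
class _UpperStringField(BaseField):
    def to_python(self, value):
        # Normalize whatever the database returns to an upper-case string.
        return str(value).upper()
    def validate(self, value):
        if not isinstance(value, str):
            self.error("_UpperStringField only accepts strings")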
| MongoEngine/mongoengine | mongoengine/base/fields.py | Python | mit | 25,864 |
import json
import sys
from sys import stdout
#takes a series id, returning an object containing its attributes iff the series has info about race, education, age, and/or sex
def parseSeriesId(l):
series = {}
l = l.split('\t')
if (int(l[1]) == 40 and l[2] == 'M' and int(l[5]) == 0 and int(l[17]) == 0 and int(l[31]) == 0 and int(l[29]) == 0 and int(l[7]) == 0 and int(l[14]) == 0 and int(l[19]) == 0):
        origin = int(l[20])
        if (origin == 0):
            race = int(l[22])
        elif (origin == 1):
            race = 5
        else:
            race = 6
age = int(l[6])
education = int(l[9])
id = l[0]
if ((age == 10 or age == 28) and (race < 7) and (education != 34) and l[0][2] == 'U'):
series['id'] = l[0].strip()
series['description'] = l[3]
series['age'] = age
series['edu'] = education
series['sex'] = int(l[27])
series['race'] = race
series['data'] = []
return series
#short list of series without ages, saved ahead of time in order to more easily exclude all series outside the 16-24 and 25+ age groups
serieses = [];
validIds = [];
ageLess = json.load(open('ageLess.json'))
for default in ageLess:
serieses.append(default)
validIds.append(default['id'])
#looks through list of every series, creating an array of objects returned from parseSeriesId
f = open('ln.series')
lines = f.readlines()
for l in lines:
series = parseSeriesId(l)
if 'id' in series:
serieses.append(series)
validIds.append(series['id'])
#scans every BLS data point, recording those that match one of the previosly found series
f = open('ln.data.1.AllData')
lines = f.readlines()
for i in range(4180000, len(lines)): #first useful entry starts after line 4180000
line = lines[i].split('\t')
stdout.write("\r%d" % i)
stdout.flush()
id = line[0].strip()
    #saves the data on the line if it has a valid id, is after 1990, and not a year average
if id in validIds and int(line[1])>1990 and int(line[2][1:3]) != 13:
index = validIds.index(id)
temp = {}
temp['t'] = line[1] + line[2][1:3] #time of entry
temp['v'] = float(line[3].strip()) #data value of entry
serieses[index]['data'].append(temp)
print len(serieses)
#saves the result to disk
json.dump(serieses, open('serieses.json', 'wb'))
| xqin1/unemployment | parseBLS.py | Python | mit | 2,188 |
# -*- coding: utf-8 -*-
"""Shared functionality for dtFabric-based data format parsers."""
import abc
import os
from dtfabric import errors as dtfabric_errors
from dtfabric.runtime import data_maps as dtfabric_data_maps
from dtfabric.runtime import fabric as dtfabric_fabric
from plaso.lib import errors
from plaso.parsers import interface
class DtFabricBaseParser(interface.FileObjectParser):
"""Shared functionality for dtFabric-based data format parsers.
A dtFabric-based data format parser defines its data format structures
in dtFabric definition file, for example "dtfabric.yaml":
name: int32
type: integer
description: 32-bit signed integer type
attributes:
format: signed
size: 4
units: bytes
---
name: point3d
aliases: [POINT]
type: structure
description: Point in 3 dimensional space.
attributes:
byte_order: little-endian
members:
- name: x
aliases: [XCOORD]
data_type: int32
- name: y
data_type: int32
- name: z
data_type: int32
The path to the definition file is defined in the class constant
"_DEFINITION_FILE" and will be read on class instantiation.
The definition files contains data type definitions such as "int32" and
"point3d" in the previous example.
A data type map can be used to create a Python object that represent the
data type definition mapped to a byte stream, for example if we have the
following byte stream: 01 00 00 00 02 00 00 00 03 00 00 00
The corresponding "point3d" Python object would be: point3d(x=1, y=2, z=3)
A parser that wants to implement a dtFabric-based data format parser needs to:
* define a definition file and override _DEFINITION_FILE;
* implement the ParseFileObject method.
The _GetDataTypeMap method of this class can be used to retrieve data type
maps from the "fabric", which is the collection of the data type definitions
in definition file. Data type maps are cached for reuse.
The _ReadStructure method of this class can be used to read structure data
from a file-like object and create a Python object using a data type map.
"""
# The dtFabric definition file, which must be overwritten by a subclass.
_DEFINITION_FILE = None
# Preserve the absolute path value of __file__ in case it is changed
# at run-time.
_DEFINITION_FILES_PATH = os.path.dirname(__file__)
def __init__(self):
"""Initializes a dtFabric-based data format parser."""
super(DtFabricBaseParser, self).__init__()
self._data_type_maps = {}
self._fabric = self._ReadDefinitionFile(self._DEFINITION_FILE)
def _FormatPackedIPv4Address(self, packed_ip_address):
"""Formats a packed IPv4 address as a human readable string.
Args:
packed_ip_address (list[int]): packed IPv4 address.
Returns:
str: human readable IPv4 address.
"""
return '.'.join(['{0:d}'.format(octet) for octet in packed_ip_address[:4]])
def _FormatPackedIPv6Address(self, packed_ip_address):
"""Formats a packed IPv6 address as a human readable string.
Args:
packed_ip_address (list[int]): packed IPv6 address.
Returns:
str: human readable IPv6 address.
"""
# Note that socket.inet_ntop() is not supported on Windows.
octet_pairs = zip(packed_ip_address[0::2], packed_ip_address[1::2])
octet_pairs = [octet1 << 8 | octet2 for octet1, octet2 in octet_pairs]
# TODO: omit ":0000" from the string.
return ':'.join([
'{0:04x}'.format(octet_pair) for octet_pair in octet_pairs])
def _GetDataTypeMap(self, name):
"""Retrieves a data type map defined by the definition file.
The data type maps are cached for reuse.
Args:
name (str): name of the data type as defined by the definition file.
Returns:
dtfabric.DataTypeMap: data type map which contains a data type definition,
such as a structure, that can be mapped onto binary data.
"""
data_type_map = self._data_type_maps.get(name, None)
if not data_type_map:
data_type_map = self._fabric.CreateDataTypeMap(name)
self._data_type_maps[name] = data_type_map
return data_type_map
def _ReadData(self, file_object, file_offset, data_size):
"""Reads data.
Args:
file_object (dvfvs.FileIO): a file-like object to read.
file_offset (int): offset of the data relative to the start of
the file-like object.
data_size (int): size of the data. The resulting data size much match
the requested data size so that dtFabric can map the data type
definitions onto the byte stream.
Returns:
bytes: byte stream containing the data.
Raises:
ParseError: if the data cannot be read.
ValueError: if the file-like object is missing.
"""
if not file_object:
raise ValueError('Missing file-like object.')
file_object.seek(file_offset, os.SEEK_SET)
read_error = ''
try:
data = file_object.read(data_size)
if len(data) != data_size:
read_error = 'missing data'
except IOError as exception:
read_error = '{0!s}'.format(exception)
if read_error:
raise errors.ParseError(
'Unable to read data at offset: 0x{0:08x} with error: {1:s}'.format(
file_offset, read_error))
return data
def _ReadDefinitionFile(self, filename):
"""Reads a dtFabric definition file.
Args:
filename (str): name of the dtFabric definition file.
Returns:
dtfabric.DataTypeFabric: data type fabric which contains the data format
data type maps of the data type definition, such as a structure, that
can be mapped onto binary data or None if no filename is provided.
"""
if not filename:
return None
path = os.path.join(self._DEFINITION_FILES_PATH, filename)
with open(path, 'rb') as file_object:
definition = file_object.read()
return dtfabric_fabric.DataTypeFabric(yaml_definition=definition)
def _ReadStructureFromByteStream(
self, byte_stream, file_offset, data_type_map, context=None):
"""Reads a structure from a byte stream.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the structure data relative to the start
of the file-like object.
data_type_map (dtfabric.DataTypeMap): data type map of the structure.
context (Optional[dtfabric.DataTypeMapContext]): data type map context.
The context is used within dtFabric to hold state about how to map
the data type definition onto the byte stream. In this class it is
used to determine the size of variable size data type definitions.
Returns:
object: structure values object.
Raises:
ParseError: if the structure cannot be read.
ValueError: if file-like object or data type map is missing.
"""
if not byte_stream:
raise ValueError('Missing byte stream.')
if not data_type_map:
raise ValueError('Missing data type map.')
try:
return data_type_map.MapByteStream(byte_stream, context=context)
except (dtfabric_errors.ByteStreamTooSmallError,
dtfabric_errors.MappingError) as exception:
raise errors.ParseError((
'Unable to map {0:s} data at offset: 0x{1:08x} with error: '
'{2!s}').format(data_type_map.name or '', file_offset, exception))
def _ReadStructureFromFileObject(
self, file_object, file_offset, data_type_map):
"""Reads a structure from a file-like object.
If the data type map has a fixed size this method will read the predefined
number of bytes from the file-like object. If the data type map has a
variable size, depending on values in the byte stream, this method will
continue to read from the file-like object until the data type map can be
successfully mapped onto the byte stream or until an error occurs.
Args:
file_object (dfvfs.FileIO): a file-like object to parse.
file_offset (int): offset of the structure data relative to the start
of the file-like object.
data_type_map (dtfabric.DataTypeMap): data type map of the structure.
Returns:
tuple[object, int]: structure values object and data size of
the structure.
Raises:
ParseError: if the structure cannot be read.
ValueError: if file-like object or data type map is missing.
"""
context = None
data = b''
last_data_size = 0
data_size = data_type_map.GetByteSize()
if not data_size:
data_size = data_type_map.GetSizeHint()
while data_size != last_data_size:
read_offset = file_offset + last_data_size
read_size = data_size - last_data_size
data_segment = self._ReadData(file_object, read_offset, read_size)
data = b''.join([data, data_segment])
try:
context = dtfabric_data_maps.DataTypeMapContext()
structure_values_object = data_type_map.MapByteStream(
data, context=context)
return structure_values_object, data_size
except dtfabric_errors.ByteStreamTooSmallError:
pass
except dtfabric_errors.MappingError as exception:
raise errors.ParseError((
'Unable to map {0:s} data at offset: 0x{1:08x} with error: '
'{2!s}').format(data_type_map.name, file_offset, exception))
last_data_size = data_size
data_size = data_type_map.GetSizeHint(context=context)
raise errors.ParseError(
'Unable to read {0:s} at offset: 0x{1:08x}'.format(
data_type_map.name, file_offset))
@abc.abstractmethod
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a file-like object.
Args:
parser_mediator (ParserMediator): a parser mediator.
file_object (dfvfs.FileIO): a file-like object to parse.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
| kiddinn/plaso | plaso/parsers/dtfabric_parser.py | Python | apache-2.0 | 9,892 |
# -*- coding: utf-8 -*-
"""
WSGI config for Kiwi TCMS project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import tempfile
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "tcms.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tcms.settings.product")
os.environ["PYTHON_EGG_CACHE"] = tempfile.mkdtemp(prefix=".python-eggs-")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
_APPLICATION = get_wsgi_application()
def application(environ, start_response):
environ["PATH_INFO"] = environ["SCRIPT_NAME"] + environ["PATH_INFO"]
if environ["wsgi.url_scheme"] == "https":
environ["HTTPS"] = "on"
return _APPLICATION(environ, start_response)
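# A quick sanity sketch of the rewriting above, with hypothetical values:
#   environ = {'SCRIPT_NAME': '/kiwi', 'PATH_INFO': '/login',
#              'wsgi.url_scheme': 'https'}
#   # after the two statements above: PATH_INFO == '/kiwi/login', HTTPS == 'on'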
| kiwitcms/Kiwi | tcms/wsgi.py | Python | gpl-2.0 | 1,656 |
# -*- coding: utf-8 -*-
#
# classjs documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 3 09:48:09 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'classjs'
copyright = u'2012, Angelo Dini'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'classjsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'classjs.tex', u'classjs Documentation',
u'Angelo Dini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'classjs', u'classjs Documentation',
[u'Angelo Dini'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'classjs', u'classjs Documentation',
u'Angelo Dini', 'classjs', 'simple class based inheritance.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| ojii/classjs | docs/conf.py | Python | bsd-3-clause | 7,760 |
'''Deep Dreaming in Keras.
Run the script with:
```
python deep_dream.py path_to_your_base_image.jpg prefix_for_results
```
e.g.:
```
python deep_dream.py img/mypic.jpg results/dream
```
It is preferable to run this script on GPU, for speed.
If running on CPU, prefer the TensorFlow backend (much faster).
Example results: http://i.imgur.com/FX6ROg9.jpg
'''
from __future__ import print_function
from scipy.misc import imread, imresize, imsave
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import time
import argparse
import h5py
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, ZeroPadding2D, MaxPooling2D
from keras import backend as K
parser = argparse.ArgumentParser(description='Deep Dreams with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
help='Path to the image to transform.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
help='Prefix for the saved results.')
args = parser.parse_args()
base_image_path = args.base_image_path
result_prefix = args.result_prefix
# dimensions of the generated picture.
img_width = 600
img_height = 600
# path to the model weights file.
weights_path = 'vgg16_weights.h5'
# some settings we found interesting
saved_settings = {
'bad_trip': {'features': {'conv4_1': 0.05,
'conv4_2': 0.01,
'conv4_3': 0.01},
'continuity': 0.1,
'dream_l2': 0.8,
'jitter': 5},
'dreamy': {'features': {'conv5_1': 0.05,
'conv5_2': 0.02},
'continuity': 0.1,
'dream_l2': 0.02,
'jitter': 0},
}
# the settings we will use in this experiment
settings = saved_settings['bad_trip']
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path):
img = imresize(imread(image_path), (img_width, img_height))
img = img.transpose((2, 0, 1)).astype('float64')
img = np.expand_dims(img, axis=0)
return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
# this will contain our generated image
dream = K.placeholder((1, 3, img_width, img_height))
# build the VGG16 network with our dream as input
first_layer = ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height))
first_layer.input = dream
model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
f = h5py.File(weights_path)
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
# we don't look at the last (fully-connected) layers in the savefile
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])
# continuity loss util function
def continuity_loss(x):
assert K.ndim(x) == 4
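    # a and b are squared differences between neighbouring pixels along the
    # two spatial axes; summing (a + b) ** 1.25 yields a total-variation-style
    # penalty that discourages abrupt pixel-to-pixel changes.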
a = K.square(x[:, :, :img_width-1, :img_height-1] - x[:, :, 1:, :img_height-1])
b = K.square(x[:, :, :img_width-1, :img_height-1] - x[:, :, :img_width-1, 1:])
return K.sum(K.pow(a + b, 1.25))
# define the loss
loss = K.variable(0.)
for layer_name in settings['features']:
# add the L2 norm of the features of a layer to the loss
assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
coeff = settings['features'][layer_name]
x = layer_dict[layer_name].get_output()
shape = layer_dict[layer_name].output_shape
# we avoid border artifacts by only involving non-border pixels in the loss
loss -= coeff * K.sum(K.square(x[:, :, 2: shape[2]-2, 2: shape[3]-2])) / np.prod(shape[1:])
# add continuity loss (gives image local coherence, can result in an artful blur)
loss += settings['continuity'] * continuity_loss(dream) / (3 * img_width * img_height)
# add image L2 norm to loss (prevents pixels from taking very high values, makes image darker)
loss += settings['dream_l2'] * K.sum(K.square(dream)) / (3 * img_width * img_height)
# feel free to further modify the loss as you see fit, to achieve new effects...
# compute the gradients of the dream wrt the loss
grads = K.gradients(loss, dream)
outputs = [loss]
if type(grads) in {list, tuple}:
outputs += grads
else:
outputs.append(grads)
f_outputs = K.function([dream], outputs)
def eval_loss_and_grads(x):
x = x.reshape((1, 3, img_width, img_height))
outs = f_outputs([x])
loss_value = outs[0]
if len(outs[1:]) == 1:
grad_values = outs[1].flatten().astype('float64')
else:
grad_values = np.array(outs[1:]).flatten().astype('float64')
return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
def __init__(self):
self.loss_value = None
self.grads_values = None
def loss(self, x):
assert self.loss_value is None
loss_value, grad_values = eval_loss_and_grads(x)
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
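# Schematically, scipy's fmin_l_bfgs_b drives the Evaluator like this:
#   loss_value = evaluator.loss(x)    # one pass computes and caches both
#   grad_values = evaluator.grads(x)  # returns the cached grads, resets state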
evaluator = Evaluator()
# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the loss
x = preprocess_image(base_image_path)
for i in range(10):
print('Start of iteration', i)
start_time = time.time()
# add a random jitter to the initial image. This will be reverted at decoding time
random_jitter = (settings['jitter'] * 2) * (np.random.random((3, img_width, img_height)) - 0.5)
x += random_jitter
# run L-BFGS for 7 steps
x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
fprime=evaluator.grads, maxfun=7)
print('Current loss value:', min_val)
# decode the dream and save it
x = x.reshape((3, img_width, img_height))
x -= random_jitter
img = deprocess_image(x)
fname = result_prefix + '_at_iteration_%d.png' % i
imsave(fname, img)
end_time = time.time()
print('Image saved as', fname)
print('Iteration %d completed in %ds' % (i, end_time - start_time))
| dandxy89/ExperiKeras | Keras/Artwork/deep_dream.py | Python | mit | 8,598 |
from django.template.loader import make_origin
from django.template import TemplateDoesNotExist
from django.conf import settings
from djinja2.models import Jinja2Template
class Jinja2Loader(object):
skip_dirs = getattr(settings, 'KEEP_DJANGO_TEMPLATES', ())
def load_template(self, template_name, template_dirs=None):
source, display_name = self.load_template_source(template_name, template_dirs)
origin = make_origin(display_name, self.load_template_source, template_name, template_dirs)
try:
template = self.get_template_from_string(source, origin, template_name)
return template, None
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist, back off to
# returning the source and display name for the template we were asked to load.
# This allows for correct identification (later) of the actual template that does
# not exist.
return source, display_name
def is_keep_django_template(self, template_name):
for prefix in self.skip_dirs:
if template_name.startswith(prefix):
return True
return False
def get_template_from_string(self, source, origin=None, name=None):
if name and self.is_keep_django_template(name):
raise TemplateDoesNotExist
return Jinja2Template(source, name)
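# A hypothetical settings.py sketch for wiring this loader in (the dotted path
# is inferred from this package; KEEP_DJANGO_TEMPLATES is read above):
#   TEMPLATE_LOADERS = (
#       'djinja2.loaders.Jinja2Loader',
#       'django.template.loaders.filesystem.Loader',
#       'django.template.loaders.app_directories.Loader',
#   )
#   KEEP_DJANGO_TEMPLATES = ('admin/', 'debug_toolbar/')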
| comoga/django-djinja2 | djinja2/loaders/__init__.py | Python | bsd-3-clause | 1,429 |
from amigocloud import AmigoCloud
amigocloud = AmigoCloud(token='<token>')
owner_id = 1 # Project Owner ID
project_id = 2 # Project ID
dataset_id = 3 # Dataset ID
amigocloud.listen_dataset_events(owner_id, project_id, dataset_id)
def realtime(data):
print 'Realtime dataset id=%(dataset_id)s' % data
for obj in data['data']:
print "Object '%(object_id)s' is now at (%(latitude)s, %(longitude)s)" % obj
amigocloud.add_callback('realtime', realtime)
amigocloud.start_listening()
| amigocloud/amigocloud_samples | python/listen_realtime_example.py | Python | mit | 500 |
# ============================================================================
#
# Copyright (c) 2007-2011 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
##
## apps.py
##
## This script contains functions for application deployments.
from java.io import File
import thread
import time
#=======================================================================================
# Load required modules
#=======================================================================================
try:
commonModule
except NameError:
execfile('ConfigNOW/common/common.py')
#=======================================================================================
# Global variables
#=======================================================================================
appsModule = '1.0.1'
log.debug('Loading module [apps.py] version [' + appsModule + ']')
#=======================================================================================
# Deploy applications
#=======================================================================================
def deployApps(componentProperties):
"""Deploys applications"""
applications = componentProperties.getProperty('applications')
if applications is None:
log.info('No applications to deploy')
else:
apps = applications.split(',')
for app in apps:
__deployApp('application.' + app, componentProperties)
#=======================================================================================
# Undeploy applications
#=======================================================================================
def undeployApps(componentProperties):
"""Deploys applications"""
applications = componentProperties.getProperty('applications')
if applications is None:
log.info('No applications to undeploy')
else:
apps = applications.split(',')
for app in apps:
__undeployApp('application.' + app, componentProperties=componentProperties)
#=======================================================================================
# Deploy an application
#=======================================================================================
def __deployApp(appPrefix, componentProperties):
"""Deploys an application"""
appName = componentProperties.getProperty(appPrefix + '.name')
appPath = componentProperties.getProperty(appPrefix + '.path')
targets = componentProperties.getProperty(appPrefix + '.targets')
isRemote = componentProperties.getProperty(appPrefix +'.isRemote')
if appPath is None or len(appPath)==0:
appPath = componentProperties.getProperty('applications.default.deploy.path')
appFile = appPath + File.separator + componentProperties.getProperty(appPrefix + '.file')
try:
if isRemote is not None and isRemote.upper()=='TRUE':
log.info('Deploying application Remotely: ' + appName)
progress = deploy(appName, appFile, targets, stageMode='stage',upload='true',remote='true')
else:
log.info('Deploying Application : '+appName)
progress = deploy(appName, appFile, targets)
#log.info('Deploying application: ' + appName)
progress.printStatus()
log.debug(str(appName) + ' has been deployed. Check state ' + str(appName) + '?=' + str(progress.getState()))
log.debug(str(appName) + ' has been deployed. Check if ' + str(appName) + ' is completed?=' + str(progress.isCompleted()))
log.debug(str(appName) + ' has been deployed. Check if ' + str(appName) + ' is running?=' + str(progress.isRunning()))
log.debug(str(appName) + ' has been deployed. Check if ' + str(appName) + ' is failed?=' + str(progress.isFailed()))
log.debug(str(appName) + ' has been deployed. Check message ' + str(appName) + '?=' + str(progress.getMessage()))
except Exception, error:
raise ScriptError, 'Unable to deploy application [' + appName + ']: ' + str(error)
#=======================================================================================
# Undeploy an application
#=======================================================================================
def __undeployApp(appPrefix, componentProperties):
"""Undeploys an application"""
appName = componentProperties.getProperty(appPrefix + '.name')
targets = componentProperties.getProperty(appPrefix + '.targets')
undeployTimeout = componentProperties.getProperty('applications.default.undeploy.timeout')
try:
__stopApp(appName)
log.info('Undeploying application: ' + appName)
progress = undeploy(appName, targets, timeout=undeployTimeout)
log.debug(str(appName) + ' has been undeployed. Check state ' + str(appName) + '?=' + str(progress.getState()))
log.debug(str(appName) + ' has been undeployed. Check if ' + str(appName) + ' is completed?=' + str(progress.isCompleted()))
log.debug(str(appName) + ' has been undeployed. Check if ' + str(appName) + ' is running?=' + str(progress.isRunning()))
log.debug(str(appName) + ' has been undeployed. Check if ' + str(appName) + ' is failed?=' + str(progress.isFailed()))
log.debug(str(appName) + ' has been undeployed. Check message ' + str(appName) + '?=' + str(progress.getMessage()))
if progress.isFailed():
if str(progress.getMessage()).find('Deployer:149001') == -1:
raise ScriptError, 'Unable to undeploy application [' + appName + ']: ' + str(progress.getMessage())
except Exception, error:
raise ScriptError, 'Unable to undeploy application [' + appName + ']: ' + str(error)
#=======================================================================================
# Stop an application
#=======================================================================================
def __stopApp(appName):
"""Stops an application"""
log.info('Stopping application: ' + appName)
try:
progress = stopApplication(appName)
log.debug('Is running? ' + str(progress.isRunning()))
except Exception, error:
raise ScriptError, 'Unable to stop application [' + appName + ']: ' + str(error)
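# A hypothetical properties sketch for the functions above (keys inferred from
# the componentProperties lookups; all values are examples only):
#   applications=myapp
#   application.myapp.name=MyApp
#   application.myapp.path=/opt/deploy
#   application.myapp.file=myapp.ear
#   application.myapp.targets=ManagedServer1
#   application.myapp.isRemote=false
#   applications.default.undeploy.timeout=60000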
| Integral-Technology-Solutions/ConfigNOW | wlst/apps.py | Python | mit | 6,847 |
from django.conf import settings
def all(request):
return {
'settings': settings,
'request': request,
}
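# Hypothetical wiring in settings.py (dotted path taken from this module's
# location):
#   TEMPLATE_CONTEXT_PROCESSORS += ('web.context_processors.all',)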
| gitmill/gitmill | django/web/context_processors.py | Python | mit | 129 |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of Channel Args on client/server side."""
import unittest
import grpc
TEST_CHANNEL_ARGS = (
('arg1', b'bytes_val'),
('arg2', 'str_val'),
('arg3', 1),
(b'arg4', 'str_val'),
)
class ChannelArgsTest(unittest.TestCase):
def test_client(self):
grpc.insecure_channel('localhost:8080', options=TEST_CHANNEL_ARGS)
def test_server(self):
grpc.server(None, options=TEST_CHANNEL_ARGS)
if __name__ == '__main__':
unittest.main(verbosity=2)
| jcanizales/grpc | src/python/grpcio_tests/tests/unit/_channel_args_test.py | Python | bsd-3-clause | 2,004 |
__author__ = 'quentin'
import unittest
class TestExple(unittest.TestCase):
def test_exple(self):
ans = 1
ref = 1
self.assertEqual(ans, ref)
| gilestrolab/ethoscope | src/ethoscope/tests/unittests/test_utils.py | Python | gpl-3.0 | 172 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import waypoints.models
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Waypoint',
fields=[
('fid', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=255, blank=True)),
('description', models.CharField(max_length=1000, blank=True)),
('elevation', models.FloatField(null=True, blank=True)),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(null=True)),
('the_geom', django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, blank=True)),
('visible', models.BooleanField(default=False)),
('route', models.ForeignKey(related_name='waypoints', to='routes.Route')),
],
options={
'db_table': 'waypoints',
'managed': True,
},
),
migrations.CreateModel(
name='WaypointMedia',
fields=[
('fid', models.AutoField(serialize=False, primary_key=True)),
('content_type', models.CharField(max_length=100)),
('filename', models.CharField(max_length=100)),
('size', models.IntegerField()),
('file', models.FileField(max_length=255, upload_to=waypoints.models.get_upload_path)),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now=True)),
('waypoint', models.ForeignKey(related_name='waypoint_media', to='waypoints.Waypoint')),
],
options={
'db_table': 'waypoint_media',
'managed': True,
},
),
]
| bjohare/comap | waypoints/migrations/0001_initial.py | Python | gpl-3.0 | 2,051 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Locking functionality when launching things from the command line.
Uses a pidfile.
This prevents multiple identical workflows from being launched simultaneously.
"""
from __future__ import print_function
import errno
import hashlib
import os
import sys
from subprocess import Popen, PIPE
from luigi import six
def getpcmd(pid):
"""
Returns command of process.
:param pid:
"""
if os.name == "nt":
# Use wmic command instead of ps on Windows.
cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid, )
with os.popen(cmd, 'r') as p:
lines = [line for line in p.readlines() if line.strip("\r\n ") != ""]
if lines:
_, val = lines
return val
elif sys.platform == "darwin":
# Use pgrep instead of /proc on macOS.
pidfile = ".%d.pid" % (pid, )
with open(pidfile, 'w') as f:
f.write(str(pid))
try:
p = Popen(['pgrep', '-lf', '-F', pidfile], stdout=PIPE)
stdout, _ = p.communicate()
line = stdout.decode('utf8').strip()
if line:
_, scmd = line.split(' ', 1)
return scmd
finally:
os.unlink(pidfile)
else:
# Use the /proc filesystem
# At least on android there have been some issues with not all
# process infos being readable. In these cases using the `ps` command
# worked. See the pull request at
# https://github.com/spotify/luigi/pull/1876
try:
with open('/proc/{0}/cmdline'.format(pid), 'r') as fh:
if six.PY3:
return fh.read().replace('\0', ' ').rstrip()
else:
return fh.read().replace('\0', ' ').decode('utf8').rstrip()
except IOError:
# the system may not allow reading the command line
# of a process owned by another user
pass
# Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command:
return '[PROCESS_WITH_PID={}]'.format(pid)
def get_info(pid_dir, my_pid=None):
# Check the name and pid of this process
if my_pid is None:
my_pid = os.getpid()
my_cmd = getpcmd(my_pid)
cmd_hash = my_cmd.encode('utf8')
pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'
return my_pid, my_cmd, pid_file
def acquire_for(pid_dir, num_available=1, kill_signal=None):
"""
Makes sure the process is only run once at the same time with the same name.
Notice that we since we check the process name, different parameters to the same
command can spawn multiple processes at the same time, i.e. running
"/usr/bin/my_process" does not prevent anyone from launching
"/usr/bin/my_process --foo bar".
"""
my_pid, my_cmd, pid_file = get_info(pid_dir)
# Create a pid file if it does not exist
try:
os.mkdir(pid_dir)
os.chmod(pid_dir, 0o777)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
# Let variable "pids" be all pids who exist in the .pid-file who are still
# about running the same command.
pids = {pid for pid in _read_pids_file(pid_file) if getpcmd(pid) == my_cmd}
if kill_signal is not None:
for pid in pids:
os.kill(pid, kill_signal)
print('Sent kill signal to Pids: {}'.format(pids))
# We allow for the killer to progress, yet we don't want these to stack
# up! So we only allow it once.
num_available += 1
if len(pids) >= num_available:
# We are already running under a different pid
print('Pid(s) {} already running'.format(pids))
if kill_signal is not None:
print('Note: There have (probably) been 1 other "--take-lock"'
' process which continued to run! Probably no need to run'
' this one as well.')
return False
_write_pids_file(pid_file, pids | {my_pid})
return True
def _read_pids_file(pid_file):
# First setup a python 2 vs 3 compatibility
# http://stackoverflow.com/a/21368622/621449
try:
FileNotFoundError
except NameError:
# Should only happen on python 2
FileNotFoundError = IOError
# If the file happen to not exist, simply return
# an empty set()
try:
with open(pid_file, 'r') as f:
return {int(pid_str.strip()) for pid_str in f if pid_str.strip()}
except FileNotFoundError:
return set()
def _write_pids_file(pid_file, pids_set):
with open(pid_file, 'w') as f:
f.writelines('{}\n'.format(pid) for pid in pids_set)
# Make the .pid-file writable by all (when the os allows for it)
if os.name != 'nt':
s = os.stat(pid_file)
if os.getuid() == s.st_uid:
os.chmod(pid_file, s.st_mode | 0o777)
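if __name__ == '__main__':
    # A minimal usage sketch (not part of luigi): allow only one instance of
    # this exact command line at a time. The lock directory is an assumption.
    import sys
    if not acquire_for('/tmp/luigi-lock-demo'):
        sys.exit(1)
    print('lock acquired; pid recorded under /tmp/luigi-lock-demo')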
| Magnetic/luigi | luigi/lock.py | Python | apache-2.0 | 5,561 |
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TemplateLink(Model):
"""
Entity representing the reference to the template.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar uri: URI referencing the template. Default value:
"https://azuresdkci.blob.core.windows.net/templatehost/CreateVm_2016-07-19/azuredeploy.json"
.
:vartype uri: str
:param content_version: If included it must match the ContentVersion in
the template.
:type content_version: str
"""
_validation = {
'uri': {'required': True, 'constant': True},
}
_attribute_map = {
'uri': {'key': 'uri', 'type': 'str'},
'content_version': {'key': 'contentVersion', 'type': 'str'},
}
uri = "https://azuresdkci.blob.core.windows.net/templatehost/CreateVm_2016-07-19/azuredeploy.json"
def __init__(self, content_version=None):
self.content_version = content_version
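# Usage sketch (the content_version value is an example only):
#   link = TemplateLink(content_version='1.0.0.0')
#   link.uri  # the constant azuredeploy.json URL declared above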
| BurtBiel/azure-cli | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/mgmt_vm/lib/models/template_link.py | Python | mit | 1,661 |
from shutit_module import ShutItModule
class test8(ShutItModule):
def build(self, shutit):
shutit.add_line_to_file(['asd'],'/tmp/asd')
shutit.add_line_to_file('asd2','/tmp/asd2')
shutit.add_line_to_file('asd2','/tmp/asd2')
shutit.add_line_to_file(['asd3','asd4'],'/tmp/asd2')
res = shutit.send_and_get_output("""wc -l /tmp/asd2 | awk '{print $1}'""")
if res != '3':
shutit.fail('expected 3') # pragma: no cover
return True
def module():
return test8(
'shutit.tk.test8.test8', 782914092.008,
description='',
maintainer='',
depends=['shutit.tk.setup']
)
| ianmiell/shutit-test | test/docker_tests/8/test8.py | Python | mit | 584 |
"""
Implementation of optimized einsum.
"""
from __future__ import division, absolute_import, print_function
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray
__all__ = ['einsum', 'einsum_path']
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
    Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = (idx_contract - new_result)
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorial with respect to the elements in the list ``input_sets``.
    Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('')
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
comb_iter = []
for x in range(len(input_sets) - iteration):
for y in range(x + 1, len(input_sets) - iteration):
comb_iter.append((x, y))
for curr in full_results:
cost, positions, remaining = curr
for con in comb_iter:
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Find cost
new_cost = _compute_size_by_dict(idx_contract, idx_dict)
if idx_removed:
new_cost *= 2
# Build (total_cost, positions, indices_remaining)
new_cost += cost
new_pos = positions + [con]
iter_results.append((new_cost, new_pos, new_input_sets))
# Update list to iterate over
full_results = iter_results
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""
Finds the path by contracting the best pair until the input list is
exhausted. The best pair is found by minimizing the tuple
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
matrix multiplication or inner product operations, then Hadamard like
operations, and finally outer operations. Outer products are limited by
``memory_limit``. This algorithm scales cubically with respect to the
number of elements in the list ``input_sets``.
    Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
    memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The greedy contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('')
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
if len(input_sets) == 1:
return [(0,)]
path = []
for iteration in range(len(input_sets) - 1):
iteration_results = []
comb_iter = []
# Compute all unique pairs
for x in range(len(input_sets)):
for y in range(x + 1, len(input_sets)):
comb_iter.append((x, y))
for positions in comb_iter:
# Find the contraction
contract = _find_contraction(positions, input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
# Sieve the results based on memory_limit
if _compute_size_by_dict(idx_result, idx_dict) > memory_limit:
continue
# Build sort tuple
removed_size = _compute_size_by_dict(idx_removed, idx_dict)
cost = _compute_size_by_dict(idx_contract, idx_dict)
sort = (-removed_size, cost)
# Add contraction to possible choices
iteration_results.append([sort, positions, new_input_sets])
# If we did not find a new contraction contract remaining
if len(iteration_results) == 0:
path.append(tuple(range(len(input_sets))))
break
# Sort based on first index
best = min(iteration_results, key=lambda x: x[0])
path.append(best[1])
input_sets = best[2]
return path
def _parse_einsum_input(operands):
"""
A reproduction of einsum c side einsum parsing in python.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
('za,xza', 'xz', [a, b])
    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('za,xza', 'xz', [a, b])
"""
if len(operands) == 0:
raise ValueError("No input operands")
if isinstance(operands[0], str):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = [asanyarray(v) for v in operand_list]
subscripts = ""
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
if num != last:
subscripts += ","
if output_list is not None:
subscripts += "->"
for s in output_list:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
if invalid or (subscripts.count("->") != 1):
raise ValueError("Subscripts can only contain one '->'.")
# Parse ellipses
if "." in subscripts:
used = subscripts.replace(".", "").replace(",", "").replace("->", "")
unused = list(einsum_symbols_set - set(used))
ellipse_inds = "".join(unused)
longest = 0
if "->" in subscripts:
input_tmp, output_sub = subscripts.split("->")
split_subscripts = input_tmp.split(",")
out_sub = True
else:
split_subscripts = subscripts.split(',')
out_sub = False
for num, sub in enumerate(split_subscripts):
if "." in sub:
if (sub.count(".") != 3) or (sub.count("...") != 1):
raise ValueError("Invalid Ellipses.")
# Take into account numerical values
if operands[num].shape == ():
ellipse_count = 0
else:
ellipse_count = max(len(operands[num].shape), 1)
ellipse_count -= (len(sub) - 3)
if ellipse_count > longest:
longest = ellipse_count
if ellipse_count < 0:
raise ValueError("Ellipses lengths do not match.")
elif ellipse_count == 0:
split_subscripts[num] = sub.replace('...', '')
else:
rep_inds = ellipse_inds[-ellipse_count:]
split_subscripts[num] = sub.replace('...', rep_inds)
subscripts = ",".join(split_subscripts)
if longest == 0:
out_ellipse = ""
else:
out_ellipse = ellipse_inds[-longest:]
if out_sub:
subscripts += "->" + output_sub.replace("...", out_ellipse)
else:
# Special care for outputless ellipses
output_subscript = ""
tmp_subscripts = subscripts.replace(",", "")
for s in sorted(set(tmp_subscripts)):
if s not in (einsum_symbols):
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
normal_inds = ''.join(sorted(set(output_subscript) -
set(out_ellipse)))
subscripts += "->" + out_ellipse + normal_inds
# Build output string if does not exist
if "->" in subscripts:
input_subscripts, output_subscript = subscripts.split("->")
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(",", "")
output_subscript = ""
for s in sorted(set(tmp_subscripts)):
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError("Output character %s did not appear in the input"
% char)
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError("Number of einsum subscripts must be equal to the "
"number of operands.")
return (input_subscripts, output_subscript, operands)
def einsum_path(*operands, **kwargs):
"""
einsum_path(subscripts, *operands, optimize='greedy')
Evaluates the lowest cost contraction order for an einsum expression by
considering the creation of intermediate arrays.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
*operands : list of array_like
These are the arrays for the operation.
optimize : {bool, list, tuple, 'greedy', 'optimal'}
Choose the type of path. If a tuple is provided, the second argument is
assumed to be the maximum intermediate size created. If only a single
argument is provided the largest input or output array size is used
as a maximum intermediate size.
* if a list is given that starts with ``einsum_path``, uses this as the
contraction path
* if False no optimization is taken
* if True defaults to the 'greedy' algorithm
* 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
path. Scales exponentially with the number of terms in the
contraction.
* 'greedy' An algorithm that chooses the best pair contraction
at each step. Effectively, this algorithm searches the largest inner,
Hadamard, and then outer products at each step. Scales cubically with
the number of terms in the contraction. Equivalent to the 'optimal'
path for most contractions.
Default is 'greedy'.
Returns
-------
path : list of tuples
A list representation of the einsum path.
string_repr : str
A printable representation of the einsum path.
Notes
-----
The resulting path indicates which terms of the input contraction should be
contracted first, the result of this contraction is then appended to the
end of the contraction list. This list can then be iterated over until all
intermediate contractions are complete.
See Also
--------
einsum, linalg.multi_dot
Examples
--------
We can begin with a chain dot example. In this case, it is optimal to
    contract the ``b`` and ``c`` tensors first as represented by the first
element of the path ``(1, 2)``. The resulting tensor is added to the end
of the contraction and the remaining contraction ``(0, 1)`` is then
completed.
>>> a = np.random.rand(2, 2)
>>> b = np.random.rand(2, 5)
>>> c = np.random.rand(5, 2)
>>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
>>> print(path_info[0])
['einsum_path', (1, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ij,jk,kl->il
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 1.600e+02
Optimized FLOP count: 5.600e+01
Theoretical speedup: 2.857
Largest intermediate: 4.000e+00 elements
-------------------------------------------------------------------------
scaling current remaining
-------------------------------------------------------------------------
3 kl,jk->jl ij,jl->il
3 jl,ij->il il->il
A more complex index transformation example.
>>> I = np.random.rand(10, 10, 10, 10)
>>> C = np.random.rand(10, 10)
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
optimize='greedy')
>>> print(path_info[0])
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ea,fb,abcd,gc,hd->efgh
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 8.000e+08
Optimized FLOP count: 8.000e+05
Theoretical speedup: 1000.000
Largest intermediate: 1.000e+04 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
5 abcd,ea->bcde fb,gc,hd,bcde->efgh
5 bcde,fb->cdef gc,hd,cdef->efgh
5 cdef,gc->defg hd,defg->efgh
5 defg,hd->efgh efgh->efgh
"""
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize', 'einsum_call']
unknown_kwargs = [k for (k, v) in kwargs.items() if k
not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs:"
" %s" % unknown_kwargs)
# Figure out what the path really is
path_type = kwargs.pop('optimize', False)
if path_type is True:
path_type = 'greedy'
if path_type is None:
path_type = False
memory_limit = None
# No optimization or a named path algorithm
if (path_type is False) or isinstance(path_type, str):
pass
# Given an explicit path
elif len(path_type) and (path_type[0] == 'einsum_path'):
pass
# Path tuple with memory limit
elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
else:
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
einsum_call_arg = kwargs.pop("einsum_call", False)
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
subscripts = input_subscripts + '->' + output_subscript
# Build a few useful list and sets
input_list = input_subscripts.split(',')
input_sets = [set(x) for x in input_list]
output_set = set(output_subscript)
indices = set(input_subscripts.replace(',', ''))
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError("Einstein sum subscript %s does not contain the "
"correct number of indices for operand %d.",
input_subscripts[tnum], tnum)
for cnum, char in enumerate(term):
dim = sh[cnum]
if char in dimension_dict.keys():
if dimension_dict[char] != dim:
raise ValueError("Size of label '%s' for operand %d does "
"not match previous terms.", char, tnum)
else:
dimension_dict[char] = dim
# Compute size of each input array plus the output array
size_list = []
for term in input_list + [output_subscript]:
size_list.append(_compute_size_by_dict(term, dimension_dict))
max_size = max(size_list)
if memory_limit is None:
memory_arg = max_size
else:
memory_arg = memory_limit
# Compute naive cost
    # This isn't quite right; need to look into exactly how einsum does this
naive_cost = _compute_size_by_dict(indices, dimension_dict)
indices_in_input = input_subscripts.replace(',', '')
mult = max(len(input_list) - 1, 1)
if (len(indices_in_input) - len(set(indices_in_input))):
mult *= 2
naive_cost *= mult
# Compute the path
if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
# Maximum memory should be at most out_size for this algorithm
memory_arg = min(memory_arg, max_size)
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type[0] == 'einsum_path':
path = path_type[1:]
else:
raise KeyError("Path name %s not found", path_type)
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(list(contract_inds), reverse=True))
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = _compute_size_by_dict(idx_contract, dimension_dict)
if idx_removed:
cost *= 2
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:])
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if einsum_call_arg:
return (operands, contraction_list)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = " Complete contraction: %s\n" % overall_contraction
path_print += " Naive scaling: %d\n" % len(indices)
path_print += " Optimized scaling: %d\n" % max(scale_list)
path_print += " Naive FLOP count: %.3e\n" % naive_cost
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
path_print += " Theoretical speedup: %3.3f\n" % speedup
path_print += " Largest intermediate: %.3e elements\n" % max_i
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
path = ['einsum_path'] + path
return (path, path_print)
# Rewrite einsum to handle different cases
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : {ndarray, None}, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
        is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
        Controls whether intermediate optimization should occur. No
        optimization will occur if False; if True, the 'greedy' algorithm
        is used by default.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Default is False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscripts labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
Added the ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
See ``np.einsum_path`` for more details.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
    >>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
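    A minimal illustration of the ``optimize`` argument; the all-ones
    operands below are chosen only so that the result is easy to verify
    by hand:
    >>> a = np.ones((2, 2)); b = np.ones((2, 5)); c = np.ones((5, 2))
    >>> np.einsum('ij,jk,kl->il', a, b, c, optimize='greedy')
    array([[ 10.,  10.],
           [ 10.,  10.]])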
"""
# Grab non-einsum kwargs
optimize_arg = kwargs.pop('optimize', False)
# If no optimization, run pure einsum
if optimize_arg is False:
return c_einsum(*operands, **kwargs)
valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
einsum_kwargs = {k: v for (k, v) in kwargs.items() if
k in valid_einsum_kwargs}
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
unknown_kwargs = [k for (k, v) in kwargs.items() if
k not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
    # Special handling if out is specified
specified_out = False
out_array = einsum_kwargs.pop('out', None)
if out_array is not None:
specified_out = True
    # Build the contraction list and operands
operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
einsum_call=True)
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining = contraction
tmp_operands = []
for x in inds:
tmp_operands.append(operands.pop(x))
# If out was specified
if specified_out and ((num + 1) == len(contraction_list)):
einsum_kwargs["out"] = out_array
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
        # Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
return out_array
else:
return operands[0]
| DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/core/einsumfunc.py | Python | mit | 35,518 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
from openerp import workflow
class sale_order(osv.osv):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_track = {
'state': {
'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual'],
'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent']
},
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'date_order': fields.datetime.now(),
'state': 'draft',
'invoice_ids': [],
'date_confirm': False,
'client_order_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'sale.order'),
'procurement_group_id': False,
})
return super(sale_order, self).copy(cr, uid, id, default, context=context)
def _amount_line_tax(self, cr, uid, line, context=None):
val = 0.0
for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']:
val += c.get('amount', 0.0)
return val
def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
""" Wrapper because of direct method passing as parameter for function fields """
return self._amount_all(cr, uid, ids, field_name, arg, context=context)
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
val += self._amount_line_tax(cr, uid, line, context=context)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
if sale.invoiced:
res[sale.id] = 100.0
continue
tot = 0.0
for invoice in sale.invoice_ids:
if invoice.state not in ('draft', 'cancel'):
tot += invoice.amount_untaxed
if tot:
res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00))
else:
res[sale.id] = 0.0
return res
def _invoice_exists(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = False
if sale.invoice_ids:
res[sale.id] = True
return res
def _invoiced(self, cursor, user, ids, name, arg, context=None):
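        # 'Paid' is True only when the order has at least one non-cancelled
        # invoice and every non-cancelled invoice is paid; orders still in the
        # 'manual' state are never considered paid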
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = True
invoice_existence = False
for invoice in sale.invoice_ids:
if invoice.state!='cancel':
invoice_existence = True
if invoice.state != 'paid':
res[sale.id] = False
break
if not invoice_existence or sale.state == 'manual':
res[sale.id] = False
return res
def _invoiced_search(self, cursor, user, obj, name, args, context=None):
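        # Search support for the 'invoiced'/'invoice_exists' function fields:
        # True matches orders linked to at least one paid invoice; False matches
        # orders with an open (non-paid, non-cancelled) invoice or none at all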
if not len(args):
return []
clause = ''
sale_clause = ''
no_invoiced = False
for arg in args:
if arg[1] == '=':
if arg[2]:
clause += 'AND inv.state = \'paid\''
else:
clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\' AND inv.state <> \'paid\' AND rel.order_id = sale.id '
sale_clause = ', sale_order AS sale '
no_invoiced = True
cursor.execute('SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \
'WHERE rel.invoice_id = inv.id ' + clause)
res = cursor.fetchall()
if no_invoiced:
cursor.execute('SELECT sale.id ' \
'FROM sale_order AS sale ' \
'WHERE sale.id NOT IN ' \
'(SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'')
res.extend(cursor.fetchall())
if not res:
return [('id', '=', 0)]
return [('id', 'in', [x[0] for x in res])]
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'name': fields.char('Order Reference', size=64, required=True,
readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True),
'origin': fields.char('Source Document', size=64, help="Reference of the document that generated this sales order request."),
'client_order_ref': fields.char('Reference/Description', size=64),
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('cancel', 'Cancelled'),
('waiting_date', 'Waiting Schedule'),
('progress', 'Sales Order'),
('manual', 'Sale to Invoice'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
], 'Status', readonly=True, help="Gives the status of the quotation or sales order.\
\nThe exception status is automatically set when a cancel operation occurs \
in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
but waiting for the scheduler to run on the order date.", select=True),
'date_order': fields.datetime('Date', required=True, readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'create_date': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which sales order is created."),
'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which sales order is confirmed."),
'user_id': fields.many2one('res.users', 'Salesperson', states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, select=True, track_visibility='always'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order."),
'partner_shipping_id': fields.many2one('res.partner', 'Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order."),
'order_policy': fields.selection([
('manual', 'On Demand'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""This field controls how invoice and delivery operations are synchronized."""),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency", readonly=True, required=True),
'project_id': fields.many2one('account.analytic.account', 'Contract / Analytic', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order."),
'order_line': fields.one2many('sale.order.line', 'order_id', 'Order Lines', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'invoice_ids': fields.many2many('account.invoice', 'sale_order_invoice_rel', 'order_id', 'invoice_id', 'Invoices', readonly=True, help="This is the list of invoices that have been generated for this sales order. The same sales order may have been invoiced in several times (by line for example)."),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Paid',
fnct_search=_invoiced_search, type='boolean', help="It indicates that an invoice has been paid."),
'invoice_exists': fields.function(_invoice_exists, string='Invoiced',
fnct_search=_invoiced_search, type='boolean', help="It indicates that sales order has at least one invoice."),
'note': fields.text('Terms and conditions'),
'amount_untaxed': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The amount without tax.", track_visibility='always'),
'amount_tax': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The tax amount."),
'amount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Total',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The total amount."),
'payment_term': fields.many2one('account.payment.term', 'Payment Term'),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'company_id': fields.many2one('res.company', 'Company'),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'procurement_group_id': fields.many2one('procurement.group', 'Procurement group'),
}
_defaults = {
'date_order': fields.datetime.now,
'order_policy': 'manual',
'company_id': _get_default_company,
'state': 'draft',
'user_id': lambda obj, cr, uid, context: uid,
'name': lambda obj, cr, uid, context: '/',
'partner_invoice_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['invoice'])['invoice'],
'partner_shipping_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['delivery'])['delivery'],
'note': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.sale_note,
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_order = 'date_order desc, id desc'
# Form filling
def unlink(self, cr, uid, ids, context=None):
sale_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in sale_orders:
if s['state'] in ['draft', 'cancel']:
unlink_ids.append(s['id'])
else:
                raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it first!'))
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def copy_quotation(self, cr, uid, ids, context=None):
        id = self.copy(cr, uid, ids[0], context=context)
        view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
        view_id = view_ref and view_ref[1] or False
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': id,
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def onchange_pricelist_id(self, cr, uid, ids, pricelist_id, order_lines, context=None):
context = context or {}
if not pricelist_id:
return {}
value = {
'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id
}
if not order_lines:
return {'value': value}
warning = {
'title': _('Pricelist Warning!'),
            'message' : _('If you change the pricelist of this order (and possibly the currency), prices of existing order lines will not be updated.')
}
return {'warning': warning, 'value': value}
def get_salenote(self, cr, uid, ids, partner_id, context=None):
context_lang = context.copy()
if partner_id:
partner_lang = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).lang
context_lang.update({'lang': partner_lang})
return self.pool.get('res.users').browse(cr, uid, uid, context=context_lang).company_id.sale_note
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False, 'fiscal_position': False}}
part = self.pool.get('res.partner').browse(cr, uid, part, context=context)
addr = self.pool.get('res.partner').address_get(cr, uid, [part.id], ['delivery', 'invoice', 'contact'])
pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
payment_term = part.property_payment_term and part.property_payment_term.id or False
fiscal_position = part.property_account_position and part.property_account_position.id or False
dedicated_salesman = part.user_id and part.user_id.id or uid
val = {
'partner_invoice_id': addr['invoice'],
'partner_shipping_id': addr['delivery'],
'payment_term': payment_term,
'fiscal_position': fiscal_position,
'user_id': dedicated_salesman,
}
if pricelist:
val['pricelist_id'] = pricelist
sale_note = self.get_salenote(cr, uid, ids, part.id, context=context)
if sale_note: val.update({'note': sale_note})
return {'value': val}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('name', '/') == '/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'sale.order') or '/'
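        # Complete missing partner-dependent values (addresses, pricelist,
        # payment term, ...) through the partner onchange; values passed
        # explicitly in vals take precedence over the derived defaults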
if vals.get('partner_id') and any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):
defaults = self.onchange_partner_id(cr, uid, [], vals['partner_id'], context)['value']
vals = dict(defaults, **vals)
context.update({'mail_create_nolog': True})
new_id = super(sale_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [new_id], body=_("Quotation created"), context=context)
return new_id
def button_dummy(self, cr, uid, ids, context=None):
return True
# FIXME: deprecated method, overriders should be using _prepare_invoice() instead.
# can be removed after 6.1.
def _inv_get(self, cr, uid, order, context=None):
return {}
def _prepare_invoice(self, cr, uid, order, lines, context=None):
"""Prepare the dict of values to create the new invoice for a
sales order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: sale.order record to invoice
        :param list(int) lines: list of invoice line IDs that must be
            attached to the invoice
        :return: dict of values to create() the invoice
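        A minimal sketch of a custom override (``my_sale_order`` is a
        hypothetical subclass defined in an extension module)::
            def _prepare_invoice(self, cr, uid, order, lines, context=None):
                # hypothetical extension code, shown for illustration only
                vals = super(my_sale_order, self)._prepare_invoice(
                    cr, uid, order, lines, context=context)
                vals['comment'] = (vals.get('comment') or '') + ' (reviewed)'
                return vals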
"""
if context is None:
context = {}
journal_ids = self.pool.get('account.journal').search(cr, uid,
[('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Please define sales journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
invoice_vals = {
'name': order.client_order_ref or '',
'origin': order.name,
'type': 'out_invoice',
'reference': order.client_order_ref or order.name,
'account_id': order.partner_id.property_account_receivable.id,
'partner_id': order.partner_invoice_id.id,
'journal_id': journal_ids[0],
'invoice_line': [(6, 0, lines)],
'currency_id': order.pricelist_id.currency_id.id,
'comment': order.note,
'payment_term': order.payment_term and order.payment_term.id or False,
'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
'date_invoice': context.get('date_invoice', False),
'company_id': order.company_id.id,
'user_id': order.user_id and order.user_id.id or False,
'section_id' : order.section_id.id
}
# Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1
invoice_vals.update(self._inv_get(cr, uid, order, context=context))
return invoice_vals
def _make_invoice(self, cr, uid, order, lines, context=None):
inv_obj = self.pool.get('account.invoice')
obj_invoice_line = self.pool.get('account.invoice.line')
if context is None:
context = {}
invoiced_sale_line_ids = self.pool.get('sale.order.line').search(cr, uid, [('order_id', '=', order.id), ('invoiced', '=', True)], context=context)
from_line_invoice_ids = []
for invoiced_sale_line_id in self.pool.get('sale.order.line').browse(cr, uid, invoiced_sale_line_ids, context=context):
for invoice_line_id in invoiced_sale_line_id.invoice_lines:
if invoice_line_id.invoice_id.id not in from_line_invoice_ids:
from_line_invoice_ids.append(invoice_line_id.invoice_id.id)
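        # Invoices that were not generated from order lines (typically advance
        # invoices) are deducted by copying their lines with a negated unit price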
for preinv in order.invoice_ids:
if preinv.state not in ('cancel',) and preinv.id not in from_line_invoice_ids:
for preline in preinv.invoice_line:
inv_line_id = obj_invoice_line.copy(cr, uid, preline.id, {'invoice_id': False, 'price_unit': -preline.price_unit})
lines.append(inv_line_id)
inv = self._prepare_invoice(cr, uid, order, lines, context=context)
inv_id = inv_obj.create(cr, uid, inv, context=context)
data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv['payment_term'], time.strftime(DEFAULT_SERVER_DATE_FORMAT))
if data.get('value', False):
inv_obj.write(cr, uid, [inv_id], data['value'], context=context)
inv_obj.button_compute(cr, uid, [inv_id])
return inv_id
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the sales order and marks it as sent, so that the next step of the workflow can be seen more easily
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_quotation_sent(cr, uid, ids)
return self.pool['report'].get_action(cr, uid, ids, 'sale.report_saleorder', context=context)
def manual_invoice(self, cr, uid, ids, context=None):
""" create invoices for the given sales orders (ids), and open the form
view of one of the newly created invoices
"""
mod_obj = self.pool.get('ir.model.data')
# create invoices through the sales orders' workflow
inv_ids0 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
self.signal_manual_invoice(cr, uid, ids)
inv_ids1 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
# determine newly created invoices
new_inv_ids = list(inv_ids1 - inv_ids0)
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
        res_id = res and res[1] or False
return {
'name': _('Customer Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': new_inv_ids and new_inv_ids[0] or False,
}
def action_view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given sales order ids. They are shown either in a list view or, if there is only one invoice to show, in a form view.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of invoices to display
inv_ids = []
for so in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in so.invoice_ids]
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def test_no_product(self, cr, uid, order, context):
for line in order.order_line:
            if line.product_id and (line.product_id.type != 'service'):
return False
return True
def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice = False, context=None):
if states is None:
states = ['confirmed', 'done', 'exception']
res = False
invoices = {}
invoice_ids = []
invoice = self.pool.get('account.invoice')
obj_sale_order_line = self.pool.get('sale.order.line')
partner_currency = {}
if context is None:
context = {}
        # If a date was specified, use it as the invoice date; useful when invoices
        # are generated this month but dated the last day of the previous month
if date_invoice:
context['date_invoice'] = date_invoice
for o in self.browse(cr, uid, ids, context=context):
currency_id = o.pricelist_id.currency_id.id
            if (o.partner_id.id in partner_currency) and (partner_currency[o.partner_id.id] != currency_id):
raise osv.except_osv(
_('Error!'),
_('You cannot group sales having different currencies for the same partner.'))
partner_currency[o.partner_id.id] = currency_id
lines = []
for line in o.order_line:
if line.invoiced:
continue
elif (line.state in states):
lines.append(line.id)
created_lines = obj_sale_order_line.invoice_line_create(cr, uid, lines)
if created_lines:
invoices.setdefault(o.partner_invoice_id.id or o.partner_id.id, []).append((o, created_lines))
if not invoices:
for o in self.browse(cr, uid, ids, context=context):
for i in o.invoice_ids:
if i.state == 'draft':
return i.id
for val in invoices.values():
if grouped:
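                # grouped mode: a single invoice per partner, merging the
                # invoice lines of every order collected in val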
res = self._make_invoice(cr, uid, val[0][0], reduce(lambda x, y: x + y, [l for o, l in val], []), context=context)
invoice_ref = ''
for o, l in val:
invoice_ref += o.name + '|'
self.write(cr, uid, [o.id], {'state': 'progress'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (o.id, res))
#remove last '|' in invoice_ref
if len(invoice_ref) >= 1:
invoice_ref = invoice_ref[:-1]
invoice.write(cr, uid, [res], {'origin': invoice_ref, 'name': invoice_ref})
else:
for order, il in val:
res = self._make_invoice(cr, uid, order, il, context=context)
invoice_ids.append(res)
self.write(cr, uid, [order.id], {'state': 'progress'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (order.id, res))
return res
def action_invoice_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'invoice_except'}, context=context)
return True
def action_invoice_end(self, cr, uid, ids, context=None):
for this in self.browse(cr, uid, ids, context=context):
for line in this.order_line:
if line.state == 'exception':
line.write({'state': 'confirmed'})
if this.state == 'invoice_except':
this.write({'state': 'progress'})
return True
def action_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
sale_order_line_obj = self.pool.get('sale.order.line')
account_invoice_obj = self.pool.get('account.invoice')
for sale in self.browse(cr, uid, ids, context=context):
for inv in sale.invoice_ids:
if inv.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Cannot cancel this sales order!'),
_('First cancel all invoices attached to this sales order.'))
for r in self.read(cr, uid, ids, ['invoice_ids']):
account_invoice_obj.signal_invoice_cancel(cr, uid, r['invoice_ids'])
sale_order_line_obj.write(cr, uid, [l.id for l in sale.order_line],
{'state': 'cancel'})
self.write(cr, uid, ids, {'state': 'cancel'})
return True
def action_button_confirm(self, cr, uid, ids, context=None):
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.signal_order_confirm(cr, uid, ids)
# redisplay the record as a sales order
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
        view_id = view_ref and view_ref[1] or False
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': ids[0],
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def action_wait(self, cr, uid, ids, context=None):
context = context or {}
for o in self.browse(cr, uid, ids):
if not o.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a sales order which has no line.'))
noprod = self.test_no_product(cr, uid, o, context)
if (o.order_policy == 'manual') or noprod:
self.write(cr, uid, [o.id], {'state': 'manual', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
else:
self.write(cr, uid, [o.id], {'state': 'progress', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
self.pool.get('sale.order.line').button_confirm(cr, uid, [x.id for x in o.order_line])
return True
def action_quotation_send(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'sale.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def action_done(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
self.pool.get('sale.order.line').write(cr, uid, [line.id for line in order.order_line], {'state': 'done'}, context=context)
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context)
return {
'name': line.name,
'origin': order.name,
'date_planned': date_planned,
'product_id': line.product_id.id,
'product_qty': line.product_uom_qty,
'product_uom': line.product_uom.id,
'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,
'product_uos': (line.product_uos and line.product_uos.id) or line.product_uom.id,
'company_id': order.company_id.id,
'group_id': group_id,
'invoice_state': (order.order_policy == 'picking') and '2binvoiced' or 'none',
'sale_line_id': line.id
}
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
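        """Return the planned procurement date: the order date shifted by the line's delivery lead time (in days)."""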
date_planned = datetime.strptime(start_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=line.delay or 0.0)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
return {'name': order.name, 'partner_id': order.partner_shipping_id.id}
def procurement_needed(self, cr, uid, ids, context=None):
        # when only the sale module is installed there is no need to create
        # procurements; only further installed modules (project_mrp, sale_stock) change this
sale_line_obj = self.pool.get('sale.order.line')
res = []
for order in self.browse(cr, uid, ids, context=context):
res.append(sale_line_obj.need_procurement(cr, uid, [line.id for line in order.order_line], context=context))
return any(res)
def action_ignore_delivery_exception(self, cr, uid, ids, context=None):
for sale_order in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, ids, {'state': 'progress' if sale_order.invoice_exists else 'manual'}, context=context)
return True
def action_ship_create(self, cr, uid, ids, context=None):
"""Create the required procurements to supply sales order lines, also connecting
the procurements to appropriate stock moves in order to bring the goods to the
sales order's requested location.
:return: True
"""
procurement_obj = self.pool.get('procurement.order')
sale_line_obj = self.pool.get('sale.order.line')
for order in self.browse(cr, uid, ids, context=context):
proc_ids = []
vals = self._prepare_procurement_group(cr, uid, order, context=context)
if not order.procurement_group_id:
group_id = self.pool.get("procurement.group").create(cr, uid, vals, context=context)
order.write({'procurement_group_id': group_id}, context=context)
for line in order.order_line:
            # Try to fix procurements in exception (possible when, after a shipping exception, the user chooses to recreate)
if line.procurement_ids:
#first check them to see if they are in exception or not (one of the related moves is cancelled)
procurement_obj.check(cr, uid, [x.id for x in line.procurement_ids if x.state not in ['cancel', 'done']])
line.refresh()
                # re-run procurements that are in exception in order to trigger another move
proc_ids += [x.id for x in line.procurement_ids if x.state == 'exception']
elif sale_line_obj.need_procurement(cr, uid, [line.id], context=context):
if (line.state == 'done') or not line.product_id:
continue
vals = self._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
proc_id = procurement_obj.create(cr, uid, vals, context=context)
proc_ids.append(proc_id)
        # Confirm procurement orders so that rules will be applied on them;
        # note that the workflow normally ensures proc_ids isn't an empty list
procurement_obj.run(cr, uid, proc_ids, context=context)
        # if shipping was in exception and the user chose to recreate the delivery order, write the new status of the SO
if order.state == 'shipping_except':
val = {'state': 'progress', 'shipped': False}
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
order.write(val)
return True
    def test_state(self, cr, uid, ids, mode, *args):
        """If mode == 'finished', return True if all lines are done, False otherwise.
        If mode == 'canceled', return True if there is at least one canceled line, False otherwise.
        """
        assert mode in ('finished', 'canceled'), _("invalid mode for test_state")
finished = True
canceled = False
write_done_ids = []
write_cancel_ids = []
for order in self.browse(cr, uid, ids, context={}):
#TODO: Need to rethink what happens when cancelling
for line in order.order_line:
states = [x.state for x in line.procurement_ids]
cancel = states and all([x == 'cancel' for x in states])
doneorcancel = all([x in ('done', 'cancel') for x in states])
if cancel:
canceled = True
if line.state != 'exception':
write_cancel_ids.append(line.id)
if not doneorcancel:
finished = False
if doneorcancel and not cancel:
write_done_ids.append(line.id)
if write_done_ids:
self.pool.get('sale.order.line').write(cr, uid, write_done_ids, {'state': 'done'})
if write_cancel_ids:
self.pool.get('sale.order.line').write(cr, uid, write_cancel_ids, {'state': 'exception'})
if mode == 'finished':
return finished
elif mode == 'canceled':
return canceled
def procurement_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.procurement_ids]
return res
def onchange_fiscal_position(self, cr, uid, ids, fiscal_position, order_lines, context=None):
'''Update taxes of order lines for each line where a product is defined
:param list ids: not used
:param int fiscal_position: sale order fiscal position
:param list order_lines: command list for one2many write method
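        Example of a command list (all IDs and values are hypothetical)::
            [(0, 0, {'product_id': 42}),        # create a new line
             (1, 7, {'product_uom_qty': 2.0}),  # update existing line 7
             (4, 8)]                            # link existing line 8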
'''
order_line = []
fiscal_obj = self.pool.get('account.fiscal.position')
product_obj = self.pool.get('product.product')
line_obj = self.pool.get('sale.order.line')
fpos = False
if fiscal_position:
fpos = fiscal_obj.browse(cr, uid, fiscal_position, context=context)
for line in order_lines:
# create (0, 0, { fields })
# update (1, ID, { fields })
if line[0] in [0, 1]:
prod = None
if line[2].get('product_id'):
prod = product_obj.browse(cr, uid, line[2]['product_id'], context=context)
elif line[1]:
prod = line_obj.browse(cr, uid, line[1], context=context).product_id
if prod and prod.taxes_id:
line[2]['tax_id'] = [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]
order_line.append(line)
# link (4, ID)
# link all (6, 0, IDS)
elif line[0] in [4, 6]:
line_ids = line[0] == 4 and [line[1]] or line[2]
for line_id in line_ids:
prod = line_obj.browse(cr, uid, line_id, context=context).product_id
if prod and prod.taxes_id:
order_line.append([1, line_id, {'tax_id': [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]}])
else:
order_line.append([4, line_id])
else:
order_line.append(line)
return {'value': {'order_line': order_line}}
# TODO add a field price_unit_uos
# - update it on change product and unit price
# - use it in report if there is a uos
class sale_order_line(osv.osv):
def need_procurement(self, cr, uid, ids, context=None):
        # when only the sale module is installed there is no need to create
        # procurements; only further installed modules (project_mrp, sale_stock) change this
return False
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
res = {}
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = tax_obj.compute_all(cr, uid, line.tax_id, price, line.product_uom_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, *args):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
        except Exception:
return False
def _fnct_line_invoiced(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for this in self.browse(cr, uid, ids, context=context):
res[this.id] = this.invoice_lines and \
all(iline.invoice_id.state != 'cancel' for iline in this.invoice_lines)
return res
def _order_lines_from_invoice(self, cr, uid, ids, context=None):
        # direct access to the m2m table is the least convoluted way to achieve this (and is ok ACL-wise)
cr.execute("""SELECT DISTINCT sol.id FROM sale_order_invoice_rel rel JOIN
sale_order_line sol ON (sol.order_id = rel.order_id)
WHERE rel.invoice_id = ANY(%s)""", (list(ids),))
return [i[0] for i in cr.fetchall()]
_name = 'sale.order.line'
_description = 'Sales Order Line'
_columns = {
'order_id': fields.many2one('sale.order', 'Order Reference', required=True, ondelete='cascade', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'name': fields.text('Description', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of sales order lines."),
'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], change_default=True, readonly=True, states={'draft': [('readonly', False)]}, ondelete='restrict'),
'invoice_lines': fields.many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.function(_fnct_line_invoiced, string='Invoiced', type='boolean',
store={
'account.invoice': (_order_lines_from_invoice, ['state'], 10),
'sale.order.line': (lambda self,cr,uid,ids,ctx=None: ids, ['invoice_lines'], 10)
}),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price'), readonly=True, states={'draft': [('readonly', False)]}),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'sale_order_tax', 'order_line_id', 'tax_id', 'Taxes', readonly=True, states={'draft': [('readonly', False)]}),
'address_allotment_id': fields.many2one('res.partner', 'Allotment Partner',help="A partner to whom the particular product needs to be allotted."),
'product_uom_qty': fields.float('Quantity', digits_compute= dp.get_precision('Product UoS'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Unit of Measure ', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Quantity (UoS)' ,digits_compute= dp.get_precision('Product UoS'), readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS'),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount'), readonly=True, states={'draft': [('readonly', False)]}),
'th_weight': fields.float('Weight', readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([('cancel', 'Cancelled'),('draft', 'Draft'),('confirmed', 'Confirmed'),('exception', 'Exception'),('done', 'Done')], 'Status', required=True, readonly=True,
            help='* The \'Draft\' status is set when the related sales order is in draft status. \
\n* The \'Confirmed\' status is set when the related sales order is confirmed. \
\n* The \'Exception\' status is set when the related sales order is set as exception. \
\n* The \'Done\' status is set when the sales order line has been picked. \
                \n* The \'Cancelled\' status is set when a user cancels the related sales order.'),
'order_partner_id': fields.related('order_id', 'partner_id', type='many2one', relation='res.partner', store=True, string='Customer'),
'salesman_id':fields.related('order_id', 'user_id', type='many2one', relation='res.users', store=True, string='Salesperson'),
'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}),
'procurement_ids': fields.one2many('procurement.order', 'sale_line_id', 'Procurements'),
}
_order = 'order_id desc, sequence, id'
_defaults = {
'product_uom' : _get_uom_id,
'discount': 0.0,
'product_uom_qty': 1,
'product_uos_qty': 1,
'sequence': 10,
'state': 'draft',
'price_unit': 0.0,
'delay': 0.0,
}
def _get_line_qty(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos_qty or 0.0
return line.product_uom_qty
def _get_line_uom(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos.id
return line.product_uom.id
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Prepare the dict of values to create the new invoice line for a
sales order line. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record line: sale.order.line record to invoice
:param int account_id: optional ID of a G/L account to force
            (this is used e.g. for returning products, including services)
:return: dict of values to create() the invoice line
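        A minimal sketch of a custom override (``my_sale_order_line`` and the
        forced account id are hypothetical)::
            def _prepare_order_line_invoice_line(self, cr, uid, line,
                                                 account_id=False, context=None):
                # hypothetical extension code: force a default income account
                return super(my_sale_order_line, self)._prepare_order_line_invoice_line(
                    cr, uid, line, account_id or 1234, context=context)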
"""
res = {}
if not line.invoiced:
if not account_id:
if line.product_id:
account_id = line.product_id.property_account_income.id
if not account_id:
account_id = line.product_id.categ_id.property_account_income_categ.id
if not account_id:
raise osv.except_osv(_('Error!'),
_('Please define income account for this product: "%s" (id:%d).') % \
(line.product_id.name, line.product_id.id,))
else:
prop = self.pool.get('ir.property').get(cr, uid,
'property_account_income_categ', 'product.category',
context=context)
account_id = prop and prop.id or False
uosqty = self._get_line_qty(cr, uid, line, context=context)
uos_id = self._get_line_uom(cr, uid, line, context=context)
pu = 0.0
if uosqty:
pu = round(line.price_unit * line.product_uom_qty / uosqty,
self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Price'))
fpos = line.order_id.fiscal_position or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, account_id)
if not account_id:
raise osv.except_osv(_('Error!'),
                    _('There is no Fiscal Position defined, or no Income category account defined, in the default properties of the Product Categories.'))
res = {
'name': line.name,
'sequence': line.sequence,
'origin': line.order_id.name,
'account_id': account_id,
'price_unit': pu,
'quantity': uosqty,
'discount': line.discount,
'uos_id': uos_id,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in line.tax_id])],
'account_analytic_id': line.order_id.project_id and line.order_id.project_id.id or False,
}
return res
def invoice_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
create_ids = []
sales = set()
for line in self.browse(cr, uid, ids, context=context):
vals = self._prepare_order_line_invoice_line(cr, uid, line, False, context)
if vals:
inv_id = self.pool.get('account.invoice.line').create(cr, uid, vals, context=context)
self.write(cr, uid, [line.id], {'invoice_lines': [(4, inv_id)]}, context=context)
sales.add(line.order_id.id)
create_ids.append(inv_id)
# Trigger workflow events
for sale_id in sales:
workflow.trg_write(uid, 'sale.order', sale_id, cr)
return create_ids
def button_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.invoiced:
raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sales order line that has already been invoiced.'))
return self.write(cr, uid, ids, {'state': 'cancel'})
def button_confirm(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'confirmed'})
def button_done(self, cr, uid, ids, context=None):
res = self.write(cr, uid, ids, {'state': 'done'})
for line in self.browse(cr, uid, ids, context=context):
workflow.trg_write(uid, 'sale.order', line.order_id.id, cr)
return res
def uos_change(self, cr, uid, ids, product_uos, product_uos_qty=0, product_id=None):
product_obj = self.pool.get('product.product')
if not product_id:
return {'value': {'product_uom': product_uos,
'product_uom_qty': product_uos_qty}, 'domain': {}}
product = product_obj.browse(cr, uid, product_id)
value = {
'product_uom': product.uom_id.id,
}
        # FIXME: must depend on the uos/uom of the product, not only on the coefficient
try:
value.update({
'product_uom_qty': product_uos_qty / product.uos_coeff,
'th_weight': product_uos_qty / product.uos_coeff * product.weight
})
except ZeroDivisionError:
pass
return {'value': value}
def create(self, cr, uid, values, context=None):
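        # When a line is created programmatically with incomplete values, fall
        # back on product_id_change() to derive the missing defaults (name,
        # price, uom, ...); explicitly passed values take precedence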
if values.get('order_id') and values.get('product_id') and any(f not in values for f in ['name', 'price_unit', 'type', 'product_uom_qty', 'product_uom']):
order = self.pool['sale.order'].read(cr, uid, values['order_id'], ['pricelist_id', 'partner_id', 'date_order', 'fiscal_position'], context=context)
defaults = self.product_id_change(cr, uid, [], order['pricelist_id'][0], values['product_id'],
qty=float(values.get('product_uom_qty', False)),
uom=values.get('product_uom', False),
qty_uos=float(values.get('product_uos_qty', False)),
uos=values.get('product_uos', False),
name=values.get('name', False),
partner_id=order['partner_id'][0],
date_order=order['date_order'],
fiscal_position=order['fiscal_position'][0] if order['fiscal_position'] else False,
flag=False, # Force name update
context=context
)['value']
values = dict(defaults, **values)
return super(sale_order_line, self).create(cr, uid, values, context=context)
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state': 'draft', 'invoice_lines': [], 'procurement_ids': []})
return super(sale_order_line, self).copy_data(cr, uid, id, default, context=context)
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
context = context or {}
lang = lang or context.get('lang', False)
if not partner_id:
raise osv.except_osv(_('No Customer Defined!'), _('Before choosing a product,\n select a customer in the sales form.'))
warning = False
product_uom_obj = self.pool.get('product.uom')
partner_obj = self.pool.get('res.partner')
product_obj = self.pool.get('product.product')
context = {'lang': lang, 'partner_id': partner_id}
partner = partner_obj.browse(cr, uid, partner_id)
lang = partner.lang
context_partner = {'lang': lang, 'partner_id': partner_id}
if not product:
return {'value': {'th_weight': 0,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
if not date_order:
date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
result = {}
warning_msgs = ''
product_obj = product_obj.browse(cr, uid, product, context=context_partner)
uom2 = False
if uom:
uom2 = product_uom_obj.browse(cr, uid, uom)
if product_obj.uom_id.category_id.id != uom2.category_id.id:
uom = False
if uos:
if product_obj.uos_id:
uos2 = product_uom_obj.browse(cr, uid, uos)
if product_obj.uos_id.category_id.id != uos2.category_id.id:
uos = False
else:
uos = False
fpos = False
if not fiscal_position:
fpos = partner.property_account_position or False
else:
fpos = self.pool.get('account.fiscal.position').browse(cr, uid, fiscal_position)
        if update_tax:  # not needed when only the quantity has changed
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, product_obj.taxes_id)
if not flag:
result['name'] = self.pool.get('product.product').name_get(cr, uid, [product_obj.id], context=context_partner)[0][1]
if product_obj.description_sale:
result['name'] += '\n'+product_obj.description_sale
domain = {}
if (not uom) and (not uos):
result['product_uom'] = product_obj.uom_id.id
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
uos_category_id = product_obj.uos_id.category_id.id
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
uos_category_id = False
result['th_weight'] = qty * product_obj.weight
domain = {'product_uom':
[('category_id', '=', product_obj.uom_id.category_id.id)],
'product_uos':
[('category_id', '=', uos_category_id)]}
elif uos and not uom: # only happens if uom is False
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id
result['product_uom_qty'] = qty_uos / product_obj.uos_coeff
result['th_weight'] = result['product_uom_qty'] * product_obj.weight
elif uom: # whether uos is set or not
default_uom = product_obj.uom_id and product_obj.uom_id.id
q = product_uom_obj._compute_qty(cr, uid, uom, qty, default_uom)
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
            result['th_weight'] = q * product_obj.weight # theoretical weight from the quantity converted to the default UoM
if not uom2:
uom2 = product_obj.uom_id
# get unit price
if not pricelist:
warn_msg = _('You have to select a pricelist or a customer in the sales form !\n'
'Please set one before choosing a product.')
warning_msgs += _("No Pricelist ! : ") + warn_msg +"\n\n"
else:
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, qty or 1.0, partner_id, {
'uom': uom or result.get('product_uom'),
'date': date_order,
})[pricelist]
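            # price_get() returns a dict keyed by pricelist id; a value of
            # False means no pricelist rule matched this product and quantity.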
if price is False:
warn_msg = _("Cannot find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist.")
warning_msgs += _("No valid pricelist line found ! :") + warn_msg +"\n\n"
else:
result.update({'price_unit': price})
if warning_msgs:
            warning = {
                'title': _('Configuration Error!'),
                'message': warning_msgs,
            }
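        # The returned dict follows the standard onchange convention; an
        # illustrative (not actual) result looks like:
        #   {'value': {'name': u'Service', 'price_unit': 40.0, 'tax_id': [...], ...},
        #    'domain': {'product_uom': [('category_id', '=', 2)], ...},
        #    'warning': False}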
return {'value': result, 'domain': domain, 'warning': warning}
def product_uom_change(self, cursor, user, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, context=None):
context = context or {}
        lang = lang or context.get('lang', False)
if not uom:
            return {'value': {'price_unit': 0.0, 'product_uom': uom or False}}
return self.product_id_change(cursor, user, ids, pricelist, product,
qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name,
partner_id=partner_id, lang=lang, update_tax=update_tax,
date_order=date_order, context=context)
    def unlink(self, cr, uid, ids, context=None):
        """Allow deleting sales order lines only in the 'draft' or 'cancel' state."""
        if context is None:
            context = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a sales order line which is in state \'%s\'.') %(rec.state,))
return super(sale_order_line, self).unlink(cr, uid, ids, context=context)
class res_company(osv.Model):
_inherit = "res.company"
_columns = {
'sale_note': fields.text('Default Terms and Conditions', translate=True, help="Default terms and conditions for quotations."),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'sale.order' and context.get('default_res_id') and context.get('mark_so_as_sent'):
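            # Sending the email for a quotation marks it as sent: autofollow
            # subscribes the email recipients to the sale order, and the
            # workflow signal below moves the quotation to the 'sent' state.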
context = dict(context, mail_post_autofollow=True)
self.pool.get('sale.order').signal_quotation_sent(cr, uid, [context['default_res_id']])
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
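    # For example, context={'default_section_id': 'Direct Sales'} resolves the
    # team through name_search(); an integer id is returned unchanged, and an
    # ambiguous or unknown name yields None (the team name here is illustrative).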
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_defaults = {
'section_id': lambda self, cr, uid, c=None: self._get_default_section_id(cr, uid, context=c)
}
def confirm_paid(self, cr, uid, ids, context=None):
sale_order_obj = self.pool.get('sale.order')
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
so_ids = sale_order_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context)
for so_id in so_ids:
sale_order_obj.message_post(cr, uid, so_id, body=_("Invoice paid"), context=context)
return res
def unlink(self, cr, uid, ids, context=None):
""" Overwrite unlink method of account invoice to send a trigger to the sale workflow upon invoice deletion """
invoice_ids = self.search(cr, uid, [('id', 'in', ids), ('state', 'in', ['draft', 'cancel'])], context=context)
        # If any invoice cannot be cancelled (i.e. is not in draft/cancel), do nothing.
        if len(invoice_ids) == len(ids):
            # Cancel the invoices before deleting them so that any sale order
            # linked to them is moved to the 'invoice exception' state by the workflow.
for id in ids:
workflow.trg_validate(uid, 'account.invoice', id, 'invoice_cancel', cr)
return super(account_invoice, self).unlink(cr, uid, ids, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'sale_line_id': fields.many2one('sale.order.line', string='Sale Order Line'),
}
class product_product(osv.Model):
_inherit = 'product.product'
def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
SaleOrderLine = self.pool['sale.order.line']
return {
            product_id: SaleOrderLine.search_count(cr, uid, [('product_id', '=', product_id)], context=context)
for product_id in ids
}
_columns = {
'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
}
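    # sales_count is a non-stored function field: it is recomputed on read by
    # counting the sale.order.line records that reference each product.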
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| bealdav/OpenUpgrade | addons/sale/sale.py | Python | agpl-3.0 | 67,360 |
import collections
from typing import Any, Dict
from config import paths
Resolution = collections.namedtuple('Resolution', ['height', 'width'])
DATASET_ROOT = {'nuscenes': paths.DIR_NUSCENES}
TRAIN_RESOLUTION = {'nuscenes': Resolution(height=448, width=800)}
def _get_dataset_param(dataset: str, all_params: Dict[str, Any]) -> Any:
if dataset in all_params:
return all_params[dataset]
raise ValueError(f'Unknown dataset: {dataset}')
def get_data_dir(dataset: str) -> str:
return _get_dataset_param(dataset, DATASET_ROOT)
def get_train_resolution(dataset: str) -> Resolution:
return _get_dataset_param(dataset, TRAIN_RESOLUTION)
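# Minimal usage sketch (paths.DIR_NUSCENES comes from the local config module;
# 'kitti' below is just an example of an unregistered dataset name):
#   get_data_dir('nuscenes')          -> paths.DIR_NUSCENES
#   get_train_resolution('nuscenes')  -> Resolution(height=448, width=800)
#   get_data_dir('kitti')             -> raises ValueError('Unknown dataset: kitti')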
| googleinterns/keypoint-mot | src/config/config.py | Python | apache-2.0 | 655 |