repo_name
stringlengths 6
61
| path
stringlengths 4
230
| copies
stringlengths 1
3
| size
stringlengths 4
6
| text
stringlengths 1.01k
850k
| license
stringclasses 15
values | hash
int64 -9,220,477,234,079,998,000
9,219,060,020B
| line_mean
float64 11.6
96.6
| line_max
int64 32
939
| alpha_frac
float64 0.26
0.9
| autogenerated
bool 1
class | ratio
float64 1.62
6.1
| config_test
bool 2
classes | has_no_keywords
bool 2
classes | few_assignments
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/lily/translate.py | 1 | 101025 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: lily/translate.py
# Purpose: music21 classes for translating to Lilypond
#
# Authors: Michael Scott Cuthbert
#
# Copyright: Copyright © 2007-2012 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
music21 translates to Lilypond format and if Lilypond is installed on the
local computer, can automatically generate .pdf, .png, and .svg versions
of musical files using Lilypond
this replaces (July 2012) the old LilyString() conversion methods.
'''
from __future__ import unicode_literals
import os
import subprocess
import sys
import re
# import threading
import unittest
from music21 import common
from music21 import duration
from music21 import environment
from music21 import exceptions21
from music21 import variant
from music21 import note
from music21.lily import lilyObjects as lyo
from music21.ext import six
_MOD = 'lily.translate2012.py'
environLocal = environment.Environment(_MOD)
try:
# optional imports for PIL
from PIL import Image
from PIL import ImageOps
noPIL = False
except ImportError:
try:
import Image
import ImageOps
noPIL = False
except ImportError:
noPIL = True
from music21 import corpus
### speed up tests! move to music21 base...
class _sharedCorpusTestObject(object):
sharedCache = {}
sharedCacheObject = _sharedCorpusTestObject()
def _getCachedCorpusFile(keyName):
#return corpus.parse(keyName)
if keyName not in sharedCacheObject.sharedCache:
sharedCacheObject.sharedCache[keyName] = corpus.parse(keyName)
return sharedCacheObject.sharedCache[keyName]
#b.parts[0].measure(4)[2].color = 'blue'#.rightBarline = 'double'
def makeLettersOnlyId(inputString):
'''
Takes an id and makes it purely letters by substituting
letters for all other characters.
>>> print(lily.translate.makeLettersOnlyId('rainbow123@@dfas'))
rainbowxyzmmdfas
'''
inputString = str(inputString)
returnString = ''
for c in inputString:
if not c.isalpha():
c = chr(ord(c) % 26 + 97)
returnString += c
return returnString
#-------------------------------------------------------------------------------
class LilypondConverter(object):
fictaDef = \
r'''
ficta = #(define-music-function (parser location) () #{ \once \set suggestAccidentals = ##t #})
'''.lstrip()
colorDef = \
r'''
color = #(define-music-function (parser location color) (string?) #{
\once \override NoteHead #'color = #(x11-color color)
\once \override Stem #'color = #(x11-color color)
\once \override Rest #'color = #(x11-color color)
\once \override Beam #'color = #(x11-color color)
#})
'''.lstrip()
simplePaperDefinitionScm = r'''
\paper { #(define dump-extents #t)
indent = 0\mm
force-assignment = #""
oddFooterMarkup=##f
oddHeaderMarkup=##f
bookTitleMarkup=##f
}
'''.lstrip()
transparencyStartScheme = r'''
\override Rest #'transparent = ##t
\override Dots #'transparent = ##t
'''.lstrip()
transparencyStopScheme = r'''
\revert Rest #'transparent
\revert Dots #'transparent
'''.lstrip()
bookHeader = r'''
\include "lilypond-book-preamble.ly"
'''.lstrip()
accidentalConvert = {"double-sharp": u"isis",
"double-flat": u"eses",
"one-and-a-half-sharp": u"isih",
"one-and-a-half-flat": u"eseh",
"sharp": u"is",
"flat": u"es",
"half-sharp": u"ih",
"half-flat": u"eh",
}
barlineDict = {'regular': '|',
'dotted': ':',
'dashed': 'dashed',
'heavy': '.', #??
'double': '||',
'final': '|.',
'heavy-light': '.|',
'heavy-heavy': '.|.',
'start-repeat': '|:',
'end-repeat': ':|',
# no music21 support for |.| lightHeavyLight yet
'tick': '\'',
#'short': '', # no lilypond support??
'none': '',
}
def __init__(self):
self.topLevelObject = lyo.LyLilypondTop()
self.setupTools()
self.context = self.topLevelObject
self.storedContexts = []
self.doNotOutput = []
self.currentMeasure = None
self.addedVariants = []
self.variantColors = ['blue', 'red', 'purple', 'green', 'orange', 'yellow', 'grey']
self.coloredVariants = False
self.variantMode = False
self.LILYEXEC = None
self.tempName = None
self.inWord = None
def findLilyExec(self):
if os.path.exists(environLocal['lilypondPath']):
LILYEXEC = environLocal['lilypondPath']
else:
if sys.platform == "darwin":
LILYEXEC = '/Applications/Lilypond.app/Contents/Resources/bin/lilypond'
if not os.path.exists(LILYEXEC):
LILYEXEC = 'lilypond'
elif sys.platform == 'win32' and os.path.exists('c:/Program Files (x86)'):
LILYEXEC = r'c:/Program\ Files\ (x86)/lilypond/usr/bin/lilypond'
if not os.path.exists(LILYEXEC) and not os.path.exists(LILYEXEC + '.exe'):
LILYEXEC = 'lilypond'
elif sys.platform == 'win32':
LILYEXEC = r'c:/Program\ Files/lilypond/usr/bin/lilypond'
if not os.path.exists(LILYEXEC) and not os.path.exists(LILYEXEC + '.exe'):
LILYEXEC = 'lilypond'
else:
LILYEXEC = 'lilypond'
self.LILYEXEC = LILYEXEC
return LILYEXEC
def setupTools(self):
LILYEXEC = self.findLilyExec()
command = [LILYEXEC, '--version']
try:
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
except OSError:
raise LilyTranslateException("Cannot find a copy of Lilypond installed on your system. " +
"Please be sure it is installed. And that your " +
"environment.UserSettings()['lilypondPath'] is set to find it.")
stdout, unused = proc.communicate()
if six.PY3:
stdout = stdout.decode(encoding='utf-8')
versionString = stdout.split()[2]
versionPieces = versionString.split('.')
self.majorVersion = versionPieces[0]
self.minorVersion = versionPieces[1]
#self.majorVersion = 2 # this should be obtained from user and/or user's system
#self.minorVersion = 13
self.versionString = self.topLevelObject.backslash + "version " + self.topLevelObject.quoteString(str(self.majorVersion) + '.' + str(self.minorVersion))
self.versionScheme = lyo.LyEmbeddedScm(self.versionString)
self.headerScheme = lyo.LyEmbeddedScm(self.bookHeader)
self.backend = 'ps'
if int(self.majorVersion) >= 2:
if int(self.minorVersion) >= 11:
self.backendString = '-dbackend='
else:
self.backendString = '--backend='
else:
self.backendString = '--backend='
# I had a note that said 2.12 and > should use 'self.backendString = '--formats=' ' but doesn't seem true
def newContext(self, newContext):
self.storedContexts.append(self.context)
self.context = newContext
def restoreContext(self):
try:
self.context = self.storedContexts.pop()
except IndexError:
self.context = self.topLevelObject
#------------ Set a complete Lilypond Tree from a music21 object ----------#
def textFromMusic21Object(self, m21ObjectIn):
r'''
get a proper lilypond text file for writing from a music21 object
>>> n = note.Note()
>>> print(lily.translate.LilypondConverter().textFromMusic21Object(n))
\version "2..."
\include "lilypond-book-preamble.ly"
color = #(define-music-function (parser location color) (string?) #{
\once \override NoteHead #'color = #(x11-color color)
\once \override Stem #'color = #(x11-color color)
\once \override Rest #'color = #(x11-color color)
\once \override Beam #'color = #(x11-color color)
#})
\header { }
\score {
<< \new Staff = ... { c' 4
}
>>
}
\paper { }
...
'''
self.loadFromMusic21Object(m21ObjectIn)
s = str(self.topLevelObject)
s = re.sub(r'\s*\n\s*\n', '\n', s).strip()
return s
def loadFromMusic21Object(self, m21ObjectIn):
r'''
Create a Lilypond object hierarchy in self.topLevelObject from an
arbitrary music21 object.
TODO: make lilypond automatically run makeNotation.makeTupletBrackets(s)
TODO: Add tests...
'''
from music21 import stream
c = m21ObjectIn.classes
if 'Stream' in c:
if m21ObjectIn.recurse().variants:
## has variants so we need to make a deepcopy...
m21ObjectIn = variant.makeAllVariantsReplacements(m21ObjectIn, recurse = True)
m21ObjectIn.makeVariantBlocks()
if ('Stream' not in c) or ('Measure' in c) or ('Voice' in c):
scoreObj = stream.Score()
partObj = stream.Part()
# no need for measures or voices...
partObj.insert(0, m21ObjectIn)
scoreObj.insert(0, partObj)
self.loadObjectFromScore(scoreObj, makeNotation = False)
elif 'Part' in c:
scoreObj = stream.Score()
scoreObj.insert(0, m21ObjectIn)
self.loadObjectFromScore(scoreObj, makeNotation = False)
elif 'Score' in c:
self.loadObjectFromScore(m21ObjectIn, makeNotation = False)
elif 'Opus' in c:
self.loadObjectFromOpus(m21ObjectIn, makeNotation = False)
else: # treat as part...
scoreObj = stream.Score()
scoreObj.insert(0, m21ObjectIn)
self.loadObjectFromScore(scoreObj, makeNotation = False)
#raise LilyTranslateException("Unknown stream type %s." % (m21ObjectIn.__class__))
def loadObjectFromOpus(self, opusIn = None, makeNotation = True):
r'''
creates a filled topLevelObject (lily.lilyObjects.LyLilypondTop)
whose string representation accurately reflects all the Score objects
in this Opus object.
>>> #_DOCS_SHOW fifeOpus = corpus.parse('miscFolk/americanfifeopus.abc')
>>> #_DOCS_SHOW lpc = lily.translate.LilypondConverter()
>>> #_DOCS_SHOW lpc.loadObjectFromOpus(fifeOpus, makeNotation = False)
>>> #_DOCS_SHOW lpc.showPDF()
'''
contents = []
lpVersionScheme = self.versionScheme
lpHeaderScheme = self.headerScheme
lpColorScheme = lyo.LyEmbeddedScm(self.colorDef)
contents.append(lpVersionScheme)
contents.append(lpHeaderScheme)
contents.append(lpColorScheme)
for thisScore in opusIn.scores:
if makeNotation is True:
thisScore = thisScore.makeNotation(inPlace = False)
lpHeader = lyo.LyLilypondHeader()
lpScoreBlock = self.lyScoreBlockFromScore(thisScore)
if thisScore.metadata is not None:
self.setHeaderFromMetadata(thisScore.metadata, lpHeader = lpHeader)
contents.append(lpHeader)
contents.append(lpScoreBlock)
lpOutputDefHead = lyo.LyOutputDefHead(defType = 'paper')
lpOutputDefBody = lyo.LyOutputDefBody(outputDefHead = lpOutputDefHead)
lpOutputDef = lyo.LyOutputDef(outputDefBody = lpOutputDefBody)
contents.append(lpOutputDef)
lpLayout = lyo.LyLayout()
contents.append(lpLayout)
self.context.contents = contents
def loadObjectFromScore(self, scoreIn = None, makeNotation = True):
r'''
creates a filled topLevelObject (lily.lilyObjects.LyLilypondTop)
whose string representation accurately reflects this Score object.
>>> lpc = lily.translate.LilypondConverter()
>>> #_DOCS_SHOW b = corpus.parse('bach/bwv66.6')
>>> b = lily.translate._getCachedCorpusFile('bach/bwv66.6') #_DOCS_HIDE
>>> lpc.loadObjectFromScore(b)
>>> #print lpc.topLevelObject
'''
if makeNotation is True:
scoreIn = scoreIn.makeNotation(inPlace = False)
lpVersionScheme = self.versionScheme
lpHeaderScheme = self.headerScheme
lpColorScheme = lyo.LyEmbeddedScm(self.colorDef)
lpHeader = lyo.LyLilypondHeader()
# here's the heavy work...
lpScoreBlock = self.lyScoreBlockFromScore(scoreIn)
lpOutputDefHead = lyo.LyOutputDefHead(defType = 'paper')
lpOutputDefBody = lyo.LyOutputDefBody(outputDefHead = lpOutputDefHead)
lpOutputDef = lyo.LyOutputDef(outputDefBody = lpOutputDefBody)
lpLayout = lyo.LyLayout()
contents = [lpVersionScheme, lpHeaderScheme, lpColorScheme, lpHeader, lpScoreBlock, lpOutputDef, lpLayout]
if scoreIn.metadata is not None:
self.setHeaderFromMetadata(scoreIn.metadata, lpHeader = lpHeader)
self.context.contents = contents
#------- return Lily objects or append to the current context -----------#
def lyScoreBlockFromScore(self, scoreIn):
lpCompositeMusic = lyo.LyCompositeMusic()
self.newContext(lpCompositeMusic)
# Also get the variants, and the total number of measures here and make start each
# staff context with { \stopStaff s1*n} where n is the number of measures.
if hasattr(scoreIn, 'parts') and scoreIn.iter.parts: # or has variants
if scoreIn.recurse().variants:
lpPartsAndOssiaInit = self.lyPartsAndOssiaInitFromScore(scoreIn)
lpGroupedMusicList = self.lyGroupedMusicListFromScoreWithParts(
scoreIn,
scoreInit=lpPartsAndOssiaInit)
else:
lpGroupedMusicList = self.lyGroupedMusicListFromScoreWithParts(scoreIn)
lpCompositeMusic.groupedMusicList = lpGroupedMusicList
else:
# treat as a part...
lpPrefixCompositeMusic = self.lyPrefixCompositeMusicFromStream(scoreIn)
lpCompositeMusic.prefixCompositeMusic = lpPrefixCompositeMusic
lpMusic = lyo.LyMusic(compositeMusic = lpCompositeMusic)
lpScoreBody = lyo.LyScoreBody(music = lpMusic)
lpScoreBlock = lyo.LyScoreBlock(scoreBody = lpScoreBody)
self.restoreContext()
return lpScoreBlock
def lyPartsAndOssiaInitFromScore(self, scoreIn):
r'''
Takes in a score and returns a block that starts each part context and variant context
with an identifier and {\stopStaff s1*n} (or s, whatever is needed for the duration)
where n is the number of measures in the score.
>>> import copy
Set up score:
>>> s = stream.Score()
>>> p1,p2 = stream.Part(), stream.Part()
>>> p1.insert(0, meter.TimeSignature('4/4'))
>>> p2.insert(0, meter.TimeSignature('4/4'))
>>> p1.append(variant.Variant(name = 'london'))
>>> p2.append(variant.Variant(name = 'london'))
>>> p1.append(variant.Variant(name = 'rome'))
>>> p2.append(variant.Variant(name = 'rome'))
>>> for i in range(4):
... m = stream.Measure()
... n = note.Note('D4', type='whole')
... m.append(n)
... p1.append(m)
... p2.append(copy.deepcopy(m))
>>> p1.id = 'pa'
>>> p2.id = 'pb'
>>> s.append(p1)
>>> s.append(p2)
Run method
>>> lpc = lily.translate.LilypondConverter()
>>> print(lpc.lyPartsAndOssiaInitFromScore(s))
\new Staff = pa { \stopStaff s1 s1 s1 s1 }
\new Staff = londonpa
\with {
\remove "Time_signature_engraver"
alignAboveContext = #"pa"
fontSize = #-3
\override StaffSymbol #'staff-space = #(magstep -3)
\override StaffSymbol #'thickness = #(magstep -3)
\override TupletBracket #'bracket-visibility = ##f
\override TupletNumber #'stencil = ##f
\override Clef #'transparent = ##t
\override OctavateEight #'transparent = ##t
\consists "Default_bar_line_engraver"
}
{ \stopStaff s1 s1 s1 s1 }
\new Staff = romepa
\with {
\remove "Time_signature_engraver"
alignAboveContext = #"pa"
fontSize = #-3
\override StaffSymbol #'staff-space = #(magstep -3)
\override StaffSymbol #'thickness = #(magstep -3)
\override TupletBracket #'bracket-visibility = ##f
\override TupletNumber #'stencil = ##f
\override Clef #'transparent = ##t
\override OctavateEight #'transparent = ##t
\consists "Default_bar_line_engraver"
}
{ \stopStaff s1 s1 s1 s1 }
\new Staff = pb { \stopStaff s1 s1 s1 s1 }
\new Staff = londonpb
\with {
\remove "Time_signature_engraver"
alignAboveContext = #"pb...
fontSize = #-3
\override StaffSymbol #'staff-space = #(magstep -3)
\override StaffSymbol #'thickness = #(magstep -3)
\override TupletBracket #'bracket-visibility = ##f
\override TupletNumber #'stencil = ##f
\override Clef #'transparent = ##t
\override OctavateEight #'transparent = ##t
\consists "Default_bar_line_engraver"
}
{ \stopStaff s1 s1 s1 s1 }
\new Staff = romepb
\with {
\remove "Time_signature_engraver"
alignAboveContext = #"pb...
fontSize = #-3
\override StaffSymbol #'staff-space = #(magstep -3)
\override StaffSymbol #'thickness = #(magstep -3)
\override TupletBracket #'bracket-visibility = ##f
\override TupletNumber #'stencil = ##f
\override Clef #'transparent = ##t
\override OctavateEight #'transparent = ##t
\consists "Default_bar_line_engraver"
}
{ \stopStaff s1 s1 s1 s1 }
'''
lpMusicList = lyo.LyMusicList()
musicList = []
lpMusic = r'{ \stopStaff %s}'
for p in scoreIn.parts:
partIdText = makeLettersOnlyId(p.id)
partId = lyo.LyOptionalId(partIdText)
spacerDuration = self.getLySpacersFromStream(p)
lpPrefixCompositeMusicPart = lyo.LyPrefixCompositeMusic(type = 'new',
optionalId = partId,
simpleString = 'Staff',
music = lpMusic % spacerDuration)
musicList.append(lpPrefixCompositeMusicPart)
variantsAddedForPart = []
for v in p.variants:
variantName = v.groups[0]
if not variantName in variantsAddedForPart:
self.addedVariants.append(variantName)
variantsAddedForPart.append(variantName)
variantId = lyo.LyOptionalId(makeLettersOnlyId(variantName)+partIdText)
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'new',
optionalId = variantId,
simpleString = 'Staff',
music = lpMusic % spacerDuration)
contextModList = [r'\remove "Time_signature_engraver"',
r'alignAboveContext = #"%s"' % partIdText,
r'fontSize = #-3',
r"\override StaffSymbol #'staff-space = #(magstep -3)",
r"\override StaffSymbol #'thickness = #(magstep -3)",
r"\override TupletBracket #'bracket-visibility = ##f",
r"\override TupletNumber #'stencil = ##f",
r"\override Clef #'transparent = ##t",
r"\override OctavateEight #'transparent = ##t",
r'\consists "Default_bar_line_engraver"',
]
optionalContextMod = lyo.LyContextModification(contextModList)
lpPrefixCompositeMusicVariant.optionalContextMod = optionalContextMod
musicList.append(lpPrefixCompositeMusicVariant)
lpMusicList.contents = musicList
return lpMusicList
def getLySpacersFromStream(self, streamIn, measuresOnly = True):
'''
Creates a series of Spacer objects for the measures in a Stream Part.
>>> m1 = stream.Measure(converter.parse("tinynotation: 3/4 a2."))
>>> m2 = stream.Measure(converter.parse("tinynotation: 3/4 b2."))
>>> m3 = stream.Measure(converter.parse("tinynotation: 4/4 a1"))
>>> m4 = stream.Measure(converter.parse("tinynotation: 4/4 b1"))
>>> m5 = stream.Measure(converter.parse("tinynotation: 4/4 c1"))
>>> m6 = stream.Measure(converter.parse("tinynotation: 5/4 a4 b1"))
>>> streamIn = stream.Stream([m1, m2, m3, m4, m5, m6])
>>> lpc = lily.translate.LilypondConverter()
>>> print(lpc.getLySpacersFromStream(streamIn))
s2. s2. s1 s1 s1 s1 s4
TODO: Low-priority... rare, but possible: tuplet time signatures (3/10)...
'''
returnString = ''
#mostRecentDur = ''
#recentDurCount = 0
for el in streamIn:
if not "Measure" in el.classes:
continue
if el.duration.quarterLength == 0.0:
continue
try:
dur = str(self.lyMultipliedDurationFromDuration(el.duration))
returnString = returnString + 's'+ dur
# general exception is the only way to catch str exceptions
except: #pylint: disable=bare-except
for c in el.duration.components:
dur = str(self.lyMultipliedDurationFromDuration(c))
returnString = returnString + 's'+ dur
#if dur == mostRecentDur:
# recentDurCount += 1
#else:
# mostRecentDur = dur
# recentDurCount = 0
#if recentDurCount != 0:
# returnString = returnString + '*' + str(recentDurCount)
return returnString
def lyGroupedMusicListFromScoreWithParts(self, scoreIn, scoreInit = None):
r'''
More complex example showing how the score can be set up with ossia parts...
>>> lpc = lily.translate.LilypondConverter()
>>> #_DOCS_SHOW b = corpus.parse('bach/bwv66.6')
>>> b = lily.translate._getCachedCorpusFile('bach/bwv66.6') #_DOCS_HIDE
>>> lpPartsAndOssiaInit = lpc.lyPartsAndOssiaInitFromScore(b)
>>> lpGroupedMusicList = lpc.lyGroupedMusicListFromScoreWithParts(b, scoreInit = lpPartsAndOssiaInit)
>>> print(lpGroupedMusicList)
<BLANKLINE>
<< \new Staff = Soprano { \stopStaff s4 s1 s1 s1 s1 s1 s1 s1 s1 s2. }
\new Staff = Alto { \stopStaff s4 s1 s1 s1 s1 s1 s1 s1 s1 s2. }
\new Staff = Tenor { \stopStaff s4 s1 s1 s1 s1 s1 s1 s1 s1 s2. }
\new Staff = Bass { \stopStaff s4 s1 s1 s1 s1 s1 s1 s1 s1 s2. }
<BLANKLINE>
\context Staff = Soprano \with {
\autoBeamOff
}
{ \startStaff \partial 32*8
\clef "treble"
\key fis \minor
\time 4/4
\set stemRightBeamCount = #1
\once \override Stem #'direction = #DOWN
cis'' 8 [
\set stemLeftBeamCount = #1
\once \override Stem #'direction = #DOWN
b... 8 ]
\bar "|" %{ end measure 0 %}
\once \override Stem #'direction = #UP
a' 4
\once \override Stem #'direction = #DOWN
b... 4
\once \override Stem #'direction = #DOWN
cis'' 4 \fermata
\once \override Stem #'direction = #DOWN
e'' 4
\bar "|" %{ end measure 1 %}
\once \override Stem #'direction = #DOWN
cis'' 4
...
}
<BLANKLINE>
<BLANKLINE>
\context Staff = Alto \with {
\autoBeamOff
}
{ \startStaff \partial 32*8
\clef "treble"...
\once \override Stem #'direction = #UP
e' 4
\bar "|" %{ end measure 0 %}
\once \override Stem #'direction = #UP
fis' 4
\once \override Stem #'direction = #UP
e' 4
...
}
<BLANKLINE>
<BLANKLINE>
>>
<BLANKLINE>
'''
compositeMusicList = []
lpGroupedMusicList = lyo.LyGroupedMusicList()
lpSimultaneousMusic = lyo.LySimultaneousMusic()
lpMusicList = lyo.LyMusicList()
lpSimultaneousMusic.musicList = lpMusicList
lpGroupedMusicList.simultaneousMusic = lpSimultaneousMusic
self.newContext(lpMusicList)
if scoreInit is None:
for p in scoreIn.parts:
compositeMusicList.append(self.lyPrefixCompositeMusicFromStream(p))
else:
compositeMusicList.append(scoreInit)
for p in scoreIn.parts:
compositeMusicList.append(self.lyPrefixCompositeMusicFromStream(p, type='context', beforeMatter = 'startStaff'))
self.restoreContext()
lpMusicList.contents = compositeMusicList
return lpGroupedMusicList
def lyNewLyricsFromStream(self, streamIn, streamId = None, alignment = 'alignBelowContext' ):
r'''
returns a LyNewLyrics object
This is a little bit of a hack. This should be switched over to using a
prefixed context thing with \new Lyric = "id" \with { } {}
>>> s = converter.parse('tinyNotation: 4/4 c4_hel- d4_-lo r4 e4_world')
>>> s.makeMeasures(inPlace = True)
>>> s.id = 'helloWorld'
>>> lpc = lily.translate.LilypondConverter()
>>> lyNewLyrics = lpc.lyNewLyricsFromStream(s)
>>> print(lyNewLyrics)
\addlyrics { \set alignBelowContext = #"helloWorld"
"hel" --
"lo"__
"world"
}
'''
lyricsDict = streamIn.lyrics(skipTies = True)
if streamId is None:
streamId = makeLettersOnlyId(streamIn.id)
streamId = "#"+ lyo.LyObject().quoteString(streamId)
lpGroupedMusicLists = []
for lyricNum in sorted(lyricsDict):
lyricList = []
lpAlignmentProperty = lyo.LyPropertyOperation(mode = 'set', value1 = alignment, value2 = streamId)
lyricList.append(lpAlignmentProperty)
self.inWord = False
for el in lyricsDict[lyricNum]:
lpLyricElement = self.lyLyricElementFromM21Lyric(el)
lyricList.append(lpLyricElement)
self.inWord = False
lpLyricList = lyo.LyMusicList(lyricList)
lpSequentialMusic = lyo.LySequentialMusic(musicList = lpLyricList)
lpGroupedMusicList = lyo.LyGroupedMusicList(sequentialMusic = lpSequentialMusic)
lpGroupedMusicLists.append(lpGroupedMusicList)
lpNewLyrics = lyo.LyNewLyrics(groupedMusicLists = lpGroupedMusicLists)
return lpNewLyrics
def lyLyricElementFromM21Lyric(self, m21Lyric):
'''
Returns a :class:`~music21.lily.lilyObjects.LyLyricElement` object
from a :class:`~music21.note.Lyric` object.
Uses self.inWord to keep track of whether or not we're in the middle of
a word.
>>> s = converter.parse('tinyNotation: 4/4 c4_hel- d4_-lo r2 e2 f2_world')
>>> s.makeMeasures(inPlace = True)
>>> lyrics = s.lyrics()[1] # get first verse (yes, 1 = first, not 0!)
>>> lpc = lily.translate.LilypondConverter()
>>> lpc.lyLyricElementFromM21Lyric(lyrics[0])
<music21.lily.lilyObjects.LyLyricElement object...'"hel" --'>
>>> lpc.inWord
True
>>> lpc.lyLyricElementFromM21Lyric(lyrics[1])
<music21.lily.lilyObjects.LyLyricElement object...'"lo"__'>
>>> lpc.lyLyricElementFromM21Lyric(lyrics[2])
<music21.lily.lilyObjects.LyLyricElement object...' _ '>
>>> lpc.lyLyricElementFromM21Lyric(lyrics[3])
<music21.lily.lilyObjects.LyLyricElement object...'"world"'>
>>> lpc.inWord
False
'''
if hasattr(self, 'inWord'):
inWord = self.inWord
else:
inWord = False
el = m21Lyric
if el is None and inWord:
text = ' _ '
elif el is None and inWord is False:
text = ' _ '
elif el.text == '':
text = ' _ '
else:
text = '"' + el.text + '"'
if el.syllabic == 'end':
text = text + '__'
inWord = False
elif el.syllabic == 'begin' or el.syllabic == 'middle':
text = text + ' --'
inWord = True
else:
text = text
self.inWord = inWord
lpLyricElement = lyo.LyLyricElement(text)
return lpLyricElement
def lySequentialMusicFromStream(self, streamIn, beforeMatter = None):
r'''
returns a LySequentialMusic object from a stream
>>> c = converter.parse('tinynotation: 3/4 C4 D E F2.')
>>> lpc = lily.translate.LilypondConverter()
>>> lySequentialMusicOut = lpc.lySequentialMusicFromStream(c)
>>> lySequentialMusicOut
<music21.lily.lilyObjects.LySequentialMusic object at 0x...>
>>> print(lySequentialMusicOut)
{ \clef "bass"
\time 3/4
c 4
d 4
e 4
\bar "|" %{ end measure 1 %}
f 2.
\bar "|." %{ end measure 2 %}
}
<BLANKLINE>
'''
musicList = []
lpMusicList = lyo.LyMusicList(contents = musicList)
lpSequentialMusic = lyo.LySequentialMusic(musicList = lpMusicList, beforeMatter = beforeMatter)
self.newContext(lpMusicList)
self.appendObjectsToContextFromStream(streamIn)
lyObject = self.closeMeasure()
if lyObject is not None:
musicList.append(lyObject)
self.restoreContext()
return lpSequentialMusic
def lyPrefixCompositeMusicFromStream(self, streamIn, contextType = None, type = None, beforeMatter = None): #@ReservedAssignment
r'''
returns an LyPrefixCompositeMusic object from
a stream (generally a part, but who knows...)
>>> c = converter.parse('tinynotation: 3/4 C4 D E F2.')
>>> c.staffLines = 4
>>> lpc = lily.translate.LilypondConverter()
>>> lyPrefixCompositeMusicOut = lpc.lyPrefixCompositeMusicFromStream(c, contextType='Staff')
>>> lyPrefixCompositeMusicOut
<music21.lily.lilyObjects.LyPrefixCompositeMusic object at 0x...>
>>> print(lyPrefixCompositeMusicOut)
\new Staff = ... \with {
\override StaffSymbol #'line-count = #4
}
{ \clef "bass"
\time 3/4
c 4
d 4
e 4
\bar "|" %{ end measure 1 %}
f 2.
\bar "|." %{ end measure 2 %}
}
<BLANKLINE>
<BLANKLINE>
'''
compositeMusicType = type
optionalId = None
contextModList = []
c = streamIn.classes
if contextType is None:
if 'Part' in c:
newContext = 'Staff'
optionalId = lyo.LyOptionalId(makeLettersOnlyId(streamIn.id))
elif 'Voice' in c:
newContext = 'Voice'
else:
newContext = 'Voice'
else:
newContext = contextType
optionalId = lyo.LyOptionalId(makeLettersOnlyId(streamIn.id))
if streamIn.streamStatus.haveBeamsBeenMade() is True:
contextModList.append(r"\autoBeamOff ")
if hasattr(streamIn, 'staffLines') and streamIn.staffLines != 5:
contextModList.append(r"\override StaffSymbol #'line-count = #%d" % streamIn.staffLines)
if streamIn.staffLines % 2 == 0: # even stafflines need a change...
pass
lpNewLyrics = self.lyNewLyricsFromStream(streamIn, streamId = makeLettersOnlyId(streamIn.id))
lpSequentialMusic = self.lySequentialMusicFromStream(streamIn, beforeMatter = beforeMatter)
lpGroupedMusicList = lyo.LyGroupedMusicList(sequentialMusic = lpSequentialMusic)
lpCompositeMusic = lyo.LyCompositeMusic(groupedMusicList = lpGroupedMusicList, newLyrics = lpNewLyrics)
lpMusic = lyo.LyMusic(compositeMusic = lpCompositeMusic)
if compositeMusicType is None:
compositeMusicType = 'new'
if contextModList:
contextMod = lyo.LyContextModification(contextModList)
else:
contextMod = None
lpPrefixCompositeMusic = lyo.LyPrefixCompositeMusic(type = compositeMusicType,
optionalId = optionalId,
simpleString = newContext,
optionalContextMod = contextMod,
music = lpMusic)
return lpPrefixCompositeMusic
def appendObjectsToContextFromStream(self, streamObject):
r'''
takes a Stream and appends all the elements in it to the current
context's .contents list, and deals with creating Voices in it. It also deals with
variants in it.
(should eventually replace the main Score parts finding tools)
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.context.contents
[]
>>> c = converter.parse('tinynotation: 3/4 c4 d- e#')
>>> lpc.appendObjectsToContextFromStream(c)
>>> print(lpc.context.contents)
[<music21.lily.lilyObjects.LyEmbeddedScm...>, <music21.lily.lilyObjects.LySimpleMusic...>, <music21.lily.lilyObjects.LySimpleMusic...>, <music21.lily.lilyObjects.LySimpleMusic...]
>>> print(lpc.context)
\clef "treble"
\time 3/4
c' 4
des' 4
eis' 4
<BLANKLINE>
>>> v1 = stream.Voice()
>>> v1.append(note.Note("C5", quarterLength = 4.0))
>>> v2 = stream.Voice()
>>> v2.append(note.Note("C#5", quarterLength = 4.0))
>>> m = stream.Measure()
>>> m.insert(0, v1)
>>> m.insert(0, v2)
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.appendObjectsToContextFromStream(m)
>>> print(lpc.context) # internal spaces removed...
<< \new Voice { c'' 1
\bar "|." %{ end measure 1 %}
}
\new Voice { cis'' 1
}
>>
'''
for groupedElements in streamObject.groupElementsByOffset():
#print groupedElements
if len(groupedElements) == 1: # one thing at that moment...
el = groupedElements[0]
el.activeSite = streamObject
self.appendM21ObjectToContext(el)
else: # voices or other More than one thing at once...
# if voices
voiceList = []
variantList = []
otherList = []
for el in groupedElements:
if 'Voice' in el.classes:
voiceList.append(el)
elif 'Variant' in el.classes:
variantList.append(el)
else:
el.activeSite = streamObject
otherList.append(el)
if len(variantList) > 0:
for v in variantList:
v.activeSite = streamObject
self.appendContextFromVariant(variantList, activeSite = streamObject, coloredVariants = self.coloredVariants)
if len(voiceList) > 0:
musicList2 = []
lp2GroupedMusicList = lyo.LyGroupedMusicList()
lp2SimultaneousMusic = lyo.LySimultaneousMusic()
lp2MusicList = lyo.LyMusicList()
lp2SimultaneousMusic.musicList = lp2MusicList
lp2GroupedMusicList.simultaneousMusic = lp2SimultaneousMusic
for voice in voiceList:
if voice not in self.doNotOutput:
lpPrefixCompositeMusic = self.lyPrefixCompositeMusicFromStream(voice)
musicList2.append(lpPrefixCompositeMusic)
lp2MusicList.contents = musicList2
contextObject = self.context
currentMusicList = contextObject.contents
currentMusicList.append(lp2GroupedMusicList)
lp2GroupedMusicList.setParent(self.context)
if len(otherList) > 0:
for el in otherList:
self.appendM21ObjectToContext(el)
def appendM21ObjectToContext(self, thisObject):
'''
converts any type of object into a lilyObject of LyMusic (
LySimpleMusic, LyEmbeddedScm etc.) type
'''
if thisObject in self.doNotOutput:
return
### treat complex duration objects as multiple objects
c = thisObject.classes
if 'Stream' not in c and thisObject.duration.type == 'complex':
thisObjectSplit = thisObject.splitAtDurations()
for subComponent in thisObjectSplit:
self.appendM21ObjectToContext(subComponent)
return
contextObject = self.context
if hasattr(contextObject, 'contents'):
currentMusicList = contextObject.contents
else:
raise LilyTranslateException("Cannot get a currentMusicList from contextObject %r" % contextObject)
if hasattr(thisObject, 'startTransparency') and thisObject.startTransparency is True:
# old hack, replace with the better "hidden" attribute
lyScheme = lyo.LyEmbeddedScm(self.transparencyStartScheme)
currentMusicList.append(lyScheme)
lyObject = None
if "Measure" in c:
## lilypond does not put groups around measures...
## it does however need barline ends
## also, if variantMode is True, the last note in each "measure" should have \noBeam
closeMeasureObj = self.closeMeasure() # could be None
if closeMeasureObj is not None:
currentMusicList.append(closeMeasureObj)
closeMeasureObj.setParent(contextObject)
padObj = self.getSchemeForPadding(thisObject)
if padObj is not None:
currentMusicList.append(padObj)
padObj.setParent(contextObject)
## here we go!
self.appendObjectsToContextFromStream(thisObject)
self.currentMeasure = thisObject
elif "Stream" in c:
#try:
lyObject = self.lyPrefixCompositeMusicFromStream(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
#except AttributeError as ae:
# raise Exception("Cannot parse %s: %s" % (thisObject, str(ae)))
elif "Note" in c or "Rest" in c:
self.appendContextFromNoteOrRest(thisObject)
elif "Chord" in c:
lyObject = self.lySimpleMusicFromChord(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "Clef" in c:
lyObject = self.lyEmbeddedScmFromClef(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "KeySignature" in c:
lyObject = self.lyEmbeddedScmFromKeySignature(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "TimeSignature" in c and self.variantMode is False:
lyObject = self.lyEmbeddedScmFromTimeSignature(thisObject)
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "Variant" in c:
self.appendContextFromVariant(thisObject, coloredVariants=self.coloredVariants)
elif "SystemLayout" in c:
lyObject = lyo.LyEmbeddedScm(r'\break')
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
elif "PageLayout" in c:
lyObject = lyo.LyEmbeddedScm(r'\pageBreak')
currentMusicList.append(lyObject)
lyObject.setParent(contextObject)
else:
lyObject = None
if hasattr(thisObject, 'stopTransparency') and thisObject.stopTransparency is True:
# old hack, replace with the better "hidden" attribute
lyScheme = lyo.LyEmbeddedScm(self.transparencyStopScheme)
currentMusicList.append(lyScheme)
def appendContextFromNoteOrRest(self, noteOrRest):
r'''
appends lySimpleMusicFromNoteOrRest to the
current context.
>>> n = note.Note("C#4")
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.appendContextFromNoteOrRest(n)
>>> print(lpMusicList)
cis' 4
<BLANKLINE>
>>> n2 = note.Note("D#4")
>>> n2.duration.quarterLength = 1.0/3
>>> n2.duration.tuplets[0].type = 'start'
>>> n3 = note.Note("E4")
>>> n3.duration.quarterLength = 1.0/3
>>> n4 = note.Note("F4")
>>> n4.duration.quarterLength = 1.0/3
>>> n4.duration.tuplets[0].type = 'stop'
>>> n5 = note.Note("F#4")
>>> lpc.appendContextFromNoteOrRest(n2)
>>> lpc.appendContextFromNoteOrRest(n3)
>>> lpc.appendContextFromNoteOrRest(n4)
>>> lpc.appendContextFromNoteOrRest(n5)
>>> print(lpc.context)
cis' 4
\times 2/3 { dis' 8
e' 8
f' 8
}
<BLANKLINE>
fis' 4
<BLANKLINE>
'''
# commented out until complete
# if self.variantMode is True:
# #TODO: attach \noBeam to note if it is the last note
# if "NotRest" in noteOrRest.classes:
# n = noteOrRest
# activeSite = n.activeSite
# offset = n.offset
# # failed at least once...
# if offset + n.duration.quarterLength == activeSite.duration.quarterLength:
# pass
self.setContextForTupletStart(noteOrRest)
self.appendBeamCode(noteOrRest)
self.appendStemCode(noteOrRest)
lpSimpleMusic = self.lySimpleMusicFromNoteOrRest(noteOrRest)
self.context.contents.append(lpSimpleMusic)
lpSimpleMusic.setParent(self.context)
self.setContextForTupletStop(noteOrRest)
def lySimpleMusicFromNoteOrRest(self, noteOrRest):
r'''
returns a lilyObjects.LySimpleMusic object for the generalNote containing...
LyEventChord containing
LySimpleChordElements containing
LySimpleElement containing
LyPitch AND
LyMultipliedDuration containing:
LyMultipliedDuration containing
LyStenoDuration
does not check for tuplets. That's in
appendContextFromNoteOrRest
read-only property that returns a string of the lilypond representation of
a note (or via subclassing, rest or chord)
>>> conv = lily.translate.LilypondConverter()
>>> n0 = note.Note("D#5")
>>> n0.pitch.accidental.displayType = 'always'
>>> n0.pitch.accidental.displayStyle = 'parentheses'
>>> n0.editorial.color = 'blue'
>>> sm = conv.lySimpleMusicFromNoteOrRest(n0)
>>> print(sm)
\color "blue" dis'' ! ? 4
Now make the note disappear...
>>> n0.hideObjectOnPrint = True
>>> sm = conv.lySimpleMusicFromNoteOrRest(n0)
>>> print(sm)
s 4
'''
c = noteOrRest.classes
simpleElementParts = []
if noteOrRest._editorial is not None:
if noteOrRest.editorial.color and noteOrRest.hideObjectOnPrint is not True:
simpleElementParts.append(noteOrRest.editorial.colorLilyStart())
if 'Note' in c:
if noteOrRest.hideObjectOnPrint is not True:
lpPitch = self.lyPitchFromPitch(noteOrRest.pitch)
simpleElementParts.append(lpPitch)
if noteOrRest.pitch.accidental is not None:
if noteOrRest.pitch.accidental.displayType == 'always':
simpleElementParts.append('! ')
if noteOrRest.pitch.accidental.displayStyle == 'parentheses':
simpleElementParts.append('? ')
else:
simpleElementParts.append("s ")
elif "SpacerRest" in c:
simpleElementParts.append("s ")
elif 'Rest' in c:
if noteOrRest.hideObjectOnPrint is True:
simpleElementParts.append("s ")
else:
simpleElementParts.append("r ")
lpMultipliedDuration = self.lyMultipliedDurationFromDuration(noteOrRest.duration)
simpleElementParts.append(lpMultipliedDuration)
if 'NotRest' in c and noteOrRest.beams is not None and len(noteOrRest.beams) > 0:
if noteOrRest.beams.beamsList[0].type == 'start':
simpleElementParts.append("[ ")
elif noteOrRest.beams.beamsList[0].type == 'stop':
simpleElementParts.append("] ") # no start-stop in music21...
simpleElement = lyo.LySimpleElement(parts = simpleElementParts)
postEvents = self.postEventsFromObject(noteOrRest)
evc = lyo.LyEventChord(simpleElement, postEvents = postEvents)
mlSM = lyo.LySimpleMusic(eventChord = evc)
return mlSM
def appendBeamCode(self, noteOrChord):
r'''
Adds an LyEmbeddedScm object to the context's contents if the object's has a .beams
attribute.
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.context.contents
[]
>>> n1 = note.Note(quarterLength = 0.25)
>>> n2 = note.Note(quarterLength = 0.25)
>>> n1.beams.fill(2, 'start')
>>> n2.beams.fill(2, 'stop')
>>> lpc.appendBeamCode(n1)
>>> print(lpc.context.contents)
[<music21.lily.lilyObjects.LyEmbeddedScm object at 0x...>]
>>> print(lpc.context)
\set stemRightBeamCount = #2
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.context.contents
[]
>>> lpc.appendBeamCode(n2)
>>> print(lpc.context.contents)
[<music21.lily.lilyObjects.LyEmbeddedScm object at 0x...>]
>>> print(lpc.context)
\set stemLeftBeamCount = #2
'''
leftBeams = 0
rightBeams = 0
if hasattr(noteOrChord, 'beams'):
if noteOrChord.beams is not None:
for b in noteOrChord.beams:
if b.type == 'start':
rightBeams += 1
elif b.type == 'continue':
rightBeams += 1
leftBeams += 1
elif b.type == 'stop':
leftBeams += 1
elif b.type == 'partial':
if b.direction == 'left':
leftBeams += 1
else: # better wrong direction than none
rightBeams += 1
if leftBeams > 0:
beamText = r'''\set stemLeftBeamCount = #%d''' % leftBeams
lpBeamScheme = lyo.LyEmbeddedScm(beamText)
self.context.contents.append(lpBeamScheme)
lpBeamScheme.setParent(self.context)
if rightBeams > 0:
beamText = r'''\set stemRightBeamCount = #%d''' % rightBeams
lpBeamScheme = lyo.LyEmbeddedScm(beamText)
self.context.contents.append(lpBeamScheme)
lpBeamScheme.setParent(self.context)
def appendStemCode(self, noteOrChord):
r'''
Adds an LyEmbeddedScm object to the context's contents if the object's stem direction
is set (currrently, only "up" and "down" are supported).
>>> lpc = lily.translate.LilypondConverter()
>>> lpMusicList = lily.lilyObjects.LyMusicList()
>>> lpc.context = lpMusicList
>>> lpc.context.contents
[]
>>> n = note.Note()
>>> n.stemDirection = 'up'
>>> lpc.appendStemCode(n)
>>> print(lpc.context.contents)
[<music21.lily.lilyObjects.LyEmbeddedScm object at 0x...>]
>>> print(lpc.context.contents[0])
\once \override Stem #'direction = #UP
'''
if hasattr(noteOrChord, 'stemDirection') and noteOrChord.stemDirection is not None:
stemDirection = noteOrChord.stemDirection.upper()
if stemDirection in ['UP', 'DOWN']:
stemFile = r'''\once \override Stem #'direction = #%s ''' % stemDirection
lpStemScheme = lyo.LyEmbeddedScm(stemFile)
self.context.contents.append(lpStemScheme)
lpStemScheme.setParent(self.context)
def lySimpleMusicFromChord(self, chordObj):
'''
>>> conv = lily.translate.LilypondConverter()
>>> c1 = chord.Chord(["C#2", "E4", "D#5"])
>>> c1.quarterLength = 3.5
>>> c1.pitches[2].accidental.displayType = 'always'
>>> print(conv.lySimpleMusicFromChord(c1))
< cis, e' dis'' ! > 2..
test hidden chord:
>>> c1.hideObjectOnPrint = True
>>> print(conv.lySimpleMusicFromChord(c1))
s 2..
'''
self.appendBeamCode(chordObj)
if chordObj.hideObjectOnPrint is not True:
self.appendStemCode(chordObj)
chordBodyElements = []
for p in chordObj.pitches:
chordBodyElementParts = []
lpPitch = self.lyPitchFromPitch(p)
chordBodyElementParts.append(lpPitch)
if p.accidental is not None:
if p.accidental.displayType == 'always':
chordBodyElementParts.append('! ')
if p.accidental.displayStyle == 'parentheses':
chordBodyElementParts.append('? ')
lpChordElement = lyo.LyChordBodyElement(parts = chordBodyElementParts)
chordBodyElements.append(lpChordElement)
lpChordBody = lyo.LyChordBody(chordBodyElements = chordBodyElements)
else:
lpChordBody = lyo.LyPitch('s ', '')
lpMultipliedDuration = self.lyMultipliedDurationFromDuration(chordObj.duration)
postEvents = self.postEventsFromObject(chordObj)
lpNoteChordElement = lyo.LyNoteChordElement(chordBody = lpChordBody,
optionalNoteModeDuration = lpMultipliedDuration,
postEvents = postEvents)
evc = lyo.LyEventChord(noteChordElement = lpNoteChordElement)
mlSM = lyo.LySimpleMusic(eventChord = evc)
return mlSM
# TODO: Chord beaming...
def postEventsFromObject(self, generalNote):
'''
attaches events that apply to notes and chords (and some other things) equally
'''
postEvents = []
# remove this hack once lyrics work
#if generalNote.lyric is not None: # hack that uses markup...
# postEvents.append(r'_\markup { "' + generalNote.lyric + '" }\n ')
# consider this hack removed. Yeah!
if (hasattr(generalNote, 'tie') and generalNote.tie is not None):
if (generalNote.tie.type != "stop"):
postEvents.append("~ ")
if (hasattr(generalNote, 'expressions') and generalNote.expressions):
for thisExpression in generalNote.expressions:
if 'Fermata' in thisExpression.classes:
postEvents.append(r'\fermata ')
return postEvents
def lyPitchFromPitch(self, pitch):
'''
converts a music21.pitch.Pitch object to a lily.lilyObjects.LyPitch
object.
'''
baseName = self.baseNameFromPitch(pitch)
octaveModChars = self.octaveCharactersFromPitch(pitch)
lyPitch = lyo.LyPitch(baseName, octaveModChars)
return lyPitch
def baseNameFromPitch(self, pitch):
'''
returns a string of the base name (including accidental)
for a music21 pitch
'''
baseName = pitch.step.lower()
if pitch.accidental is not None:
if pitch.accidental.name in self.accidentalConvert:
baseName += self.accidentalConvert[pitch.accidental.name]
return baseName
def octaveCharactersFromPitch(self, pitch):
'''
returns a string of single-quotes or commas or "" representing
the octave of a :class:`~music21.pitch.Pitch` object
'''
spio = pitch.implicitOctave
if (spio < 3):
correctedOctave = 3 - spio
octaveModChars = u',' * correctedOctave # C2 = c, C1 = c,,
else:
correctedOctave = spio - 3
octaveModChars = u'\'' * correctedOctave # C4 = c', C5 = c'' etc.
return octaveModChars
def lyMultipliedDurationFromDuration(self, durationObj):
r'''
take a simple Duration (that is one with one DurationTuple
object and return a LyMultipliedDuration object:
>>> d = duration.Duration(3)
>>> lpc = lily.translate.LilypondConverter()
>>> lyMultipliedDuration = lpc.lyMultipliedDurationFromDuration(d)
>>> str(lyMultipliedDuration)
'2. '
>>> str(lpc.lyMultipliedDurationFromDuration(duration.Duration(8.0)))
'\\breve '
Does not work with complex durations:
>>> d = duration.Duration(5.0)
>>> str(lpc.lyMultipliedDurationFromDuration(d))
Traceback (most recent call last):
LilyTranslateException: DurationException for durationObject <music21.duration.Duration 5.0>: Could not determine durationNumber from None
Instead split by components:
>>> components = d.components
>>> [str(lpc.lyMultipliedDurationFromDuration(c)) for c in components]
['1 ', '4 ']
'''
try:
number_type = duration.convertTypeToNumber(durationObj.type) # module call
except duration.DurationException as de:
raise LilyTranslateException("DurationException for durationObject %s: %s" % (durationObj, de))
if number_type < 1:
if number_type == 0.5:
number_type = r'\breve'
elif number_type == 0.25:
number_type = r'\longa'
else:
# no support for maxima...
number_type = int(number_type * 16)
else:
number_type = int(number_type)
try:
stenoDuration = lyo.LyStenoDuration(number_type, int(durationObj.dots))
multipliedDuration = lyo.LyMultipliedDuration(stenoDuration)
except duration.DurationException as de:
raise LilyTranslateException("DurationException: Cannot translate durationObject %s: %s" % (durationObj, de))
return multipliedDuration
def lyEmbeddedScmFromClef(self, clefObj):
r'''
converts a Clef object to a
lilyObjects.LyEmbeddedScm object
>>> tc = clef.TrebleClef()
>>> conv = lily.translate.LilypondConverter()
>>> lpEmbeddedScm = conv.lyEmbeddedScmFromClef(tc)
>>> print(lpEmbeddedScm)
\clef "treble"
'''
c = clefObj.classes
if 'Treble8vbClef' in c:
lilyName = 'treble_8'
elif 'TrebleClef' in c:
lilyName = "treble"
elif 'BassClef' in c:
lilyName = "bass"
elif 'AltoClef' in c:
lilyName = 'alto'
elif 'TenorClef' in c:
lilyName = 'tenor'
elif 'SopranoClef' in c:
lilyName = 'soprano'
elif 'PercussionClef' in c:
lilyName = 'percussion'
else:
environLocal.printDebug('got a clef that lilypond does not know what to do with: %s' % clefObj)
lilyName = ""
lpEmbeddedScm = lyo.LyEmbeddedScm()
clefScheme = lpEmbeddedScm.backslash + 'clef ' + lpEmbeddedScm.quoteString(lilyName) + lpEmbeddedScm.newlineIndent
lpEmbeddedScm.content = clefScheme
return lpEmbeddedScm
def lyEmbeddedScmFromKeySignature(self, keyObj):
r'''
converts a Key or KeySignature object
to a lilyObjects.LyEmbeddedScm object
>>> d = key.KeySignature(-1)
>>> d.mode = 'minor'
>>> conv = lily.translate.LilypondConverter()
>>> lpEmbeddedScm = conv.lyEmbeddedScmFromKeySignature(d)
>>> print(lpEmbeddedScm)
\key d \minor
Major is assumed:
>>> fsharp = key.KeySignature(6)
>>> print(conv.lyEmbeddedScmFromKeySignature(fsharp))
\key fis \major
'''
(p, m) = keyObj.pitchAndMode
if m is None:
m = "major"
pn = self.baseNameFromPitch(p)
lpEmbeddedScm = lyo.LyEmbeddedScm()
keyScheme = lpEmbeddedScm.backslash + 'key ' + pn + ' ' + lpEmbeddedScm.backslash + m + ' ' + lpEmbeddedScm.newlineIndent
lpEmbeddedScm.content = keyScheme
return lpEmbeddedScm
def lyEmbeddedScmFromTimeSignature(self, ts):
r'''
convert a :class:`~music21.meter.TimeSignature` object
to a lilyObjects.LyEmbeddedScm object
>>> ts = meter.TimeSignature('3/4')
>>> conv = lily.translate.LilypondConverter()
>>> print(conv.lyEmbeddedScmFromTimeSignature(ts))
\time 3/4
'''
lpEmbeddedScm = lyo.LyEmbeddedScm()
keyScheme = lpEmbeddedScm.backslash + 'time ' + ts.ratioString + lpEmbeddedScm.newlineIndent
lpEmbeddedScm.content = keyScheme
return lpEmbeddedScm
def setContextForTupletStart(self, inObj):
'''
if the inObj has tuplets then we set a new context
for the tuplets and anything up till a tuplet stop.
Note that a broken tuplet (a la Michael Gordon)
will not work.
If there are no tuplets, this routine does
nothing. If there are tuplets and they have type start then
it returns an lpMusicList object, which is the new context
For now, no nested tuplets. They're an
easy extension, but there's too much
else missing to do it now...
'''
if inObj.duration.tuplets is None or len(inObj.duration.tuplets) == 0:
return None
elif inObj.duration.tuplets[0].type == 'start':
numerator = str(int(inObj.duration.tuplets[0].tupletNormal[0]))
denominator = str(int(inObj.duration.tuplets[0].tupletActual[0]))
lpMusicList = self.setContextForTimeFraction(numerator, denominator)
return lpMusicList
else:
return None
def setContextForTimeFraction(self, numerator, denominator):
'''
Explicitly starts a new context for scaled music (tuplets, etc.)
for the given numerator and denominator (either an int or a string or unicode)
Returns an lpMusicList object contained in an lpSequentialMusic object
in an lpPrefixCompositeMusic object which sets the times object to a particular
fraction.
>>> lpc = lily.translate.LilypondConverter()
>>> lpc.context
<music21.lily.lilyObjects.LyLilypondTop object at 0x...>
>>> lyTop = lpc.context
>>> lyoMusicList = lpc.setContextForTimeFraction(5, 4)
>>> lyoMusicList
<music21.lily.lilyObjects.LyMusicList object at 0x...>
>>> lpc.context
<music21.lily.lilyObjects.LyMusicList object at 0x...>
>>> lpc.context is lyoMusicList
True
>>> lpc.context.getParent()
<music21.lily.lilyObjects.LySequentialMusic object at 0x...>
>>> lpc.context.getParent().getParent()
<music21.lily.lilyObjects.LyPrefixCompositeMusic object at 0x...>
>>> lpc.context.getParent().getParent().fraction
'5/4'
>>> lpc.context.getParent().getParent().type
'times'
>>> lpc.context.getParent().getParent().getParent()
<music21.lily.lilyObjects.LyLilypondTop object at 0x...>
>>> lpc.context.getParent().getParent().getParent() is lyTop
True
'''
# pylint: disable=undefined-variable
if six.PY2:
fraction = unicode(numerator) + '/' + unicode(denominator) # @UndefinedVariable
else:
fraction = str(numerator) + '/' + str(denominator)
lpMusicList = lyo.LyMusicList()
lpSequentialMusic = lyo.LySequentialMusic(musicList = lpMusicList)
## technically needed, but we can speed things up
#lpGroupedMusicList = lyo.LyGroupedMusicList(sequentialMusic = lpSequentialMusic)
#lpCompositeMusic = lyo.LyCompositeMusic(groupedMusicList = lpGroupedMusicList)
#lpMusic = lyo.LyMusic(compositeMusic = lpCompositeMusic)
lpPrefixCompositeMusic = lyo.LyPrefixCompositeMusic(type='times',
fraction = fraction,
music = lpSequentialMusic)
currentContents = self.context.contents
if currentContents is None:
raise LilyTranslateException("Cannot find contents for self.context: %r " % self.context)
currentContents.append(lpPrefixCompositeMusic)
lpPrefixCompositeMusic.setParent(self.context)
self.newContext(lpMusicList)
return lpMusicList
def setContextForTupletStop(self, inObj):
'''
Reverse of setContextForTupletStart
'''
if len(inObj.duration.tuplets) == 0:
return
elif inObj.duration.tuplets[0].type == 'stop':
self.restoreContext()
else:
return None
def appendContextFromVariant(self, variantObjectOrList, activeSite=None, coloredVariants=False):
'''
Create a new context from the variant object or a list of variants and append.
'''
musicList = []
if isinstance(variantObjectOrList, variant.Variant):
variantObject = variantObjectOrList
replacedElements = variantObject.replacedElements(activeSite)
lpPrefixCompositeMusicVariant = self.lyPrefixCompositeMusicFromVariant(
variantObject, replacedElements, coloredVariants=coloredVariants)
lpSequentialMusicStandard = self.lySequentialMusicFromStream(replacedElements)
musicList.append(lpPrefixCompositeMusicVariant)
musicList.append(lpSequentialMusicStandard)
elif isinstance(variantObjectOrList, list):
longestReplacementLength = -1
variantDict = {}
for variantObject in variantObjectOrList:
if variantObject.groups:
variantName = variantObject.groups[0]
else:
variantName = "variant"
if variantName in variantDict:
variantDict[variantName].append(variantObject)
else:
variantDict[variantName] = [variantObject]
for key in variantDict:
variantList = variantDict[key]
if len(variantList) == 1:
variantObject = variantList[0]
replacedElements = variantObject.replacedElements(activeSite)
lpPrefixCompositeMusicVariant = self.lyPrefixCompositeMusicFromVariant(
variantObject, replacedElements, coloredVariants=coloredVariants)
musicList.append(lpPrefixCompositeMusicVariant)
else:
lpPrefixCompositeMusicVariant, replacedElements = self.lyPrefixCompositeMusicFromRelatedVariants(
variantList, activeSite=activeSite, coloredVariants=coloredVariants)
musicList.append(lpPrefixCompositeMusicVariant)
if longestReplacementLength < replacedElements.duration.quarterLength:
longestReplacementLength = replacedElements.duration.quarterLength
longestReplacedElements = replacedElements
lpSequentialMusicStandard = self.lySequentialMusicFromStream(longestReplacedElements)
musicList.append(lpSequentialMusicStandard)
for el in longestReplacedElements:
self.doNotOutput.append(el)
lp2MusicList = lyo.LyMusicList()
lp2MusicList.contents = musicList
lp2SimultaneousMusic = lyo.LySimultaneousMusic()
lp2SimultaneousMusic.musicList = lp2MusicList
lp2GroupedMusicList = lyo.LyGroupedMusicList()
lp2GroupedMusicList.simultaneousMusic = lp2SimultaneousMusic
contextObject = self.context
currentMusicList = contextObject.contents
currentMusicList.append(lp2GroupedMusicList)
lp2GroupedMusicList.setParent(self.context)
def lyPrefixCompositeMusicFromRelatedVariants(self, variantList,
activeSite=None, coloredVariants=False):
r'''
>>> s1 = converter.parse("tinynotation: 4/4 a4 a a a a1")
>>> s2 = converter.parse("tinynotation: 4/4 b4 b b b")
>>> s3 = converter.parse("tinynotation: 4/4 c4 c c c")
>>> s4 = converter.parse("tinynotation: 4/4 d4 d d d")
>>> s5 = converter.parse("tinynotation: 4/4 e4 e e e f f f f g g g g a a a a b b b b")
>>> for s in [ s1, s2, s3, s4, s5]:
... s.makeMeasures(inPlace = True)
>>> activeSite = stream.Part(s5)
>>> v1 = variant.Variant()
>>> for el in s1:
... v1.append(el)
>>> v1.replacementDuration = 4.0
>>> v2 = variant.Variant()
>>> sp2 = note.SpacerRest()
>>> sp2.duration.quarterLength = 4.0
>>> v2.replacementDuration = 4.0
>>> v2.append(sp2)
>>> for el in s2:
... v2.append(el)
>>> v3 = variant.Variant()
>>> sp3 = note.SpacerRest()
>>> sp3.duration.quarterLength = 8.0
>>> v3.replacementDuration = 4.0
>>> v3.append(sp3)
>>> for el in s3:
... v3.append(el)
>>> v4 = variant.Variant()
>>> sp4 = note.SpacerRest()
>>> sp4.duration.quarterLength = 16.0
>>> v4.replacementDuration = 4.0
>>> v4.append(sp4)
>>> for el in s4:
... v4.append(el)
>>> variantList = [v4,v1,v3,v2]
>>> for v in variantList :
... v.groups = ['london']
... activeSite.insert(0.0, v)
>>> lpc = lily.translate.LilypondConverter()
>>> print(lpc.lyPrefixCompositeMusicFromRelatedVariants(variantList, activeSite = activeSite)[0])
\new Staff = london... { { \times 1/2 {\startStaff \clef "treble"
a' 4
a' 4
a' 4
a' 4
\clef "treble"
| %{ end measure 1 %}
a' 1
| %{ end measure 2 %}
\stopStaff}
}
<BLANKLINE>
{\startStaff \clef "treble"
b... 4
b... 4
b... 4
b... 4
| %{ end measure 1 %}
\stopStaff}
<BLANKLINE>
{\startStaff \clef "treble"
c' 4
c' 4
c' 4
c' 4
| %{ end measure 1 %}
\stopStaff}
<BLANKLINE>
s 1
{\startStaff \clef "treble"
d' 4
d' 4
d' 4
d' 4
| %{ end measure 1 %}
\stopStaff}
<BLANKLINE>
}
<BLANKLINE>
'''
# Order List
def findOffsetOfFirstNonSpacerElement(inputStream):
for el in inputStream:
if "SpacerRest" in el.classes:
pass
else:
return inputStream.elementOffset(el)
variantList.sort(key = lambda v: findOffsetOfFirstNonSpacerElement(v._stream))
# Stuff that can be done on the first element only (clef, new/old, id, color)
replacedElements = variantList[0].replacedElements(activeSite)
replacedElementsClef = replacedElements[0].getContextByClass('Clef')
variantContainerStream = variantList[0].getContextByClass('Part')
if variantContainerStream is None:
variantContainerStream = variantList[0].getContextByClass('Stream')
variantList[0].insert(0.0, replacedElementsClef)
variantName = variantList[0].groups[0]
if variantName in self.addedVariants:
newVariant = False
else:
self.addedVariants.append(variantName)
newVariant = True
containerId = makeLettersOnlyId(variantContainerStream.id)
variantId = lyo.LyOptionalId(makeLettersOnlyId(variantName)+containerId)
if coloredVariants is True:
color = self.variantColors[self.addedVariants.index(variantName) % 6]
#######################
musicList = []
highestOffsetSoFar = 0.0
self.variantMode = True
for v in variantList:
# For each variant in the list, we make a lilypond representation of the
# spacer between this variant and the previous if it is non-zero and append it
# Then we strip off the spacer and make a lilypond representation of the variant
# with the appropriate tupletting if any and append that.
# At the end we make a new lilypond context for it and return it.
firstOffset = findOffsetOfFirstNonSpacerElement(v._stream)
if firstOffset < highestOffsetSoFar:
raise LilyTranslateException("Should not have overlapping variants.")
else:
spacerDuration = firstOffset - highestOffsetSoFar
highestOffsetSoFar = v.replacementDuration + firstOffset
# make spacer with spacerDuration and append
if spacerDuration > 0.0:
spacer = note.SpacerRest()
spacer.duration.quarterLength = spacerDuration
lySpacer = self.lySimpleMusicFromNoteOrRest(spacer)
musicList.append(lySpacer)
if coloredVariants is True:
for n in v._stream.flat.notesAndRests:
n.editorial.color = color# make thing (with or without fraction)
# Strip off spacer
endOffset = v.containedHighestTime
vStripped = variant.Variant(v._stream.getElementsByOffset(firstOffset,
offsetEnd = endOffset))
vStripped.replacementDuration = v.replacementDuration
replacedElementsLength = vStripped.replacementDuration
variantLength = vStripped.containedHighestTime - firstOffset
if variantLength != replacedElementsLength:
numerator, denominator = common.decimalToTuplet(replacedElementsLength/variantLength)
fraction = str(numerator) + '/' + str(denominator)
lpOssiaMusicVariantPreFraction = self.lyOssiaMusicFromVariant(vStripped)
lpVariantTuplet = lyo.LyPrefixCompositeMusic(type='times',
fraction = fraction,
music = lpOssiaMusicVariantPreFraction)
lpOssiaMusicVariant = lyo.LySequentialMusic(musicList = lpVariantTuplet)
else:
lpOssiaMusicVariant = self.lyOssiaMusicFromVariant(vStripped)
musicList.append(lpOssiaMusicVariant)
longestVariant = v
# The last variant in the iteration should have the highestOffsetSoFar,
# so it has the appropriate replacementElements to return can compare with the rest in
# appendContextFromVariant.
replacedElements = longestVariant.replacedElements(activeSite, includeSpacers = True)
lpMusicList = lyo.LyMusicList(musicList)
lpInternalSequentialMusic = lyo.LySequentialMusic(musicList = lpMusicList )
if newVariant is True:
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'new',
optionalId = variantId,
simpleString = "Staff",
music = lpInternalSequentialMusic)
else: #newVariant is False
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'context',
optionalId = variantId,
simpleString = "Staff",
music = lpInternalSequentialMusic)
#optionalContextMod = r'''
#\with {
# \remove "Time_signature_engraver"
# alignAboveContext = #"%s"
# fontSize = ##-3
# \override StaffSymbol #'staff-space = #(magstep -3)
# \override StaffSymbol #'thickness = #(magstep -3)
# \override TupletBracket #'bracket-visibility = ##f
# \override TupletNumber #'stencil = ##f
# \override Clef #'transparent = ##t
# }
# ''' % containerId #\override BarLine #'transparent = ##t is the best way of fixing #the barlines that I have come up with.
#
#lpPrefixCompositeMusicVariant.optionalContextMod = optionalContextMod
self.variantMode = False
return lpPrefixCompositeMusicVariant, replacedElements
def lyPrefixCompositeMusicFromVariant(self, variantObject, replacedElements, coloredVariants = False):
r'''
>>> pstream = converter.parse("tinynotation: 4/4 a4 b c d e4 f g a")
>>> pstream.makeMeasures(inPlace = True)
>>> p = stream.Part(pstream)
>>> p.id = 'p1'
>>> vstream = converter.parse("tinynotation: 4/4 a4. b8 c4 d")
>>> vstream.makeMeasures(inPlace = True)
>>> v = variant.Variant(vstream)
>>> v.groups = ['london']
>>> p.insert(0.0, v)
>>> lpc = lily.translate.LilypondConverter()
>>> replacedElements = v.replacedElements()
>>> lpPrefixCompositeMusicVariant = lpc.lyPrefixCompositeMusicFromVariant(v, replacedElements)
>>> print(lpPrefixCompositeMusicVariant) # ellipses are for non-byte fixups
\new Staff = londonpx { {\startStaff \clef "treble"
a' 4.
b...
c' 4
d' 4
\clef "treble"
| %{ end measure 1 %}
\stopStaff}
}
>>> replacedElements.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.note.Note A>
{1.0} <music21.note.Note B>
{2.0} <music21.note.Note C>
{3.0} <music21.note.Note D>
>>> print(lpc.addedVariants)
['london']
'''
replacedElementsClef = replacedElements[0].getContextByClass('Clef')
variantContainerStream = variantObject.getContextByClass('Part')
if variantContainerStream is None:
variantContainerStream = variantObject.getContextByClass('Stream')
if replacedElementsClef is not None:
if not replacedElementsClef in variantObject.elements:
variantObject.insert(0, replacedElementsClef)
if variantObject.groups:
variantName = variantObject.groups[0]
else:
variantName = 'variant'
if variantName in self.addedVariants:
newVariant = False
else:
self.addedVariants.append(variantName)
newVariant = True
containerId = makeLettersOnlyId(variantContainerStream.id)
variantId = lyo.LyOptionalId(makeLettersOnlyId(variantName)+containerId)
if coloredVariants is True:
color = self.variantColors[self.addedVariants.index(variantName) % 6]
for n in variantObject._stream.flat.notesAndRests:
n.editorial.color = color
musicList = []
varFilter = variantObject.getElementsByClass("SpacerRest")
if varFilter:
spacer = varFilter[0]
spacerDur = spacer.duration.quarterLength
if spacer.duration.quarterLength > 0.0:
lySpacer = self.lySimpleMusicFromNoteOrRest(spacer)
musicList.append(lySpacer)
variantObject.remove(spacer)
else:
spacerDur = 0.0
lpOssiaMusicVariant = self.lyOssiaMusicFromVariant(variantObject)
replacedElementsLength = variantObject.replacementDuration
variantLength = variantObject.containedHighestTime - spacerDur
self.variantMode = True
if variantLength != replacedElementsLength:
numerator, denominator = common.decimalToTuplet(replacedElementsLength/variantLength)
fraction = str(numerator) + '/' + str(denominator)
lpVariantTuplet = lyo.LyPrefixCompositeMusic(type='times',
fraction = fraction,
music = lpOssiaMusicVariant)
lpInternalSequentialMusic = lyo.LySequentialMusic(musicList = lpVariantTuplet)
musicList.append(lpInternalSequentialMusic)
else:
musicList.append(lpOssiaMusicVariant)
lpMusicList = lyo.LyMusicList(musicList)
lpOssiaMusicVariantWithSpacer = lyo.LySequentialMusic(musicList = lpMusicList )
if newVariant is True:
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'new',
optionalId = variantId,
simpleString = "Staff",
music = lpOssiaMusicVariantWithSpacer)
else:
lpPrefixCompositeMusicVariant = lyo.LyPrefixCompositeMusic(type = 'context',
optionalId = variantId,
simpleString = "Staff",
music = lpOssiaMusicVariantWithSpacer)
# optionalContextMod = r'''
#\with {
# \remove "Time_signature_engraver"
# alignAboveContext = #"%s"
# fontSize = #-3
# \override StaffSymbol #'staff-space = #(magstep -3)
# \override StaffSymbol #'thickness = #(magstep -3)
# \override TupletBracket #'bracket-visibility = ##f
# \override TupletNumber #'stencil = ##f
# \override Clef #'transparent = ##t
# }
# ''' % containerId #\override BarLine #'transparent = ##t is the best way of fixing the #barlines that I have come up with.
#
# lpPrefixCompositeMusicVariant.optionalContextMod = optionalContextMod
self.variantMode = False
return lpPrefixCompositeMusicVariant
#musicList2 = []
#musicList2.append(lpPrefixCompositeMusicVariant)
#musicList2.append(lpSequentialMusicStandard )
#
#lp2MusicList = lyo.LyMusicList()
#lp2MusicList.contents = musicList2
#lp2SimultaneousMusic = lyo.LySimultaneousMusic()
#lp2SimultaneousMusic.musicList = lp2MusicList
#lp2GroupedMusicList = lyo.LyGroupedMusicList()
#lp2GroupedMusicList.simultaneousMusic = lp2SimultaneousMusic
#
#contextObject = self.context
#currentMusicList = contextObject.contents
#currentMusicList.append(lp2GroupedMusicList)
#lp2GroupedMusicList.setParent(self.context)
def lyOssiaMusicFromVariant(self, variantIn):
r'''
returns a LyOssiaMusic object from a stream
>>> c = converter.parse('tinynotation: 3/4 C4 D E F2.')
>>> v = variant.Variant(c)
>>> lpc = lily.translate.LilypondConverter()
>>> lySequentialMusicOut = lpc.lySequentialMusicFromStream(v)
>>> lySequentialMusicOut
<music21.lily.lilyObjects.LySequentialMusic object at 0x...>
>>> print(lySequentialMusicOut)
{ \clef "bass"
\time 3/4
c 4
d 4
e 4
\bar "|" %{ end measure 1 %}
f 2.
\bar "|." %{ end measure 2 %}
}
<BLANKLINE>
'''
musicList = []
lpMusicList = lyo.LyMusicList(contents = musicList)
lpOssiaMusic = lyo.LyOssiaMusic(musicList = lpMusicList)
self.newContext(lpMusicList)
self.variantMode = True
self.appendObjectsToContextFromStream(variantIn._stream)
lyObject = self.closeMeasure()
if lyObject is not None:
musicList.append(lyObject)
self.restoreContext()
self.variantMode = False
return lpOssiaMusic
def setHeaderFromMetadata(self, metadataObject = None, lpHeader = None):
r'''
Returns a lilypond.lilyObjects.LyLilypondHeader object
set with data from the metadata object
>>> md = metadata.Metadata()
>>> md.title = 'My Title'
>>> md.alternativeTitle = 'My "sub"-title'
>>> lpc = lily.translate.LilypondConverter()
>>> lpHeader = lpc.setHeaderFromMetadata(md)
>>> print(lpHeader)
\header { title = "My Title"
subtitle = "My \"sub\"-title"
}
'''
if lpHeader is None:
lpHeader = lyo.LyLilypondHeader()
if lpHeader.lilypondHeaderBody is None:
lpHeaderBody = lyo.LyLilypondHeaderBody()
lpHeader.lilypondHeaderBody = lpHeaderBody
else:
lpHeaderBody = lpHeader.lilypondHeaderBody
lpHeaderBodyAssignments = lpHeaderBody.assignments
if metadataObject is not None:
if metadataObject.title is not None:
lyTitleAssignment = lyo.LyAssignment(assignmentId = "title",
identifierInit = lyo.LyIdentifierInit(
string=metadataObject.title))
lpHeaderBodyAssignments.append(lyTitleAssignment)
lyTitleAssignment.setParent(lpHeaderBody)
if metadataObject.alternativeTitle is not None:
lySubtitleAssignment = lyo.LyAssignment(assignmentId = "subtitle",
identifierInit = lyo.LyIdentifierInit(
string=metadataObject.alternativeTitle))
lpHeaderBodyAssignments.append(lySubtitleAssignment)
lyTitleAssignment.setParent(lpHeaderBody)
lpHeaderBody.assignments = lpHeaderBodyAssignments
return lpHeader
def closeMeasure(self, barChecksOnly=False):
r'''
return a LyObject or None for the end of the previous Measure
uses self.currentMeasure
>>> lpc = lily.translate.LilypondConverter()
>>> m = stream.Measure()
>>> m.number = 2
>>> m.rightBarline = 'double'
>>> lpc.currentMeasure = m
>>> lyObj = lpc.closeMeasure()
>>> lpc.currentMeasure is None
True
>>> print(lyObj)
\bar "||" %{ end measure 2 %}
'''
m = self.currentMeasure
self.currentMeasure = None
if m is None:
return None
#if m.rightBarline is None:
# return None
#elif m.rightBarline.style == 'regular':
# return None
if self.variantMode is True:
barChecksOnly = True
lpBarline = lyo.LyEmbeddedScm()
if barChecksOnly is True:
barString = "|"
elif m.rightBarline is None:
barString = lpBarline.backslash + 'bar ' + lpBarline.quoteString("|")
else:
barString = lpBarline.backslash + 'bar ' + lpBarline.quoteString(
self.barlineDict[m.rightBarline.style])
if m.number is not None:
barString += lpBarline.comment("end measure %d" % m.number)
lpBarline.content = barString
return lpBarline
def getSchemeForPadding(self, measureObject):
r'''
lilypond partial durations are very strange and are really of
type LyMultipliedDuration. You notate how many
notes are left in the measure, for a quarter note, write "4"
for an eighth, write "8", but for 3 eighths, write "8*3" !
so we will measure in 32nd notes always... won't work for tuplets
of course.
returns a scheme object or None if not needed
>>> m = stream.Measure()
>>> m.append(meter.TimeSignature('3/4'))
>>> m.paddingLeft = 2.0
>>> lpc = lily.translate.LilypondConverter()
>>> outScheme = lpc.getSchemeForPadding(m)
>>> print(outScheme)
\partial 32*8
'''
pL = measureObject.paddingLeft
if pL == 0:
return None
tses = measureObject.getTimeSignatures()
if len(tses) == 0:
barLength = 4.0
else:
ts = tses[0]
barLength = ts.barDuration.quarterLength
remainingQL = barLength - pL
if remainingQL <= 0:
raise LilyTranslateException('your first pickup measure is non-existent!')
remaining32s = int(remainingQL * 8)
lyObject = lyo.LyEmbeddedScm()
schemeStr = lyObject.backslash + 'partial 32*' + str(remaining32s) + ' '
lyObject.content = schemeStr
return lyObject
#--------------display and converter routines ---------------------#
def writeLyFile(self, ext='', fp=None):
'''
writes the contents of the self.topLevelObject to a file.
The extension should be ly. If fp is None then a named temporary
file is created by environment.getTempFile.
'''
tloOut = str(self.topLevelObject)
if six.PY2:
tloOut = tloOut.encode('utf-8')
if fp is None:
fp = environLocal.getTempFile(ext)
self.tempName = fp
with open(self.tempName, 'w') as f:
f.write(tloOut)
return self.tempName
def runThroughLily(self, format=None, backend=None, fileName=None, skipWriting=False): #@ReservedAssignment
'''
creates a .ly file from self.topLevelObject via .writeLyFile
then runs the file through Lilypond.
Returns the full path of the file produced by lilypond including the format extension.
If skipWriting is True and a fileName is given then it will run
that file through lilypond instead
'''
LILYEXEC = self.findLilyExec()
if fileName is None:
fileName = self.writeLyFile(ext='ly')
else:
if skipWriting is False:
fileName = self.writeLyFile(ext='ly', fp=fileName)
lilyCommand = '"' + LILYEXEC + '" '
if format is not None:
lilyCommand += "-f " + format + " "
if backend is not None:
lilyCommand += self.backendString + backend + " "
lilyCommand += "-o " + fileName + " " + fileName
try:
os.system(lilyCommand)
except:
raise
try:
os.remove(fileName + ".eps")
except OSError:
pass
fileform = fileName + '.' + format
if not os.path.exists(fileform):
# cannot find full path; try current directory
fileend = os.path.basename(fileform)
if not os.path.exists(fileend):
raise LilyTranslateException("cannot find " + fileend +
" or the full path " + fileform + " original file was " + fileName)
else:
fileform = fileend
return fileform
def createPDF(self, fileName=None):
'''
create a PDF file from self.topLevelObject and return the filepath of the file.
most users will just call stream.write('lily.pdf') on a stream.
'''
self.headerScheme.content = "" # clear header
lilyFile = self.runThroughLily(backend='ps', format = 'pdf', fileName = fileName)
return lilyFile
def showPDF(self):
'''
create a SVG file from self.topLevelObject, show it with your pdf reader
(often Adobe Acrobat/Adobe Reader or Apple Preview)
and return the filepath of the file.
most users will just call stream.Stream.show('lily.pdf') on a stream.
'''
lF = self.createPDF()
if not os.path.exists(lF):
raise Exception('Something went wrong with PDF Creation')
else:
if os.name == 'nt':
command = 'start /wait %s && del /f %s' % (lF, lF)
elif sys.platform == 'darwin':
command = 'open %s' % lF
else:
command = ''
os.system(command)
def createPNG(self, fileName=None):
'''
create a PNG file from self.topLevelObject and return the filepath of the file.
most users will just call stream.write('lily.png') on a stream.
if PIL is installed then a small white border is created around the score
'''
lilyFile = self.runThroughLily(backend='eps', format='png', fileName=fileName)
if noPIL is False:
try:
lilyImage = Image.open(lilyFile) # @UndefinedVariable
lilyImage2 = ImageOps.expand(lilyImage, 10, 'white')
lilyImage2.save(lilyFile)
except Exception: # pylint: disable=broad-except
pass # no big deal probably...
return lilyFile
# if os.name == 'nt':
# format = 'png'
# # why are we changing format for darwin? -- did not work before
# elif sys.platform == 'darwin':
# format = 'jpeg'
# else: # default for all other platforms
# format = 'png'
#
# if lilyImage2.mode == "I;16":
# # @PIL88 @PIL101
# # "I;16" isn't an 'official' mode, but we still want to
# # provide a simple way to show 16-bit images.
# base = "L"
# else:
# base = Image.getmodebase(lilyImage2.mode)
# if base != lilyImage2.mode and lilyImage2.mode != "1":
# file = lilyImage2.convert(base)._dump(format=format)
# else:
# file = lilyImage2._dump(format=format)
# return file
# except:
# raise
def showPNG(self):
'''
Take the object, run it through LilyPond, and then show it as a PNG file.
On Windows, the PNG file will not be deleted, so you will need to clean out
TEMP every once in a while.
Most users will just want to call stream.Stream.show('lily.png') instead.
'''
try:
lilyFile = self.createPNG()
except LilyTranslateException as e:
raise LilyTranslateException("Problems creating PNG file: (" + str(e) + ")")
environLocal.launch('png', lilyFile)
#self.showImageDirect(lilyFile)
return lilyFile
def createSVG(self, fileName=None):
'''
create an SVG file from self.topLevelObject and return the filepath of the file.
most users will just call stream.Stream.write('lily.svg') on a stream.
'''
self.headerScheme.content = "" # clear header
lilyFile = self.runThroughLily(format='svg', backend='svg', fileName=fileName)
return lilyFile
def showSVG(self, fileName=None):
'''
create a SVG file from self.topLevelObject, show it with your svg reader (often Internet Explorer/
WebBrowser on PC)
and return the filepath of the file.
most users will just call stream.Stream.show('lily.png') on a stream.
'''
lilyFile = self.createSVG(fileName)
environLocal.launch('svg', lilyFile)
return lilyFile
class LilyTranslateException(exceptions21.Music21Exception):
pass
class Test(unittest.TestCase):
pass
def testExplicitConvertChorale(self):
lpc = LilypondConverter()
b = _getCachedCorpusFile('bach/bwv66.6')
lpc.loadObjectFromScore(b, makeNotation = False)
#print lpc.topLevelObject
def testComplexDuration(self):
from music21 import stream, meter
s = stream.Stream()
n1 = note.Note('C') # test no octave also!
n1.duration.quarterLength = 2.5 # BUG 2.3333333333 doesn't work right
self.assertEqual(n1.duration.type, 'complex')
n2 = note.Note('D4')
n2.duration.quarterLength = 1.5
s.append(meter.TimeSignature('4/4'))
s.append(n1)
s.append(n2)
#s.show('text')
lpc = LilypondConverter()
lpc.loadObjectFromScore(s)
#print lpc.topLevelObject
#lpc.showPNG()
#s.show('lily.png')
class TestExternal(unittest.TestCase):
def xtestConvertNote(self):
n = note.Note("C5")
n.show('lily.png')
def xtestConvertChorale(self):
b = _getCachedCorpusFile('bach/bwv66.6')
for n in b.flat:
n.beams = None
b.parts[0].show('lily.svg')
def xtestSlowConvertOpus(self):
fifeOpus = corpus.parse('miscFolk/americanfifeopus.abc')
fifeOpus.show('lily.png')
def xtestBreve(self):
from music21 import stream, meter
n = note.Note("C5")
n.duration.quarterLength = 8.0
m = stream.Measure()
m.append(meter.TimeSignature('8/4'))
m.append(n)
p = stream.Part()
p.append(m)
s = stream.Score()
s.append(p)
s.show('lily.png')
def testStaffLines(self):
from music21 import stream
s = stream.Score()
p = stream.Part()
p.append(note.Note("B4", type='whole'))
p.staffLines = 1
s.insert(0, p)
p2 = stream.Part()
p2.append(note.Note("B4", type='whole'))
p2.staffLines = 7
s.insert(0, p2)
s.show('lily.png')
#-------------------------------------------------------------------------------
if __name__ == "__main__":
import music21
music21.mainTest(Test, TestExternal)
#music21.mainTest(TestExternal, 'noDocTest')
#------------------------------------------------------------------------------
# eof
| mit | 6,628,346,927,522,504,000 | 37.447091 | 187 | 0.553334 | false | 4.17127 | false | false | false |
larsks/cloud-init | cloudinit/dhclient_hook.py | 3 | 2536 | # This file is part of cloud-init. See LICENSE file for license information.
"""Run the dhclient hook to record network info."""
import argparse
import os
from cloudinit import atomic_helper
from cloudinit import log as logging
from cloudinit import stages
LOG = logging.getLogger(__name__)
NAME = "dhclient-hook"
UP = "up"
DOWN = "down"
EVENTS = (UP, DOWN)
def _get_hooks_dir():
i = stages.Init()
return os.path.join(i.paths.get_runpath(), 'dhclient.hooks')
def _filter_env_vals(info):
"""Given info (os.environ), return a dictionary with
lower case keys for each entry starting with DHCP4_ or new_."""
new_info = {}
for k, v in info.items():
if k.startswith("DHCP4_") or k.startswith("new_"):
key = (k.replace('DHCP4_', '').replace('new_', '')).lower()
new_info[key] = v
return new_info
def run_hook(interface, event, data_d=None, env=None):
if event not in EVENTS:
raise ValueError("Unexpected event '%s'. Expected one of: %s" %
(event, EVENTS))
if data_d is None:
data_d = _get_hooks_dir()
if env is None:
env = os.environ
hook_file = os.path.join(data_d, interface + ".json")
if event == UP:
if not os.path.exists(data_d):
os.makedirs(data_d)
atomic_helper.write_json(hook_file, _filter_env_vals(env))
LOG.debug("Wrote dhclient options in %s", hook_file)
elif event == DOWN:
if os.path.exists(hook_file):
os.remove(hook_file)
LOG.debug("Removed dhclient options file %s", hook_file)
def get_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
"event", help='event taken on the interface', choices=EVENTS)
parser.add_argument(
"interface", help='the network interface being acted upon')
# cloud-init main uses 'action'
parser.set_defaults(action=(NAME, handle_args))
return parser
def handle_args(name, args, data_d=None):
"""Handle the Namespace args.
Takes 'name' as passed by cloud-init main. not used here."""
return run_hook(interface=args.interface, event=args.event, data_d=data_d)
if __name__ == '__main__':
import sys
parser = get_parser()
args = parser.parse_args(args=sys.argv[1:])
return_value = handle_args(
NAME, args, data_d=os.environ.get('_CI_DHCP_HOOK_DATA_D'))
if return_value:
sys.exit(return_value)
# vi: ts=4 expandtab
| gpl-3.0 | 5,726,797,158,366,657,000 | 28.835294 | 78 | 0.62776 | false | 3.455041 | false | false | false |
malimome/game-auth | classifier.py | 1 | 4439 | import data as mldata
import pdb
class ClassificationBase(object):
def __init__(self, start, length):
self.start = start
self.length = length
self.profiles = {}
self.attempt = {}
self.userlvl = []
self.mindtPr = {}
self.level = -1
def readProfiles(self):
""" Get data for all users + the min in each level for all users """
users = mldata.getUsers()
ud = {}
udcount = {}
for user in users:
dtuser = mldata.UserData(user, self.level)
udcount[user] = dtuser.getUserFeatureLevels()
ud[user] = dtuser.ftlevels #data from all levels and features for one user
minc = 1000000
self.userlvl = []
for user in users:
if mldata.DEBUGL >= 2:
print ("User %s, Level %d -> Length:%d"%(user,self.level,udcount[user]))
cntuserlvl = udcount[user]
if cntuserlvl <= 109:
continue
self.userlvl.append(user)
if cntuserlvl < minc:
minc = cntuserlvl
if minc == 1000000:
minc = 0
# Only get the last portion of the profile
for user in self.userlvl:
for ft in ud[user]:
ud[user][ft] = ud[user][ft][-minc:]
return ud, minc
def readAttempt(self, level, user):
users = mldata.getUsers(is_profile = False)
if user not in users:
return False, False
dtuser = mldata.UserData(user, self.level, is_profile = False)
udcount = dtuser.getUserFeatureLevels()
return dtuser.ftlevels,udcount
def readPAdata(self, level, user=''):
self.level = level
if not self.profiles:
self.profiles,self.mindtPr = self.readProfiles()
if user=='':
return True
self.attempt, tmp = self.readAttempt(level, user)
if tmp < 30:
print "0"
print "0"
print("Not enough data for login. At least 30 rounds of game is needed but %d is provided!"%tmp)
exit(0)
return tmp
def classifyByFeature(self, feature):
levelscores = self.classifyByLevelFeature(level, feature)
def classifyUsers(self):
allscores = {}
for level in mldata.levelenum:
allscores[level] = self.classifyByLevel(level)
return allscores
class ClassificationOneD(ClassificationBase):
def __init__(self, start, length):
super(ClassificationOneD, self).__init__(start, length)
def classifyByLevelFeature(self, level, feature):
if not self.readPAdata(level):
return {}
refscores = {}
for ref in self.userlvl:
refscores[ref] = self.classifyByLevelFeatureRef(level, feature)
return refscores
def classifyByLevel(self, level):
featurecores = {}
if not self.readPAdata(level):
return {}
for ft in mldata.enfeatures:
featurecores[ft] = self.classifyByLevelFeature(level, ft)
return featurecores
class ClassificationMultiD(ClassificationBase):
def __init__(self, start, length):
super(ClassificationMultiD, self).__init__(start, length)
def classifyByLevelFeature(self, level, user = ''):
#if not self.readPAdata(level):
# return {}
refscores = {}
if user != '':
return self.classifyByLevelMultiRef(user)
for ref in self.userlvl:
refscores[ref] = self.classifyByLevelMultiRef(ref)
return refscores
def classifyByLevelUser(self, level, user):
cnt = self.readPAdata(level, user)
if mldata.DEBUGL >=2:
print("User login data length: %d"%cnt)
if cnt < 30:
return {}
self.level = level
scores = self.classifyByLevelFeature(level, user)
return scores
def classifyByLevel(self, level):
scores = {}
self.level = level
if not self.profiles:
self.profiles,self.mindtPr = self.readProfiles()
for user in self.userlvl:
sc = self.classifyByLevelUser(level, user)
if len(sc):
scores[user] = sc
return scores
class ClassificationFusion(ClassificationMultiD):
def __init__(self, start, length):
super(ClassificationFusion, self).__init__(start, length)
#weights = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
self.weights = [0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125]
def classifyByLevelMultiRef(self, ref):
scores = {}
for ft in mldata.enfeatures:
scores[ft] = self.classifyByLevelFeatureRef(self.level, ft)
finalscores = {}
for user in self.userlvl:
finalscores[user] = 0
for ft in mldata.enfeatures:
finalscores[user] += scores[ft][user] * self.weights[ft]
return finalscores
| gpl-2.0 | 1,073,319,948,299,777,800 | 28.993243 | 102 | 0.646091 | false | 3.467969 | false | false | false |
netkicorp/python-partner-client | netki/NetkiClient.py | 2 | 10490 | __author__ = 'frank'
from Certificate import Certificate
from Domain import Domain
from Partner import Partner
from Requestor import process_request
from WalletName import WalletName
class Netki:
"""
General methods for interacting with Netki's Partner API.
:param partner_id: Your Partner ID available in the API Keys section of your My Account page.
:param api_key: API Key available in the API Key section of your My Account page.
:param api_url: https://api.netki.com unless otherwise noted
"""
def __init__(self, api_key, partner_id, api_url='https://api.netki.com'):
self.api_key = api_key
self.api_url = api_url
self.partner_id = partner_id
self._auth_type = 'api_key'
@classmethod
def distributed_api_access(cls, key_signing_key, signed_user_key, user_key, api_url='https://api.netki.com'):
"""
Instantiate the Netki Client for distributed_api_access if your user's clients will communicate directly with
Netki to manage Wallet Names instead of communicating with your servers. More information can be found here:
http://docs.netki.apiary.io/#reference/partner-api
:param key_signing_key:
:param signed_user_key:
:param user_key:
:param api_url: https://api.netki.com unless otherwise noted
:return: Netki client.
"""
client = cls(None, None, api_url)
client.key_signing_key = key_signing_key
client.signed_user_key = signed_user_key
client.user_key = user_key
client._auth_type = 'distributed'
if not client.key_signing_key:
raise ValueError('key_signing_key Required for Distributed API Access')
if not client.signed_user_key:
raise ValueError('signed_user_key Required for Distributed API Access')
if not user_key:
raise ValueError('user_key Required for Distributed API Access')
return client
@classmethod
def certificate_api_access(cls, user_key, partner_id, api_url='https://api.netki.com'):
"""
Instantiate the Netki Client for certificate_api_access in order manage your user's Digital Identity Certificates
:param user_key:
:param partner_id:
:param api_url: https://api.netki.com unless otherwise noted
:return: Netki client.
"""
client = cls(None, None, api_url)
client.user_key = user_key
client.partner_id = partner_id
client._auth_type = 'certificate'
if not client.user_key:
raise ValueError('user_key Required for Certificate API Access')
if not client.partner_id:
raise ValueError('partner_id Required for Certificate API Access')
return client
# Wallet Name Operations #
def get_wallet_names(self, domain_name=None, external_id=None):
"""
Wallet Name Operation
Retrieve Wallet Names from the Netki API. Four options are available for retrieval:
* Retrieve all Wallet Names associated with your partner_id by not specifying a domain_name or external_id.
* Retrieve all Wallet Names associated with a particular partner domain_name by specifying a domain_name.
* Retrieve all Wallet Names associated with a particular external_id by specifying an external_id.
* Retrieve all Wallet Names associated with a domain_name and external_id by specifying both domain_name
and external_id.
:param domain_name: Domain name to which the requested Wallet Names belong. ``partnerdomain.com``
:param external_id: Your unique customer identifier specified when creating a Wallet Name.
:return: List of WalletName objects.
"""
args = []
if domain_name:
args.append('domain_name=%s' % domain_name)
if external_id:
args.append('external_id=%s' % external_id)
uri = '/v1/partner/walletname'
if args:
uri = uri + '?' + '&'.join(args)
response = process_request(self, uri, 'GET')
if not response.wallet_name_count:
return []
# Assemble and return a list of Wallet Name objects from the response data
all_wallet_names = []
for wn in response.wallet_names:
wallet_name = WalletName(
domain_name=wn.domain_name,
name=wn.name,
external_id=wn.external_id,
id=wn.id
)
for wallet in wn.wallets:
wallet_name.set_currency_address(wallet.currency, wallet.wallet_address)
wallet_name.set_netki_client(self)
all_wallet_names.append(wallet_name)
return all_wallet_names
def create_wallet_name(self, domain_name, name, external_id, currency, wallet_address):
"""
Wallet Name Operation
Create a new WalletName object with the required data. Execute save() to commit your changes to the API.
:param domain_name: Domain name to which the requested Wallet Name's belong. ``partnerdomain.com``
:param name: Customers Wallet Name appended to domain_name. ``joe``
:param external_id: Your unique customer identifier for this user's Wallet Name.
:param currency: Digital currency abbreviation noted in Netki API documentation
:param wallet_address: Digital currency address
:return: WalletName object
"""
wallet_name = WalletName(
domain_name=domain_name,
name=name,
external_id=external_id
)
wallet_name.set_currency_address(currency, wallet_address)
wallet_name.set_netki_client(self)
return wallet_name
# Partner Operations #
def get_partners(self):
"""
Sub-partner Operation
Get all partners (partner and sub-partners) associated with your account.
:return: List containing Partner objects
"""
response = process_request(self, '/v1/admin/partner', 'GET')
partner_objects = list()
for p in response.partners:
partner = Partner(id=p.id, name=p.name)
partner_objects.append(partner)
partner.set_netki_client(self)
return partner_objects
def create_partner(self, partner_name):
"""
Sub-partner Operation
Create a sub-partner.
:param partner_name: Partner Name
:return: Partner object
"""
response = process_request(self, '/v1/admin/partner/' + partner_name, 'POST')
partner = Partner(id=response.partner.id, name=response.partner.name)
partner.set_netki_client(self)
return partner
# Domain Operations #
def get_domains(self, domain_name=None):
"""
Domain Operation
Retrieve all domains associated with your partner_id or a specific domain_name if supplied
:return: List of Domain objects.
"""
response = process_request(self, '/api/domain/%s' % domain_name if domain_name else '/api/domain', 'GET')
if not response.get('domains'):
return []
domain_list = list()
for d in response.domains:
domain = Domain(d.domain_name)
domain.set_netki_client(self)
domain_list.append(domain)
return domain_list
def create_partner_domain(self, domain_name, sub_partner_id=None):
"""
Domain Operation
Create a partner domain used to offer Wallet Names.
:param domain_name: ``partnerdomain.com``
:param (Optional) sub_partner_id: When provided, create a domain_name under the sub_partner_id that you are
managing.
:return: Domain object with status and information required to complete domain setup.
"""
post_data = {'partner_id': sub_partner_id} if sub_partner_id else ''
response = process_request(self, '/v1/partner/domain/' + domain_name, 'POST', post_data)
domain = Domain(response.domain_name)
domain.status = response.status
domain.nameservers = response.nameservers
domain.set_netki_client(self)
return domain
# Certificate Operations #
def create_certificate(self, customer_data, product_id):
"""
Certificate Operation
Create a partner domain used to offer Wallet Names.
:param customer_data: Customer personal idenity information to be validated and used in the final certificate.
:param product_id: Specific product_id (Certificate type). Product IDs can be retrieved from
get_available_products() below.
:return: Certificate Object
"""
certificate = Certificate(customer_data, product_id)
certificate.set_netki_client(self)
return certificate
def get_certificate(self, id):
"""
Certificate Operation
Retrieve an existing certificate by certificate ID from the API.
:param id: Unique certificate ID issued after successful creation of a certificate object and save() to the API.
:return: Certificate Object
"""
if not id:
raise ValueError('Certificate ID Required')
certificate = Certificate()
certificate.id = id
certificate.set_netki_client(self)
certificate.get_status()
return certificate
def get_available_products(self):
"""
Certificate Operation
Get all available certificate products associated with your account including tier and pricing information.
:return: Dictionary containing product details.
"""
return process_request(self, '/v1/certificate/products', 'GET').get('products')
def get_ca_bundle(self):
"""
Certificate Operation
Download the root bundle used to validate the certificate chain for Netki issued certificates.
:return: Dictionary containing certificate bundle.
"""
return process_request(self, '/v1/certificate/cacert', 'GET').get('cacerts')
def get_account_balance(self):
"""
Certificate Operation
Get available balance for certificate purchases when using Deposit/Retainer billing.
:return: Dictionary containing available balance.
"""
return process_request(self, '/v1/certificate/balance', 'GET').get('available_balance')
| bsd-3-clause | 2,813,477,632,977,907,000 | 32.621795 | 121 | 0.638608 | false | 4.405712 | false | false | false |
laprice/ducttape | ducttape/package.py | 1 | 1233 | from fabric.api import sudo
class PackageBase(object):
def update(self):
raise Exception('Not Implemented')
def upgrade():
raise Exception('Not Implemented')
def install():
raise Exception('Not Implemented')
class PackageOpenBSD(PackageBase):
pass
class PackageYum(PackageBase):
pass
class PackageApt(PackageBase):
def add_repo(self, repo):
self.install('python-software-properties', quiet=True)
sudo('add-apt-repository "%s"' % repo)
self.update()
def update(self):
sudo('apt-get update')
def upgrade(self, update=False, quiet=False):
if update:
self.update()
cmd = "%s apt-get %s upgrade" % (
"DEBIAN_FRONTEND=noninteractive" if quiet else "",
"--yes" if quiet else ""
)
sudo(cmd)
def install(self, packages, quiet=False):
if isinstance(packages, (list, tuple)):
packages = " ".join(packages)
cmd = "%s apt-get %s install %s %s" % (
"DEBIAN_FRONTEND=noninteractive" if quiet else "",
"--yes" if quiet else "",
"--force-yes" if quiet else "",
packages
)
sudo(cmd)
| isc | -7,792,383,946,054,784,000 | 22.264151 | 62 | 0.565288 | false | 4.003247 | false | false | false |
bozokyzoltan/nmr | nmr_read_fit_plot.py | 1 | 120762 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by Zoltan Bozoky on 2013.03.13.
Under GPL licence.
Module handling a Sparky NMR file without nmrglue or additional nmr proccessing
or handling softwares
Purpose:
========
* One program that reads, visualizes multidimensional NMR data, finds peaks,
fit peak shapes, calculate volumes and intensities
"""
import struct
import random
import math
from pylab import plt
# Color handling module
import zcolor
# Fitting module
import GeneralFit as GF
################################################################################
# TODO Make the plot axes ratio to the right N-H-C ratios
################################################################################
def ppm2fid(ppm, spectral_data):
"""
Convert ppm information into fid number
"""
Frequency, MiddlePPM, SpectralWidth, NumberOfPoints = spectral_data
return int((NumberOfPoints/2 - ((ppm-MiddlePPM) * Frequency * NumberOfPoints) / SpectralWidth) % NumberOfPoints)
###########################
def fid2ppm(fid, spectral_data):
"""
Convert fid number into ppm information
"""
Frequency, MiddlePPM, SpectralWidth, NumberOfPoints = spectral_data
return MiddlePPM + (NumberOfPoints*SpectralWidth - 2*fid*SpectralWidth) / (2.0*Frequency*NumberOfPoints)
###########################
def distance(pos1, pos2):
"""
Calculate Euclidean distance between two points
"""
distance_value = 0.0
for (p1, p2) in (pos1, pos2):
distance_value += (p1 - p2)**2
return math.sqrt(distance_value)
###########################
def ceil(number):
"""
Return the closest higher integer to the number
"""
if number - int(number) != 0:
number = int(number) + 1
return int(number)
###########################
def Gauss(Peak_coordinate, Coordinate, Szoras):
""" gaussian peak """
return (1/(Szoras*math.sqrt(2.0*math.pi)))*math.exp(-1*(Peak_coordinate-Coordinate)**2/(2.0*Szoras**2))
def Lorentz(Peak_coordinate, Coordinate, Szoras):
""" lorentzian peak """
return 1/(math.pi*Szoras*(1+((Peak_coordinate-Coordinate)/float(Szoras))**2))
###########################
def parabolic(x, p):
"""
Fit a parabolic to the tip of the peaks
"""
c, b, a = p
y = a*(x-b)**2 + c
return y
###########################
def linewidth_fit(x, p):
"""
Linewidth fit error function
"""
lw, [height, peak_pos] = p
value = height * math.exp(-1 * (peak_pos - x)**2 / (2.0 * lw**2))
print 'param',lw, height, peak_pos, value
return value
def linewidth_fit2(x, p):
"""
Linewidth fit error function 2
"""
height, lw, peak_pos = p
value = height * math.exp(-1 * (peak_pos - x)**2 / (2.0 * lw**2)) #- 38237.4296875
return value
################################################################################
################################################################################
################################################################################
############################
## Sparky file header class
############################
class SparkyFileHeader(object):
"""
Everything about the sparky file header
- the first 180 bytes in the sparky file
"""
def __init__(self, headerinfo):
"""
"""
self._extract_file_header_information(headerinfo)
self._header_info = {}
#
return None
##########
def _extract_file_header_information(self, header):
"""
"""
infos = struct.unpack('>10s 4c 9s 26s 80s 3x l 40s 4x', header)
self._header_info['Sparky ID' ] = str(infos[0]).strip('\x00')
self._header_info['Number of Axis' ] = ord(infos[1]) #
self._header_info['Number of Components'] = ord(infos[2]) # = 1 for real data
self._header_info['Encoding' ] = ord(infos[3])
self._header_info['Version' ] = ord(infos[4]) # = 2 for current format
self._header_info['Owner' ] = str(infos[5]).strip('\x00')
self._header_info['Date' ] = str(infos[6]).strip('\x00')
self._header_info['Comment' ] = str(infos[7]).strip('\x00')
self._header_info['Seek Position' ] = str(infos[8]).strip('\x00')
self._header_info['Scratch' ] = str(infos[9]).strip('\x00')
return None
##########
def _get_number_of_axis(self):
return self._header_info['Number of Axis']
##########
number_of_axis = property(_get_number_of_axis)
##########
################################################################################
################################################################################
################################################################################
############################
## Sparky axis class
############################
class SparkyFileAxis(object):
"""
Everything what axis must know
- 128 bytes for each axis
"""
def __init__(self, axisinfo):
"""
"""
self._extract_axis_information(axisinfo)
return None
##########
def _extract_axis_information(self, axisdata):
"""
"""
infos = struct.unpack('>6s h 3I 6f 84s', axisdata)
self._axis_info = {}
self._axis_info['Nucleus' ] = str(infos[0]).strip('\x00') # nucleus name (1H, 13C, 15N, 31P, ...
self._axis_info['Spectral Shift' ] = infos[1] # to left or right shift
self._axis_info['Number of Points' ] = infos[2] # # of active data points - integer number of data points along this axis
self._axis_info['Size' ] = infos[3] # total size of axis
self._axis_info['BlockSize' ] = infos[4] # # of points per cache block - integer tile size along this axis
self._axis_info['Spectrometer frequency'] = infos[5] # MHz - float spectrometer frequency for this nucleus (MHz)
self._axis_info['Spectral width' ] = infos[6] # Hz - float spectral width
self._axis_info['xmtr frequency' ] = infos[7] # transmitter offset (ppm) - float center of data (ppm)
self._axis_info['Zero order' ] = infos[8] # phase corrections
self._axis_info['First order' ] = infos[9] # phase corrections
self._axis_info['First pt scale' ] = infos[10] # scaling for first point
self._axis_info['Extended' ] = str(infos[11]).strip('\x00') #
#
self._axis_info['Scale'] = []
for fid in range(0, int(self._axis_info['Number of Points']) + 1, 1):
self._axis_info['Scale'].append(fid2ppm(fid, self.frq_carrier_sw_np))
return None
##########
def _get_parameter(self, parameter_name):
return self._axis_info[parameter_name]
##########
def _get_blocksize(self):
return self._get_parameter('BlockSize')
def _get_nucleus(self):
return self.nucleus_info[-1]
def _get_nucleus_info(self):
return self._get_parameter('Nucleus')
def _get_numberofpoints(self):
return self._get_parameter('Number of Points')
def _get_scale(self):
return self._get_parameter('Scale')
def _get_size(self):
return self._get_parameter('Size')
def _get_spectrometer_frequency(self):
return self._get_parameter('Spectrometer frequency')
def _get_spectral_width(self):
return self._get_parameter('Spectral width')
def _get_xmtr_frequency(self):
return self._get_parameter('xmtr frequency')
def _get_infos(self):
return (self.spectrometer_frequency, self.xmtr_frequency,
self.spectral_width, self.number_of_points)
def ppm2index(self, ppm):
index = 0
while (index < self.number_of_points) and (self.scale[index] > ppm):
index += 1
return index
def index2ppm(self, index):
return fid2ppm(index, self.frq_carrier_sw_np)
##########
blocksize = property(_get_blocksize)
nucleus = property(_get_nucleus)
nucleus_info = property(_get_nucleus_info)
number_of_points = property(_get_numberofpoints)
scale = property(_get_scale)
size = property(_get_size)
spectral_width = property(_get_spectral_width)
spectrometer_frequency = property(_get_spectrometer_frequency)
xmtr_frequency = property(_get_xmtr_frequency)
frq_carrier_sw_np = property(_get_infos)
##########
################################################################################
################################################################################
################################################################################
############################
## Sparky spectral object
############################
class SparkySpectrum(object):
"""
"""
def __init__(self, spectralinfo, blocksize_size_for_each_axis, log = True):
"""
Parameters:
===========
* spectralinfo = sparky file content with the spectral information
* blocksize_size_for_each_axis = blocksize,size pairs for the axis
* log = print out file processing information on the fly
"""
self._log = log
self._number_of_dimensions = len(blocksize_size_for_each_axis)
self._d1 = None
self._d2 = None
self._d3 = None
self._Spectrum = []
self._noise_level = None
#
if self._log:
print 'File read has started:',
eval('self._extract_' + str(self.number_of_dimensions) +
'D_data(spectralinfo, blocksize_size_for_each_axis)')
if self._log:
print '100% file read is done.'
return None
##########
def _extract_1D_data(self, Filecontent, Blocksize):
"""
"""
self._Spectrum = list(struct.unpack
('>'+'f'*(len(Filecontent)/4), Filecontent))
return None
##########
def _extract_2D_data(self, Filecontent, Blocksize):
"""
"""
# First dimensional data
FirstDimensionBlockSize = Blocksize[0]['BlockSize']
FirstDimensionSpectralSize = Blocksize[0]['Size']
# Second dimensional data
SecondDimensionBlockSize = Blocksize[1]['BlockSize']
SecondDimensionSpectralSize = Blocksize[1]['Size']
#
Blocksize = FirstDimensionBlockSize * SecondDimensionBlockSize
# The number of blocks needed for a spectral size is
# not necessary an integer number
NumberOfBlocksInSecondDimension = (
ceil(SecondDimensionSpectralSize / float(SecondDimensionBlockSize)))
#---------------------------------
# Rearrange the data from a list to an array
for i_FirstDimension in range(FirstDimensionSpectralSize):
# Print out info to follow the processing
if self._log and i_FirstDimension % 50 == 0:
print '{0:3.2f}%'.format(100.0 * i_FirstDimension
/ FirstDimensionSpectralSize),
#---------------------------------
BlockNumber = (i_FirstDimension / FirstDimensionBlockSize
* NumberOfBlocksInSecondDimension)
PositionWithinBlock = (i_FirstDimension
% FirstDimensionBlockSize
* SecondDimensionBlockSize)
# Concatenate the block portions in a list
SpectralInfo1D = []
#---------------------------------
# Go through all second dimension protion to get a line
for i_SecondDimension in range(NumberOfBlocksInSecondDimension):
# If this is the last Block in line then the dimension is
# not necessary the blocksize
if i_SecondDimension < NumberOfBlocksInSecondDimension:
SecondDimension = SecondDimensionBlockSize
else:
SecondDimension = (SecondDimensionSpectralSize
% SecondDimensionBlockSize)
#---------------------------------
# The actual position within the block; 1 float number = 4 bytes
pos = (4 * (Blocksize * (BlockNumber + i_SecondDimension)
+ PositionWithinBlock))
#---------------------------------
# Unpack the data. Note that the coding is big endian ">"
SpectralInfo1D += list(struct.unpack('>'+'f'*SecondDimension,
Filecontent[pos : pos + 4 * SecondDimension]))
#---------------------------------
# Add a line into the spectrum
self._Spectrum.append(SpectralInfo1D)
return None
##########
def _extract_3D_data(self, Filecontent, Blocksize):
"""
"""
# Third dimensional data
ThirdDimensionBlockSize = Blocksize[0]['BlockSize']
ThirdDimensionSpectralSize = Blocksize[0]['Size']
# Second dimensional data
SecondDimensionBlockSize = Blocksize[1]['BlockSize']
SecondDimensionSpectralSize = Blocksize[1]['Size']
# First dimensional data
FirstDimensionBlockSize = Blocksize[2]['BlockSize']
FirstDimensionSpectralSize = Blocksize[2]['Size']
#
Blocksize = (FirstDimensionBlockSize
* SecondDimensionBlockSize
* ThirdDimensionBlockSize)
#---------------------------------
# The number of blocks needed for a spectral size is not necessary
# an integer number
NumberOfBlocksInFirstDimension = ceil(FirstDimensionSpectralSize
/ float(FirstDimensionBlockSize ))
NumberOfBlocksInSecondDimension = ceil(SecondDimensionSpectralSize
/ float(SecondDimensionBlockSize))
#---------------------------------
# Rearrange the data from a list to an 3D array
for i_ThirdDimension in range(ThirdDimensionSpectralSize):
# Print out log information
if self._log and i_ThirdDimension % 10 == 0:
print '{0:3.2f}%'.format(100.0 * i_ThirdDimension
/ ThirdDimensionSpectralSize),
#---------------------------------
BlockNumberDim3 = ((i_ThirdDimension
/ ThirdDimensionBlockSize)
* NumberOfBlocksInSecondDimension
* NumberOfBlocksInFirstDimension)
PositionWithinBlockDim3 = ((i_ThirdDimension
% ThirdDimensionBlockSize)
* SecondDimensionBlockSize
* FirstDimensionBlockSize)
#---------------------------------
# Collect data of 2D in a variable
SpectralInfo2D = []
# Go through each block in 2D
#for i_SecondDimension in range(SecondDimensionBlockSize * NumberOfBlocksInSecondDimension):
for i_SecondDimension in range(SecondDimensionSpectralSize):
#
BlockNumberDim2 = (BlockNumberDim3
+ (i_SecondDimension / SecondDimensionBlockSize)
* NumberOfBlocksInFirstDimension)
PositionWithinBlockDim2 = (PositionWithinBlockDim3
+ (i_SecondDimension % SecondDimensionBlockSize)
* FirstDimensionBlockSize)
#---------------------------------
# Collect data of 1D in a variable
SpectralInfo1D = []
# Go through each block in 1D
for i_FirstDimension in range(NumberOfBlocksInFirstDimension):
# The last block size might be smaller than a blocksize
if i_FirstDimension < NumberOfBlocksInFirstDimension-1:
FirstDimension = FirstDimensionBlockSize
else:
FirstDimension = FirstDimensionSpectralSize % FirstDimensionBlockSize
#---------------------------------
# Position within block; 1 float number = 4 bytes
pos = 4 * (Blocksize * (BlockNumberDim2 + i_FirstDimension) + PositionWithinBlockDim2)
#---------------------------------
# Unpack the data. NOTE: big endian data storage ">"
SpectralInfo1D += list(struct.unpack('>'+'f'*FirstDimension,Filecontent[pos: pos + 4*FirstDimension]))
#---------------------------------
# Put each 1D slice into the 2D
SpectralInfo2D.append(SpectralInfo1D)
#---------------------------------
# Store a 2D slice into the final array
self._Spectrum.append(SpectralInfo2D)
return None
##########
def intensity(self, position):
"""
Return an intensity value corresponds to the position
"""
data_intensity = 0.0
if self.number_of_dimensions == 1:
data_intensity = (self._Spectrum[position[0] % self.dim1])
if self.number_of_dimensions == 2:
data_intensity = (self._Spectrum[position[1] % self.dim1]
[position[0] % self.dim2])
if self.number_of_dimensions == 3:
data_intensity = (self._Spectrum[position[2] % self.dim1]
[position[1] % self.dim2]
[position[0] % self.dim3])
return data_intensity
##########
def calculate_noise_level(self, number_of_points = 10000):
"""
"""
noise = 0.0
# calculate the average level on a small subset of data
average = 0.0
pre_set_size = 100
for i in range(pre_set_size):
if self.number_of_dimensions == 1:
average += self.intensity([random.randint(0, self.dim1 - 1)])
if self.number_of_dimensions == 2:
average += self.intensity([random.randint(0, self.dim1 - 1),
random.randint(0, self.dim2 - 1)])
if self.number_of_dimensions == 3:
average += self.intensity([random.randint(0, self.dim1 - 1),
random.randint(0, self.dim2 - 1),
random.randint(0, self.dim3 - 1)])
average /= float(pre_set_size)
# Calculate the actual noise level
numberofdata = 0
sumofdata = 0.0
highestvalue = 0.0
i = 0
while (i <= number_of_points*2) and (numberofdata <= number_of_points):
if self.number_of_dimensions == 1:
value = abs(self.intensity([random.randint(0, self.dim1 - 1)]))
if self.number_of_dimensions == 2:
value = abs(self.intensity([random.randint(0, self.dim1 - 1),
random.randint(0, self.dim2 - 1)]))
if self.number_of_dimensions == 3:
value = abs(self.intensity([random.randint(0, self.dim1 - 1),
random.randint(0, self.dim2 - 1),
random.randint(0, self.dim3 - 1)]))
# Only count a value if that is not far from the average
# (= not a peak)
if value < average * 5:
numberofdata += 1
sumofdata += value
average = sumofdata / float(numberofdata)
if value > highestvalue:
highestvalue = value
i += 1
# Cut back from the highest to have a bit of noise
noise = highestvalue / 1.2
# Return the value as well
return noise
##########
def slice_1d(self, minmax, orderXY):
"""
Return a 1D sub spectrum
"""
highestvalue = 0.0
lowestvalue = 0.0
spectrum = []
#---------------------------------
# 1D
if self.number_of_dimensions == 1:
for x in range(min(minmax['X']), max(minmax['X']), 1):
value = self.intensity([x])
spectrum.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
#---------------------------------
# 2D
if self.number_of_dimensions == 2:
y = min(minmax['Y'])
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0] == '0':
value = self.intensity([y, x])
else:
value = self.intensity([x, y])
spectrum.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
#---------------------------------
# 3D
if self.number_of_dimensions == 3:
y = min(minmax['Y'])
z = min(minmax['Z'])
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0:2] == '02':
value = self.intensity([y, z, x])
elif orderXY[0:2] == '01':
value = self.intensity([z, y, x])
elif orderXY[0:2] == '20':
value = self.intensity([x, z, y])
elif orderXY[0:2] == '21':
value = self.intensity([x, y, z])
elif orderXY[0:2] == '10':
value = self.intensity([z, x, y])
elif orderXY[0:2] == '12':
value = self.intensity([y, x, z])
else:
value = 0.0
spectrum.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
return highestvalue, lowestvalue, spectrum
##########
def slice_2d(self, minmax, orderXY):
"""
Return a 2D sub spectrum
"""
highestvalue = 0.0
lowestvalue = 0.0
spectrum = []
#---------------------------------
# 2D
if self.number_of_dimensions == 2:
for y in range(min(minmax['Y']), max(minmax['Y']), 1):
fid = []
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0] == '0':
value = self.intensity([y, x])
else:
value = self.intensity([x, y])
fid.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
spectrum.append(fid)
# 3D
if self.number_of_dimensions == 3:
z = min(minmax['Z'])
for y in range(min(minmax['Y']), max(minmax['Y']), 1):
fid = []
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0:2] == '02':
value = self.intensity([y, z, x])
elif orderXY[0:2] == '01':
value = self.intensity([z, y, x])
elif orderXY[0:2] == '20':
value = self.intensity([x, z, y])
elif orderXY[0:2] == '21':
value = self.intensity([x, y, z])
elif orderXY[0:2] == '10':
value = self.intensity([z, x, y])
elif orderXY[0:2] == '12':
value = self.intensity([y, x, z])
else:
value = 0.0
fid.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
spectrum.append(fid)
return highestvalue, lowestvalue, spectrum
##########
def slice_3d(self, minmax, orderXY):
"""
Return a 3D sub spectrum
"""
highestvalue = 0.0
lowestvalue = 0.0
spectrum = []
#---------------------------------
# 3D
if self.number_of_dimensions == 3:
for z in range(min(minmax['Z']), max(minmax['Z']), 1):
fid2d = []
for y in range(min(minmax['Y']), max(minmax['Y']), 1):
fid = []
for x in range(min(minmax['X']), max(minmax['X']), 1):
if orderXY[0:2] == '02':
value = self.intensity([y, z, x])
elif orderXY[0:2] == '01':
value = self.intensity([z, y, x])
elif orderXY[0:2] == '20':
value = self.intensity([x, z, y])
elif orderXY[0:2] == '21':
value = self.intensity([x, y, z])
elif orderXY[0:2] == '10':
value = self.intensity([z, x, y])
elif orderXY[0:2] == '12':
value = self.intensity([y, x, z])
else:
value = 0.0
fid.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
fid2d.append(fid)
spectrum.append(fid2d)
return highestvalue, lowestvalue, spectrum
##########
def _get_dimension1(self):
if not self._d1 and self.number_of_dimensions >= 1:
self._d1 = len(self._Spectrum)
return self._d1
def _get_dimension2(self):
if not self._d2 and self.number_of_dimensions >= 2:
self._d2 = len(self._Spectrum[0])
return self._d2
def _get_dimension3(self):
if not self._d3 and self.number_of_dimensions >= 3:
self._d3 = len(self._Spectrum[0][0])
return self._d3
def _get_dimensions(self):
return self._number_of_dimensions
def _get_noiselevel(self):
if not self._noise_level:
self._noise_level = self.calculate_noise_level()
return self._noise_level
##########
dim1 = property(_get_dimension1)
dim2 = property(_get_dimension2)
dim3 = property(_get_dimension3)
number_of_dimensions = property(_get_dimensions)
noise_level = property(_get_noiselevel)
##########
################################################################################
################################################################################
################################################################################
class ChemicalShift(object):
"""
Storing one chemical shift
"""
def __init__(self):
self._value = None
self._original_value = None
return None
##########
def shift(self, value):
self.chemical_shift = self.chemical_shift + value
return None
##########
def _get_cs(self):
if not self._value:
value = 0.0
else:
value = self._value
return value
def _set_cs(self, newvalue):
self._value = newvalue
if not self._original_value:
self._original_value = newvalue
return None
def _get_original_cs(self):
if not self._original_value:
value = 0.0
else:
value = self._original_value
return value
##########
chemical_shift = property(_get_cs, _set_cs)
original_chemical_shift = property(_get_original_cs)
##########
class Peak(object):
"""
Storing all chemical shift for a peak:
Parameters:
===========
* adjusted
* info
* name
* nucleus
* chemical_shift
* original_chemical_shift
* add_chemical_shift
* shift
"""
def __init__(self):
self.CSs = {}
self._adjusted = False
self._intensity = None
return None
def add_chemical_shift(self, nucleus, chemical_shift):
if not nucleus in self.CSs:
self.CSs[nucleus] = ChemicalShift()
self.CSs[nucleus].chemical_shift = float(chemical_shift)
return None
def chemical_shift(self, nucleus):
if nucleus in self.CSs:
value = self.CSs[nucleus].chemical_shift
else:
value = 0.0
return value
def original_chemical_shift(self, nucleus):
if nucleus in self.CSs:
value = self.CSs[nucleus].original_chemical_shift
else:
value = 0.0
return value
def shift(self, nucleus, value):
if nucleus in self.CSs:
self.CSs[nucleus].shift(value)
return None
def set_peak_info(self, line, peaknameinfo):
colomns = line.split()
self._info = colomns[0]
spins = self.info.split('-')
self._peakname = eval('spins[0]' + peaknameinfo)
for i,spin in enumerate(spins):
self.add_chemical_shift(spin[-1], colomns[i+1])
return None
##########
def _get_adjusted(self):
return self._adjusted
def _set_adjusted( self, value):
self._adjusted = value
return None
def _get_info(self):
return self._info
def _set_info(self, value):
self._info = value
return None
def _get_intensity(self):
if not self._intensity:
value = 0.0
else:
value = self._intensity
return value
def _set_intensity(self, value):
self._intensity = value
return None
def _get_name(self):
return self._peakname
def _set_name(self, value):
self._peakname = value
return None
def _get_nucleuses(self):
return self.CSs.keys()
##########
adjusted = property(_get_adjusted, _set_adjusted)
info = property(_get_info, _set_info)
intensity = property(_get_intensity, _set_intensity)
name = property(_get_name, _set_name)
nucleus = property(_get_nucleuses)
##########
class Peaklist(object):
"""
Everything about peaklists
"""
def __init__(self):
self._peaks = {}
self._number_of_peaks = -1
return None
# public procedures
def read_peaklist(self, filename, info):
"""
"""
self.filename = filename
try:
peaklist_file = open(filename, 'r')
except IOError:
print 'Error opening ' + filename + '!!! Please check it!'
exit()
lines = peaklist_file.readlines()
peaklist_file.close()
num = 0
for line in lines:
if (not 'Assignment' in line) and (len(line) > 5):
line.strip()
self._peaks[num] = Peak()
self._peaks[num].set_peak_info(line, info)
num += 1
self._number_of_peaks = num - 1
return None
def print_peaklist(self, filename = None):
"""
"""
if filename:
fil = open(filename,'w')
for i in range(self.number_of_peaks):
nucleus = self._peaks[i].nucleus
nucleus.reverse()
line = self._peaks[i].name
for j, nuc in enumerate(nucleus):
if j == 0:
line = ''.join([line, '_', nuc])
else:
line = ''.join([line, '-', nuc])
for nuc in nucleus:
line = ' '.join([line, str(self._peaks[i].chemical_shift(nuc))])
if filename:
line = ''.join([line, '\n'])
fil.write(line)
else:
print line
if filename:
fil.close()
return None
def add_peak(self, peak_info):
"""
Needs a hash line {'N':129.3,'H':8.5,'C':178.2}
"""
number = self.number_of_peaks
self._peaks[number] = Peak()
for info in peak_info:
self._peaks[number].add_chemical_shift(info, peak_info[info])
self._peaks[number].info = str(number + 1)
self._peaks[number].name = str(number + 1)
self._number_of_peaks += 1
return None
def adjust(self, num):
self._peaks[num].adjusted = True
return None
def adjusted(self, num):
return self._peaks[num].adjusted
def add_cs(self, num, nucleus, value):
self._peaks[num].add_chemical_shift(nucleus, value)
return None
def cs(self, num, nucleus):
return self._peaks[num].chemical_shift(nucleus)
def add_intensity(self, num, value):
self._peaks[num].intensity = value
return None
def intensity(self, num):
return self._peaks[num].intensity
def original_cs(self, num, nucleus):
return self._peaks[num].original_chemical_shift(nucleus)
def name(self, num):
return self._peaks[num].name
def nucleus(self, num):
return self._peaks[num].nucleus
def info(self, num):
return self._peaks[num].info
def shift(self, num, nucleus, value):
self._peaks[num].shift(nucleus, value)
return None
# private procedures
##########
def _get_number_of_peaks(self):
return self._number_of_peaks + 1
##########
number_of_peaks = property(_get_number_of_peaks)
##########
################################################################################
################################################################################
################################################################################
class Sparky_plot(object):
"""
Make a plot of 1d or 2d spectrum
"""
_plot_already = None
def __init__(self):
self._noiselevel = 0.0
self._number_of_contours = 25
self._factor = 1.1
self._colors = []
self._first_plot_on_figure = False
self._plot_negatives = True
#
self.mycolor = zcolor.MyColor()
self.colors = [self.mycolor.series(i, self.number_of_contours, 0, 330, 100.0) for i in range(self.number_of_contours)]
#
if not self._plot_already:
self._plot_already = 1
self.newfigure()
return None
##########
def newfigure(self):
plt.figure()
self._plot_already = 1
self._first_plot_on_figure = True
return None
##########
def plot_1d(self, x_axis_scale, spectral_data, axes_label, color = None):
"""
Plot a 1D slice
"""
if self._first_plot_on_figure:
# plot zero
plt.plot([x_axis_scale[0],x_axis_scale[-1]],[0.0,0.0],'k-')
# plot noise level
plt.plot([x_axis_scale[0],x_axis_scale[-1]],[self.noise_level,self.noise_level],'k--')
#----------------
# color selection
if not color:
try:
plotcolor = self.colors[0]
except IndexError:
plotcolor = 'k'
else:
plotcolor = color
# plot the data
plt.plot(x_axis_scale, spectral_data, color = plotcolor)
# set the x axis limit
plt.xlim(x_axis_scale[0],x_axis_scale[-1])
#
if self._first_plot_on_figure:
plt.xlabel(axes_label[0] + ' (ppm)', size = 15)
plt.ylabel('Intensity', size = 15)
return None
##########
def plot_2d(self, x_axis_scale, y_axis_scale, spectral_data, axes_label, color = None):
"""
Plot a 2D spectrum
"""
# Colors
if not color:
if len(self.colors) < self.number_of_contours:
plotcolors = []
for i in range(self.number_of_contours):
plotcolors.append([0.0, 0.0, 0.0])
else:
plotcolors = self.colors
else:
plotcolors = color
# Contour levels
contourlevels = [self.noise_level * self.factor**i for i in range(self.number_of_contours)]
# plot positive contours
plt.contour(x_axis_scale, y_axis_scale, spectral_data, contourlevels, colors = plotcolors)
if self.plot_negatives:
# plot negatives if needed!
plt.contour(x_axis_scale, y_axis_scale, spectral_data, [-1*i for i in contourlevels], colors = [[0.0,0.0,0.0] for i in range(self.number_of_contours)])
if self._first_plot_on_figure:
# Invert the axis direction
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# Put label on axes
plt.xlabel(axes_label[0] + ' (ppm)', size = 15)
plt.ylabel(axes_label[1] + ' (ppm)', size = 15)
return None
##########
def show(self, filename = None):
"""
Show or save the figure depending on whether filename is provided
"""
if not filename:
plt.show()
else:
plt.savefig(filename)
return None
##########
def plot_peaklist_2d(self, peaklist, orderXY):
"""
"""
print 'Peaks on the plot:'
print ' # name cs1 cs2 intensity adjusted'
print '--------------------------------------------------------'
for number, peak in enumerate(peaklist):
#
info = peak
loc_x = peaklist[peak][orderXY[0]]
loc_y = peaklist[peak][orderXY[1]]
adj = peaklist[peak]['Adjusted']
intensity = peaklist[peak]['Intensity']
#
print '{0:3d}. {1:>5s} {2:7.3f} {3:7.3f} {4:14.3f} '.format(number + 1, peak, loc_y, loc_x, intensity),
if adj:
print 'true'
labelcolor = 'black'
else:
print 'false'
labelcolor = 'red'
#
dx = 0.0
dy = 0.2
plt.gca().annotate(info,
xy = (loc_x, loc_y),
color = labelcolor,
xytext = (loc_x - dx,loc_y - dy),
arrowprops = dict(arrowstyle = "-|>",
connectionstyle = "arc3",
facecolor = labelcolor))
print '--------------------------------------------------------'
return None
##########
def set_factor(self, highestvalue):
#
self.factor = math.exp(math.log(highestvalue /float(self.noise_level)) * 1.0/(float(self.number_of_contours)))
return self.factor
##########
def _set_colors(self, levels):
self._colors = levels
return None
def _get_colors(self):
return self._colors
def _set_factor(self, level):
self._factor = level
return None
def _get_factor(self):
return self._factor
def _set_noiselevel(self, level):
self._noiselevel = level
return None
def _get_noiselevel(self):
return self._noiselevel
def _set_number_of_contours(self, level):
self._number_of_contours = level
return None
def _get_number_of_contours(self):
return self._number_of_contours
def _set_plot_negatives(self, level):
self._plot_negatives = level
return None
def _get_plot_negatives(self):
return self._plot_negatives
##########
colors = property(_get_colors, _set_colors)
factor = property(_get_factor, _set_factor)
noise_level = property(_get_noiselevel, _set_noiselevel)
number_of_contours = property(_get_number_of_contours, _set_number_of_contours)
plot_negatives = property(_get_plot_negatives, _set_plot_negatives)
##########
################################################################################
################################################################################
################################################################################
class ZB_spectrum(object):
"""
"""
def __init__(self, filename):
"""
"""
self._peaklist = Peaklist()
#
try:
filehandler = open(filename, 'rb')
except IOError:
print ('ERROR!!!\nPlease check the ' + filename + ' location, '
'because an error happened during the file open...\n')
exit()
#---------------------------------
# Read the file header information
self.header = SparkyFileHeader(filehandler.read(180))
#---------------------------------
# Read the axes information
self.axis = {}
self.axis_order = ''
blocksize_info = []
for i in range(self.header.number_of_axis):
axis = SparkyFileAxis(filehandler.read(128))
self.axis_order += axis.nucleus
self.axis[self.axis_order[-1]] = axis
blocksize_info.append({'BlockSize':axis.blocksize, 'Size':axis.size})
#---------------------------------
# Read the spectral information
self.spectrum = SparkySpectrum(filehandler.read(), blocksize_info)
#---------------------------------
filehandler.close()
#
return None
##########
def _get_limits(self, limit, nucleus):
if limit[nucleus] == []:
index_min = 0
index_max = self.axis[nucleus].number_of_points - 1
else:
index_min = self.axis[nucleus].ppm2index(max(limit[nucleus]))
index_max = self.axis[nucleus].ppm2index(min(limit[nucleus]))
return index_min, index_max + 1
##########
def plot1d(self, limits, orderXY):
"""
Parameters:
===========
* limits: a hash with the limits in ppm
* orderxY:
example: plot1d({'H':[5.5,9.2],'N':[105,122]})
"""
if not orderXY:
orderXY = 'HN'
# Dealing with the limits
xy_limits = {}
xy_limits['X'] = self._get_limits(limits, orderXY[0])
if self.header.number_of_axis > 1:
xy_limits['Y'] = self._get_limits(limits, orderXY[1])
if self.header.number_of_axis > 2:
xy_limits['Z'] = self._get_limits(limits, orderXY[2])
# Dealing with the order
axes_order = ''
for i in range(len(orderXY)):
axes_order += str(self.axis_order.index(orderXY[i]))
#
highest, lowest, spectral_data = self.spectrum.slice_1d(xy_limits, axes_order)
scale = self.axis[orderXY[0]].scale[xy_limits['X'][0] : xy_limits['X'][1]]
self.figure = Sparky_plot()
self.figure.noise_level = self.spectrum.noise_level
self.figure.plot_1d(scale, spectral_data, self.axis[orderXY[0]].nucleus_info, 'b')
print '#############################################'
print '### P L O T # P A R A M E T E R S ###'
print '#############################################'
print 'Noise level =', self.figure.noise_level
print 'Highest value =', highest
print 'Lowest value =', lowest
print '#############################################'
return None
##########
def plot(self, limits, orderXY = None):
"""
Parameters:
===========
* limits: a hash with the limits in ppm
* orderxY:
example: plot1d({'H':[5.5,9.2],'N':[105,122]})
"""
if not orderXY:
orderXY = 'HN'
# Dealing with the limits
xy_limits = {}
xy_limits['X'] = self._get_limits(limits, orderXY[0])
xy_limits['Y'] = self._get_limits(limits, orderXY[1])
if self.header.number_of_axis > 2:
xy_limits['Z'] = self._get_limits(limits, orderXY[2])
# Dealing with the order
axes_order = ''
for i in range(len(orderXY)):
axes_order += str(self.axis_order.index(orderXY[i]))
# Axis labels
labels = []
for o in orderXY:
labels.append(self.axis[o].nucleus_info)
#----------------
highest, lowest, spectral_data = self.spectrum.slice_2d(xy_limits, axes_order)
x_scale = self.axis[orderXY[0]].scale[xy_limits['X'][0] : xy_limits['X'][1]]
y_scale = self.axis[orderXY[1]].scale[xy_limits['Y'][0] : xy_limits['Y'][1]]
self.figure = Sparky_plot()
self.figure.noise_level = self.spectrum.noise_level
self.figure.set_factor(highest)
print '#############################################'
print '### P L O T # P A R A M E T E R S ###'
print '#############################################'
print 'Noise level =', self.figure.noise_level
print 'Factor =', self.figure.factor
print 'Highest value =', highest
print 'Lowest value =', lowest
print '#############################################'
self.figure.plot_2d(x_scale, y_scale, spectral_data, labels)
# prepare peaklist
peaklist = {}
for i in range(self._peaklist.number_of_peaks):
within = True
for o in orderXY:
cs = self._peaklist.cs(i, o)
if limits[o] != []:
if (cs < min(limits[o])) or (max(limits[o]) < cs):
within = False
if within:
peaklist[self._peaklist.name(i)] = {}
peaklist[self._peaklist.name(i)][orderXY[0]] = self._peaklist.cs(i, orderXY[0])
peaklist[self._peaklist.name(i)][orderXY[1]] = self._peaklist.cs(i, orderXY[1])
peaklist[self._peaklist.name(i)]['Adjusted'] = self._peaklist.adjusted(i)
peaklist[self._peaklist.name(i)]['Intensity'] = self.spectrum.intensity([
self.axis[orderXY[0]].ppm2index(self._peaklist.cs(i, orderXY[0])),
self.axis[orderXY[1]].ppm2index(self._peaklist.cs(i, orderXY[1]))])
# plot peaklist
self.figure.plot_peaklist_2d(peaklist, orderXY)
return None
###########################
def show(self, filename = ''):
"""
"""
self.figure.show(filename)
return None
###########################
def _extremes_finder(self, position, dimension, axis_order, find_max = True):
"""
find positive and negative extremes on the spectrum
Parameters:
===========
* position = spectrum starting position for the peak finding,
order must be same as in the spectrum
* dimension = find local maximum or minimum in 2D or 3D
* find_max = maximum or minimum finding
Return:
=======
* local extreme
"""
checklist = [[-1, 0, 0],[+1, 0, 0], # x
[ 0,-1, 0],[ 0,+1, 0], # y
[-1,-1, 0],[+1,-1, 0], # xy
[-1,+1, 0],[+1,+1, 0], # xy
[ 0, 0,-1],[ 0, 0,+1], # z
[-1, 0,-1],[+1, 0,-1], # xz
[-1, 0,+1],[+1, 0,+1], # xz
[ 0,-1,-1],[ 0,-1,-1], # yz
[ 0,+1,+1],[ 0,+1,+1]] # yz
#
spectral_width = []
for o in axis_order:
spectral_width.append(eval('self.spectrum.dim' + str(int(o)+1)))
#spectral_width = [self.spectrum.dim2, self.spectrum.dim1, self.spectrum.dim3]
# If the dimension 2D, we find check the value in x,y otherwise in x,y,z
if dimension == 2:
checklist_size = 4
else:
checklist_size = len(checklist)
# minimum or maximum finder
finder_type = [['min','<'],['max','>']][find_max]
# It goes till it finds a local maximum
not_on_an_extreme_value = True
while not_on_an_extreme_value:
# check all values according to the checklist
checked_values = []
for check in checklist[0 : checklist_size]:
checked_values.append(self.spectrum.intensity([pos + ch for (pos, ch) in zip(position[0 : dimension], check[0 : dimension])]))
# if the position data is the most extreme, than we are done
most_extreme_in_array = eval(eval('finder_type[0]') + '(checked_values)')
if eval('self.spectrum.intensity(position)' + eval('finder_type[1]') + 'most_extreme_in_array'):
not_on_an_extreme_value = False
else:
# modifiy the position to the highest
checked_values_max_index = checked_values.index(most_extreme_in_array)
for i in range(dimension):
position[i] += checklist[checked_values_max_index][i]
position[i] %= spectral_width[i]
return position
###########################
def _find_peak_1d(self, data, noiselevel):
hits = []
direction = True
for i in range(len(data)-1):
if data[i] > data[i+1] and data[i] > noiselevel and direction:
hits.append(i)
direction = False
if data[i] < data[i+1]:
direction = True
if len(hits) > 0 and False:
plt.figure()
plt.plot(range(len(data)),data)
plt.plot(hits,[50000 for j in range(len(hits))], 'k', marker= 'o', linestyle = '')
plt.show()
return hits
###########################
def _find_peak_2d(self, data2d, noiselevel, order):
hits = {}
for i, data1d in enumerate(data2d):
hit1d = self._find_peak_1d(data1d, noiselevel)
for hit in hit1d:
hits[' '.join(str(d) for d in self._extremes_finder([hit, i], 2, order))] = 0
peaks = []
for hit in hits:
peaks.append(hit.split())
return peaks
###########################
def peak_finder_2d(self, orderXY = 'HN', times_noiselevel = 1.5):
# Dealing with the order
axes_order = ''
for i in range(len(orderXY)):
axes_order += str(self.axis_order.index(orderXY[i]))
#
xy = {}
xy['X'] = [0, self.axis[orderXY[0]].number_of_points - 1]
xy['Y'] = [0, self.axis[orderXY[1]].number_of_points - 1]
#
print 'Finding peaks...',
peaklist = {}
for i,peak in enumerate(self._find_peak_2d(self.spectrum.slice_2d(xy, axes_order)[-1],self.spectrum.noise_level*times_noiselevel, axes_order)):
peak_info = {}
for j, o in enumerate(orderXY):
peak_info[o] = self.axis[o].index2ppm(float(peak[j]))
self._peaklist.add_peak(peak_info)
self._peaklist.adjust(self._peaklist.number_of_peaks - 1)
print str(i + 1) + ' peaks found!'
return peaklist
###########################
def _one_peak(self, peaknumber, orderXY):
if (0 <= peaknumber) and (peaknumber < self._peaklist.number_of_peaks):
window = {'H':0.08,'N':0.5,'C':0.5}
limit = {}
for o in orderXY:
limit[o] = [self._peaklist.cs(peaknumber, o) - window[o],self._peaklist.cs(peaknumber, o) + window[o]]
self.plot(limit, orderXY)
lim1d = {}
o = orderXY[0]
lim1d[o] = [self._peaklist.cs(peaknumber, o) - window[o], self._peaklist.cs(peaknumber, o) + window[o]]
o = orderXY[1]
lim1d[o] = [self._peaklist.cs(peaknumber, o)]
self.plot1d(lim1d,orderXY)
lim1dd = {}
o = orderXY[1]
lim1dd[o] = [self._peaklist.cs(peaknumber, o) - window[o], self._peaklist.cs(peaknumber, o) + window[o]]
o = orderXY[0]
lim1dd[o] = [self._peaklist.cs(peaknumber, o)]
self.plot1d(lim1dd,orderXY[1]+orderXY[0])
return None
###########################
def _get_spectrum_around_peak(self, axis_order, position):
"""
It returns 1d slices of the spectrum for peak fitting
Parameters:
===========
* axis_order = nucleus order in XYZ format
* position = info as in spectrum
Returns:
========
* One dimensional slices: all, left, right, top
"""
topwindow = 2
permutation = {1 : {0: {'left':[ -1], 'right':[ +1]}},
2 : {0: {'left':[ 0, -1], 'right':[ 0, +1]},
1: {'left':[ -1, 0], 'right':[ +1, 0]}},
3 : {0: {'left':[ 0, 0, -1], 'right':[ 0, 0, +1]},
1: {'left':[ 0, -1, 0], 'right':[ 0, +1, 0]},
2: {'left':[-1, 0, 0], 'right':[+1, 0, 0]}}}
slices = {}
for dimension in axis_order:
slices[dimension] = {}
# Get the left and the right side of the peak separately
for direction in ['left','right']:
# Start from the original postion
pos = []
for p in position:
pos.append(p)
# Collect the data
tomb = []
while self.spectrum.intensity(pos) >= self.spectrum.noise_level:
tomb.append(self.spectrum.intensity(pos))
for j in range(len(pos)):
pos[j] += permutation[len(position)][axis_order.index(dimension)][direction][j]
# Store the data
slices[dimension][direction] = tomb
# extract the whole peak and just the top part
slices[dimension]['all'] = []
slices[dimension]['top'] = []
for i in range(len(slices[dimension]['left'])):
slices[dimension]['all'].append(slices[dimension]['left'][len(slices[dimension]['left']) - i - 1])
if i <= topwindow:
slices[dimension]['top'].append(slices[dimension]['left'][topwindow - i])
for i in range(1,len(slices[dimension]['right'])):
slices[dimension]['all'].append(slices[dimension]['right'][i])
if i <= topwindow:
slices[dimension]['top'].append(slices[dimension]['right'][i])
return slices
###########################
def _fit_one_slice_around_peak(self, spectrum, pos):
"""
Fit a 1d array with a gaussian or lorentian curve
"""
fit = GF.Fit_general(range(len(spectrum)),
spectrum,
[max(spectrum), len(spectrum)*0.7],
linewidth_fit2,
z = [pos for i in range(len(spectrum))],
Log = False,
NoErrors = 'NoErrors')
print fit.Value, fit.Chi2/len(spectrum)
# a,b = fit.GenerateCurve(0,len(spectrum))
# plt.figure()
# plt.plot(range(len(spectrum)), spectrum,'k',linestyle='',marker='o')
# plt.plot(a,b)
# plt.show()
return fit.Value
###########################
def peakfit(self, peaknumber):
"""
"""
peakposition = []
cs = []
axisorder = ''
for i in range(len(self.axis_order), 0, -1):
ax = self.axis_order[i - 1]
axisorder += ax
cs.append(self._peaklist.cs(peaknumber, ax))
peakposition.append(self.axis[ax].ppm2index(self._peaklist.cs(peaknumber, ax)))
#
slices = self._get_spectrum_around_peak(axisorder, peakposition)
# fitting the tip of the peak
intensity = []
new_index = []
linewidth = {}
for i,ax in enumerate(axisorder):
print ax
linewidth[ax] = []
spectrum = slices[ax]['top']
fit = GF.Fit_general(range(len(spectrum)), spectrum, [max(spectrum), len(spectrum)//2, -1E+5], parabolic, Log = False, NoErrors = 'NoErrors')
intensity.append(fit.Value[0])
new_index.append(fit.Value[1] - len(spectrum)//2)
# a,b = fit.GenerateCurve(0,len(spectrum)-1)
# plt.figure()
# plt.plot(range(len(spectrum)), spectrum,'k',linestyle='',marker='o')
# plt.plot(a,b)
# plt.show()
# fit the sides of the peak
for side in ['left','right','all']:
spectrum = slices[ax][side]
lw = self._fit_one_slice_around_peak(spectrum, spectrum.index(max(spectrum)) + new_index[-1])
linewidth[ax].append(lw[1])
print 'intensity:',sum(intensity) / len(intensity), intensity
for i,ax in enumerate(axisorder):
print 'position:',ax, self.axis[ax].index2ppm(peakposition[i] + new_index[i])
print 'lw:',min(linewidth[ax]),self.axis[ax].index2ppm(min(linewidth[ax]))*self.axis[ax].spectrometer_frequency
print axisorder
print cs
print peakposition
print new_index
exit()
window = 3
max_window_peak = 8
order = {1:['0'], 2:['10','01'],3:['210','102','021']}
axis = ['X','Y','Z']
nucleuses = self._peaklist.nucleus(peaknumber)
#
index = {}
for nuc in nucleuses:
index[nuc] = self.axis[nuc].ppm2index(self._peaklist.cs(peaknumber, nuc))
for orderXY in order[len(nucleuses)]:
xy = {}
xypeak_left = {}
xypeak_right = {}
for i, o in enumerate(orderXY):
nuc = nucleuses[int(o)]
ax = axis[i]
xy[ax] = [index[nuc]]
xypeak_left[ax] = [index[nuc]]
xypeak_right[ax] = [index[nuc]]
xy['X'] = [xy['X'][0] - window, xy['X'][0] + window + 1]
xypeak_left['X'] = [xypeak_left['X'][0] - max_window_peak, xypeak_left['X'][0]]
xypeak_right['X'] = [xypeak_right['X'][0], xypeak_right['X'][0] + max_window_peak + 1]
rev_order = ''
for o in orderXY:
rev_order = ''.join([o, rev_order])
# Fitting the tip of the peak with a parabolic
spectrum = self.spectrum.slice_1d(xy, rev_order)[2]
spectrum_peak_left = self.spectrum.slice_1d(xypeak_left, rev_order)[2]
spectrum_peak_right = self.spectrum.slice_1d(xypeak_right, rev_order)[2]
fit = GF.Fit_general(range(len(spectrum)), spectrum, [max(spectrum), window, -1E+5], parabolic, Log = False, NoErrors = 'NoErrors')
xaxisnuc = nucleuses[int(orderXY[0])]
index_diff = fit.Value[1] - window
new_index = index[xaxisnuc] + index_diff
ppm = self.axis[xaxisnuc].index2ppm(new_index)
intensity = fit.Value[0]
self._peaklist.add_cs(peaknumber, xaxisnuc, ppm)
if xaxisnuc == 'H':
self._peaklist.add_intensity(peaknumber, intensity)
# Fitting the peak with a gaussian
## fit_left = GF.Fit_general(range(len(spectrum_peak_left)),
## spectrum_peak_left,
## [intensity, 2],
## linewidth_fit2,
## z = [max_window_peak + index_diff for i in range(len(spectrum_peak_left))],
## #z = [(max_window_peak + index_diff, min(spectrum_peak_left)) for i in range(len(spectrum_peak_left))],
## Log = False,
## NoErrors = 'NoErrors')
### fit_left = GF.Fit_general(range(len(spectrum_peak_left)), spectrum_peak_left, [window_peak], linewidth_fit, z = [[intensity, window_peak + index_diff] for i in range(len(spectrum_peak_left))], Log = True, NoErrors = 'NoErrors')
### fit_right = GF.Fit_general(range(len(spectrum_peak_right)), spectrum_peak_right, [window_peak], linewidth_fit, z = [[intensity, index_diff] for i in range(len(spectrum_peak_right))], Log = False, NoErrors = 'NoErrors')
##
## print fit_left.Value
# print fit_right.Value
## a,b = fit_left.GenerateCurve(0,7)
xy = {}
for i, o in enumerate(orderXY):
nuc = nucleuses[int(o)]
ax = axis[i]
xy[ax] = [index[nuc]]
left = []
y = xy['Y'][0]
x = xy['X'][0]
dd = self._get_spectrum_around_peak([y,x], 2)
# print dd
exit()
while self.spectrum.intensity([y,x]) >= self.spectrum.noise_level:
left.append(self.spectrum.intensity([y,x]))
x = x - 1
left.append(self.spectrum.intensity([y,x]))
left.reverse()
print len(left) + index_diff
left_fit = GF.Fit_general(range(len(left)),
left,
[max(left), 1.0],
linewidth_fit2,
z = [len(left) - 1 + index_diff for i in range(len(left))],
Log = True,
NoErrors = 'NoErrors')
e,f = left_fit.GenerateCurve(0,7)
plt.figure()
## plt.plot(range(len(spectrum_peak_left)), spectrum_peak_left,'k',marker = 'o',linestyle= '')
## plt.plot(a,b)
plt.plot(range(len(left)), left,'r',marker = 'o',linestyle= '')
plt.plot(e,f,'r--')
plt.show()
exit()
return None
###########################
def read_peaklist(self, peaklist_filename, info = '[:-2]'):
self._peaklist.read_peaklist(peaklist_filename, info)
return None
###########################
def print_peaklist(self):
self._peaklist.print_peaklist()
return None
###########################
def save_peaklist(self, peaklist_filename):
self._peaklist.print_peaklist(peaklist_filename)
return None
###########################
def _get_noiselevel(self):
return self.spectrum.noise_level
###########################
noise_level = property(_get_noiselevel)
###########################
################################################################################
################################################################################
################################################################################
class SparkyFile(object):
"""
"""
###########################
Plotting_parameters = []
###########################
def __init__(self, filename, log = True):
"""
Parameters:
* filename = A sparky file with path information
* log = True to print out information during processing
"""
# Information on dimensionarity of the measured data
self._FileHeader_ = {}
# Information on measured axes
self.myaxis = {}
#
self._AxisOrder_ = []
# Spectrum data
self._Spectrum_ = []
# Peaklist information
self._Peaklist_ = {}
# Store the peaklist keys in order of the read in
self._Peaklistkeysorder_ = []
#
self._Peaklistchemicalshiftorder_ = []
#
self._PeaklistDoNotCare_ = []
#
self.Noiselevel = None
#---------------------------------
self.log = log
# Open the sparky file
try:
filehandler = open(filename, 'rb')
except IOError:
print ('ERROR!!!\nPlease check the ' + filename + ' location, '
'because an error happened during the file open...\n')
exit()
#---------------------------------
# Read the file header information
data = filehandler.read(180)
head = SparkyFileHeader(data)
print head.number_of_axis
self.GetFileHeaderInformation(data)
#---------------------------------
# Read all axis information
for AxisNumber in range(self._FileHeader_['Number of Axis']):
datax = filehandler.read(128)
self.GetAxisInformation(datax, AxisNumber)
self._AxisOrder_.append(self.myaxis[AxisNumber]['Nucleus'][-1])
# exit()
#---------------------------------
# Only 2D and 3D are ok
if not self.NumberOfAxis in [2, 3]:
print ('Sorry! The dimension of your spectrum (' +
str(self.NumberOfAxis) +
'D) is not handled by this program...\n')
exit()
#---------------------------------
# Calculate the block size information
Blocksize = 1
for AxisNumber in range(self.NumberOfAxis):
Blocksize *= self.myaxis[AxisNumber]['BlockSize']
#---------------------------------
# Read the data from the file
Filecontent = filehandler.read()
#---------------------------------
# Close the file
filehandler.close()
#---------------------------------
# Get the actual specral information
if self.log:
print 'File read has started',
self._Spectrum_ = []
# It can read 2D and 3D datafile
if self.NumberOfAxis in [2,3]:
eval('self._extract_'+str(self.NumberOfAxis)+'D_data(Filecontent, Blocksize)')
#---------------------------------
# Calculate a noise level for the spectrum
self.CalculateNoiseLevel()
#---------------------------------
if self.log:
print '100% file read is done.'
return None
###########################
def GetFileHeaderInformation(self, data):
infos = struct.unpack('>10s 4c 9s 26s 80s 3x l 40s 4x',data)
self._FileHeader_['Sparky ID' ] = str(infos[0]).strip('\x00')
self._FileHeader_['Number of Axis' ] = ord(infos[1]) #
self._FileHeader_['Number of Components'] = ord(infos[2]) # = 1 for real data
self._FileHeader_['Encoding' ] = ord(infos[3])
self._FileHeader_['Version' ] = ord(infos[4]) # = 2 for current format
self._FileHeader_['Owner' ] = str(infos[5]).strip('\x00')
self._FileHeader_['Date' ] = str(infos[6]).strip('\x00')
self._FileHeader_['Comment' ] = str(infos[7]).strip('\x00')
self._FileHeader_['Seek Position' ] = str(infos[8]).strip('\x00')
self._FileHeader_['Scratch' ] = str(infos[9]).strip('\x00')
return None
###########################
def GetAxisInformation(self, data, Number):
infos = struct.unpack('>6s h 3I 6f 84s',data)
self.myaxis[Number] = {}
self.myaxis[Number]['Nucleus' ] = str(infos[0]).strip('\x00') # nucleus name (1H, 13C, 15N, 31P, ...
self.myaxis[Number]['Spectral Shift' ] = infos[1] # to left or right shift
self.myaxis[Number]['Number of Points' ] = infos[2] # # of active data points - integer number of data points along this axis
self.myaxis[Number]['Size' ] = infos[3] # total size of axis
self.myaxis[Number]['BlockSize' ] = infos[4] # # of points per cache block - integer tile size along this axis
self.myaxis[Number]['Spectrometer frequency'] = infos[5] # MHz - float spectrometer frequency for this nucleus (MHz)
self.myaxis[Number]['Spectral width' ] = infos[6] # Hz - float spectral width
self.myaxis[Number]['xmtr frequency' ] = infos[7] # transmitter offset (ppm) - float center of data (ppm)
self.myaxis[Number]['Zero order' ] = infos[8] # phase corrections
self.myaxis[Number]['First order' ] = infos[9] # phase corrections
self.myaxis[Number]['First pt scale' ] = infos[10] # scaling for first point
self.myaxis[Number]['Extended' ] = str(infos[11]).strip('\x00') #
self.myaxis[Number]['Scale'] = []
for i in range(0, int(self.myaxis[Number]['Number of Points']) + 1, 1):
self.myaxis[Number]['Scale'].append(self._fid2ppm(i, infos[5], infos[7], infos[6], infos[2]))
return None
###########################
def _extract_2D_data(self, Filecontent, Blocksize):
"""
"""
# First dimensional data
FirstDimensionBlockSize = self.myaxis[0]['BlockSize']
FirstDimensionSpectralSize = self.myaxis[0]['Size']
# Second dimensional data
SecondDimensionBlockSize = self.myaxis[1]['BlockSize']
SecondDimensionSpectralSize = self.myaxis[1]['Size']
# The number of blocks needed for a spectral size is
# not necessary an integer number
NumberOfBlocksInSecondDimension = (
self._ceil(SecondDimensionSpectralSize /
float(SecondDimensionBlockSize)))
print FirstDimensionBlockSize, SecondDimensionBlockSize, Blocksize
exit()
#---------------------------------
# Rearrange the data from a list to an array
for i_FirstDimension in range(FirstDimensionSpectralSize):
# Print out info to follow the processing
if self.log and i_FirstDimension % 50 == 0:
print '{0:3.2f}%'.format(100.0 * i_FirstDimension
/ FirstDimensionSpectralSize),
#---------------------------------
BlockNumber = (i_FirstDimension / FirstDimensionBlockSize
* NumberOfBlocksInSecondDimension)
PositionWithinBlock = (i_FirstDimension
% FirstDimensionBlockSize
* SecondDimensionBlockSize)
# Concatenate the block portions in a list
SpectralInfo1D = []
#---------------------------------
# Go through all second dimension protion to get a line
for i_SecondDimension in range(NumberOfBlocksInSecondDimension):
# If this is the last Block in line then the dimension is
# not necessary the blocksize
if i_SecondDimension < NumberOfBlocksInSecondDimension:
SecondDimension = SecondDimensionBlockSize
else:
SecondDimension = (SecondDimensionSpectralSize
% SecondDimensionBlockSize)
#---------------------------------
# The actual position within the block; 1 float number = 4 bytes
pos = (4 * (Blocksize * (BlockNumber + i_SecondDimension)
+ PositionWithinBlock))
#---------------------------------
# Unpack the data. Note that the coding is big endian ">"
SpectralInfo1D += list(struct.unpack('>'+'f'*SecondDimension,
Filecontent[pos : pos + 4 * SecondDimension]))
#---------------------------------
# Add a line into the spectrum
self._Spectrum_.append(SpectralInfo1D)
self.myaxis[0]['Actual size'] = len(self._Spectrum_)
self.myaxis[1]['Actual size'] = len(self._Spectrum_[0])
return None
###########################
def _extract_3D_data(self, Filecontent, Blocksize):
"""
"""
# Third dimensional data
ThirdDimensionBlockSize = self.myaxis[0]['BlockSize']
ThirdDimensionSpectralSize = self.myaxis[0]['Size']
# Second dimensional data
SecondDimensionBlockSize = self.myaxis[1]['BlockSize']
SecondDimensionSpectralSize = self.myaxis[1]['Size']
# First dimensional data
FirstDimensionBlockSize = self.myaxis[2]['BlockSize']
FirstDimensionSpectralSize = self.myaxis[2]['Size']
#---------------------------------
# The number of blocks needed for a spectral size is not necessary an integer number
NumberOfBlocksInFirstDimension = self._ceil(FirstDimensionSpectralSize /float(FirstDimensionBlockSize ))
NumberOfBlocksInSecondDimension = self._ceil(SecondDimensionSpectralSize/float(SecondDimensionBlockSize))
#---------------------------------
# Rearrange the data from a list to an 3D array
for i_ThirdDimension in range(ThirdDimensionSpectralSize):
# Print out log information
if i_ThirdDimension % 10 == 0:
print '{0:3.2f}%'.format(100.0*i_ThirdDimension/ThirdDimensionSpectralSize),
#---------------------------------
BlockNumberDim3 = (i_ThirdDimension / ThirdDimensionBlockSize) * NumberOfBlocksInSecondDimension * NumberOfBlocksInFirstDimension
PositionWithinBlockDim3 = (i_ThirdDimension % ThirdDimensionBlockSize) * SecondDimensionBlockSize * FirstDimensionBlockSize
#---------------------------------
# Collect data of 2D in a variable
SpectralInfo2D = []
# Go through each block in 2D
#for i_SecondDimension in range(SecondDimensionBlockSize * NumberOfBlocksInSecondDimension):
for i_SecondDimension in range(SecondDimensionSpectralSize):
#
BlockNumberDim2 = BlockNumberDim3 + (i_SecondDimension / SecondDimensionBlockSize) * NumberOfBlocksInFirstDimension
PositionWithinBlockDim2 = PositionWithinBlockDim3 + (i_SecondDimension % SecondDimensionBlockSize) * FirstDimensionBlockSize
#---------------------------------
# Collect data of 1D in a variable
SpectralInfo1D = []
# Go through each block in 1D
for i_FirstDimension in range(NumberOfBlocksInFirstDimension):
# The last block size might be smaller than a blocksize
if i_FirstDimension < NumberOfBlocksInFirstDimension-1:
FirstDimension = FirstDimensionBlockSize
else:
FirstDimension = FirstDimensionSpectralSize % FirstDimensionBlockSize
#---------------------------------
# Position within block; 1 float number = 4 bytes
pos = 4 * (Blocksize * (BlockNumberDim2 + i_FirstDimension) + PositionWithinBlockDim2)
#---------------------------------
# Unpack the data. NOTE: big endian data storage ">"
SpectralInfo1D += list(struct.unpack('>'+'f'*FirstDimension,Filecontent[pos: pos + 4*FirstDimension]))
#---------------------------------
# Put each 1D slice into the 2D
SpectralInfo2D.append(SpectralInfo1D)
#---------------------------------
# Store a 2D slice into the final array
self._Spectrum_.append(SpectralInfo2D)
self.myaxis[0]['Actual size'] = len(self._Spectrum_)
self.myaxis[1]['Actual size'] = len(self._Spectrum_[0])
self.myaxis[2]['Actual size'] = len(self._Spectrum_[0][0])
return None
###########################
def DataIntensity(self, position):
if len(position) == 3:
intensity = (self._Spectrum_[position[0] % self.myaxis[0]['Actual size']]
[position[1] % self.myaxis[1]['Actual size']]
[position[2] % self.myaxis[2]['Actual size']])
else:
intensity = (self._Spectrum_[position[0] % self.myaxis[0]['Actual size']]
[position[1] % self.myaxis[1]['Actual size']])
return intensity
###########################
def distance(self, pos1, pos2):
distance_value = 0.0
for (p1, p2) in (pos1, pos2):
distance_value += (p1 - p2)**2
return math.sqrt(distance_value)
###########################
def read_peaklist(self, PeaklistFilename, Info='[0:-1]', shifts = [0.0, 0.0, 0.0]):
"""
Reads a sparky peaklist file
"""
try:
pfile = open(PeaklistFilename, 'r')
except IOError:
print 'Error opening ' + PeaklistFilename + '!!! Please check it!'
exit()
lines = pfile.readlines()
pfile.close()
for line in lines:
if (len(line) > 12) and (not 'Assig' in line):
data = line.split()
key = data[0]
self._Peaklistkeysorder_.append(key)
self._Peaklist_[key] = {}
order = key.split('-')
if self.NumberOfAxis == 2:
self._Peaklist_[key]['Info'] = eval('order[0]' + Info)
self._Peaklist_[key][order[-1][-1]] = float(data[2]) + shifts[1]
self._Peaklist_[key][order[-2][-1]] = float(data[1]) + shifts[0]
self._Peaklist_[key]['Adjusted'] = 'red'
#
if not self._Peaklistchemicalshiftorder_:
self._Peaklistchemicalshiftorder_.append(order[-2][-1])
self._Peaklistchemicalshiftorder_.append(order[-1][-1])
else:
self._Peaklist_[key]['Info'] = eval('order[0]'+Info)
self._Peaklist_[key][order[-1][-1]] = float(data[3]) + shifts[2]
self._Peaklist_[key][order[-2][-1]] = float(data[2]) + shifts[1]
self._Peaklist_[key][order[-3][-1]] = float(data[1]) + shifts[0]
self._Peaklist_[key]['Adjusted'] = 'red'
#
if not self._Peaklistchemicalshiftorder_:
self._Peaklistchemicalshiftorder_.append(order[-3][-1])
self._Peaklistchemicalshiftorder_.append(order[-2][-1])
self._Peaklistchemicalshiftorder_.append(order[-1][-1])
return None
###########################
def save_peaklist(self, filename):
pfile = open(filename, 'w')
for peak in self._Peaklistkeysorder_:
line = peak
for axis in self._Peaklistchemicalshiftorder_:
line = ' '.join([line, str(self._Peaklist_[peak][axis])])
line = ' '.join([line, str(self._Peaklist_[peak]['Intensity'])])
line = ' '.join([line, '{0:5.2f}'.format(self._Peaklist_[peak]['Intensity']/self.Noiselevel)])
line = ' '.join([line, '\n'])
pfile.write(line)
pfile.close()
return None
###########################
def extremes_finder(self, position, dimension, find_max = True):
"""
find positive and negative extremes on the spectrum
Parameters:
===========
* position = spectrum staring position for the peak finding,
order must be same as in the spectrum
* dimension = find local maximum or minimum in 2D or 3D
* find_max = maximum or minimum finding
Return:
=======
* local extreme
"""
checklist = [[-1, 0, 0],[+1, 0, 0], # x
[ 0,-1, 0],[ 0,+1, 0], # y
[-1,-1, 0],[+1,-1, 0], # xy
[-1,+1, 0],[+1,+1, 0], # xy
[ 0, 0,-1],[ 0, 0,+1], # z
[-1, 0,-1],[+1, 0,-1], # xz
[-1, 0,+1],[+1, 0,+1], # xz
[ 0,-1,-1],[ 0,-1,-1], # yz
[ 0,+1,+1],[ 0,+1,+1]] # yz
# If the dimension 2D, we find check the value in x,y otherwise in x,y,z
if dimension == 2:
checklist_size = 4
else:
checklist_size = len(checklist)
# minimum or maximum finder
finder_type = [['min','<'],['max','>']][find_max]
# It goes till it finds a local maximum
not_on_an_extreme_value = True
while not_on_an_extreme_value:
# check all values according to the checklist
checked_values = []
for check in checklist[0 : checklist_size]:
checked_values.append(self.DataIntensity([pos + ch for (pos, ch) in zip(position[0 : dimension], check[0 : dimension])]))
# if the position data is the most extreme, than we are done
most_extreme_in_array = eval(eval('finder_type[0]') + '(checked_values)')
if eval('self.DataIntensity(position)' + eval('finder_type[1]') + 'most_extreme_in_array'):
not_on_an_extreme_value = False
else:
# modifiy the position to the highest
checked_values_max_index = checked_values.index(most_extreme_in_array)
for i in range(dimension):
position[i] += checklist[checked_values_max_index][i]
position[i] %= self.myaxis[i]['Actual size']
return position
###########################
def ClimbUpTheHill3D(self,ResidueKey, Modify = False, delta = [0.0,0.0,0.0]):
if ResidueKey in self._Peaklistkeysorder_:
#
p = []
original = []
for i in range(3):
p.append(int(round(delta[i])) + self._FidNumberbyAxis(self._Peaklist_[ResidueKey][self._Peaklistchemicalshiftorder_[i]],self._AxisOrder_.index(self._Peaklistchemicalshiftorder_[i])))
original.append(int(round(delta[i])) + self._FidNumberbyAxis(self._Peaklist_[ResidueKey][self._Peaklistchemicalshiftorder_[i]],self._AxisOrder_.index(self._Peaklistchemicalshiftorder_[i])))
checklist = [[-1, 0, 0],[+1, 0, 0], # x
[ 0,-1, 0],[ 0,+1, 0], # y
[ 0, 0,-1],[ 0, 0,+1], # z
[-1,-1, 0],[+1,-1, 0], # xy
[-1,+1, 0],[+1,+1, 0], # xy
[-1, 0,-1],[+1, 0,-1], # xz
[-1, 0,+1],[+1, 0,+1], # xz
[ 0,-1,-1],[ 0,-1,-1], # yz
[ 0,+1,+1],[ 0,+1,+1]] # yz
Iteration = True
while Iteration:
tomb = []
for ch in checklist:
tomb.append(self.DataIntensity([p[0] + ch[0],p[1] + ch[1],p[2] + ch[2]]))
if self.DataIntensity(p) >= max(tomb):
Iteration = False
else:
ti = tomb.index(max(tomb))
for i in range(3):
p[i] = (p[i] + checklist[ti][i]) % self.myaxis[i]['Size']
if ResidueKey == 'T680_N-C-H':
print 'PPM:',self._PPMNumberbyAxis(p[2],2)
if Modify:
for i in range(3):
self._Peaklist_[ResidueKey][self._Peaklistchemicalshiftorder_[i]] = self._PPMNumberbyAxis(p[i],self._AxisOrder_.index(self._Peaklistchemicalshiftorder_[i]))
return p,original
###########################
def AdjustAllPeakPositions3D(self):
numberofpeaks = 0
diff = [0.0, 0.0, 0.0]
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
a,b = self.ClimbUpTheHill3D(key)
numberofpeaks += 1
for i in range(3):
diff[i] += (a[i]-b[i])
for i in range(3):
diff[i] /= float(numberofpeaks)
print diff
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
a,b = self.ClimbUpTheHill3D(key, Modify=True, delta= diff)
return None
###########################
def adjust_peaklist_2d(self):
numberofpeaks = 0
diff = [0.0, 0.0, 0.0]
peaks = {}
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
position = [self._FidNumberbyAxis(self._Peaklist_[key]['N'],'N'),
self._FidNumberbyAxis(self._Peaklist_[key]['H'],'H')]
peaks[key] = {}
peaks[key]['original'] = []
peaks[key]['firsthit'] = []
peaks[key]['secondhit'] = []
#
for pos in position:
peaks[key]['original'].append(pos)
#
peaks[key]['firsthit'] = self.extremes_finder(position, 2)
numberofpeaks += 1
for i in range(len(position)):
diff[i] += (peaks[key]['firsthit'][i] - peaks[key]['original'][i])
for i in range(len(diff)):
diff[i] /= numberofpeaks
diff[i] = round(diff[i])
#
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
position = []
for i,pos in enumerate(peaks[key]['original']):
position.append(int(pos + diff[i]))
peaks[key]['secondhit'] = self.extremes_finder(position, 2)
#
for i in range(len(self._Peaklistkeysorder_)):
key = self._Peaklistkeysorder_[i]
if not (key in self._PeaklistDoNotCare_):
multiple = []
j = 0
while j < len(self._Peaklistkeysorder_):
key2 = self._Peaklistkeysorder_[j]
if (peaks[key]['secondhit'] == peaks[key2]['secondhit']) and (i != j):
multiple.append(j)
j += 1
if not multiple:
# Unique peak found
peaks[key]['final'] = peaks[key]['secondhit']
peaks[key]['fit'] = 'black'
else:
# Move the peak which is the closest
closest = True
for j in multiple:
key2 = self._Peaklistkeysorder_[j]
if (self.distance(peaks[key]['original'], peaks[key]['secondhit']) >=
self.distance(peaks[key2]['original'], peaks[key2]['secondhit'])):
closest = False
# if this peak is the most likely
if closest:
peaks[key]['final'] = peaks[key]['secondhit']
peaks[key]['fit'] = 'black'
else:
# If other peaks are closer, than just move to the average
peaks[key]['final'] = []
for (i, o) in enumerate(peaks[key]['original']):
peaks[key]['final'].append(int(o + diff[i]))
peaks[key]['fit'] = 'red'
# print key, peaks[key]['original'], peaks[key]['firsthit'], peaks[key]['secondhit'],multiple, peaks[key]['final']
for key in self._Peaklistkeysorder_:
if not (key in self._PeaklistDoNotCare_):
self._Peaklist_[key]['N'] = self._PPMNumberbyAxis(peaks[key]['final'][0],'N')
self._Peaklist_[key]['H'] = self._PPMNumberbyAxis(peaks[key]['final'][1],'H')
self._Peaklist_[key]['Adjusted'] = peaks[key]['fit']
self._Peaklist_[key]['Intensity'] = self.DataIntensity(peaks[key]['final'])
# TODO Fit the tip?
return None
###########################
def find_peak_1d(self, data, noiselevel):
hits = []
direction = True
for i in range(len(data)-1):
if data[i] > data[i+1] and data[i] > noiselevel and direction:
hits.append(i)
direction = False
if data[i] < data[i+1]:
direction = True
return hits
###########################
def find_peak_2d(self, data2d, noiselevel):
hits = {}
for i, data1d in enumerate(data2d):
hit1d = self.find_peak_1d(data1d, noiselevel)
for hit in hit1d:
hits[' '.join(str(d) for d in self.extremes_finder([i, hit], 2))] = 0
peaks = []
for hit in hits:
peaks.append(hit.split())
return peaks
###########################
def peak_finder(self, times_noiselevel):
print 'Finding peaks...',
peaklist = {}
for i,peak in enumerate(self.find_peak_2d(self._Spectrum_,self.Noiselevel*times_noiselevel)):
peaklist[i] = {}
peaklist[i]['Info'] = str(i+1)
peaklist[i]['N'] = self._PPMNumberbyAxis(float(peak[0]),'N')
peaklist[i]['H'] = self._PPMNumberbyAxis(float(peak[1]),'H')
peaklist[i]['Adjusted'] = 'black'
print str(i + 1) + ' peaks found!'
return peaklist
###########################
def Plot1D(self, chemicalshift):
dim = self._ppm2fid(chemicalshift,self.myaxis[0]['Spectrometer frequency'],self.myaxis[0]['xmtr frequency'],self.myaxis[0]['Spectral width'],self.myaxis[0]['Number of Points'])
data = self._Spectrum_[dim]
plt.figure()
plt.plot(data)
plt.show()
return None
###########################
def Plot1Dfid(self, fid):
data = self._Spectrum_[fid]
plt.figure()
plt.plot(data)
plt.show()
return None
###########################
def PPM_to_index(self,ppm,axisnumber):
index = 0
while (index < self.myaxis[axisnumber]['Number of Points']) and (self.myaxis[axisnumber]['Scale'][index] > ppm):
index += 1
return index
###########################
def Limits_to_index(self, limits, axisnumber):
if not limits:
index_min = 0
index_max = self.myaxis[axisnumber]['Number of Points']-1
else:
index_min = self.PPM_to_index(max(limits), axisnumber)
index_max = self.PPM_to_index(min(limits), axisnumber)
if index_max > self.myaxis[axisnumber]['Actual size']:
index_max = self.myaxis[axisnumber]['Actual size']
return index_min, index_max
###########################
def spectrum_2d_slice(self, x_axis_min_index, x_axis_max_index,y_axis_min_index, y_axis_max_index, orderXY):
highestvalue = 0.0
lowestvalue = 0.0
spectrum = []
#---------------------------------
# 2D
if self.NumberOfAxis == 2:
for y in range(y_axis_min_index, y_axis_max_index, 1):
fid = []
for x in range(x_axis_min_index, x_axis_max_index, 1):
if orderXY[0] == 'H':
value = self._Spectrum_[y][x]
else:
value = self._Spectrum_[x][y]
fid.append(value)
if highestvalue < value:
highestvalue = value
if lowestvalue > value:
lowestvalue = value
spectrum.append(fid)
return highestvalue, lowestvalue, spectrum
###########################
def Plot_peaklist(self, Peaklist, x_min, x_max, y_min, y_max, orderXY):
print 'Peaks on the plot:'
number = 0
for k in Peaklist:
loc_x = Peaklist[k][orderXY[-2]]
loc_y = Peaklist[k][orderXY[-1]]
if ((x_min < loc_x) and (loc_x < x_max) and
(y_min < loc_y) and (loc_y < y_max)):
# TODO make is adjustable
peak_info_pos_x = 0.0
peak_info_pos_y = 0.0
# plt.text(loc_x + peak_info_pos_x, loc_y + peak_info_pos_y, Peaklist[k]['Info'])
number += 1
print '{0:3d}.'.format(number),Peaklist[k]['Info'], loc_y, loc_x,
if Peaklist[k]['Adjusted'] == 'black':
print 'ok'
else:
print ''
# TODO Make the dx,dy to be adjustable
dx = 0.05
dy = 0.2
plt.gca().annotate(Peaklist[k]['Info'],
xy=(loc_x,loc_y),
color = Peaklist[k]['Adjusted'],
xytext=(loc_x,loc_y - dy),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3",
facecolor = Peaklist[k]['Adjusted']))
#
# plt.plot([loc_x , loc_x + dx],[loc_y , loc_y + dy], 'k-')
# plt.plot([loc_x , loc_x + dx],[loc_y , loc_y - dy], 'k-')
return None
###########################
def Plot(self, limits, orderXY='HN', color = [0, 0, 0], nf = True, peaklist = None):
#
axis_x = self._nucleustype2axisindex(orderXY[0])
axis_y = self._nucleustype2axisindex(orderXY[1])
# Figure out the limits
x_axis_min_index, x_axis_max_index = self.Limits_to_index(limits[0],axis_x)
y_axis_min_index, y_axis_max_index = self.Limits_to_index(limits[1],axis_y)
x_scale = self.myaxis[axis_x]['Scale'][x_axis_min_index : x_axis_max_index]
y_scale = self.myaxis[axis_y]['Scale'][y_axis_min_index : y_axis_max_index]
# 2D
if self.NumberOfAxis == 2:
highestvalue, lowestvalue, spectrum = self.spectrum_2d_slice(x_axis_min_index, x_axis_max_index, y_axis_min_index, y_axis_max_index, orderXY)
#---------------------------------
mc = zcolor.MyColor()
contour_start = self.Noiselevel
contour_number = 25
contour_factor = math.exp(math.log((highestvalue) /float(contour_start)) * 1.0/(float(contour_number)))
contourlevels = [contour_start*contour_factor**i for i in range(contour_number)]
contourcolors = [mc.series(i,contour_number,0,300) for i in range(contour_number)]
print '#############################################'
print '### P L O T # P A R A M E T E R S ###'
print '#############################################'
print 'Noise level =', contour_start
print 'Factor =', contour_factor
print 'Highest value =', highestvalue
print 'Lowest value =', lowestvalue
print '#############################################'
if nf:
plt.figure()
plt.contour(x_scale, y_scale, spectrum, contourlevels, colors = contourcolors)
# plot negatives if needed!
plt.contour(x_scale, y_scale, spectrum, [-1*i for i in contourlevels], colors = [[0.0,0.0,0.0] for i in range(contour_number)])
if nf:
plt.xlabel(self.myaxis[axis_x]['Nucleus']+' (ppm)',size=15)
plt.ylabel(self.myaxis[axis_y]['Nucleus']+' (ppm)',size=15)
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# If peak labels are needed
if self._Peaklist_ or peaklist:
if not peaklist:
self.Plot_peaklist(self._Peaklist_, x_scale[-1], x_scale[0], y_scale[-1], y_scale[0], orderXY)
else:
self.Plot_peaklist(peaklist, x_scale[-1], x_scale[0], y_scale[-1], y_scale[0], orderXY)
# plt.show()
return None
###########################
def Plot_ori(self, limits, orderXY='HN', color = [0, 0, 0], Negatives = False, Peaklist=True, negcolors = 'o', ContourNumber = 15, Factor = 0.0, Noiselevel = 0, linewidth = 1.0, newfigure = True, figuresize=(8,5), figdpi=72, textsize=15):
"""
Parameters:
* limits = an array of arrays with the PPM value limits, empty array means the whole spectral width
* color = one color value in [r,g,b] format eg. [1.0,0.0,0.0]
= array of color values (number must be the same as ContourNumber) eg. [[0.1,0.0,0.0],[0.2,0.0,0.0],...]
= built-in color eg. 'blue-cyan'
= built-in color + lighting info eg. ['g',0.5]
* ContourNumber = Number of contours on the figure
* Factor = factor between each contour level, provide 0.0 to calculate the value
* Noiselevel = If 0 is provided noise level is calculated from the sepctrum
* linewidth = contour line width, increase it when the zoom is high eg. 1.5
* newfigure = Boolean depending on the overlay plot option
* figuresize = figuresize in inch
* figdpi = dpi value, use 72 for screen, 300 for prints
* textsize = label size in pt eg. 12
Examples:
* Plot2D([[],[]],color = 'rainbow1')
* Plot2D([[110,125],[7.2,9.5]],color = ['green',0.5], ContourNumber = 20, Factor = 1.2, Noiselevel = 100000, linewidth = 1.5, NumberOfThicksXY=[3,8], newfigure=False, figuresize=(5,5), figdpi=300, textsize=18)
"""
ShowPeakLabelWithinPPM = [0.15,0.15,0.05] #NCH
ShiftLabel = [0.0,0.0,0.0]
#ShiftLabel = [0.05,0.05,0.02]
CrossSize = [0.05,0.05,0.01]
Nucleuses = ['N','C','H']
#---------------------------------
axisorder = []
for ch in orderXY.upper():
o = 0
while (o < self.NumberOfAxis) and self.myaxis[o]['Nucleus'][-1] != ch:
o += 1
if o < self.NumberOfAxis:
axisorder.append(o)
else:
print 'Please check the axes: ',orderXY
exit()
#---------------------------------
# Check the limits to be within the spectrum range
originallimits = limits
lim = []
for i in range(2):
lim.append(self._AxisLimitCheck(axisorder[i],limits[i]))
limits = lim
if len(originallimits) == 3:
limits.append(originallimits[2])
#---------------------------------
areamin = []
areamax = []
for i in range(2):
areamax.append(self._ppm2fid(min(limits[i]),self.myaxis[axisorder[i]]['Spectrometer frequency'],self.myaxis[axisorder[i]]['xmtr frequency'],self.myaxis[axisorder[i]]['Spectral width'],self.myaxis[axisorder[i]]['Number of Points']))
areamin.append(self._ppm2fid(max(limits[i]),self.myaxis[axisorder[i]]['Spectrometer frequency'],self.myaxis[axisorder[i]]['xmtr frequency'],self.myaxis[axisorder[i]]['Spectral width'],self.myaxis[axisorder[i]]['Number of Points']))
#exit()
# Get axis chemical shifts
xscale = []
for i in range(areamin[0],areamax[0]+1,1):
xscale.append(self.myaxis[axisorder[0]]['Scale'][len(self.myaxis[axisorder[0]]['Scale'])-i-1])
# print xscale[0],xscale[-1]
# exit()
yscale = []
for i in range(areamin[1],areamax[1]+1,1):
yscale.append(self.myaxis[axisorder[1]]['Scale'][len(self.myaxis[axisorder[1]]['Scale'])-i-1])
print 'limits = ',areamin[0],areamax[0]
#---------------------------------
# Get the spectral information to plot
highestvalue = 0.0
area = []
#---------------------------------
# 2D
if self.NumberOfAxis == 2:
# Proton is on x
if orderXY[0] == 'H':
#
for y in range(areamin[1],areamax[1]+1,1):
area.append(self._Spectrum_[y][areamin[0]:areamax[0]+1])
#
if max(self._Spectrum_[y][areamin[0]:areamax[0]+1]) > highestvalue:
highestvalue = max(self._Spectrum_[y][areamin[0]:areamax[0]+1])
# Proton is on y
else:
for y in range(areamin[1],areamax[1]+1,1):
data = []
for x in range(areamin[0],areamax[0]+1,1):
value = self._Spectrum_[x][y]
data.append(value)
if value > highestvalue:
highestvalue = value
area.append(data)
#---------------------------------
# 3D
if self.NumberOfAxis == 3:
# Calculate the third dimension fid number
zfid = self._ppm2fid(limits[2][0],self.myaxis[axisorder[2]]['Spectrometer frequency'],self.myaxis[axisorder[2]]['xmtr frequency'],self.myaxis[axisorder[2]]['Spectral width'],self.myaxis[axisorder[2]]['Number of Points'])
# Extract the 2D from the 3D
for y in range(areamin[1],areamax[1]+1,1):
data = []
for x in range(areamin[0],areamax[0]+1,1):
if orderXY[0:2] == 'HN':
value = self._Spectrum_[y][zfid][x]
elif orderXY[0:2] == 'HC':
value = self._Spectrum_[zfid][y][x]
elif orderXY[0:2] == 'NH':
value = self._Spectrum_[x][zfid][y]
elif orderXY[0:2] == 'NC':
value = self._Spectrum_[x][y][zfid]
elif orderXY[0:2] == 'CH':
value = self._Spectrum_[zfid][x][y]
elif orderXY[0:2] == 'CN':
value = self._Spectrum_[y][x][zfid]
else:
value = 0.0
# Store the value
data.append(value)
# Check whether it is the highest
if value > highestvalue:
highestvalue = value
area.append(data)
#---------------------------------
# If the user did not set up a noise level, use the calculated one
if Noiselevel == 0:
contour_start = self.Noiselevel
else:
contour_start = Noiselevel
contour_number = ContourNumber
#---------------------------------
# If the user do not provide factor information
if Factor == 0.0:
# Calculcate based on the noise level and the highest peak height
try:
contour_factor = math.exp(math.log((highestvalue) /float(contour_start)) * 1.0/(float(contour_number)))
except ValueError:
contour_factor = 0.0
# if the user provided the factor information
else:
contour_factor = Factor
#---------------------------------
# Set the contour levels
contourlevels = [contour_start*contour_factor**i for i in range(contour_number)]
#---------------------------------
# If the user provided a color
contourcolors = self._ColorChoise(color,contour_number)
if Negatives:
# Colors
negcontourcolors = self._ColorChoise(negcolors,contour_number)
# Levels
negcontourlevels = []
for level in contourlevels:
negcontourlevels.append(-1.0*level)
#---------------------------------
print '---------------'
print self.myaxis[axisorder[0]]['Nucleus']+':',min(limits[0]),'-',max(limits[0])
print self.myaxis[axisorder[1]]['Nucleus']+':',min(limits[1]),'-',max(limits[1])
if self.NumberOfAxis == 3:
print self.myaxis[axisorder[2]]['Nucleus']+':',limits[2][0]
print 'Noise level =', contour_start
print 'Factor =', contour_factor
print 'Highest value =', highestvalue
print '---------------'
#---------------------------------
# To be able to plot several figure on each other, the new figure is an option
if newfigure:
plt.figure(figsize=figuresize,dpi=figdpi)
#---------------------------------
# Generate the plot
plt.contour(xscale,yscale,area,contourlevels,colors = contourcolors,linewidths = linewidth)
if Negatives:
plt.contour(xscale,yscale,area,negcontourlevels,colors = negcontourcolors,linewidths = linewidth)
#---------------------------------
# Invert the axes direction
if newfigure:
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
#---------------------------------
# Put on axis label
plt.xlabel(self.myaxis[axisorder[0]]['Nucleus']+' (ppm)',size=textsize)
plt.ylabel(self.myaxis[axisorder[1]]['Nucleus']+' (ppm)',size=textsize)
if self.NumberOfAxis == 3:
plt.title(self.myaxis[axisorder[2]]['Nucleus']+': {0:6.3f} ppm'.format(limits[2][0]),size=textsize)
#---------------------------------
# If peak labels are needed
if Peaklist and (self._Peaklist_ != {}):
print 'Peaks on the plot:'
for k in self._Peaklistkeysorder_:
ItIsOn = True
p = []
for i in range(self.NumberOfAxis):
p.append(self._Peaklist_[k][self.myaxis[axisorder[i]]['Nucleus'][-1]])
i = 0
while (i < 2 ) and ItIsOn:
if (areamin[i] > p[i]) or (p[i] > areamax[i]):
ItIsOn = False
i += 1
if self.NumberOfAxis == 3:
if abs(p[2] - limits[2][0]) > ShowPeakLabelWithinPPM[axisorder[2]]:
ItIsOn = False
if ItIsOn:
print self._Peaklist_[k]['Info'],p[0],p[1],self._Peaklist_[k][Nucleuses[axisorder[2]]]
plt.text(p[0]-ShiftLabel[axisorder[0]],p[1]-ShiftLabel[axisorder[1]],self._Peaklist_[k]['Info'],size=textsize)
# Put on the crosspeak
dx = CrossSize[axisorder[0]]
dy = CrossSize[axisorder[1]]
#
plt.plot([p[0]-dx,p[0]+dx],[p[1]-dy,p[1]+dy],'k-')
plt.plot([p[0]-dx,p[0]+dx],[p[1]+dy,p[1]-dy],'k-')
#
return None
###########################
def Show(self,FileName = ''):
if FileName == '':
plt.show()
else:
plt.savefig(FileName)
return None
###########################
def _AxisTicks(self,limits,number,PPMscale = True):
# Calculate the step size
step = abs(limits[0]-limits[1])/float(number-1)
# Store the scales in data
data = []
for i in range(number):
# if it is a ppm scale, then the values go down
if PPMscale:
value = max(limits)-i*step
# if it is point scale then it goes up
else:
value = i*step
#---------------------------------
# if the value is extreme, then let 3 digits
if int(value*1000) != value*1000:
value = '{0:6.3f}'.format(value)
data.append(value)
return data
###########################
def _AxisLimitCheck(self,Axisnumber,limits):
# If there is no data provided, use the full spectrum
if limits == []:
limits = [-9.99E-99,+9.99E+99]
# Store the data
newlimits = []
# Spectrum information
ppmlimit = self.PPM_limit[Axisnumber]
# Lower limit
if min(ppmlimit) > min(limits):
newlimits.append(self.myaxis[Axisnumber]['Scale'][1])
else:
newlimits.append(min(limits))
# Upper limit
if max(ppmlimit) < max(limits):
newlimits.append(max(ppmlimit))
else:
newlimits.append(max(limits))
return newlimits
###########################
def _ppm2fid(self, ppm, Frequency, MiddlePPM, SpectralWidth, NumberOfPoints):
return int((NumberOfPoints/2 - ((ppm-MiddlePPM) * Frequency * NumberOfPoints) / SpectralWidth) % NumberOfPoints)
###########################
def _fid2ppm(self, fid, Frequency, MiddlePPM, SpectralWidth, NumberOfPoints):
return MiddlePPM + (NumberOfPoints*SpectralWidth - 2*fid*SpectralWidth) / (2.0*Frequency*NumberOfPoints)
###########################
def _nucleustype2axisindex(self, nucleus):
axis = 0
while (axis < self.NumberOfAxis) and (self.myaxis[axis]['Nucleus'][-1] != nucleus):
axis += 1
return axis
###########################
def _axisindex2nucleustype(self, axisindex):
return self.myaxis[axisindex]['Nucleus'][-1]
###########################
def _FidNumberbyAxis(self, ppm, Axis):
if type(Axis) == type(''):
Axis = self._nucleustype2axisindex(Axis)
return self._ppm2fid(ppm,
self.myaxis[Axis]['Spectrometer frequency'],
self.myaxis[Axis]['xmtr frequency'],
self.myaxis[Axis]['Spectral width'],
self.myaxis[Axis]['Number of Points'])
###########################
def _PPMNumberbyAxis(self, fid, Axis):
if type(Axis) == type(''):
Axis = self._nucleustype2axisindex(Axis)
return self._fid2ppm(fid,
self.myaxis[Axis]['Spectrometer frequency'],
self.myaxis[Axis]['xmtr frequency'],
self.myaxis[Axis]['Spectral width'],
self.myaxis[Axis]['Number of Points'])
###########################
def _ceil(self, number):
if number - int(number) != 0:
number = int(number) + 1
return int(number)
###########################
def CalculateNoiseLevel(self,NumberOfDataPoints = 10000):
Noise = 0.0
# calculate the average level on a small subset of data
average = 0.0
for i in range(100):
# 2D
if self.NumberOfAxis == 2:
average += abs(self._Spectrum_[random.randint(0,self.myaxis[0]['Number of Points']-1)][random.randint(0,self.myaxis[1]['Number of Points']-150)])
# 3D
if self.NumberOfAxis == 3:
average += abs(self._Spectrum_[random.randint(0,self.myaxis[0]['Number of Points']-1)][random.randint(0,self.myaxis[1]['Number of Points']-1)][random.randint(0,self.myaxis[2]['Number of Points']-1)])
average /= 100.0
# Calculate the actual noise level
numberofdata = 0
sumofdata = 0.0
highestvalue = 0.0
i = 0
while (i <= NumberOfDataPoints*2) and (numberofdata <= NumberOfDataPoints):
# 2D
if self.NumberOfAxis == 2:
value = abs(self._Spectrum_[random.randint(0,self.myaxis[0]['Number of Points']-1)][random.randint(0,self.myaxis[1]['Number of Points']-150)])
# 3D
if self.NumberOfAxis == 3:
value = abs(self._Spectrum_[random.randint(0,self.myaxis[0]['Number of Points']-1)][random.randint(0,self.myaxis[1]['Number of Points']-1)][random.randint(0,self.myaxis[2]['Number of Points']-1)])
# Only count a value if that is not far from the average (= not a peak)
if value < average * 5:
numberofdata += 1
sumofdata += value
average = sumofdata / float(numberofdata)
if value > highestvalue:
highestvalue = value
i += 1
# Cut back from the highest to have a bit of noise
Noise = highestvalue / 1.2
# Assign the self.Noise to this value
self.Noiselevel = Noise
# Return the value as well
return Noise
###########################
def _ColorChoise(self,color,contour_number):
if (type(color) == type([])) and (len(color) == 3):
contourcolors = [color for _ in range(contour_number)]
# if the user provided all the colors
elif (type(color) == type([])) and (len(color) == contour_number):
contourcolors = color
# if the color is selected and light information is provided as well
elif (type(color) == type([])) and (len(color) == 2):
light = color[1]
if (0.0 < light) or (light < 1.0):
light = 1.0
contourcolors = self.ColorSchemer(contour_number,color[0],light)
# if there is no color information or built in colors has been selected
else:
contourcolors = self.ColorSchemer(contour_number,color)
return contourcolors
###########################
def ColorSchemer(self, Number, color, light = 1.0):
data = []
step = 1 / float(Number-1)
for i in range(Number):
element = [0.0,0.0,0.0]
if (color == 'r') or (color == 'red'):
element = [1.0,0.0,0.0]
if (color == 'g') or (color == 'green'):
element = [0.0,1.0,0.0]
if (color == 'b') or (color == 'blue'):
element = [0.0,0.0,1.0]
#---------------------------------
if (color == 'c') or (color == 'cyan'):
element = [0.0,1.0,1.0]
if (color == 'y') or (color == 'yellow'):
element = [1.0,1.0,0.0]
if (color == 'p') or (color == 'purple'):
element = [1.0,0.0,1.0]
#---------------------------------
if (color == 'm') or (color == 'magenta'):
element = [1.0,0.0,0.5]
if (color == 'pi') or (color == 'pink'):
element = [1.0,0.5,0.5]
if (color == 'o') or (color == 'orange'):
element = [1.0,0.5,0.0]
#---------------------------------
if (color == 'g1') or (color == 'grey1'):
element = [0.1 for _ in range(3)]
if (color == 'g2') or (color == 'grey2'):
element = [0.2 for _ in range(3)]
if (color == 'g3') or (color == 'grey3'):
element = [0.3 for _ in range(3)]
if (color == 'g4') or (color == 'grey4'):
element = [0.4 for _ in range(3)]
if (color == 'g5') or (color == 'grey5'):
element = [0.5 for _ in range(3)]
if (color == 'g6') or (color == 'grey6'):
element = [0.6 for _ in range(3)]
if (color == 'g7') or (color == 'grey7'):
element = [0.7 for _ in range(3)]
if (color == 'g8') or (color == 'grey8'):
element = [0.8 for _ in range(3)]
if (color == 'g9') or (color == 'grey9'):
element = [0.9 for _ in range(3)]
#---------------------------------
if (color == 'w') or (color == 'white'):
element = [1.0, 1.0, 1.0]
#---------------------------------
if (color == 'kr') or (color == 'black-red'):
element = [0.0 + i * step, 0.0, 0.0]
if (color == 'kg') or (color == 'black-green'):
element = [0.0, 0.0 + i * step, 0.0]
if (color == 'kb') or (color == 'black-blue'):
element = [0.0, 0.0, 0.0 + i * step]
#---------------------------------
if (color == 'kc') or (color == 'black-cyan'):
element = [0.0, 0.0 + i * step, 0.0 + i * step]
if (color == 'ky') or (color == 'black-yellow'):
element = [0.0 + i * step, 0.0 + i * step, 0.0]
if (color == 'kp') or (color == 'black-purple'):
element = [0.0 + i * step, 0.0, 0.0 + i * step]
#---------------------------------
if (color == 'km') or (color == 'black-magenta'):
element = [0.0 + i * step, 0.0, 0.0 + (i / 2.0) * step]
if (color == 'kpi') or (color == 'black-pink'):
element = [0.0 + i * step, 0.0 + (i / 2.0) * step, 0.0 + (i / 2.0) * step]
if (color == 'ko') or (color == 'black-orange'):
element = [0.0 + i * step, 0.0 +(i / 2.0) * step, 0.0]
#---------------------------------
if (color == 'kw') or (color == 'black-white'):
element = [0.0 + i * step, 0.0 + i * step, 0.0 + i * step]
#---------------------------------
if (color == 'rr') or (color == 'red-ring'):
if i % 5 != 0:
element = [1.0, 0.0, 0.0]
else:
element = [0.0, 0.0, 0.0]
if (color == 'gr') or (color == 'green-ring'):
if i % 5 != 0:
element = [0.0, 1.0, 0.0]
else:
element = [0.0, 0.0, 0.0]
if (color == 'br') or (color == 'blue-ring'):
if i % 5 != 0:
element = [0.0, 0.0, 1.0]
else:
element = [0.0, 0.0, 0.0]
#---------------------------------
if (color == 'red-yellow') or (color == 'rainbow1'):
element = [1.0, 0.0 + i * step, 0.0]
#---------------------------------
if (color == 'blue-cyan') or (color == 'rainbow2'):
element = [0.0, 0.0 + i * step, 1.0]
#---------------------------------
if (color == 'green-red') or (color == 'rainbow3'):
element = [0.0 + i * step, 0.5 - (i / 2.0) * step, 0.0]
#---------------------------------
if type(light) != type(1.0):
light = 1.0
element = [element[c] * light for c in range(3)]
#---------------------------------
data.append(element)
return data
###########################
def _getNumberOfAxis(self):
return len(self.myaxis.keys())
###########################
def _getAxisInfo(self, field):
info = []
for axisnumber in range(self.NumberOfAxis):
info.append(self.myaxis[axisnumber][field])
return info
###########################
def _getNucleus(self):
return self._getAxisInfo('Nucleus')
###########################
def _getFrequency(self):
return self._getAxisInfo('Spectrometer frequency')
###########################
def _getSpectralwidth(self):
return self._getAxisInfo('Spectral width')
###########################
def _getxmtrfreqency(self):
return self._getAxisInfo('xmtr frequency')
###########################
def _getscales(self):
return self._getAxisInfo('Scale')
###########################
def _getnumberofpoints(self):
return self._getAxisInfo('Number of Points')
###########################
def _getlimit(self):
info = []
for axisnumber in range(self.NumberOfAxis):
info.append([self.myaxis[axisnumber]['Scale'][0],self.myaxis[axisnumber]['Scale'][-1]])
return info
###########################
NumberOfAxis = property(_getNumberOfAxis)
Nucleus = property(_getNucleus)
Frequency = property(_getFrequency)
SpectralWidth = property(_getSpectralwidth)
MiddlePPM = property(_getxmtrfreqency)
Scale = property(_getscales)
NumberOfPoints = property(_getnumberofpoints)
PPM_limit = property(_getlimit)
#########################################
myspectrum= ZB_spectrum('13030_tcs_e.fid_1.ucsf')
print myspectrum.noise_level
# 1D proton plot
myspectrum.plot1d({'H':[] },'H')
myspectrum.show()
# Find peaks and plot a region with peaks and labeles
peaks = myspecrum.peak_finder(1.5)
print peaks
myspectrum.plot([[6.8,10.2],[]],orderXY = 'HN', color = [],peaklist = peaks)
myspectrum.Show()
| gpl-2.0 | -5,602,564,242,661,520,000 | 42.691027 | 243 | 0.480101 | false | 4.034949 | false | false | false |
radicalbit/ambari | ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/service_advisor.py | 1 | 8471 | #!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python imports
import imp
import os
import traceback
import re
import socket
import fnmatch
import math
from resource_management.core.logger import Logger
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
try:
with open(PARENT_FILE, 'rb') as fp:
service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
traceback.print_exc()
print "Failed to load parent"
class LogSearchServiceAdvisor(service_advisor.ServiceAdvisor):
def __init__(self, *args, **kwargs):
self.as_super = super(LogSearchServiceAdvisor, self)
self.as_super.__init__(*args, **kwargs)
# Always call these methods
self.modifyMastersWithMultipleInstances()
self.modifyCardinalitiesDict()
self.modifyHeapSizeProperties()
self.modifyNotValuableComponents()
self.modifyComponentsNotPreferableOnServer()
self.modifyComponentLayoutSchemes()
def modifyMastersWithMultipleInstances(self):
"""
Modify the set of masters with multiple instances.
Must be overriden in child class.
"""
# Nothing to do
pass
def modifyCardinalitiesDict(self):
"""
Modify the dictionary of cardinalities.
Must be overriden in child class.
"""
# Nothing to do
pass
def modifyHeapSizeProperties(self):
"""
Modify the dictionary of heap size properties.
Must be overriden in child class.
"""
pass
def modifyNotValuableComponents(self):
"""
Modify the set of components whose host assignment is based on other services.
Must be overriden in child class.
"""
# Nothing to do
pass
def modifyComponentsNotPreferableOnServer(self):
"""
Modify the set of components that are not preferable on the server.
Must be overriden in child class.
"""
# Nothing to do
pass
def modifyComponentLayoutSchemes(self):
"""
Modify layout scheme dictionaries for components.
The scheme dictionary basically maps the number of hosts to
host index where component should exist.
Must be overriden in child class.
"""
# Nothing to do
pass
def getServiceComponentLayoutValidations(self, services, hosts):
"""
Get a list of errors.
Must be overriden in child class.
"""
return []
def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
putLogSearchProperty = self.putProperty(configurations, "logsearch-properties", services)
putLogSearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
putLogSearchCommonEnvProperty = self.putProperty(configurations, "logsearch-common-env", services)
putLogSearchCommonEnvAttribute = self.putPropertyAttribute(configurations, "logsearch-common-env")
putLogSearchEnvAttribute = self.putPropertyAttribute(configurations, "logsearch-env")
putLogFeederEnvAttribute = self.putPropertyAttribute(configurations, "logfeeder-env")
logSearchServerHosts = self.getComponentHostNames(services, "LOGSEARCH", "LOGSEARCH_SERVER")
# if there is no Log Search server on the cluster, i.e. there is an external server
if logSearchServerHosts is None or len(logSearchServerHosts) == 0:
# hide logsearch specific attributes
for key in services['configurations']['logsearch-env']['properties']:
putLogSearchEnvAttribute(key, 'visible', 'false')
for key in services['configurations']['logsearch-properties']['properties']:
putLogSearchAttribute(key, 'visible', 'false')
for key in services['configurations']['logsearch-audit_logs-solrconfig']['properties']:
self.putPropertyAttribute(configurations, "logsearch-audit_logs-solrconfig")(key, 'visible', 'false')
for key in services['configurations']['logsearch-service_logs-solrconfig']['properties']:
self.putPropertyAttribute(configurations, "logsearch-service_logs-solrconfig")(key, 'visible', 'false')
for key in services['configurations']['logsearch-log4j']['properties']:
self.putPropertyAttribute(configurations, "logsearch-log4j")(key, 'visible', 'false')
for key in services['configurations']['logsearch-admin-json']['properties']:
self.putPropertyAttribute(configurations, "logsearch-admin-json")(key, 'visible', 'false')
# if there is a Log Search server on the cluster
else:
infraSolrHosts = self.getComponentHostNames(services, "AMBARI_INFRA", "INFRA_SOLR")
# if there is AMBARI_INFRA, calculate the min/max shards and recommendations based on the number of infra solr hosts
if infraSolrHosts is not None and len(infraSolrHosts) > 0 and "logsearch-properties" in services["configurations"]:
replicationReccomendFloat = math.log(len(infraSolrHosts), 5)
recommendedReplicationFactor = int(1 + math.floor(replicationReccomendFloat))
recommendedMinShards = len(infraSolrHosts)
recommendedShards = 2 * len(infraSolrHosts)
recommendedMaxShards = 3 * len(infraSolrHosts)
# if there is no AMBARI_INFRA (i.e. external solr is used), use default values for min/max shards and recommendations
else:
recommendedReplicationFactor = 2
recommendedMinShards = 1
recommendedShards = 1
recommendedMaxShards = 100
putLogSearchCommonEnvProperty('logsearch_use_external_solr', 'true')
# recommend number of shard
putLogSearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
putLogSearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
putLogSearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
putLogSearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
putLogSearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
putLogSearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
# recommend replication factor
putLogSearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
putLogSearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
kerberos_authentication_enabled = self.isSecurityEnabled(services)
# if there is no kerberos enabled hide kerberor related properties
if not kerberos_authentication_enabled:
putLogSearchCommonEnvProperty('logsearch_external_solr_kerberos_enabled', 'false')
putLogSearchCommonEnvAttribute('logsearch_external_solr_kerberos_enabled', 'visible', 'false')
putLogSearchEnvAttribute('logsearch_external_solr_kerberos_keytab', 'visible', 'false')
putLogSearchEnvAttribute('logsearch_external_solr_kerberos_principal', 'visible', 'false')
putLogFeederEnvAttribute('logfeeder_external_solr_kerberos_keytab', 'visible', 'false')
putLogFeederEnvAttribute('logfeeder_external_solr_kerberos_principal', 'visible', 'false')
def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
"""
Entry point.
Validate configurations for the service. Return a list of errors.
The code for this function should be the same for each Service Advisor.
"""
#Logger.info("Class: %s, Method: %s. Validating Configurations." %
# (self.__class__.__name__, inspect.stack()[0][3]))
return []
| apache-2.0 | 6,362,549,142,980,725,000 | 43.584211 | 123 | 0.733325 | false | 4.177022 | true | false | false |
jpalladino84/roguelike-game | tdl/__init__.py | 2 | 53198 | """
The documentation for python-tdl. A Pythonic port of
U{libtcod<http://doryen.eptalys.net/libtcod/>}.
You can find the project page on Google Code
U{here<http://code.google.com/p/python-tdl/>}.
Report any bugs or issues to the Google Code issue tracker
U{here<https://code.google.com/p/python-tdl/issues/list>}.
Getting Started
===============
Once the library is imported you can load the font you want to use with
L{tdl.setFont}.
This is optional and when skipped will use a decent default font.
After that you call L{tdl.init} to set the size of the window and get the
root console in return.
This console is the canvas to what will appear on the screen.
Indexing Consoles
=================
For most methods taking a position you can use Python-style negative
indexes to refer to the opposite side of a console with (-1, -1)
starting at the bottom right.
You can also check if a point is part of a console using containment
logic i.e. ((x, y) in console).
You may also iterate over a console using a for statement. This returns
every x,y coordinate available to draw on but it will be extremely slow
to actually operate on every coordinate individualy.
Try to minimize draws by using an offscreen L{Console}, only drawing
what needs to be updated, and using L{Console.blit}.
Drawing
=======
Once you have the root console from L{tdl.init} you can start drawing on
it using a method such as L{Console.drawChar}.
When using this method you can have the char parameter be an integer or a
single character string.
The fgcolor and bgcolor parameters expect a three item list
[red, green, blue] with integers in the 0-255 range with [0, 0, 0] being
black and [255, 255, 255] being white.
Or instead you can use None in the place of any of the three parameters
to tell the library to not overwrite colors.
After the drawing functions are called a call to L{tdl.flush} will update
the screen.
"""
import sys
import os
import ctypes
import weakref
import array
import itertools
import textwrap
import struct
import re
import warnings
from . import event, map, noise
from .__tcod import _lib, _Color, _unpackfile
_IS_PYTHON3 = (sys.version_info[0] == 3)
if _IS_PYTHON3: # some type lists to use with isinstance
_INTTYPES = (int,)
_NUMTYPES = (int, float)
_STRTYPES = (str, bytes)
else:
_INTTYPES = (int, long)
_NUMTYPES = (int, long, float)
_STRTYPES = (str,)
def _encodeString(string): # still used for filepaths, and that's about it
"changes string into bytes if running in python 3, for sending to ctypes"
if _IS_PYTHON3 and isinstance(string, str):
return string.encode()
return string
#def _formatString(string):
# pass
def _formatChar(char):
"""Prepares a single characters for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current characters
instead of overwriting it.
This is called often and needs to be optimized whenever possible.
"""
if char is None:
return None
#if isinstance(char, _INTTYPES):
# return char
if isinstance(char, _STRTYPES) and len(char) == 1:
return ord(char)
return int(char) # conversion faster than type check
#raise TypeError('Expected char parameter to be a single characters string, number, or None, got: %s' % repr(char))
_fontinitialized = False
_rootinitialized = False
_rootConsoleRef = None
# remove dots from common functions
_setchar = _lib.TCOD_console_set_char
_setfore = _lib.TCOD_console_set_char_foreground
_setback = _lib.TCOD_console_set_char_background
_setcharEX = _lib.TCOD_console_put_char_ex
def _verify_colors(*colors):
"""Used internally.
Raise an assertion error if the parameters can not be converted into colors.
"""
for color in colors:
assert _iscolor(color), 'a color must be a 3 items tuple, web format, or None, received %s' % repr(color)
return True
def _iscolor(color):
"""Used internally.
A debug function to see if an object can be used as a TCOD color struct.
None counts as a parameter to keep the current colors instead.
This function is often part of an inner-loop and can slow a program down.
It has been made to work with assert and can be skipped with the -O flag.
Still it's called often and must be optimized.
"""
if color is None:
return True
if isinstance(color, (tuple, list, _Color)):
return len(color) == 3
if isinstance(color, _INTTYPES):
return True
return False
## not using this for now
#class Color(object):
#
# def __init__(self, r, g, b):
# self._color = (r, g, b)
# self._ctype = None
#
# def _getCType(self):
# if not self._ctype:
# self._ctype = _Color(*self._color)
# return self._ctype
#
# def __len__(self):
# return 3
# Format the color to ctypes, will preserve None and False
_formatColor = _Color.new
def _getImageSize(filename):
"""Try to get the width and height of a bmp of png image file"""
file = open(filename, 'rb')
if file.read(8) == b'\x89PNG\r\n\x1a\n': # PNG
while 1:
length, = struct.unpack('>i', file.read(4))
chunkID = file.read(4)
if chunkID == '': # EOF
return None
if chunkID == b'IHDR':
# return width, height
return struct.unpack('>ii', file.read(8))
file.seek(4 + length, 1)
file.seek(0)
if file.read(8) == b'BM': # Bitmap
file.seek(18, 0) # skip to size data
# return width, height
return struct.unpack('<ii', file.read(8))
# return None on error, unknown file
class TDLError(Exception):
"""
The catch all for most TDL specific errors.
"""
class _MetaConsole(object):
"""
Contains methods shared by both the L{Console} and L{Window} characters.
"""
__slots__ = ('width', 'height', 'console', '_cursor', '_fgcolor',
'_bgcolor', '_bgblend', '_colorLock', '__weakref__', '__dict__')
def __init__(self):
self._cursor = (0, 0)
self._scrollMode = 'error'
self._fgcolor = _formatColor((255, 255, 255))
self._bgcolor = _formatColor((0, 0, 0))
self._bgblend = 1 # SET
self._colorLock = None # which object sets the ctype color options
def _normalizePoint(self, x, y):
"""Check if a point is in bounds and make minor adjustments.
Respects Pythons negative indexes. -1 starts at the bottom right.
Replaces the _drawable function
"""
#assert isinstance(x, _INTTYPES), 'x must be an integer, got %s' % repr(x)
#assert isinstance(y, _INTTYPES), 'y must be an integer, got %s' % repr(y)
# force int, always faster than type checking
x = int(x)
y = int(y)
assert (-self.width <= x < self.width) and (-self.height <= y < self.height), \
('(%i, %i) is an invalid postition on %s' % (x, y, self))
# handle negative indexes
if x < 0:
x += self.width
if y < 0:
y += self.height
return (x, y)
def _normalizeRect(self, x, y, width, height):
"""Check if the rectangle is in bounds and make minor adjustments.
raise AssertionError's for any problems
"""
x, y = self._normalizePoint(x, y) # inherit _normalizePoint logic
assert width is None or isinstance(width, _INTTYPES), 'width must be an integer or None, got %s' % repr(width)
assert height is None or isinstance(height, _INTTYPES), 'height must be an integer or None, got %s' % repr(height)
# if width or height are None then extend them to the edge
if width is None:
width = self.width - x
elif width < 0: # handle negative numbers
width += self.width
width = max(0, width) # a 'too big' negative is clamped zero
if height is None:
height = self.height - y
height = max(0, height)
elif height < 0:
height += self.height
# reduce rect size to bounds
width = min(width, self.width - x)
height = min(height, self.height - y)
return x, y, width, height
def _normalizeCursor(self, x, y):
"""return the normalized the cursor position."""
width, height = self.getSize()
assert width != 0 and height != 0, 'can not print on a console with a width or height of zero'
while x >= width:
x -= width
y += 1
while y >= height:
if self._scrollMode == 'scroll':
y -= 1
self.scroll(0, -1)
elif self._scrollMode == 'error':
# reset the cursor on error
self._cursor = (0, 0)
raise TDLError('Cursor has reached the end of the console')
return (x, y)
def _lockColors(self, forceUpdate=False):
"""Make sure the color options on the root console match ths instance"""
if self.console._lockColors is not self or forceUpdate:
self.console._lockColors = self
_lib.TCOD_console_set_default_background(self.console, self.bgcolor)
_lib.TCOD_console_set_default_foreground(self.console, self.fgcolor)
#
def setMode(self, mode):
"""Configure how this console will react to the cursor writing past the
end if the console.
This is for methods that use the virtual cursor, such as L{printStr}.
@type mode: string
@param mode: Possible settings are:
- 'error' - A TDLError will be raised once the cursor
reaches the end of the console. Everything up until
the error will still be drawn.
This is the default setting.
- 'scroll' - The console will scroll up as stuff is
written to the end.
You can restrict the region with L{tdl.Window} when
doing this.
"""
MODES = ['error', 'scroll']
if mode.lower() not in MODES:
raise TDLError('mode must be one of %s, got %s' % (MODES, repr(mode)))
self._scrollMode = mode.lower()
def setColors(self, fg=None, bg=None):
"""Sets the colors to be used with the L{printStr} function.
Values of None will only leave the current values unchanged.
"""
if self.console._lockColors is self:
self.console._lockColors = None
if fg is not None:
self._fgcolor = _formatColor(fg)
if bg is not None:
self._bgcolor = _formatColor(bg)
def printStr(self, string):
"""Print a string at the virtual cursor.
Handles special characters such as '\\n' and '\\r'.
Printing past the bottom of the console will scroll everying upwards.
Colors can be set with L{setColors} and the virtual cursor can be moved
with L{move}.
@type string: string
@param string:
"""
x, y = self._cursor
for char in string:
if char == '\n': # line break
x = 0
y += 1
continue
if char == '\r': # return
x = 0
continue
x, y = self._normalizeCursor(x, y)
self.drawChar(x, y, char, self._fgcolor, self._bgcolor)
x += 1
self._cursor = (x, y)
def write(self, string):
"""This method mimics basic file-like behaviour.
Because of this method you can replace sys.stdout or sys.stderr with
a L{Typewriter} instance.
This is a convoluted process and behaviour seen now can be excepted to
change on later versions.
@type string: string
"""
# some 'basic' line buffer stuff.
# there must be an easier way to do this. The textwrap module didn't
# help much.
x, y = self._normalizeCursor(*self._cursor)
width, height = self.getSize()
wrapper = textwrap.TextWrapper(initial_indent=(' '*x), width=width)
writeLines = []
for line in string.split('\n'):
if line:
writeLines += wrapper.wrap(line)
wrapper.initial_indent = ''
else:
writeLines.append([])
for line in writeLines:
x, y = self._normalizeCursor(x, y)
self.drawStr(x, y, line[x:], self._fgcolor, self._bgcolor)
y += 1
x = 0
y -= 1
self._cursor = (x, y)
def drawChar(self, x, y, char, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a single characters.
@type x: int
@param x: X coordinate to draw at.
@type y: int
@param y: Y coordinate to draw at.
@type char: int, string, or None
@param char: Should be an integer, single characters string, or None.
You can set the char parameter as None if you only want to change
the colors of the tile.
@type fgcolor: (r, g, b) or None
@param fgcolor: For fgcolor and bgcolor you use a 3 items list with
integers ranging 0-255 or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
You can use always use ((x, y) in console) to
check if a tile is drawable.
"""
assert _verify_colors(fgcolor, bgcolor)
x, y = self._normalizePoint(x, y)
x, y = ctypes.c_int(x), ctypes.c_int(y)
self._setChar(x, y, _formatChar(char),
_formatColor(fgcolor), _formatColor(bgcolor))
def drawStr(self, x, y, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a string starting at x and y. Optinally colored.
A string that goes past the right side will wrap around. A string
wraping to below the console will raise a L{TDLError} but will still be
written out. This means you can safely ignore the errors with a
try... except block if you're fine with partily written strings.
\\r and \\n are drawn on the console as normal characters tiles. No
special encoding is done and any string will translate to the characters
table as is.
For a string drawing operation that respects special characters see the
L{Typewriter} class.
@type x: int
@param x: X coordinate to draw at.
@type y: int
@param y: Y coordinate to draw at.
@type string: string or iterable
@param string: Can be a string or an iterable of numbers.
Special characters are ignored and rendered as any other
characters.
@type fgcolor: (r, g, b) or None
@param fgcolor: For fgcolor and bgcolor you use a 3 items list with
integers ranging 0-255 or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
You can use always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y = self._normalizePoint(x, y)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
width, height = self.getSize()
batch = [] # prepare a batch operation
def _drawStrGen(x=x, y=y, string=string, width=width, height=height):
"""Generator for drawStr
Iterates over ((x, y), ch) data for _setCharBatch, raising an
error if the end of the console is reached.
"""
for char in string:
if y == height:
raise TDLError('End of console reached.')
#batch.append(((x, y), _formatChar(char))) # ((x, y), ch)
yield((x, y), _formatChar(char))
x += 1 # advance cursor
if x == width: # line break
x = 0
y += 1
self._setCharBatch(_drawStrGen(), fgcolor, bgcolor)
def drawRect(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a rectangle starting from x and y and extending to width and height.
If width or height are None then it will extend to the edge of the console.
@type x: int
@param x: x coordinate to draw at.
@type y: int
@param y: y coordinate to draw at.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend to the bottom right of the
console or can be a negative number to be sized reltive
to the total size of the console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type string: int, string, or None
@param string: Should be an integer, single characters string, or None.
You can set the char parameter as None if you only want
to change the colors of an areas.
@type fgcolor: (r, g, b) or None
@param fgcolor: For fgcolor and bgcolor you use a 3 items list with
integers ranging 0-255 or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
You can use always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y, width, height = self._normalizeRect(x, y, width, height)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
char = _formatChar(string)
# use itertools to make an x,y grid
# using ctypes here reduces type converstions later
grid = itertools.product((ctypes.c_int(x) for x in range(x, x + width)),
(ctypes.c_int(y) for y in range(y, y + height)))
# zip the single characters in a batch variable
batch = zip(grid, itertools.repeat(char, width * height))
self._setCharBatch(batch, fgcolor, bgcolor, nullChar=(char is None))
def drawFrame(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Similar to L{drawRect} but only draws the outline of the rectangle.
@type x: int
@param x: x coordinate to draw at.
@type y: int
@param y: y coordinate to draw at.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend to the bottom right of the
console or can be a negative number to be sized reltive
to the total size of the console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type string: int, string, or None
@param string: Should be an integer, single characters string, or None.
You can set the char parameter as None if you only want
to change the colors of an areas.
@type fgcolor: (r, g, b) or None
@param fgcolor: For fgcolor and bgcolor you use a 3 items list with
integers ranging 0-255 or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
You can use always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y, width, height = self._normalizeRect(x, y, width, height)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
char = _formatChar(string)
if width == 1 or height == 1: # it's just a single width line here
return self.drawRect(x, y, width, height, char, fgcolor, bgcolor)
# draw sides of frame with drawRect
self.drawRect(x, y, 1, height, char, fgcolor, bgcolor)
self.drawRect(x, y, width, 1, char, fgcolor, bgcolor)
self.drawRect(x + width - 1, y, 1, height, char, fgcolor, bgcolor)
self.drawRect(x, y + height - 1, width, 1, char, fgcolor, bgcolor)
def blit(self, source, x=0, y=0, width=None, height=None, srcX=0, srcY=0):
"""Blit another console or Window onto the current console.
By default it blits the entire source to the topleft corner.
@type source: L{Console} or L{Window}
@param source: Source window can be a L{Console} or L{Window} instance.
It can even blit to itself without any problems.
@type x: int
@param x: X coordinate to blit to.
@type y: int
@param y: Y coordinate to blit to.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend as far as possible to the
bottom right corner of the blit areas or can be a negative
number to be sized reltive to the total size of the
B{destination} console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type srcX: int
@param srcX: The source consoles x coordinate to blit from.
@type srcY: int
@param srcY: The source consoles y coordinate to blit from.
"""
# hardcode alpha settings for now
fgalpha=1.0
bgalpha=1.0
assert isinstance(source, (Console, Window)), "source muse be a Window or Console instance"
# handle negative indexes and rects
# negative width and height will be set realtive to the destination
# and will also be clamped to the smallest Console
x, y, width, height = self._normalizeRect(x, y, width, height)
srcX, srcY, width, height = source._normalizeRect(srcX, srcY, width, height)
# translate source and self if any of them are Window instances
srcX, srcY = source._translate(srcX, srcY)
source = source.console
x, y = self._translate(x, y)
self = self.console
if self == source:
# if we are the same console then we need a third console to hold
# onto the data, otherwise it tries to copy into itself and
# starts destroying everything
tmp = Console(width, height)
_lib.TCOD_console_blit(source, srcX, srcY, width, height, tmp, 0, 0, fgalpha, bgalpha)
_lib.TCOD_console_blit(tmp, 0, 0, width, height, self, x, y, fgalpha, bgalpha)
else:
_lib.TCOD_console_blit(source, srcX, srcY, width, height, self, x, y, fgalpha, bgalpha)
def getCursor(self):
"""Return the virtual cursor position.
@rtype: (x, y)
@return: Returns (x, y) a 2-integer tuple containing where the next
L{addChar} or L{addStr} will start at.
This can be changed with the L{move} method."""
x, y = self._cursor
width, height = self.parent.getSize()
while x >= width:
x -= width
y += 1
if y >= height and self.scrollMode == 'scroll':
y = height - 1
return x, y
def getSize(self):
"""Return the size of the console as (width, height)
@rtype: (width, height)
"""
return self.width, self.height
def __iter__(self):
"""Return an iterator with every possible (x, y) value for this console.
It goes without saying that working on the console this way is a
slow process, especially for Python, and should be minimized.
@rtype: iter((x, y), ...)
"""
return itertools.product(range(self.width), range(self.height))
def move(self, x, y):
"""Move the virtual cursor.
@type x: int
@param x: X position to place the cursor.
@type y: int
@param y: Y position to place the cursor.
"""
self._cursor = self._normalizePoint(x, y)
def scroll(self, x, y):
"""Scroll the contents of the console in the direction of x,y.
Uncovered areas will be cleared.
Does not move the virutal cursor.
@type x: int
@param x: Distance to scroll along x-axis
@type y: int
@param y: Distance to scroll along y-axis
@rtype: iter((x, y), ...)
@return: Iterates over the (x, y) of any tile uncovered after scrolling.
"""
assert isinstance(x, _INTTYPES), "x must be an integer, got %s" % repr(x)
assert isinstance(y, _INTTYPES), "y must be an integer, got %s" % repr(x)
def getSlide(x, length):
"""get the parameters needed to scroll the console in the given
direction with x
returns (x, length, srcx)
"""
if x > 0:
srcx = 0
length -= x
elif x < 0:
srcx = abs(x)
x = 0
length -= srcx
else:
srcx = 0
return x, length, srcx
def getCover(x, length):
"""return the (x, width) ranges of what is covered and uncovered"""
cover = (0, length) # everything covered
uncover = None # nothing uncovered
if x > 0: # left side uncovered
cover = (x, length - x)
uncover = (0, x)
elif x < 0: # right side uncovered
x = abs(x)
cover = (0, length - x)
uncover = (length - x, x)
return cover, uncover
width, height = self.getSize()
if abs(x) >= width or abs(y) >= height:
return self.clear() # just clear the console normally
# get the ranges of the areas that will be uncovered
coverX, uncoverX = getCover(x, width)
coverY, uncoverY = getCover(y, height)
# so at this point we know that coverX and coverY makes a rect that
# encases the areas that we end up blitting to. uncoverX/Y makes a
# rect in the corner of the uncovered areas. So we need to combine
# the uncoverX/Y with coverY/X to make what's left of the uncovered
# areas. Explaining it makes it mush easier to do now.
# But first we need to blit.
x, width, srcx = getSlide(x, width)
y, height, srcy = getSlide(y, height)
self.blit(self, x, y, width, height, srcx, srcy)
if uncoverX: # clear sides (0x20 is space)
self.drawRect(uncoverX[0], coverY[0], uncoverX[1], coverY[1], 0x20, 0x000000, 0x000000)
if uncoverY: # clear top/bottom
self.drawRect(coverX[0], uncoverY[0], coverX[1], uncoverY[1], 0x20, 0x000000, 0x000000)
if uncoverX and uncoverY: # clear corner
self.drawRect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1], 0x20, 0x000000, 0x000000)
def getChar(self, x, y):
"""Return the characters and colors of a tile as (ch, fg, bg)
This method runs very slowly as is not recommended to be called
frequently.
@rtype: (int, (r, g, b), (r, g, b))
@returns: Returns a 3-items tuple. The first items is an integer of the
characters at the position (x, y) the second and third are the
foreground and background colors respectfully.
"""
raise NotImplementedError('Method here only exists for the docstring')
def __contains__(self, position):
"""Use ((x, y) in console) to check if a position is drawable on this console.
"""
x, y = position
return (0 <= x < self.width) and (0 <= y < self.height)
class Console(_MetaConsole):
"""Contains characters and color data and can be drawn to.
The console created by the L{tdl.init} function is the root console and is the
console that is rendered to the screen with L{flush}.
Any console created from the Console class is an off-screen console that
can be drawn on before being L{blit} to the root console.
"""
__slots__ = ('_as_parameter_', '_typewriter')
def __init__(self, width, height):
"""Create a new offscreen console.
@type width: int
@param width: Width of the console in tiles
@type height: int
@param height: Height of the console in tiles
"""
_MetaConsole.__init__(self)
if not _rootinitialized:
raise TDLError('Can not create Console\'s before tdl.init')
self._as_parameter_ = _lib.TCOD_console_new(width, height)
self.console = self
self.width = width
self.height = height
self._typewriter = None # "typewriter lock", makes sure the colors are set to the typewriter
# will be phased out with the Typewriter class
@classmethod
def _newConsole(cls, console):
"""Make a Console instance, from a console ctype"""
self = cls.__new__(cls)
_MetaConsole.__init__(self)
self._as_parameter_ = console
self.console = self
self.width = _lib.TCOD_console_get_width(self)
self.height = _lib.TCOD_console_get_height(self)
self._typewriter = None
return self
def __del__(self):
"""
If the main console is garbage collected then the window will be closed as well
"""
# If this is the root console the window will close when collected
try:
if isinstance(self._as_parameter_, ctypes.c_void_p):
global _rootinitialized, _rootConsoleRef
_rootinitialized = False
_rootConsoleRef = None
_lib.TCOD_console_delete(self)
except StandardError:
pass # I forget why I put this here but I'm to afraid to delete it
def __copy__(self):
# make a new class and blit
clone = self.__class__(self.width, self.height)
clone.blit(self)
return clone
def __getstate__(self):
# save data from getChar
data = [self.getChar(x, y) for x,y in
itertools.product(range(self.width), range(self.height))]
return self.width, self.height, data
def __setstate__(self, state):
# make console from __init__ and unpack a getChar array
width, height, data = state
self.__init__(width, height)
for (x, y), graphic in zip(itertools.product(range(width),
range(height)), data):
self.drawChar(x, y, *graphic)
def _replace(self, console):
"""Used internally
Mostly used just to replace this Console object with the root console
If another Console object is used then they are swapped
"""
if isinstance(console, Console):
self._as_parameter_, console._as_parameter_ = \
console._as_parameter_, self._as_parameter_ # swap tcod consoles
else:
self._as_parameter_ = console
self.width = _lib.TCOD_console_get_width(self)
self.height = _lib.TCOD_console_get_height(self)
return self
def _translate(self, x, y):
"""Convertion x and y to their position on the root Console for this Window
Because this is a Console instead of a Window we return the paramaters
untouched"""
return x, y
def clear(self, fgcolor=(0, 0, 0), bgcolor=(0, 0, 0)):
"""Clears the entire Console.
@type fgcolor: (r, g, b)
@param fgcolor: Foreground color.
Must be a 3-items list with integers that range 0-255.
Unlike most other operations you cannot use None here.
@type bgcolor: (r, g, b)
@param bgcolor: Background color. See fgcolor.
"""
assert _verify_colors(fgcolor, bgcolor)
assert fgcolor and bgcolor, 'Can not use None with clear'
self._typewriter = None
_lib.TCOD_console_set_default_background(self, _formatColor(bgcolor))
_lib.TCOD_console_set_default_foreground(self, _formatColor(fgcolor))
_lib.TCOD_console_clear(self)
def _setChar(self, x, y, char, fgcolor=None, bgcolor=None, bgblend=1):
"""
Sets a characters.
This is called often and is designed to be as fast as possible.
Because of the need for speed this function will do NO TYPE CHECKING
AT ALL, it's up to the drawing functions to use the functions:
_formatChar and _formatColor before passing to this."""
# buffer values as ctypes objects
console = self._as_parameter_
if char is not None and fgcolor is not None and bgcolor is not None:
_setcharEX(console, x, y, char, fgcolor, bgcolor)
return
if char is not None:
_setchar(console, x, y, char)
if fgcolor is not None:
_setfore(console, x, y, fgcolor)
if bgcolor is not None:
_setback(console, x, y, bgcolor, bgblend)
def _setCharBatch(self, batch, fgcolor, bgcolor, bgblend=1, nullChar=False):
"""
Try to perform a batch operation otherwise fall back to _setChar.
If fgcolor and bgcolor are defined then this is faster but not by very
much.
batch is a iterable of [(x, y), ch] items
"""
if fgcolor and not nullChar:
# buffer values as ctypes objects
self._typewriter = None # clear the typewriter as colors will be set
console = self._as_parameter_
bgblend = ctypes.c_int(bgblend)
if not bgcolor:
bgblend = 0
else:
_lib.TCOD_console_set_default_background(console, bgcolor)
_lib.TCOD_console_set_default_foreground(console, fgcolor)
_putChar = _lib.TCOD_console_put_char # remove dots and make local
for (x, y), char in batch:
_putChar(console, x, y, char, bgblend)
else:
for (x, y), char in batch:
self._setChar(x, y, char, fgcolor, bgcolor, bgblend)
def getChar(self, x, y):
# inherit docstring
x, y = self._normalizePoint(x, y)
char = _lib.TCOD_console_get_char(self, x, y)
bgcolor = _lib.TCOD_console_get_char_background_wrapper(self, x, y)
fgcolor = _lib.TCOD_console_get_char_foreground_wrapper(self, x, y)
return char, tuple(fgcolor), tuple(bgcolor)
def __repr__(self):
return "<Console (Width=%i Height=%i)>" % (self.width, self.height)
class Window(_MetaConsole):
"""A Window contains a small isolated part of a Console.
Drawing on the Window draws on the Console.
Making a Window and setting its width or height to None will extend it to
the edge of the console.
"""
__slots__ = ('parent', 'x', 'y')
def __init__(self, console, x, y, width, height):
"""Isolate part of a L{Console} or L{Window} instance.
@type console: L{Console} or L{Window}
@param console: The parent object which can be a L{Console} or another
L{Window} instance.
@type x: int
@param x: X coordinate to place the Window.
This follows the normal rules for indexing so you can use a
negative integer to place the Window relative to the bottom
right of the parent Console instance.
@type y: int
@param y: Y coordinate to place the Window.
See x.
@type width: int or None
@param width: Width of the Window.
Can be None to extend as far as possible to the
bottom right corner of the parent Console or can be a
negative number to be sized reltive to the Consoles total
size.
@type height: int or None
@param height: Height of the Window.
See width.
"""
_MetaConsole.__init__(self)
assert isinstance(console, (Console, Window)), 'console parameter must be a Console or Window instance, got %s' % repr(console)
self.parent = console
self.x, self.y, self.width, self.height = console._normalizeRect(x, y, width, height)
if isinstance(console, Console):
self.console = console
else:
self.console = self.parent.console
def _translate(self, x, y):
"""Convertion x and y to their position on the root Console"""
# we add our position relative to our parent and then call then next parent up
return self.parent._translate((x + self.x), (y + self.y))
def clear(self, fgcolor=(0, 0, 0), bgcolor=(0, 0, 0)):
"""Clears the entire Window.
@type fgcolor: (r, g, b)
@param fgcolor: Foreground color.
Must be a 3-items list with integers that range 0-255.
Unlike most other operations you can not use None here.
@type bgcolor: (r, g, b)
@param bgcolor: Background color. See fgcolor.
"""
assert _verify_colors(fgcolor, bgcolor)
assert fgcolor and bgcolor, 'Can not use None with clear'
self.drawRect(0, 0, None, None, 0x20, fgcolor, bgcolor)
def _setChar(self, x, y, char=None, fgcolor=None, bgcolor=None, bgblend=1):
self.parent._setChar((x + self.x), (y + self.y), char, fgcolor, bgcolor, bgblend)
def _setCharBatch(self, batch, fgcolor, bgcolor, bgblend=1):
myX = self.x # remove dots for speed up
myY = self.y
self.parent._setCharBatch((((x + myX, y + myY), ch) for ((x, y), ch) in batch),
fgcolor, bgcolor, bgblend)
def drawChar(self, x, y, char, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y = self._normalizePoint(x, y)
self.parent.drawChar(x + self.x, y + self.y, char, fgcolor, bgcolor)
def drawRect(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y, width, height = self._normalizeRect(x, y, width, height)
self.parent.drawRect(x + self.x, y + self.y, width, height, string, fgcolor, bgcolor)
def drawFrame(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y, width, height = self._normalizeRect(x, y, width, height)
self.parent.drawFrame(x + self.x, y + self.y, width, height, string, fgcolor, bgcolor)
def getChar(self, x, y):
# inherit docstring
x, y = self._normalizePoint(x, y)
return self.console.getChar(self._translate(x, y))
def __repr__(self):
return "<Window(X=%i Y=%i Width=%i Height=%i)>" % (self.x, self.y,
self.width,
self.height)
def init(width, height, title=None, fullscreen=False, renderer='OPENGL'):
"""Start the main console with the given width and height and return the
root console.
Call the consoles drawing functions. Then remember to use L{tdl.flush} to
make what's drawn visible on the console.
@type width: int
@param width: width of the root console (in tiles)
@type height: int
@param height: height of the root console (in tiles)
@type title: string
@param title: Text to display as the window title.
If left None it defaults to the running scripts filename.
@type fullscreen: boolean
@param fullscreen: Can be set to True to start in fullscreen mode.
@type renderer: string
@param renderer: Can be one of 'GLSL', 'OPENGL', or 'SDL'.
Due to way Python works you're unlikely to see much of an
improvement by using 'GLSL' or 'OPENGL' as most of the
time Python is slow interacting with the console and the
rendering itself is pretty fast even on 'SDL'.
@rtype: L{Console}
@return: The root console. Only what is drawn on the root console is
what's visible after a call to L{tdl.flush}.
After the root console is garbage collected, the window made by
this function will close.
"""
RENDERERS = {'GLSL': 0, 'OPENGL': 1, 'SDL': 2}
global _rootinitialized, _rootConsoleRef
if not _fontinitialized: # set the default font to the one that comes with tdl
setFont(_unpackfile('terminal8x8.png'), None, None, True, True)
if renderer.upper() not in RENDERERS:
raise TDLError('No such render type "%s", expected one of "%s"' % (renderer, '", "'.join(RENDERERS)))
renderer = RENDERERS[renderer.upper()]
# If a console already exists then make a clone to replace it
if _rootConsoleRef and _rootConsoleRef():
oldroot = _rootConsoleRef()
rootreplacement = Console(oldroot.width, oldroot.height)
rootreplacement.blit(oldroot)
oldroot._replace(rootreplacement)
del rootreplacement
if title is None: # use a default title
if sys.argv:
# Use the script filename as the title.
title = os.path.basename(sys.argv[0])
else:
title = 'python-tdl'
_lib.TCOD_console_init_root(width, height, _encodeString(title), fullscreen, renderer)
#event.get() # flush the libtcod event queue to fix some issues
# issues may be fixed already
event._eventsflushed = False
_rootinitialized = True
rootconsole = Console._newConsole(ctypes.c_void_p())
_rootConsoleRef = weakref.ref(rootconsole)
return rootconsole
def flush():
"""Make all changes visible and update the screen.
Remember to call this function after drawing operations.
Calls to flush will enfore the frame rate limit set by L{tdl.setFPS}.
This function can only be called after L{tdl.init}
"""
if not _rootinitialized:
raise TDLError('Cannot flush without first initializing with tdl.init')
_lib.TCOD_console_flush()
def setFont(path, columns=None, rows=None, columnFirst=False,
greyscale=False, altLayout=False):
"""Changes the font to be used for this session.
This should be called before L{tdl.init}
If the font specifies its size in its filename (i.e. font_NxN.png) then this
function can auto-detect the tileset formatting and the parameters columns
and rows can be left None.
While it's possible you can change the font mid program it can sometimes
break in rare circumstances. So use caution when doing this.
@type path: string
@param path: Must be a string filepath where a bmp or png file is found.
@type columns: int
@param columns: Number of columns in the tileset.
Can be left None for auto-detection.
@type rows: int
@param rows: Number of rows in the tileset.
Can be left None for auto-detection.
@type columnFirst: boolean
@param columnFirst: Defines if the characer order goes along the rows or
colomns.
It should be True if the charater codes 0-15 are in the
first column.
And should be False if the characters 0-15
are in the first row.
@type greyscale: boolean
@param greyscale: Creates an anti-aliased font from a greyscale bitmap.
Otherwise it uses the alpha channel for anti-aliasing.
Unless you actually need anti-aliasing from a font you
know uses a smooth greyscale channel you should leave
this on False.
@type altLayout: boolean
@param altLayout: An alternative layout with space in the upper left
corner.
The colomn parameter is ignored if this is True,
find examples of this layout in the font/libtcod/
directory included with the python-tdl source.
@raise TDLError: Will be raised if no file is found at path or if auto-
detection fails.
@note: A png file that's been optimized can fail to load correctly on
MAC OS X creating a garbled mess when rendering.
Don't use a program like optipng or just use bmp files instead if
you want your program to work on macs.
"""
# put up some constants that are only used here
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_LAYOUT_TCOD = 8
global _fontinitialized
_fontinitialized = True
flags = 0
if altLayout:
flags |= FONT_LAYOUT_TCOD
elif columnFirst:
flags |= FONT_LAYOUT_ASCII_INCOL
else:
flags |= FONT_LAYOUT_ASCII_INROW
if greyscale:
flags |= FONT_TYPE_GREYSCALE
if not os.path.exists(path):
raise TDLError('no file exists at: "%s"' % path)
path = os.path.abspath(path)
# and the rest is the auto-detect script
imgSize = _getImageSize(path) # try to find image size
if imgSize:
imgWidth, imgHeight = imgSize
# try to get font size from filename
match = re.match('.*?([0-9]+)[xX]([0-9]+)', os.path.basename(path))
if match:
fontWidth, fontHeight = match.groups()
fontWidth, fontHeight = int(fontWidth), int(fontHeight)
# estimate correct tileset size
estColumns, remC = divmod(imgWidth, fontWidth)
estRows, remR = divmod(imgHeight, fontHeight)
if remC or remR:
warnings.warn("Font may be incorrectly formatted.")
if not columns:
columns = estColumns
if not rows:
rows = estRows
else:
# the font name excluded the fonts size
if not (columns and rows):
# no matched font size and no tileset is given
raise TDLError('%s has no font size in filename' % os.path.basename(path))
if columns and rows:
# confirm user set options
if (fontWidth * columns != imgWidth or
fontHeight * rows != imgHeight):
warnings.warn("setFont parameters are set as if the image size is (%d, %d) when the detected size is actually (%i, %i)"
% (fontWidth * columns, fontHeight * rows,
imgWidth, imgHeight))
else:
warnings.warn("%s is probably not an image." % os.path.basename(path))
if not (columns and rows):
# didn't auto-detect
raise TDLError('Can not auto-detect the tileset of %s' % os.path.basename(path))
_lib.TCOD_console_set_custom_font(_encodeString(path), flags, columns, rows)
def getFullscreen():
"""Returns True if program is fullscreen.
@rtype: boolean
@return: Returns True if the window is in fullscreen mode.
Otherwise returns False.
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
return _lib.TCOD_console_is_fullscreen()
def setFullscreen(fullscreen):
"""Changes the fullscreen state.
@type fullscreen: boolean
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
_lib.TCOD_console_set_fullscreen(fullscreen)
def setTitle(title):
"""Change the window title.
@type title: string
"""
if not _rootinitialized:
raise TDLError('Not initilized. Set title with tdl.init')
_lib.TCOD_console_set_window_title(_encodeString(title))
def screenshot(path=None):
"""Capture the screen and save it as a png file
@type path: string
@param path: The filepath to save the screenshot.
If path is None then the image will be placed in the current
folder with the names:
screenshot001.png, screenshot002.png, ...
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
if isinstance(path, str):
_lib.TCOD_sys_save_screenshot(_encodeString(path))
elif path is None: # save to screenshot001.png, screenshot002.png, ...
filelist = os.listdir('.')
n = 1
filename = 'screenshot%.3i.png' % n
while filename in filelist:
n += 1
filename = 'screenshot%.3i.png' % n
_lib.TCOD_sys_save_screenshot(_encodeString(filename))
else: # assume file like obj
#save to temp file and copy to file-like obj
tmpname = os.tempnam()
_lib.TCOD_sys_save_screenshot(_encodeString(tmpname))
with tmpname as tmpfile:
path.write(tmpfile.read())
os.remove(tmpname)
#else:
# raise TypeError('path is an invalid type: %s' % type(path))
def setFPS(frameRate):
"""Set the maximum frame rate.
@type frameRate: int
@param frameRate: Further calls to L{tdl.flush} will limit the speed of
the program to run at <frameRate> frames per second. Can
also be set to 0 to run without a limit.
Defaults to None.
"""
if frameRate is None:
frameRate = 0
assert isinstance(frameRate, _INTTYPES), 'frameRate must be an integer or None, got: %s' % repr(frameRate)
_lib.TCOD_sys_set_fps(frameRate)
def getFPS():
"""Return the current frames per second of the running program set by
L{setFPS}
@rtype: int
@return: Returns the frameRate set by setFPS.
If set to no limit, this will return 0.
"""
return _lib.TCOD_sys_get_fps()
def forceResolution(width, height):
"""Change the fullscreen resoulution
@type width: int
@type height: int
"""
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
__all__ = [_var for _var in locals().keys() if _var[0] != '_' and _var not in
['sys', 'os', 'ctypes', 'array', 'weakref', 'itertools', 'textwrap',
'struct', 're', 'warnings']] # remove modules from __all__
__all__ += ['_MetaConsole'] # keep this object public to show the documentation in epydoc
__license__ = "New BSD License"
__email__ = "4b796c65+pythonTDL@gmail.com"
file = open(os.path.join(os.path.dirname(__file__), 'VERSION.txt'), 'r')
__version__ = file.read()
file.close()
| mit | -9,165,600,255,512,424,000 | 38.78908 | 135 | 0.578086 | false | 4.092784 | false | false | false |
codeforamerica/typeseam | typeseam/form_filler/queries.py | 1 | 8649 | import io, csv, json
from datetime import datetime
from sqlalchemy import desc, inspect, func, text
from sqlalchemy.orm import subqueryload
from flask import abort, Markup
from flask.ext.login import current_user
from typeseam.app import db
from .models import (
TypeformResponse,
Typeform, SeamlessDoc,
FormSubmission,
LogEntry
)
from .serializers import (
TypeformResponseSerializer,
FlatResponseSerializer,
TypeformSerializer,
SerializationError,
DeserializationError
)
response_serializer = TypeformResponseSerializer()
flat_response_serializer = FlatResponseSerializer()
typeform_serializer = TypeformSerializer()
def save_new_form_submission(data, county="sanfrancisco"):
submission = FormSubmission(
answers=data,
county=county
)
db.session.add(submission)
db.session.commit()
return submission
def get_submissions(uuids):
query = db.session.query(FormSubmission).filter(
FormSubmission.uuid.in_(uuids))
return query.all()
def get_submission_by_uuid(submission_uuid):
q = db.session.query(FormSubmission).filter(
FormSubmission.uuid == submission_uuid)
return q.first()
def delete_submission_forever(submission_uuid):
q = db.session.query(FormSubmission).filter(
FormSubmission.uuid == submission_uuid)
submission = q.first()
db.session.delete(submission)
db.session.commit()
def get_unopened_submissions():
data = get_submissions_with_logs()
unopened = []
for row in data:
if 'logs' not in row:
unopened.append(row['submission'])
else:
if not row['submission'].was_opened(row['logs']):
unopened.append(row['submission'])
return unopened
def get_latest_logentry():
q = db.session.query(LogEntry).\
filter(LogEntry.source == 'front').\
order_by(desc(LogEntry.datetime))
return q.first()
def save_new_logentries_from_front_events(events=None):
for event in events:
logentry = LogEntry.from_parsed_front_event(event)
db.session.add(logentry)
db.session.commit()
def get_all_submissions():
q = db.session.query(FormSubmission).\
order_by(desc(FormSubmission.date_received))
return q.all()
def get_logentries():
q = db.session.query(LogEntry).\
order_by(desc(LogEntry.datetime))
return q.all()
def save_new_logentry(uuid, event_type):
log = LogEntry(
datetime=datetime.now(),
user=current_user.email,
submission_key=uuid,
event_type=event_type,
source='form_filler'
)
db.session.add(log)
db.session.commit()
def save_multiple_logentries(uuids, event_type):
for uuid in uuids:
log = LogEntry(
datetime=datetime.now(),
user=current_user.email,
submission_key=uuid,
event_type=event_type,
source='form_filler'
)
db.session.add(log)
db.session.commit()
def get_submissions_with_logs():
lookups = {}
submissions = get_all_submissions()
logs = get_logentries()
for submission in submissions:
lookups[submission.uuid] = {'submission': submission}
for log in logs:
uuid = log.submission_key
if uuid in lookups:
if 'logs' not in lookups[uuid]:
lookups[uuid]['logs'] = [log]
else:
lookups[uuid]['logs'].append(log)
results = list(lookups.values())
for row in results:
if 'logs' in row:
row['logs'].sort(key=lambda e: e.datetime, reverse=True)
return sorted(results, key=lambda s: s['submission'].date_received, reverse=True)
def get_stats():
base_data = get_submissions_with_logs()
stats = {
'received': len(base_data),
'opened': len([
s for s in base_data
if s['submission'].was_opened(s['logs'])
]),
'days':[]
}
day_lookup = {}
for row in base_data:
for log in row['logs']:
day = log.day()
if day in day_lookup:
day_lookup[day].append(log)
else:
day_lookup[day] = [log]
for day, logs in day_lookup.items():
stats['days'].append({
'date': day,
'received': len([
n for n in logs if n.event_type == 'received']),
'referred': len([
n for n in logs if n.event_type == 'referred']),
'opened': len([
n for n in logs if (
n.event_type == 'opened' and n.user == 'Louise.Winterstein@sfgov.org'
)]),
})
stats['days'].sort(key=lambda d: d['date'])
stats['days'] = Markup(json.dumps(stats['days']))
return stats
def save_new_typeform_data(data, typeform=None):
if typeform:
data['user_id'] = typeform.user_id
data['typeform_id'] = typeform.id
data['translator'] = typeform.translator
models, errors = response_serializer.load(
data, many=True, session=db.session)
new_responses = []
if errors:
raise DeserializationError(str(errors))
if not models:
return []
for m in models:
if not inspect(m).persistent:
db.session.add(m)
new_responses.append(m)
if new_responses and typeform:
update_typeform_with_new_responses(typeform, new_responses)
db.session.commit()
def update_typeform_with_new_responses(typeform, responses):
latest_date = max(responses, key=lambda r: r.date_received).date_received
typeform.latest_response = latest_date
db.session.add(typeform)
def get_typeforms_for_user(user):
q = db.session.query(Typeform).\
options(subqueryload(Typeform.responses)).\
filter(Typeform.user_id == user.id).\
order_by(desc(Typeform.latest_response))
return typeform_serializer.dump(q.all(), many=True).data
def get_responses_for_typeform(typeform_id):
q = db.session.query(TypeformResponse).\
filter(TypeformResponse.typeform_id == typeform_id).\
order_by(desc(TypeformResponse.date_received))
responses = q.all()
responses_data = response_serializer.dump(responses, many=True).data
return responses_data
def get_responses_csv(user, typeform_key):
typeform = get_typeform(model=True, user_id=user.id, form_key=typeform_key)
# get responses
results = db.session.query(TypeformResponse, Typeform.form_key).\
join(Typeform, TypeformResponse.typeform_id == Typeform.id).\
filter(Typeform.user_id == user.id, Typeform.form_key == typeform_key).\
order_by(desc(TypeformResponse.date_received)).all()
# serialize them
data = flat_response_serializer.dump(results, many=True).data
if len(data) < 1:
abort(404)
# build csv
keys = list(data[0].keys())
keys.sort()
with io.StringIO() as csvfile:
writer = csv.DictWriter(
csvfile, fieldnames=keys, quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
writer.writerows(data)
return csvfile.getvalue()
def get_seamless_doc_key_for_response(response):
return SeamlessDoc.query.get(response.seamless_id).seamless_key
def get_response_model(response_id):
return TypeformResponse.query.get(int(response_id))
def get_response_detail(user, response_id):
response = get_response_model(response_id)
if user.id != response.user_id:
abort(403)
return response_serializer.dump(response).data
def get_response_count():
return db.session.query(func.count(TypeformResponse.id)).scalar()
def create_typeform(form_key, title, user_id, translator, **kwargs):
params = dict(form_key=form_key, title=title, user_id=user_id)
if not all([form_key, title, user_id, translator]):
raise TypeError(
"Creating a new Typeform requires form_key, title, user_id, and translator arguments")
typeform = db.session.query(Typeform).filter_by(**params).first()
if not typeform:
params.update(dict(translator=translator, **kwargs))
typeform = Typeform(**params)
db.session.add(typeform)
db.session.commit()
return typeform
def get_typeform(model=False, **kwargs):
params = {k: v for k, v in kwargs.items() if v}
if not params:
abort(404)
typeform = db.session.query(Typeform).filter_by(**params).first()
if not typeform:
abort(404)
if model:
return typeform
return typeform_serializer.dump(typeform).data
| bsd-3-clause | -1,366,382,294,274,125,600 | 29.135889 | 98 | 0.628974 | false | 3.67886 | false | false | false |
f-frhs/queequeg | constraint.py | 1 | 8450 | #!/usr/bin/env python
## $Id: constraint.py,v 1.3 2003/07/03 23:07:42 euske Exp $
##
## constraint.py - Pattern matching / constraint checker
##
import sys, re
import pstring
from regpat import PatternActionSet, PatCounter
from sentence import Sentence, TextTokenizer, SentenceSplitter, POSTagger
from abstfilter import AbstractFeeder, AbstractFilter, AbstractConsumer
from document import HTMLProcessor, TexProcessor, PlainTextProcessor
from unification import Unifier, UnificationError, forall, exists
from postagfix import POSTagFixer
from output import TerminalOutput
from grammarerror import GrammarNounAgreementError, GrammarVerbAgreementError, GrammarNonDeterminerError
def ispos(w, t):
return w.pos_pref == t or (w.pos_pref == None and t in w.pos)
class ParsePatternActionSet(PatternActionSet):
def __init__(self, observer, warntypes, debug_action=False):
self.debug_action = debug_action
PatternActionSet.__init__(self)
self.observer = observer
self.check_determiner = "det" in warntypes
self.check_plural = "plural" in warntypes
return
def compile_item0(self, t):
return lambda w: not w.processed and ispos(w, t)
def compile_item2(self, s):
return lambda w: not w.processed and (not isinstance(w.s, Sentence)) and s.lower() == w.s.lower()
def inherit_prop(self, m, inherit=None):
if inherit:
m.prop = inherit.prop
m.prop.match = m
else:
m.prop = Unifier()
return
c = PatCounter().inc
debug_action = True
def action_wrapper(self, n, pat1, action, m):
print "called:", n, map(str,m.getseq())
action(m)
return
## CONSTRAINTS
##
pat_det_pos = c('DT | DT1 | DTS | WDT | PRP$ | WP$')
def act_det_pos(self, m):
self.inherit_prop(m)
w = m.submatch.item
m.prop["determiner"] = True
if ispos(w, "DT1"):
m.prop["plural"] = False
elif ispos(w, "DTS"):
m.prop["plural"] = True
return
pat_pdts = c('PDT | PDT1 | PDTS')
def act_pdts(self, m):
self.inherit_prop(m)
w = m.submatch.item
if ispos(w, "PDT1"):
m.prop["plural"] = False
elif ispos(w, "PDTS"):
m.prop["plural"] = True
return
pat_modifiers = c('CD | JJ | JJR | JJS | NN | NNR')
pat_ng_3rdsing = c('<det_pos> <pdts>? <modifiers>* (NN | NNR)')
def act_ng_3rdsing(self, m):
self.inherit_prop(m)
m.prop["3rdsing"] = True
return
pat_ng_non3rdsing = c('<det_pos>? <pdts>? <modifiers>* NNS')
def act_ng_non3rdsing(self, m):
self.inherit_prop(m)
m.prop["3rdsing"] = False
return
pat_pron = c('WP | PRP | PRP2 | PRPS')
def act_pron(self, m):
self.inherit_prop(m)
w = m.submatch.item
if ispos(w, "PRP2") or ispos(w, "PRPS"):
m.prop["3rdsing"] = False
elif ispos(w, "PRP"):
m.prop["3rdsing"] = True
return
pat_ng = c('<ng_non3rdsing> | <pron> | <ng_3rdsing> ')
def act_ng(self, m):
self.inherit_prop(m, m.submatch)
return
pat_adv1 = c('RB')
pat_there = c('"there" | "here"')
pat_have1 = c('"have" | "\'ve"')
pat_has1 = c('"has" | "\'s"')
pat_had1 = c('"had" | "\'d"')
pat_is1 = c('"is" | "isn\'t" | "\'s"')
pat_are1 = c('"are" | "aren\'t" | "\'re"')
pat_rel1 = c('"which" | "who" | "whom" | "that"')
pat_vg_ven = c('VBN')
pat_vg_ving = c('VBG | "being" <vg_ven>')
pat_vg_perf = c('<adv1>? <vg_ven> | "been" <adv1>? <vg_ven> | "been" <adv1>? <vg_ving>')
# Verb group infinite - ignore
pat_vg_inf = c('MD <adv1>? "be" <vg_ving> | MD <adv1>? "be" <vg_ven> | MD <adv1>? VB')
def act_vg_inf(self, m):
self.inherit_prop(m)
return
# Verb group past tense - ignore
pat_vg_past = c('<had1> <vg_perf> | VBD')
act_vg_past = act_vg_inf
pat_vg_non3rdsing = c('<have1> <vg_perf> | <are1> <vg_ving> | VBP')
def act_vg_non3rdsing(self, m):
self.inherit_prop(m)
m.prop["3rdsing"] = False
return
pat_vg_3rdsing = c('<has1> <vg_perf> | <is1> <vg_ving> | VBZ | ' +
'MDZ <adv1>? "be" <vg_ving> | MDZ <adv1>? "be" <vg_ven> | MDZ <adv1>? VB')
def act_vg_3rdsing(self, m):
self.inherit_prop(m)
m.prop["3rdsing"] = True
return
pat_be_non3rdsing = c('"are" | "\'re" | "were" | "weren\'t"')
act_be_non3rdsing = act_vg_non3rdsing
pat_be_3rdsing = c('"is" | "isn\'t" | "\'s" | "was" | "wasn\'t"')
act_be_3rdsing = act_vg_3rdsing
pat_vg_there = c('<there> (<be_non3rdsing> | <be_3rdsing>)')
def act_vg_there(self, m):
self.inherit_prop(m, m.subseq[1].submatch)
return
pat_vg = c('<vg_inf> | <vg_past> | <vg_non3rdsing> | <vg_3rdsing>')
def act_vg(self, m):
self.inherit_prop(m, m.submatch)
return
pat_rel = c('IN? <rel1>')
pat_pp = c('IN <ng>')
pat_sv1_check = c('<ng> <adv1>? <pp>? <rel>? <vg>')
def act_sv1_check(self, m):
self.check_sv(m, m.subseq[0], m.subseq[4])
return
pat_sv2_check = c('<ng> <adv1>? <rel>? <vg>')
def act_sv2_check(self, m):
self.check_sv(m, m.subseq[0], m.subseq[3])
return
pat_sv3_check = c('<vg_there> <ng>')
def act_sv3_check(self, m):
self.check_sv(m, m.subseq[1], m.subseq[0])
return
pat_ng_single = c('(<det_pos>? <pdts>?) (<modifiers>* (NN | NNR))')
def act_ng_single(self, m):
if exists(lambda w: w.processed, m.getseq()):
return
(mdet, mnoun) = (m.subseq[0], m.subseq[1])
if mdet.subseq[0].repseq:
self.inherit_prop(m, mdet.subseq[0].repseq[0]) # inherit <det_pos>
else:
self.inherit_prop(m)
w = mnoun.subseq[1].submatch.item
if ispos(w, "NNR") or w.is_sent:
m.prop["determiner"] = True
if mdet.subseq[1].repseq:
if self.check_ng(m, mdet, mnoun, mdet.subseq[1].repseq[0].prop["plural"]):
return
self.check_ng(m, mdet, mnoun, False)
return
pat_ng_plural = c('(<det_pos>? <pdts>?) (<modifiers>* NNS)')
def act_ng_plural(self, m):
if exists(lambda w: w.processed, m.getseq()):
return
(mdet, mnoun) = (m.subseq[0], m.subseq[1])
if mdet.subseq[0].repseq:
self.inherit_prop(m, mdet.subseq[0].repseq[0]) # inherit <det_pos>
else:
self.inherit_prop(m)
m.prop["determiner"] = True
if mdet.subseq[1].repseq:
if self.check_ng(m, mdet, mnoun, mdet.subseq[1].repseq[0].prop["plural"]):
return
self.check_ng(m, mdet, mnoun, True)
return
pat_ng_check = c('<ng_single> | <ng_plural>')
del c
def check_sv(self, m, ms, mv):
if exists(lambda w: w.processed, m.getseq()):
return
try:
ms.prop.unify(mv.prop)
except UnificationError:
self.observer(GrammarVerbAgreementError(ms, mv))
for w in m.getseq():
w.processed = True
return
def check_ng(self, m, mdet, mnoun, plural):
for w in m.getseq():
w.processed = True
if self.check_plural:
try:
m.prop["plural"] = plural
except UnificationError:
self.observer(GrammarNounAgreementError(mdet, mnoun))
return True
if self.check_determiner and not m.prop["determiner"]:
self.observer(GrammarNonDeterminerError(m))
return True
return False
##
##
class ConstraintChecker(AbstractFilter):
def __init__(self, next_filter, warntypes, debug_action=False):
AbstractFilter.__init__(self, next_filter)
self.actionset = ParsePatternActionSet(self.notify, warntypes, debug_action)
self.warntypes = warntypes
return
def notify(self, e):
self.feed_next((self.sent, e))
return
def feed(self, sent):
if sent.words[0].s == "[[":
return
for w in sent.words:
if w.is_sent:
self.feed(w.s)
self.sent = sent
if "sv1" in self.warntypes:
self.actionset.perform_longest_first("sv1_check", sent.words)
if "sv2" in self.warntypes:
self.actionset.perform_longest_first("sv2_check", sent.words)
if "sv3" in self.warntypes:
self.actionset.perform_longest_first("sv3_check", sent.words)
self.actionset.perform_longest_first("ng_check", sent.words)
return
#
if __name__ == "__main__":
if sys.argv[1] == "-t":
docproc = TexProcessor
elif sys.argv[1] == "-l":
docproc = HTMLProcessor
elif sys.argv[1] == "-p":
docproc = PlainTextProcessor
else:
assert 0
import dictionary
dict = dictionary.Dictionary("LOCAL/dict.txt")
out = TerminalOutput()
pipeline = docproc(TextTokenizer(SentenceSplitter(POSTagger(dict, POSTagFixer(ConstraintChecker(out, ["sv1","sv2","sv3","det","plural"]))))))
pipeline.read(pstring.PFile(sys.stdin))
| gpl-2.0 | 3,713,613,431,374,230,000 | 28.238754 | 143 | 0.609112 | false | 2.736399 | false | false | false |
burakbayramli/dersblog | tser/tser_070_voltar/util.py | 2 | 14675 | from scipy.optimize import minimize
import pandas as pd, random
import numpy as np, datetime
import scipy.stats
FLAG_BAD_RETURN=-99999.0
CALENDAR_DAYS_IN_YEAR = 365.25
BUSINESS_DAYS_IN_YEAR = 256.0
ROOT_BDAYS_INYEAR = BUSINESS_DAYS_IN_YEAR**.5
WEEKS_IN_YEAR = CALENDAR_DAYS_IN_YEAR / 7.0
ROOT_WEEKS_IN_YEAR = WEEKS_IN_YEAR**.5
MONTHS_IN_YEAR = 12.0
ROOT_MONTHS_IN_YEAR = MONTHS_IN_YEAR**.5
ARBITRARY_START=pd.datetime(1900,1,1)
DEFAULT_CAPITAL = 1.0
DEFAULT_ANN_RISK_TARGET = 0.16
contract_month_codes = ['F', 'G', 'H', 'J', 'K', 'M','N', 'Q', 'U', 'V', 'X', 'Z']
contract_month_dict = dict(zip(contract_month_codes,\
range(1,len(contract_month_codes)+1)))
def shift(lst,empty):
res = lst[:]
temp = res[0]
for index in range(len(lst) - 1): res[index] = res[index + 1]
res[index + 1] = temp
res[-1] = empty
return res
def stitch_prices(dfs, price_col, dates):
res = []
datesr = list(reversed(dates))
dfsr = list(reversed(dfs))
dfsr_pair = shift(dfsr,pd.DataFrame())
for i,v in enumerate(datesr):
tmp1=float(dfsr[i].ix[v,price_col])
tmp2=float(dfsr_pair[i].ix[v,price_col])
dfsr_pair[i].loc[:,price_col] = dfsr_pair[i][price_col] + tmp1-tmp2
dates.insert(0,'1900-01-01')
dates_end = shift(dates,'2200-01-01')
for i,v in enumerate(dates):
tmp = dfs[i][(dfs[i].index > dates[i]) & (dfs[i].index <= dates_end[i])]
res.append(tmp.Settle)
return pd.concat(res)
def which_contract(contract_list, cycle, offset, expday, expmon):
assert len(contract_list) > 0
start_date = contract_list[contract_list.keys()[0]].head(1).index[0] # first dt of first contract
end_date = contract_list[contract_list.keys()[-1]].tail(1).index[0] # last date of last contract
delta = end_date - start_date
dates = []
for i in range(delta.days + 1):
day = start_date + datetime.timedelta(days=i)
if day.weekday() < 5: dates.append(day)
df = pd.DataFrame(index=dates)
def closest_biz(d): # get closest biz day
diffs = np.abs((d - df.index).days)
return df.index[np.argmin(diffs)]
cycle_d = [contract_month_dict[x] for x in cycle]
df['effcont'] = np.nan
for year in np.unique(df.index.year):
for c in cycle_d:
v = "%d%02d" % (year,c)
exp_d = datetime.datetime(year, c, expday)
if expmon=="prev": exp_d = exp_d - datetime.timedelta(days=30)
df.loc[closest_biz(exp_d),'effcont'] = v
df = df.fillna(method='bfill')
df['effcont'] = df.effcont.shift(-int(offset*2/3 + 3))
return df.fillna(method='ffill')
def create_carry(df, offset, contract_list):
df2 = df.copy()
df2['effcont'] = df2.effcont.astype(str)
def offset_contract(con):
s = pd.to_datetime(con + "15", format='%Y%m%d')
ss = s + datetime.timedelta(days=30*offset)
return "%d%02d" % (int(ss.year), int(ss.month))
df2['carrycont'] = df2.effcont.map(offset_contract)
df2['effprice'] = df2.apply(lambda x: contract_list.get(x.effcont).s.get(x.name) if x.effcont in contract_list else np.nan,axis=1)
df2['carryprice'] = df2.apply(lambda x: contract_list.get(x.carrycont).s.get(x.name) if x.carrycont in contract_list else np.nan,axis=1)
return df2
def ccy_returns(price, forecast):
base_capital = DEFAULT_CAPITAL
daily_risk_capital = DEFAULT_CAPITAL * DEFAULT_ANN_RISK_TARGET / ROOT_BDAYS_INYEAR
ts_capital=pd.Series([DEFAULT_CAPITAL]*len(price), index=price.index)
ann_risk = ts_capital * DEFAULT_ANN_RISK_TARGET
daily_returns_volatility = robust_vol_calc(price.diff())
multiplier = daily_risk_capital * 1.0 * 1.0 / 10.0
numerator = forecast * multiplier
positions = numerator.ffill() / daily_returns_volatility.ffill()
cum_trades = positions.shift(1).ffill()
price_returns = price.diff()
instr_ccy_returns = cum_trades.shift(1)*price_returns
instr_ccy_returns=instr_ccy_returns.cumsum().ffill().reindex(price.index).diff()
return instr_ccy_returns
def skew(price, forecast):
base_capital = DEFAULT_CAPITAL
pct = 100.0 * ccy_returns(price, forecast) / base_capital
return scipy.stats.skew(pct[pd.isnull(pct) == False])
def sharpe(price, forecast):
instr_ccy_returns = ccy_returns(price, forecast)
tval,pval = scipy.stats.ttest_1samp(instr_ccy_returns.dropna(), 0)
mean_return = instr_ccy_returns.mean() * BUSINESS_DAYS_IN_YEAR
vol = instr_ccy_returns.std() * ROOT_BDAYS_INYEAR
return mean_return / vol, tval, pval
def ewma(price, slow, fast):
fast_ewma = pd.ewma(price, span=slow)
slow_ewma = pd.ewma(price, span=fast)
raw_ewmac = fast_ewma - slow_ewma
vol = robust_vol_calc(price.diff())
return raw_ewmac / vol
def bollinger(df,col,lev):
signals = pd.DataFrame(index=df.index)
signals['signal'] = np.nan
middle = pd.rolling_mean(df[col], 40, min_periods=1)
std = pd.rolling_std(df[col], 40, min_periods=1)
df['middle'] = middle
df['top'] = middle+2*std
df['bottom'] = middle-2*std
signals['signal'] = np.where(df[col] > middle+2*std, -1, np.nan)
signals['signal'] = np.where(df[col] < middle-2*std, 1, np.nan)
signals['signal'] = signals['signal'].fillna(method='ffill')
df['ret'] = df[col].pct_change() * signals['signal'].shift(1)
ret = df.ret.dropna() * lev
return ret
def crossover(df,col,lev):
signals = pd.DataFrame(index=df.index)
signals['signal'] = 0
short_ma = pd.rolling_mean(df[col], 40, min_periods=1)
long_ma = pd.rolling_mean(df[col], 100, min_periods=1)
signals['signal'] = np.where(short_ma > long_ma, 1, 0)
df['signal'] = signals['signal'].shift(1)
df['ret'] = df[col].pct_change() * df['signal']
ret = df.ret.dropna() * lev
return ret
def carry(daily_ann_roll, vol, diff_in_years, smooth_days=90):
ann_stdev = vol * ROOT_BDAYS_INYEAR
raw_carry = daily_ann_roll / ann_stdev
smooth_carry = pd.ewma(raw_carry, smooth_days) / diff_in_years
return smooth_carry.fillna(method='ffill')
def estimate_forecast_scalar(x, window=250000, min_periods=500):
target_abs_forecast = 10.
x=x.abs().iloc[:,0]
avg_abs_value=x.mean()
return target_abs_forecast/avg_abs_value
def vol_equaliser(mean_list, stdev_list):
if np.all(np.isnan(stdev_list)):
return (([np.nan]*len(mean_list), [np.nan]*len(stdev_list)))
avg_stdev=np.nanmean(stdev_list)
norm_factor=[asset_stdev/avg_stdev for asset_stdev in stdev_list]
norm_means=[mean_list[i]/norm_factor[i] for (i, notUsed) in enumerate(mean_list)]
norm_stdev=[stdev_list[i]/norm_factor[i] for (i, notUsed) in enumerate(stdev_list)]
return (norm_means, norm_stdev)
def apply_with_min_periods(xcol, my_func=np.nanmean, min_periods=0):
not_nan=sum([not np.isnan(xelement) for xelement in xcol])
if not_nan>=min_periods:
return my_func(xcol)
else:
return np.nan
def vol_estimator(x, using_exponent=True, min_periods=20, ew_lookback=250):
vol=x.apply(apply_with_min_periods,axis=0,min_periods=min_periods, my_func=np.nanstd)
stdev_list=list(vol)
return stdev_list
def mean_estimator(x, using_exponent=True, min_periods=20, ew_lookback=500):
means=x.apply(apply_with_min_periods,axis=0,min_periods=min_periods, my_func=np.nanmean)
mean_list=list(means)
return mean_list
def str2Bool(x):
if type(x) is bool:
return x
return x.lower() in ("t", "true")
def correlation_single_period(data_for_estimate,
using_exponent=True, min_periods=20, ew_lookback=250,
floor_at_zero=True):
## These may come from config as str
using_exponent=str2Bool(using_exponent)
if using_exponent:
## If we stack there will be duplicate dates
## So we massage the span so it's correct
## This assumes the index is at least daily and on same timestamp
## This is an artifact of how we prepare the data
dindex=data_for_estimate.index
dlenadj=float(len(dindex))/len(set(list(dindex)))
## Usual use for IDM, FDM calculation when whole data set is used
corrmat=pd.ewmcorr(data_for_estimate, span=int(ew_lookback*dlenadj), min_periods=min_periods)
## only want the final one
corrmat=corrmat.values[-1]
else:
## Use normal correlation
## Usual use for bootstrapping when only have sub sample
corrmat=data_for_estimate.corr(min_periods=min_periods)
corrmat=corrmat.values
if floor_at_zero:
corrmat[corrmat<0]=0.0
return corrmat
def fix_mus(mean_list):
def _fixit(x):
if np.isnan(x):
return FLAG_BAD_RETURN
else:
return x
mean_list=[_fixit(x) for x in mean_list]
return mean_list
def fix_sigma(sigma):
def _fixit(x):
if np.isnan(x):
return 0.0
else:
return x
sigma=[[_fixit(x) for x in sigma_row] for sigma_row in sigma]
sigma=np.array(sigma)
return sigma
def addem(weights):
## Used for constraints
return 1.0 - sum(weights)
def neg_SR(weights, sigma, mus):
## Returns minus the Sharpe Ratio (as we're minimising)
estreturn=(np.matrix(weights)*mus)[0,0]
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def variance(weights, sigma):
## returns the variance (NOT standard deviation) given weights and sigma
return (np.matrix(weights)*sigma*np.matrix(weights).transpose())[0,0]
def un_fix_weights(mean_list, weights):
def _unfixit(xmean, xweight):
if xmean==FLAG_BAD_RETURN:
return np.nan
else:
return xweight
fixed_weights=[_unfixit(xmean, xweight) for (xmean, xweight) in zip(mean_list, weights)]
return fixed_weights
def optimise( sigma, mean_list):
## will replace nans with big negatives
mean_list=fix_mus(mean_list)
## replaces nans with zeros
sigma=fix_sigma(sigma)
mus=np.array(mean_list, ndmin=2).transpose()
number_assets=sigma.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
## anything that had a nan will now have a zero weight
weights=ans['x']
## put back the nans
weights=un_fix_weights(mean_list, weights)
return weights
def sigma_from_corr_and_std(stdev_list, corrmatrix):
stdev=np.array(stdev_list, ndmin=2).transpose()
sigma=stdev*corrmatrix*stdev
return sigma
def markosolver(period_subset_data):
mean_list=mean_estimator(period_subset_data)
corrmatrix=correlation_single_period(period_subset_data)
stdev_list=vol_estimator(period_subset_data)
(mean_list, stdev_list)=vol_equaliser(mean_list, stdev_list)
sigma=sigma_from_corr_and_std(stdev_list, corrmatrix)
unclean_weights=optimise( sigma, mean_list)
weights=unclean_weights
diag=dict(raw=(mean_list, stdev_list), sigma=sigma, mean_list=mean_list,
unclean=unclean_weights, weights=weights)
return (weights, diag)
def bootstrap_portfolio(subset_data, monte_runs=100, bootstrap_length=50):
all_results=[bs_one_time(subset_data, bootstrap_length) for unused_index in range(monte_runs)]
### We can take an average here; only because our weights always add
### up to 1. If that isn't true then you will need to some kind
### of renormalisation
weightlist=np.array([x[0] for x in all_results], ndmin=2)
diaglist=[x[1] for x in all_results]
theweights_mean=list(np.mean(weightlist, axis=0))
diag=dict(bootstraps=diaglist)
return (theweights_mean, diag)
def bs_one_time(subset_data, bootstrap_length):
## choose the data
bs_idx=[int(random.uniform(0,1)*len(subset_data)) for notUsed in range(bootstrap_length)]
returns=subset_data.iloc[bs_idx,:]
(weights, diag)=markosolver(returns)
return (weights, diag)
def robust_vol_calc(x, days=35, min_periods=10, vol_abs_min=0.0000000001, vol_floor=True,
floor_min_quant=0.05, floor_min_periods=100,
floor_days=500):
"""
Robust exponential volatility calculation, assuming daily series of prices
We apply an absolute minimum level of vol (absmin);
and a volfloor based on lowest vol over recent history
:param x: data
:type x: Tx1 pd.Series
:param days: Number of days in lookback (*default* 35)
:type days: int
:param min_periods: The minimum number of observations (*default* 10)
:type min_periods: int
:param vol_abs_min: The size of absolute minimum (*default* =0.0000000001) 0.0= not used
:type absmin: float or None
:param vol_floor Apply a floor to volatility (*default* True)
:type vol_floor: bool
:param floor_min_quant: The quantile to use for volatility floor (eg 0.05 means we use 5% vol) (*default 0.05)
:type floor_min_quant: float
:param floor_days: The lookback for calculating volatility floor, in days (*default* 500)
:type floor_days: int
:param floor_min_periods: Minimum observations for floor - until reached floor is zero (*default* 100)
:type floor_min_periods: int
:returns: pd.DataFrame -- volatility measure
"""
# Standard deviation will be nan for first 10 non nan values
vol = pd.ewmstd(x, span=days, min_periods=min_periods)
vol[vol < vol_abs_min] = vol_abs_min
if vol_floor:
# Find the rolling 5% quantile point to set as a minimum
vol_min = pd.rolling_quantile(
vol, floor_days, floor_min_quant, floor_min_periods)
# set this to zero for the first value then propogate forward, ensures
# we always have a value
vol_min.set_value(vol_min.index[0], 0.0)
vol_min = vol_min.ffill()
# apply the vol floor
vol_with_min = pd.concat([vol, vol_min], axis=1)
vol_floored = vol_with_min.max(axis=1, skipna=False)
else:
vol_floored = vol
return vol_floored
def ewmac(price, Lfast, Lslow):
price=price.resample("1B", how="last")
fast_ewma = pd.ewma(price, span=Lfast)
slow_ewma = pd.ewma(price, span=Lslow)
raw_ewmac = fast_ewma - slow_ewma
return raw_ewmac.PRICE / robust_vol_calc(price.diff()).vol
| gpl-3.0 | -7,466,262,017,966,809,000 | 35.6875 | 140 | 0.643271 | false | 3.088823 | false | false | false |
spasticVerbalizer/tom-bot | tombot/plugins/system_plugin.py | 1 | 3235 | '''
Provides commands to globally modify the bot's behaviour.
'''
import logging
import pydoc
from .users_plugin import isadmin
from tombot.registry import get_easy_logger, Command, Subscribe, BOT_START
from tombot.registry import COMMAND_DICT, COMMAND_CATEGORIES
from tombot.helper_functions import determine_sender, extract_query, reply_directly
LOGGER = get_easy_logger('plugins.system')
HELP_OVERVIEW = ''
@Command('ping', 'system')
def ping_cb(bot=None, message=None, *args, **kwargs):
''' Return 'pong' to indicate non-deadness. '''
return 'Pong'
@Command('forcelog', 'system', hidden=True)
def forcelog_cb(bot, message, *args, **kwargs):
''' Write a message to the root logger. '''
logging.info('Forcelog from %s: %s', message.getFrom(), message.getBody())
return
@Command(['shutdown', 'halt'], 'system')
def shutdown_cb(bot, message, *args, **kwargs):
''' Shut down the bot. '''
LOGGER.info('Stop message received from %s, content "%s"',
message.getFrom(), message.getBody())
if not isadmin(bot, message):
LOGGER.warning('Unauthorized shutdown attempt from %s',
determine_sender(message))
return 'Not authorized.'
bot.stop()
@Command('restart', 'system')
def restart_cb(bot, message, *args, **kwargs):
''' Restart the bot. '''
LOGGER.info('Restart message received from %s, content "%s"',
message.getFrom(), message.getBody())
if not isadmin(bot, message):
LOGGER.warning('Unauthorized shutdown attempt from %s',
determine_sender(message))
return 'Not authorized.'
bot.stop(True)
@Command('logdebug', 'system')
def logdebug_cb(bot, message=None, *args, **kwargs):
''' Temporarily set the loglevel to debug. '''
if message:
if not isadmin(bot, message):
return 'Not authorized.'
logging.getLogger().setLevel(logging.DEBUG)
return 'Ok.'
@Command('loginfo', 'system')
def loginfo_cb(bot, message=None, *args, **kwargs):
''' Temporarily (re)set the loglevel to info. '''
if message:
if not isadmin(bot, message):
return 'Not authorized.'
logging.getLogger().setLevel(logging.INFO)
return 'Ok.'
@Subscribe(BOT_START)
def build_help_cb(bot, *args, **kwargs):
'''
Build the help overview so it can be cached and poked at from shell.
'''
global HELP_OVERVIEW
HELP_OVERVIEW += 'Available commands:\n'
for category in sorted(COMMAND_CATEGORIES):
if category:
HELP_OVERVIEW += '- {}:\n'.format(category)
for command in sorted(COMMAND_CATEGORIES[category]):
HELP_OVERVIEW += '{}: {}\n'.format(
command[0], pydoc.splitdoc(command[2].__doc__)[0])
@Command(['help', '?'], 'system')
@reply_directly
def help_cb(bot, message, *args, **kwargs):
'''
Give moral and spiritual guidance in using this bot.
When you select one command, a longer text will be sent!
'''
cmd = extract_query(message)
if not cmd:
return HELP_OVERVIEW
else:
try:
return pydoc.getdoc(COMMAND_DICT[cmd.upper()])
except KeyError:
return 'Sorry, that command is not known.'
| mit | 1,681,421,250,180,765,700 | 32.697917 | 83 | 0.634003 | false | 3.731257 | false | false | false |
seanballais/botos | core/migrations/0002_auto_20190714_1353.py | 1 | 2465 | # Generated by Django 2.2.3 on 2019-07-14 13:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ElectionSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_created')),
('date_updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date_updated')),
('key', models.CharField(default=None, max_length=30, unique=True, verbose_name='key')),
('value', models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='value')),
],
options={
'verbose_name_plural': 'election settings',
'verbose_name': 'election setting',
'ordering': ['key'],
},
),
migrations.AddField(
model_name='batch',
name='date_created',
field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_created'),
),
migrations.AddField(
model_name='batch',
name='date_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='date_updated'),
),
migrations.AddField(
model_name='section',
name='date_created',
field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_created'),
),
migrations.AddField(
model_name='section',
name='date_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='date_updated'),
),
migrations.AddField(
model_name='user',
name='date_created',
field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_created'),
),
migrations.AddField(
model_name='user',
name='date_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='date_updated'),
),
migrations.AddIndex(
model_name='electionsetting',
index=models.Index(fields=['key'], name='core_electi_key_1a53c9_idx'),
),
]
| gpl-3.0 | 8,521,539,974,967,091,000 | 38.758065 | 119 | 0.565923 | false | 4.199319 | false | false | false |
Carnon/nlp | TextClassify/textclassify/rnnmodel.py | 1 | 2481 | import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell
class RNNModel(object):
def __init__(self,args,text_data):
self.args = args
self.text_data = text_data
self.input_x = None
self.input_y = None
self.dropout = None
self.losses = None
self.train = None
self.prediction = None
self.accuracy = None
self.build_network()
def build_network(self):
embedding_size = self.args.embedding_size
rnn_cell_size = self.args.rnn_cell_size
batch_size = self.args.batch_size
learning_rate = self.args.learning_rate
max_doc_len = self.args.max_doc_len
label_num = self.text_data.label_num
vocab_size = self.text_data.vocab_size
print('vocab_size: {} label_num: {} max_doc_len: {} batch_size: {} embedding_size: {} rnn_cell_size: {}'.format(vocab_size,label_num,max_doc_len,batch_size,embedding_size,rnn_cell_size))
self.input_x = tf.placeholder(tf.int32,[None,max_doc_len],name='input_x')
self.input_y = tf.placeholder(tf.float32,[None,label_num],name='input_y')
self.dropout = tf.placeholder(tf.float32,name='drop_out')
We = tf.Variable(tf.random_uniform([vocab_size,embedding_size],-1.0,1.0))
embedding_char = tf.nn.embedding_lookup(We,self.input_x)
embedding_char_expand = tf.reshape(embedding_char,[-1,embedding_size])
W_in = tf.Variable(tf.random_uniform([embedding_size,rnn_cell_size]))
b_in = tf.Variable(tf.constant(0.1,dtype=tf.float32,shape=[rnn_cell_size,]))
X_in = tf.matmul(embedding_char_expand,W_in)+b_in
Xs = tf.reshape(X_in,[-1,max_doc_len,rnn_cell_size])
cell = BasicLSTMCell(rnn_cell_size)
init_state = cell.zero_state(batch_size,dtype=tf.float32)
outputs,final_state = tf.nn.dynamic_rnn(cell,Xs,initial_state=init_state)
#outputs:(batch,time_step,input)
output = outputs[:,-1,:]
tf.nn.xw_plus_b(output,)
scores = tf.layers.dense(outputs[:,-1,:],label_num)
self.prediction = tf.argmax(scores, 1)
self.losses = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y,logits=scores))
self.train = tf.train.AdamOptimizer(learning_rate).minimize(self.losses)
correct_predictions = tf.equal(self.prediction,tf.argmax(self.input_y,1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,tf.float32))
| apache-2.0 | 4,867,269,093,125,074,000 | 43.303571 | 194 | 0.644901 | false | 3.205426 | false | false | false |
ucoin-io/cutecoin | src/sakia/data/processors/sources.py | 3 | 2400 | import attr
import sqlite3
import logging
from ..entities import Source
from .nodes import NodesProcessor
from ..connectors import BmaConnector
from duniterpy.api import bma, errors
@attr.s
class SourcesProcessor:
"""
:param sakia.data.repositories.SourcesRepo _repo: the repository of the sources
:param sakia.data.connectors.bma.BmaConnector _bma_connector: the bma connector
"""
_repo = attr.ib()
_bma_connector = attr.ib()
_logger = attr.ib(default=attr.Factory(lambda: logging.getLogger('sakia')))
@classmethod
def instanciate(cls, app):
"""
Instanciate a blockchain processor
:param sakia.app.Application app: the app
"""
return cls(app.db.sources_repo,
BmaConnector(NodesProcessor(app.db.nodes_repo), app.parameters))
def commit(self, source):
try:
self._repo.insert(source)
except sqlite3.IntegrityError:
self._logger.debug("Source already known : {0}".format(source.identifier))
def amount(self, currency, pubkey):
"""
Get the amount value of the sources for a given pubkey
:param str currency: the currency of the sources
:param str pubkey: the pubkey owning the sources
:return:
"""
sources = self._repo.get_all(currency=currency, pubkey=pubkey)
return sum([s.amount * (10**s.base) for s in sources])
def available(self, currency, pubkey):
""""
:param str currency: the currency of the sources
:param str pubkey: the owner of the sources
:rtype: list[sakia.data.entities.Source]
"""
return self._repo.get_all(currency=currency, pubkey=pubkey)
def consume(self, sources):
"""
:param currency:
:param sources:
:return:
"""
for s in sources:
self._repo.drop(s)
def insert(self, source):
try:
self._repo.insert(source)
except sqlite3.IntegrityError:
self._logger.debug("Source already exist : {0}".format(source))
def drop(self, source):
try:
self._repo.drop(source)
except sqlite3.IntegrityError:
self._logger.debug("Source already dropped : {0}".format(source))
def drop_all_of(self, currency, pubkey):
self._repo.drop_all(currency=currency, pubkey=pubkey)
| mit | 5,394,411,755,953,615,000 | 30.578947 | 86 | 0.622917 | false | 4.013378 | false | false | false |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/Mezzanine-3.1.10-py2.7.egg/mezzanine/core/admin.py | 10 | 11563 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.forms import ValidationError, ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User as AuthUser
from mezzanine.conf import settings
from mezzanine.core.forms import DynamicInlineAdminForm
from mezzanine.core.models import (Orderable, SitePermission,
CONTENT_STATUS_PUBLISHED)
from mezzanine.utils.urls import admin_url
if settings.USE_MODELTRANSLATION:
from django.utils.datastructures import SortedDict
from django.utils.translation import activate, get_language
from modeltranslation.admin import (TranslationAdmin,
TranslationInlineModelAdmin)
class BaseTranslationModelAdmin(TranslationAdmin):
"""
Mimic modeltranslation's TabbedTranslationAdmin but uses a
custom tabbed_translation_fields.js
"""
class Media:
js = (
"modeltranslation/js/force_jquery.js",
"mezzanine/js/%s" % settings.JQUERY_UI_FILENAME,
"mezzanine/js/admin/tabbed_translation_fields.js",
)
css = {
"all": ("mezzanine/css/admin/tabbed_translation_fields.css",),
}
else:
class BaseTranslationModelAdmin(admin.ModelAdmin):
"""
Abstract class used to handle the switch between translation
and no-translation class logic. We define the basic structure
for the Media class so we can extend it consistently regardless
of whether or not modeltranslation is used.
"""
class Media:
js = ()
css = {"all": ()}
User = get_user_model()
class DisplayableAdminForm(ModelForm):
def clean_content(form):
status = form.cleaned_data.get("status")
content = form.cleaned_data.get("content")
if status == CONTENT_STATUS_PUBLISHED and not content:
raise ValidationError(_("This field is required if status "
"is set to published."))
return content
class DisplayableAdmin(BaseTranslationModelAdmin):
"""
Admin class for subclasses of the abstract ``Displayable`` model.
"""
list_display = ("title", "status", "admin_link")
list_display_links = ("title",)
list_editable = ("status",)
list_filter = ("status", "keywords__keyword")
date_hierarchy = "publish_date"
radio_fields = {"status": admin.HORIZONTAL}
fieldsets = (
(None, {
"fields": ["title", "status", ("publish_date", "expiry_date")],
}),
(_("Meta data"), {
"fields": ["_meta_title", "slug",
("description", "gen_description"),
"keywords", "in_sitemap"],
"classes": ("collapse-closed",)
}),
)
form = DisplayableAdminForm
def __init__(self, *args, **kwargs):
super(DisplayableAdmin, self).__init__(*args, **kwargs)
try:
self.search_fields = list(set(list(self.search_fields) + list(
self.model.objects.get_search_fields().keys())))
except AttributeError:
pass
def save_model(self, request, obj, form, change):
"""
Save model for every language so that field auto-population
is done for every each of it.
"""
super(DisplayableAdmin, self).save_model(request, obj, form, change)
if settings.USE_MODELTRANSLATION:
lang = get_language()
for code in SortedDict(settings.LANGUAGES):
if code != lang: # Already done
try:
activate(code)
except:
pass
else:
obj.save()
activate(lang)
class BaseDynamicInlineAdmin(object):
"""
Admin inline that uses JS to inject an "Add another" link which
when clicked, dynamically reveals another fieldset. Also handles
adding the ``_order`` field and its widget for models that
subclass ``Orderable``.
"""
form = DynamicInlineAdminForm
extra = 20
def get_fields(self, request, obj=None):
fields = super(BaseDynamicInlineAdmin, self).get_fields(request, obj)
if issubclass(self.model, Orderable):
fields = list(fields)
try:
fields.remove("_order")
except ValueError:
pass
fields.append("_order")
return fields
def get_fieldsets(self, request, obj=None):
fieldsets = super(BaseDynamicInlineAdmin, self).get_fieldsets(
request, obj)
if issubclass(self.model, Orderable):
for fieldset in fieldsets:
fields = [f for f in list(fieldset[1]["fields"])
if not hasattr(f, "translated_field")]
try:
fields.remove("_order")
except ValueError:
pass
fieldset[1]["fields"] = fields
fieldsets[-1][1]["fields"].append("_order")
return fieldsets
def get_inline_base_class(cls):
if settings.USE_MODELTRANSLATION:
class InlineBase(TranslationInlineModelAdmin, cls):
"""
Abstract class that mimics django-modeltranslation's
Translation{Tabular,Stacked}Inline. Used as a placeholder
for future improvement.
"""
pass
return InlineBase
return cls
class TabularDynamicInlineAdmin(BaseDynamicInlineAdmin,
get_inline_base_class(admin.TabularInline)):
template = "admin/includes/dynamic_inline_tabular.html"
class StackedDynamicInlineAdmin(BaseDynamicInlineAdmin,
get_inline_base_class(admin.StackedInline)):
template = "admin/includes/dynamic_inline_stacked.html"
def __init__(self, *args, **kwargs):
"""
Stacked dynamic inlines won't work without grappelli
installed, as the JavaScript in dynamic_inline.js isn't
able to target each of the inlines to set the value of
the order field.
"""
grappelli_name = getattr(settings, "PACKAGE_NAME_GRAPPELLI")
if grappelli_name not in settings.INSTALLED_APPS:
error = "StackedDynamicInlineAdmin requires Grappelli installed."
raise Exception(error)
super(StackedDynamicInlineAdmin, self).__init__(*args, **kwargs)
class OwnableAdmin(admin.ModelAdmin):
"""
Admin class for models that subclass the abstract ``Ownable``
model. Handles limiting the change list to objects owned by the
logged in user, as well as setting the owner of newly created
objects to the logged in user.
Remember that this will include the ``user`` field in the required
fields for the admin change form which may not be desirable. The
best approach to solve this is to define a ``fieldsets`` attribute
that excludes the ``user`` field or simple add ``user`` to your
admin excludes: ``exclude = ('user',)``
"""
def save_form(self, request, form, change):
"""
Set the object's owner as the logged in user.
"""
obj = form.save(commit=False)
if obj.user_id is None:
obj.user = request.user
return super(OwnableAdmin, self).save_form(request, form, change)
def get_queryset(self, request):
"""
Filter the change list by currently logged in user if not a
superuser. We also skip filtering if the model for this admin
class has been added to the sequence in the setting
``OWNABLE_MODELS_ALL_EDITABLE``, which contains models in the
format ``app_label.object_name``, and allows models subclassing
``Ownable`` to be excluded from filtering, eg: ownership should
not imply permission to edit.
"""
opts = self.model._meta
model_name = ("%s.%s" % (opts.app_label, opts.object_name)).lower()
models_all_editable = settings.OWNABLE_MODELS_ALL_EDITABLE
models_all_editable = [m.lower() for m in models_all_editable]
qs = super(OwnableAdmin, self).get_queryset(request)
if request.user.is_superuser or model_name in models_all_editable:
return qs
return qs.filter(user__id=request.user.id)
class SingletonAdmin(admin.ModelAdmin):
"""
Admin class for models that should only contain a single instance
in the database. Redirect all views to the change view when the
instance exists, and to the add view when it doesn't.
"""
def handle_save(self, request, response):
"""
Handles redirect back to the dashboard when save is clicked
(eg not save and continue editing), by checking for a redirect
response, which only occurs if the form is valid.
"""
form_valid = isinstance(response, HttpResponseRedirect)
if request.POST.get("_save") and form_valid:
return redirect("admin:index")
return response
def add_view(self, *args, **kwargs):
"""
Redirect to the change view if the singleton instance exists.
"""
try:
singleton = self.model.objects.get()
except (self.model.DoesNotExist, self.model.MultipleObjectsReturned):
kwargs.setdefault("extra_context", {})
kwargs["extra_context"]["singleton"] = True
response = super(SingletonAdmin, self).add_view(*args, **kwargs)
return self.handle_save(args[0], response)
return redirect(admin_url(self.model, "change", singleton.id))
def changelist_view(self, *args, **kwargs):
"""
Redirect to the add view if no records exist or the change
view if the singleton instance exists.
"""
try:
singleton = self.model.objects.get()
except self.model.MultipleObjectsReturned:
return super(SingletonAdmin, self).changelist_view(*args, **kwargs)
except self.model.DoesNotExist:
return redirect(admin_url(self.model, "add"))
return redirect(admin_url(self.model, "change", singleton.id))
def change_view(self, *args, **kwargs):
"""
If only the singleton instance exists, pass ``True`` for
``singleton`` into the template which will use CSS to hide
the "save and add another" button.
"""
kwargs.setdefault("extra_context", {})
kwargs["extra_context"]["singleton"] = self.model.objects.count() == 1
response = super(SingletonAdmin, self).change_view(*args, **kwargs)
return self.handle_save(args[0], response)
###########################################
# Site Permissions Inlines for User Admin #
###########################################
class SitePermissionInline(admin.TabularInline):
model = SitePermission
max_num = 1
can_delete = False
class SitePermissionUserAdmin(UserAdmin):
inlines = [SitePermissionInline]
# only register if User hasn't been overridden
if User == AuthUser:
admin.site.unregister(User)
admin.site.register(User, SitePermissionUserAdmin)
| gpl-2.0 | 4,187,259,666,729,593,000 | 36.664495 | 79 | 0.611779 | false | 4.513271 | false | false | false |
JnyJny/Geometry | Geometry/triangle2.py | 1 | 9993 | ''' a Triangle
'''
import math
import collections
import itertools
from . import Polygon, Point, Segment, Circle
from .constants import Epsilon, Half_Pi, nearly_eq, Sqrt_3
from .exceptions import *
class Triangle(Polygon):
'''a pythonic Triangle
Implements a Triangle object in the XY plane having three
non-coincident vertices and three intersecting edges.
Vertices are labeled; 'A', 'B' and 'C'.
Edges are labeled; 'AB', 'BC' and 'AC'.
The length of edges opposite each vertex are labeled:
'a' for the side opposite vertex A.
'b' for the side opposite vertex B.
'c' for the side opposite vertex C.
Interior angles in radians are labeled:
'alpha' for CAB
'beta' for ABC
'gamma' for BCA
Usage:
>>> a = Triangle()
>>> b = Triangle(A,B,C) # A,B,C are Points or Point equivalents
>>> c = Triangle([p,q,r]) # p,q,r are Points or Point equivalents
>>> d = Triangle([x,y,z],[x,y,z],[x,y,z])
'''
@classmethod
def withAngles(cls, origin=None, base=1, alpha=None,
beta=None, gamma=None, inDegrees=False):
'''
:origin: optional Point
:alpha: optional float describing length of the side opposite A
:beta: optional float describing length of the side opposite B
:gamma: optional float describing length of the side opposite C
:return: Triangle initialized with points comprising the triangle
with the specified angles.
'''
raise NotImplementedError("withAngles")
@classmethod
def withSides(cls, origin=None, a=1, b=1, c=1):
'''
:origin: optional Point
:a: optional float describing length of the side opposite A
:b: optional float describing length of the side opposite B
:c: optional float describing length of the side opposite C
:return: Triangle initialized with points comprising the triangle
with the specified side lengths.
If only 'a' is specified, an equilateral triangle is returned.
'''
raise NotImplementedError("withSides")
@classmethod
def unit(cls,scale=1):
return cls(Point.units(scale))
def __init__(self, *args, **kwds):
'''
:args: iterable of Point or Point equivalents
:kwds: named Points where recognized names are 'A', 'B' and 'C'.
If A is an iterable containing Point or Point equivalent objects
it will be used to initialize up to three points in the triangle.
'''
kwds['defaults'] = Point(),Point(1,0),Point(0,1)
super().__init__(*args,**kwds)
if len(self) != 3:
raise ValueError(len(self))
@property
def AB(self):
return self.pairs('AB')
@AB.setter
def AB(self, iterable):
self.A, self.B = iterable
@property
def BA(self):
return self.pairs('BA')
@BA.setter
def BA(self, iterable):
self.B, self.A = iterable
@property
def BC(self):
return self.pairs('BC')
@BC.setter
def BC(self, iterable):
self.B, self.C = iterable
@property
def CB(self):
return self.pairs('CB')
@CB.setter
def CB(self, iterable):
self.C, self.B = iterable
@property
def AC(self):
return self.pairs('AC')
@AC.setter
def AC(self, iterable):
self.A, self.C = iterable
@property
def CA(self):
return self.pairs('CA')
@CA.setter
def CA(self, iterable):
self.C, self.A = iterable
@property
def ABC(self):
return [self.A, self.B, self.C]
@ABC.setter
def ABC(self, iterable):
self.A, self.B, self.C = iterable
@property
def ccw(self):
'''
Result of A.ccw(B,C), float.
See Point.ccw
'''
return self.A.ccw(self.B, self.C)
@property
def isCCW(self):
'''
True if ABC has a counter-clockwise rotation, boolean.
'''
return self.A.isCCW(self.B,self.C)
@property
def area(self):
'''
Area of the triangle, float.
Performance note: computed via Triangle.ccw (subtractions and
multiplications and a divison).
'''
return abs(self.ccw) / 2
@property
def heronsArea(self):
'''
Heron's forumla for computing the area of a triangle, float.
Performance note: contains a square root.
'''
s = self.semiperimeter
return math.sqrt(s * ((s - self.a) * (s - self.b) * (s - self.c)))
@property
def inradius(self):
'''
The radius of the triangle's incircle, float.
'''
return (self.area * 2) / self.perimeter
@property
def circumcenter(self):
'''
The intersection of the median perpendicular bisectors, Point.
The center of the circumscribed circle, which is the circle that
passes through all vertices of the triangle.
https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2
BUG: only finds the circumcenter in the XY plane
'''
if self.isRight:
return self.hypotenuse.midpoint
if self.A.isOrigin:
t = self
else:
t = Triangle(self.A - self.A, self.B - self.A, self.C - self.A)
if not t.A.isOrigin:
raise ValueError('failed to translate {} to origin'.format(t))
BmulC = t.B * t.C.yx
d = 2 * (BmulC.x - BmulC.y)
bSqSum = sum((t.B ** 2).xy)
cSqSum = sum((t.C ** 2).xy)
x = (((t.C.y * bSqSum) - (t.B.y * cSqSum)) / d) + self.A.x
y = (((t.B.x * cSqSum) - (t.C.x * bSqSum)) / d) + self.A.y
return Point(x, y)
@property
def circumradius(self):
'''
Distance from the circumcenter to all the verticies in
the Triangle, float.
'''
return (self.a * self.b * self.c) / (self.area * 4)
@property
def circumcircle(self):
'''
A circle whose center is equidistant from all the
vertices of the triangle, Circle.
'''
return Circle(self.circumcenter, self.circumradius)
@property
def orthocenter(self):
'''
The intersection of the altitudes of the triangle, Point.
'''
raise NotImplementedError('orthocenter')
@property
def hypotenuse(self):
'''
The longest edge of the triangle, Segment.
'''
return max(self.edges(),key=lambda s:s.length)
@property
def alpha(self):
'''
The angle described by angle CAB in radians, float.
'''
return Segment(self.CA).radiansBetween(Segment(self.BA))
@property
def beta(self):
'''
The angle described by angle ABC in radians, float.
'''
return Segment(self.AB).radiansBetween(Segment(self.CB))
@property
def gamma(self):
'''
The angle described by angle BCA in radians, float.
'''
return Segment(self.BC).radiansBetween(Segment(self.AC))
@property
def angles(self):
'''
A list of the interior angles of the triangle, list of floats.
'''
return [self.alpha, self.beta, self.gamma]
@property
def a(self):
'''
The length of line segment BC, opposite vertex A, float.
'''
return abs(self.B.distance(self.C))
@property
def b(self):
'''
The length of line segment AC, opposite vertex B, float.
'''
return abs(self.A.distance(self.C))
@property
def c(self):
'''
The length of line segment AB, opposite vertex C, float.
'''
return abs(self.A.distance(self.B))
@property
def sides(self):
'''
A list of edge lengths [a, b, c], list of floats.
'''
return [self.a, self.b, self.c]
@property
def altitudes(self):
'''
A list of the altitudes of each vertex [AltA, AltB, AltC], list of
floats.
An altitude is the shortest distance from a vertex to the side
opposite of it.
'''
A = self.area * 2
return [A / self.a, A / self.b, A / self.c]
@property
def isEquilateral(self):
'''
True iff all side lengths are equal, boolean.
'''
return self.a == self.b == self.c
@property
def isIsosceles(self):
'''
True iff two side lengths are equal, boolean.
'''
return (self.a == self.b) or (self.a == self.c) or (self.b == self.c)
@property
def isScalene(self):
'''
True iff all side lengths are unequal, boolean.
'''
return self.a != self.b != self.c
@property
def isRight(self):
'''
True if one angle measures 90 degrees (Pi/2 radians), float.
'''
return any([nearly_eq(v,Half_Pi) for v in self.angles])
@property
def isObtuse(self):
'''
True if one angle measures greater than 90 degrees (Pi/2 radians),
float.
'''
return any([v > Half_Pi for v in self.angles])
@property
def isAcute(self):
'''
True iff all angles measure less than 90 degrees (Pi/2 radians),
float.
'''
return all([v < Half_Pi for v in self.angles])
def congruent(self, other):
'''
A congruent B
True iff all angles of 'A' equal angles in 'B' and
all side lengths of 'A' equal all side lengths of 'B', boolean.
'''
a = set(self.angles)
b = set(other.angles)
if len(a) != len(b) or len(a.difference(b)) != 0:
return False
a = set(self.sides)
b = set(other.sides)
return len(a) == len(b) and len(a.difference(b)) == 0
| mit | 7,130,236,080,199,315,000 | 23.079518 | 82 | 0.559592 | false | 3.803959 | false | false | false |
jirikadlec2/garmin-client | test_selenium.py | 1 | 8613 | # -*- coding: utf-8 -*-
from selenium import webdriver
import time
#auxiliary functions
def read_saved_track_names(track_file):
tracks = set()
with open(track_file) as f:
for line in f:
line2 = line.strip()
tracks.add(line2)
return tracks
def save_garmin_tracks(activity_links, track_file, mode):
with open(track_file, mode) as myfile:
for link in activity_links:
link = link.strip()
myfile.write(link+'\n')
def extract_activity_links(browser, new_links, activity_links):
activities_el = browser.find_element_by_id('gridForm:gridList:tb')
for anchor in activities_el.find_elements_by_tag_name('a'):
activity_link = anchor.get_attribute("href")
if not activity_link is None:
if '/activity/' in activity_link:
activity_links.add(activity_link)
new_links.add(activity_link)
def move_to_next_page(browser):
footer_el = browser.find_element_by_class_name('resultsFooter')
btn_found = False
for btn in footer_el.find_elements_by_class_name('rich-datascr-button'):
if btn.text == '»':
btn_found = True
btn.click()
break
return btn_found
def select_start_date(browser, n_years):
#move one year back..
for i in range(1, n_years):
calendar1 = browser.find_element_by_id('exploreSearchForm:startDateCalendarPopupButton')
calendar1.click()
time.sleep(1)
calendar_button = browser.find_element_by_class_name('rich-calendar-tool-btn')
calendar_button.click()
time.sleep(1)
#choose date..
date_button = browser.find_element_by_id('exploreSearchForm:startDateCalendarDayCell7')
date_button.click()
time.sleep(2)
def zoom_out_map(browser, n_zooms):
for i in range(1, n_zooms):
mapZoomOut = browser.find_element_by_class_name("map-zoom-out")
mapZoomOut.click()
time.sleep(5)
################################################
# saves the GARMIN activity links for selected
# CITY and the number of the past years
################################################
def save_garmin_activity_links(city, n_years, track_file):
activity_links = read_saved_track_names(track_file)
new_links = set()
browser = webdriver.Firefox()
url = "https://sso.garmin.com/sso/login?service=https%3A%2F%2Fconnect.garmin.com%2FminExplore&webhost=olaxpw-connect00&source=https%3A%2F%2Fconnect.garmin.com%2Fen-US%2Fsignin&redirectAfterAccountLoginUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&redirectAfterAccountCreationUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&gauthHost=https%3A%2F%2Fsso.garmin.com%2Fsso&locale=en_US&id=gauth-widget&cssUrl=https%3A%2F%2Fstatic.garmincdn.com%2Fcom.garmin.connect%2Fui%2Fcss%2Fgauth-custom-v1.1-min.css&clientId=GarminConnect&rememberMeShown=true&rememberMeChecked=false&createAccountShown=true&openCreateAccount=false&usernameShown=false&displayNameShown=false&consumeServiceTicket=false&initialFocus=true&embedWidget=false&generateExtraServiceTicket=false"
browser.get(url)
time.sleep(10)
username = browser.find_element_by_id("username")
password = browser.find_element_by_id("password")
username.send_keys("jirikadlec2@gmail.com")
password.send_keys("AnnAgnps(v1)")
login_attempt = browser.find_element_by_xpath("//*[@type='submit']")
login_attempt.submit()
#now show filters..
time.sleep(10)
show_filters = browser.find_element_by_id("showFilters")
show_filters.click()
#select the activity type option
el = browser.find_element_by_id('exploreSearchForm:activityType')
for option in el.find_elements_by_tag_name('option'):
if option.text == 'Cross Country Skiing':
option.click()
break
#select the time period option
time.sleep(2)
time_el = browser.find_element_by_id('exploreSearchForm:timePeriodSelect')
for option in time_el.find_elements_by_tag_name('option'):
if option.text == 'Custom Dates':
option.click()
break
#select the start date (10 years back..)
select_start_date(browser, n_years)
#select the end date (start of current month..)
time.sleep(2)
calendar2 = browser.find_element_by_id('exploreSearchForm:endDateCalendarPopupButton')
calendar2.click()
date_button = browser.find_element_by_id('exploreSearchForm:endDateCalendarDayCell7')
date_button.click()
#now search a new location ..
time.sleep(5)
location = browser.find_element_by_id("exploreSearchForm:location")
location.send_keys(city)
searchButton = browser.find_element_by_id("searchButton")
searchButton.submit()
#find the grid list
next_active = True
while next_active:
time.sleep(10)
len1 = len(new_links)
extract_activity_links(browser, new_links, activity_links)
len2 = len(new_links)
next_active = len2 > len1
time.sleep(2)
move_to_next_page(browser)
save_garmin_tracks(activity_links, track_file, "w")
browser.close()
print(city + ' : ' + str(len(new_links)))
f = "garmin_tracks2.txt"
trk = read_saved_track_names(f)
save_garmin_tracks(trk, f, "w")
trk = []
#save_garmin_activity_links('Brno', 10, f)
#save_garmin_activity_links('Karlovy Vary', 10, f)
#save_garmin_activity_links('Chomutov', 10, f)
#save_garmin_activity_links('Kvilda', 10, f)
#save_garmin_activity_links('Klingenthal', 10, f)
#save_garmin_activity_links('Jablunkov', 10, f)
#save_garmin_activity_links('Svratka', 10, f)
#save_garmin_activity_links('Jilemnice', 10, f)
#save_garmin_activity_links('Trutnov', 10, f)
#save_garmin_activity_links('Mladkov', 10, f)
#save_garmin_activity_links('Mikulovice', 10, f)
#save_garmin_activity_links('Olomouc', 10, f)
#save_garmin_activity_links('Protivanov', 10, f)
#save_garmin_activity_links('Karolinka', 10, f)
#save_garmin_activity_links('Jihlava', 10, f)
#save_garmin_activity_links('Kocelovice', 10, f)
#save_garmin_activity_links('Altenberg', 10, f)
#save_garmin_activity_links('Oberwiesenthal', 10, f)
#save_garmin_activity_links('Zittau', 10, f)
#save_garmin_activity_links('Heroltovice', 10, f)
#save_garmin_activity_links('Rokytno', 10, f)
cities1 = [
'Flossenburg', 'Olbernhau', 'Hora Svateho Sebestiana',
'Kvan', 'Rozmital', 'Ceska Kubice', 'Primda', 'Honezovice',
'Tremosna', 'Cunkov', 'Jistebnice', 'Hartvikov', 'Frymburk',
'Ceske Budejovice', 'Pisek', 'Pribram', 'Havlickuv Brod',
'Hradec Kralove', 'Ceska Trebova', 'Ricany', 'Chotebor',
'Hlinsko', 'Napajedla', 'Zlin', 'Rajnochovice', 'Papajci', 'Orlicke Zahori',
'Zdobnice', 'Sedlonov', 'Krnov', 'Vitkov', 'Mala Moravka', 'Kouty nad Desnou',
'Dolni Morava', 'Kralicky Sneznik', 'Dlouhe Strane', 'Bruntal',
'Moravsky Beroun']
cities2 = ['Sternberk', 'Svaty Kopecek', 'Kralovo Pole',
'Uhersky Brod', 'Uherske Hradiste', 'Hodonin', 'Hartmanice',
'Brcalnik', 'Keply', 'Vimperk', 'Klet', 'Teskov', 'Moravske Budejovice',
'Novy Hojkov', 'Teskov', 'Letohrad','Johanngeorgenstadt','Pernink','Medenec',
'Bublava','Horni Halze', 'Johstadt', 'Vejprty', 'Bolebor']
cities3 = ['Holzhau',
'Moldava', 'Horazdovice','Sedlcany','Neveklov','Rymarov','Hanusovice',
'Sumperk']
cities4 = ['Zelezny Brod', 'Ceska Lipa', 'Novy Bor', 'Varnsdorf',
'Modlibohov','Hodkovice nad Mohelkou', 'Jablonec nad Nisou','Rakovnik']
cities5 = ['Kladno', 'Luhacovice','Vyskov','Vizovice','Roznov pod Radhostem',
'Celadna','Hrcava', 'Rokytnice v Orlickych Horach','Hostinne',
'Vrchlabi','Hejnice']
cities6 = ['Nove Mesto pod Smrkem','Vernerice',
'Zdar nad Sazavou','Nova Bystrice','Kamenice nad Lipou','Telc']
cities7 = ['Bad Brambach','Becov nad Teplou','Rokycany','Stozec','Borova Lada',
'Lam','Zelezna Ruda','Karlstift','Svetla nad Sazavou','Cechtice',
'Policka','Jimramov','Cenkovice','Kraliky','Miedzylesie','Zacler',
'Janske Lazne','Spindleruv Mlyn','Pec pod Snezkou','Horice',
'Dvur Kralove','Strakonice','Kralovice','Strani','Lazy pod Makytou',
'Seiffen','Znojmo','Drahany','Kurim','Decinsky Sneznik','Capartice',
'Rusava','Javornik','Vapenna','Lipova Lazne','Usti nad Orlici',
'Hronov','Police nad Metuji','Mezimesti','Jetrichovice','Dobris',
'Pelhrimov','Sec','Kyjov','Kaplice','Volary','Bayerisch Eisenstein',
'Grosser Arber','Aigen im Muhlkreis','Litschau','Waldmunchen',
'Selb','Auersberg','Sindelova','Nejdek','Marianska','Abertamy']
for city in cities7:
save_garmin_activity_links(city, 10, f)
| mit | -1,088,594,471,736,206,600 | 40.805825 | 778 | 0.672318 | false | 2.80065 | false | false | false |
nicopresto/webSkapes | modules/savage/graphics/color.py | 2 | 2555 | class ColorMap:
def __init__ (self, base, bound, num):
self.base = base
self.bound = bound
self.num = float (num)
def index (self, val):
val = float (val)
return self.base.interpolate (self.bound, val / (self.num - 1))
def __iter__ (self):
return ColorMapIter (self)
class ColorMapIter:
def __init__ (self, cm):
self.current = 0
self.cm = cm
def __iter__ (self):
return self
def next (self):
if self.current == self.cm.num:
raise StopIteration ()
r_val = self.cm.index (self.current)
self.current += 1
return r_val
def color_to_css (c):
r = hex (int (c.red * 255)) + '0'
g = hex (int (c.green * 255)) + '0'
b = hex (int (c.blue * 255)) + '0'
return '#' + r[2:4] + g[2:4] + b[2:4]
def hex_to_color (hex):
hex = str (hex)
if hex.startswith ('0x'):
hex = hex[2:]
if len (hex) != 6:
raise RuntimeError (hex + ' is not a hex color')
red = int ('0x' + hex[0:2], 0)
green = int ('0x' + hex[2:4], 0)
blue = int ('0x' + hex[4:6], 0)
return Color (*map (clampInt, [red, green, blue]))
def clampInt (value):
value = int (value)
if value > 255:
return 255
elif value < 0:
return 0
else:
return value
def clampFloat (value):
value = float (value)
if value > 1.0:
return 1.0
elif value < 0.0:
return 0.0
else:
return value
class Color:
def __init__ (self, red, green, blue):
self.red = float (red)
self.green = float (green)
self.blue = float (blue)
def interpolate (self, c, percent):
percent = float (percent)
if percent > 1.0 or percent < 0.0:
raise RuntimeError ('Cannot interpolate color: perecent out of range')
return ((c * percent) + (self * (1.0 - percent)))
def __add__ (self, c):
r = self.red + c.red
g = self.green + c.green
b = self.blue + c.blue
return Color (r, g, b)
def __mul__ (self, scalar):
r = self.red * scalar
g = self.green * scalar
b = self.blue * scalar
return Color (r, g, b)
def __str__ (self):
rgb = 'rgb('
rgb += str(int(self.red))+ ','
rgb += str(int(self.green))+ ','
rgb += str(int(self.blue))+ ')'
return rgb
red = Color (255, 0, 0)
green = Color (0, 255, 0)
blue = Color (0, 0, 255)
black = Color (0, 0, 0)
white = Color (255, 255, 255)
| mit | -7,536,077,556,048,083,000 | 24.04902 | 82 | 0.504892 | false | 3.221942 | false | false | false |
tebriel/dd-agent | emitter.py | 5 | 2388 | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from hashlib import md5
import logging
import re
import zlib
# 3p
import requests
import simplejson as json
# project
from config import get_version
from utils.proxy import set_no_proxy_settings
set_no_proxy_settings()
# urllib3 logs a bunch of stuff at the info level
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.WARN)
requests_log.propagate = True
# From http://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
control_chars = ''.join(map(unichr, range(0, 32) + range(127, 160)))
control_char_re = re.compile('[%s]' % re.escape(control_chars))
def remove_control_chars(s):
return control_char_re.sub('', s)
def http_emitter(message, log, agentConfig, endpoint):
"Send payload"
url = agentConfig['dd_url']
log.debug('http_emitter: attempting postback to ' + url)
# Post back the data
try:
payload = json.dumps(message)
except UnicodeDecodeError:
message = remove_control_chars(message)
payload = json.dumps(message)
zipped = zlib.compress(payload)
log.debug("payload_size=%d, compressed_size=%d, compression_ratio=%.3f"
% (len(payload), len(zipped), float(len(payload))/float(len(zipped))))
apiKey = message.get('apiKey', None)
if not apiKey:
raise Exception("The http emitter requires an api key")
url = "{0}/intake/{1}?api_key={2}".format(url, endpoint, apiKey)
try:
headers = post_headers(agentConfig, zipped)
r = requests.post(url, data=zipped, timeout=5, headers=headers)
r.raise_for_status()
if r.status_code >= 200 and r.status_code < 205:
log.debug("Payload accepted")
except Exception:
log.exception("Unable to post payload.")
try:
log.error("Received status code: {0}".format(r.status_code))
except Exception:
pass
def post_headers(agentConfig, payload):
return {
'User-Agent': 'Datadog Agent/%s' % agentConfig['version'],
'Content-Type': 'application/json',
'Content-Encoding': 'deflate',
'Accept': 'text/html, */*',
'Content-MD5': md5(payload).hexdigest(),
'DD-Collector-Version': get_version()
}
| bsd-3-clause | -7,868,835,654,060,473,000 | 27.428571 | 106 | 0.659129 | false | 3.553571 | true | false | false |
Teagan42/home-assistant | tests/components/mqtt/test_switch.py | 2 | 18781 | """The tests for the MQTT switch platform."""
import json
from unittest.mock import ANY
from asynctest import patch
import pytest
from homeassistant.components import mqtt, switch
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
ATTR_ASSUMED_STATE,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_mock_mqtt_component,
mock_coro,
mock_registry,
)
from tests.components.switch import common
@pytest.fixture
def mock_publish(hass):
"""Initialize components."""
yield hass.loop.run_until_complete(async_mock_mqtt_component(hass))
async def test_controlling_state_via_topic(hass, mock_publish):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": 1,
"payload_off": 0,
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "1")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", "0")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_sending_mqtt_commands_and_optimistic(hass, mock_publish):
"""Test the sending MQTT commands in optimistic mode."""
fake_state = ha.State("switch.test", "on")
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=mock_coro(fake_state),
):
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"payload_on": "beer on",
"payload_off": "beer off",
"qos": "2",
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "switch.test")
mock_publish.async_publish.assert_called_once_with(
"command-topic", "beer on", 2, False
)
mock_publish.async_publish.reset_mock()
state = hass.states.get("switch.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "switch.test")
mock_publish.async_publish.assert_called_once_with(
"command-topic", "beer off", 2, False
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_controlling_state_via_topic_and_json_message(hass, mock_publish):
"""Test the controlling state via topic and JSON message."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": "beer on",
"payload_off": "beer off",
"value_template": "{{ value_json.val }}",
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "state-topic", '{"val":"beer on"}')
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", '{"val":"beer off"}')
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_default_availability_payload(hass, mock_publish):
"""Test the availability payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"availability_topic": "availability_topic",
"payload_on": 1,
"payload_off": 0,
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability_topic", "online")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability_topic", "offline")
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "state-topic", "1")
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability_topic", "online")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async def test_custom_availability_payload(hass, mock_publish):
"""Test the availability payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"availability_topic": "availability_topic",
"payload_on": 1,
"payload_off": 0,
"payload_available": "good",
"payload_not_available": "nogood",
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability_topic", "good")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability_topic", "nogood")
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "state-topic", "1")
state = hass.states.get("switch.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability_topic", "good")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async def test_custom_state_payload(hass, mock_publish):
"""Test the state payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": 1,
"payload_off": 0,
"state_on": "HIGH",
"state_off": "LOW",
}
},
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "HIGH")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", "LOW")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
state = hass.states.get("switch.test")
assert state.attributes.get("val") == "100"
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
state = hass.states.get("switch.test")
assert state.attributes.get("val") is None
assert "JSON result was not a dictionary" in caplog.text
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
state = hass.states.get("switch.test")
assert state.attributes.get("val") is None
assert "Erroneous JSON: This is not JSON" in caplog.text
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic1" }'
)
data2 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic2" }'
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
state = hass.states.get("switch.beer")
assert state.attributes.get("val") == "100"
# Change json_attributes_topic
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
state = hass.states.get("switch.beer")
assert state.attributes.get("val") == "100"
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
state = hass.states.get("switch.beer")
assert state.attributes.get("val") == "75"
async def test_unique_id(hass):
"""Test unique id option only creates one switch per unique_id."""
await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
},
)
async_fire_mqtt_message(hass, "test-topic", "payload")
assert len(hass.states.async_entity_ids()) == 1
async def test_discovery_removal_switch(hass, mqtt_mock, caplog):
"""Test removal of discovered switch."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is None
async def test_discovery_update_switch(hass, mqtt_mock, caplog):
"""Test update of discovered switch."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("switch.milk")
assert state is None
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is None
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("switch.milk")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("switch.beer")
assert state is None
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT switch device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test-command-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
config = {
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test-command-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Milk"
async def test_entity_id_update(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
registry = mock_registry(hass, {})
mock_mqtt = await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: [
{
"platform": "mqtt",
"name": "beer",
"state_topic": "test-topic",
"command_topic": "command-topic",
"availability_topic": "avty-topic",
"unique_id": "TOTALLY_UNIQUE",
}
]
},
)
state = hass.states.get("switch.beer")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity("switch.beer", new_entity_id="switch.milk")
await hass.async_block_till_done()
state = hass.states.get("switch.beer")
assert state is None
state = hass.states.get("switch.milk")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
| apache-2.0 | 8,798,958,616,628,749,000 | 30.353923 | 81 | 0.578776 | false | 3.724172 | true | false | false |
uudiin/bleachbit | tests/TestUpdate.py | 3 | 4480 | # vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2015 Andrew Ziem
# http://bleachbit.sourceforge.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Update
"""
import os
import os.path
import socket
import sys
import types
import unittest
import urllib2
sys.path.append('.')
from bleachbit import Common
from bleachbit.Update import check_updates, update_winapp2, user_agent
class UpdateTestCase(unittest.TestCase):
"""Test case for module Update"""
def test_UpdateCheck(self):
"""Unit tests for class UpdateCheck"""
update_tests = []
wa = '<winapp2 url="http://katana.oooninja.com/bleachbit/winapp2.ini" sha512="ce9e18252f608c8aff28811e372124d29a86404f328d3cd51f1f220578744bb8b15f55549eabfe8f1a80657fc940f6d6deece28e0532b3b0901a4c74110f7ba7"/>'
update_tests.append(
('<updates><stable ver="0.8.4">http://084</stable><beta ver="0.8.5beta">http://085beta</beta>%s</updates>' % wa,
((u'0.8.4', u'http://084'), (u'0.8.5beta', u'http://085beta'))))
update_tests.append(
('<updates><stable ver="0.8.4">http://084</stable>%s</updates>' % wa,
((u'0.8.4', u'http://084'), )))
update_tests.append(
('<updates><beta ver="0.8.5beta">http://085beta</beta>%s</updates>' % wa,
((u'0.8.5beta', u'http://085beta'), )))
update_tests.append(('<updates></updates>', ()))
# fake network
original_open = urllib2.build_opener
xml = ""
class fake_opener:
def add_headers(self):
pass
def read(self):
return xml
def open(self, url):
return self
urllib2.build_opener = fake_opener
for update_test in update_tests:
xml = update_test[0]
updates = check_updates(True, False, None, None)
self.assertEqual(updates, update_test[1])
urllib2.build_opener = original_open
# real network
for update in check_updates(True, False, None, None):
if not update:
continue
ver = update[0]
url = update[1]
self.assert_(isinstance(ver, (type(None), unicode)))
self.assert_(isinstance(url, (type(None), unicode)))
# test failure
Common.update_check_url = "http://localhost/doesnotexist"
self.assertRaises(
urllib2.URLError, check_updates, True, False, None, None)
def test_update_winapp2(self):
from bleachbit.Common import personal_cleaners_dir
fn = os.path.join(personal_cleaners_dir, 'winapp2.ini')
if os.path.exists(fn):
print 'note: deleting %s' % fn
os.unlink(fn)
url = 'http://www.winapp2.com/Winapp2.ini'
def append_text(s):
print s
succeeded = {'r': False} # scope
def on_success():
succeeded['r'] = True
# bad hash
self.assertRaises(RuntimeError, update_winapp2, url, "notahash",
append_text, on_success)
self.assert_(not succeeded['r'])
# blank hash, download file
update_winapp2(url, None, append_text, on_success)
self.assert_(succeeded['r'])
# blank hash, do not download again
update_winapp2(url, None, append_text, on_success)
succeeded['r'] = False
update_winapp2(url, None, append_text, on_success)
self.assert_(not succeeded['r'])
def test_user_agent(self):
"""Unit test for method user_agent()"""
agent = user_agent()
print "debug: user agent = '%s'" % (agent, )
self.assert_(isinstance(agent, str))
def suite():
return unittest.makeSuite(UpdateTestCase)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -910,371,654,899,689,500 | 31.230216 | 218 | 0.603795 | false | 3.666121 | true | false | false |
hippyk/pix2code | model/classes/model/pix2code.py | 2 | 3237 | from __future__ import absolute_import
__author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
from keras.layers import Input, Dense, Dropout, \
RepeatVector, LSTM, concatenate, \
Conv2D, MaxPooling2D, Flatten
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from keras import *
from .Config import *
from .AModel import *
class pix2code(AModel):
def __init__(self, input_shape, output_size, output_path):
AModel.__init__(self, input_shape, output_size, output_path)
self.name = "pix2code"
image_model = Sequential()
image_model.add(Conv2D(32, (3, 3), padding='valid', activation='relu', input_shape=input_shape))
image_model.add(Conv2D(32, (3, 3), padding='valid', activation='relu'))
image_model.add(MaxPooling2D(pool_size=(2, 2)))
image_model.add(Dropout(0.25))
image_model.add(Conv2D(64, (3, 3), padding='valid', activation='relu'))
image_model.add(Conv2D(64, (3, 3), padding='valid', activation='relu'))
image_model.add(MaxPooling2D(pool_size=(2, 2)))
image_model.add(Dropout(0.25))
image_model.add(Conv2D(128, (3, 3), padding='valid', activation='relu'))
image_model.add(Conv2D(128, (3, 3), padding='valid', activation='relu'))
image_model.add(MaxPooling2D(pool_size=(2, 2)))
image_model.add(Dropout(0.25))
image_model.add(Flatten())
image_model.add(Dense(1024, activation='relu'))
image_model.add(Dropout(0.3))
image_model.add(Dense(1024, activation='relu'))
image_model.add(Dropout(0.3))
image_model.add(RepeatVector(CONTEXT_LENGTH))
visual_input = Input(shape=input_shape)
encoded_image = image_model(visual_input)
language_model = Sequential()
language_model.add(LSTM(128, return_sequences=True, input_shape=(CONTEXT_LENGTH, output_size)))
language_model.add(LSTM(128, return_sequences=True))
textual_input = Input(shape=(CONTEXT_LENGTH, output_size))
encoded_text = language_model(textual_input)
decoder = concatenate([encoded_image, encoded_text])
decoder = LSTM(512, return_sequences=True)(decoder)
decoder = LSTM(512, return_sequences=False)(decoder)
decoder = Dense(output_size, activation='softmax')(decoder)
self.model = Model(inputs=[visual_input, textual_input], outputs=decoder)
optimizer = RMSprop(lr=0.0001, clipvalue=1.0)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def fit(self, images, partial_captions, next_words):
self.model.fit([images, partial_captions], next_words, shuffle=False, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1)
self.save()
def fit_generator(self, generator, steps_per_epoch):
self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1)
self.save()
def predict(self, image, partial_caption):
return self.model.predict([image, partial_caption], verbose=0)[0]
def predict_batch(self, images, partial_captions):
return self.model.predict([images, partial_captions], verbose=1)
| apache-2.0 | 9,004,505,941,516,108,000 | 41.592105 | 126 | 0.656472 | false | 3.458333 | false | false | false |
damoxc/vsmtpd | plugins/queue/smtp_forward.py | 1 | 2180 | #
# queue/smtp_foward.py
#
# Copyright (C) 2011 Damien Churchill <damoxc@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
#
import smtplib
import logging
from vsmtpd.error import DenyError
from vsmtpd.hooks import hook
from vsmtpd.plugins.plugin import PluginBase
log = logging.getLogger(__name__)
class Plugin(PluginBase):
def __init__(self, config):
self.smtp_server = config.get('smtp_server')
self.smtp_port = config.getint('smtp_port') or 25
@hook
def queue(self, transaction):
log.info('forwarding to %s:%d', self.smtp_server, self.smtp_port)
smtp = smtplib.SMTP(self.smtp_server, self.smtp_port)
code, msg = smtp.mail(str(transaction.sender or ''))
if code != 250:
raise DenyError(msg)
for rcpt in transaction.recipients:
code, msg = smtp.rcpt(str(rcpt))
if code != 250:
raise DenyError(msg)
code, msg = smtp.docmd('data')
if code != 354:
raise smtplib.SMTPDataError(code, msg)
msg = transaction.body
header = smtplib.quotedata(msg.headers.as_string())
smtp.send(header)
msg.seek(msg.body_start)
for line in msg:
smtp.send(smtplib.quotedata(line))
smtp.send(smtplib.CRLF + '.' + smtplib.CRLF)
code, msg = smtp.getreply()
if code != 250:
raise DenyError(msg)
code, msg = smtp.quit()
log.info('finished queueing')
return True
| gpl-3.0 | -5,603,379,824,121,702,000 | 28.459459 | 73 | 0.647706 | false | 3.720137 | false | false | false |
iotile/coretools | transport_plugins/jlink/setup.py | 1 | 1591 | """Setup file for iotile-transport-jlink package."""
from setuptools import setup, find_packages
import version
setup(
name="iotile-transport-jlink",
packages=find_packages(exclude=("test",)),
version=version.version,
license="LGPLv3",
install_requires=[
"iotile-core>=5.2",
"pylink-square>=0.10",
"pylibftdi>=0.19"
],
python_requires=">=3.7,<4",
include_package_data=True,
entry_points={'iotile.device_adapter': ['jlink = iotile_transport_jlink.jlink:JLinkAdapter']},
description="IOTile JLINK Transport Plugin",
author="Arch",
author_email="info@arch-iot.com",
url="http://github.com/iotile/coretools",
keywords=["iotile", "arch", "embedded", "hardware", "firmware"],
classifiers=[
"Programming Language :: Python",
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules"
],
long_description="""\
IOTile JLink Transport Plugin
-------------------------------
A python plugin into IOTile Coretools that allows for using a JLink adapter to
send RPCs over an IOTile module's SWD interface. The IOTile device needs to be
compiled with support for the SWD RPC interface for this to work.
"""
)
| gpl-3.0 | 6,060,860,339,663,529,000 | 36 | 98 | 0.639849 | false | 3.918719 | false | false | false |
djohsson/Cryptchat | cryptchat/test/test_aes.py | 1 | 1991 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Run from Cryptchat
# python3 -m unittest discover
import unittest
from ..crypto.aes import AESCipher
class testAESCipher(unittest.TestCase):
def test_encrypt_decrypt(self):
key = "TTTcPolAhIqZZJY0IOH7Orecb/EEaUx8/u/pQlCgma8="
cipher = AESCipher(key)
m = "[TOP SECRET] I like k-pop"
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def test_encrypt_decrypt_long(self):
key = "TTTcPolAhIqZZJY0IOH7Orecb/EEaUx8/u/pQlCgma8="
cipher = AESCipher(key)
m = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def test_encrypt_decrypt_unicode(self):
key = "TTTcPolAhIqZZJY0IOH7Orecb/EEaUx8/u/pQlCgma8="
cipher = AESCipher(key)
m = "『秘密』K-popは好きです"
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def test_encrypt_decrypt_128(self):
key = "Ya/C/EvmwW1xWhjM1BgZ/g=="
cipher = AESCipher(key)
m = "Private stuff"
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def test_encrypt_decrypt_unicode_128(self):
key = "Ya/C/EvmwW1xWhjM1BgZ/g=="
cipher = AESCipher(key)
m = "『秘密』K-popは好きです"
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
self.assertEqual(m, m2)
def main():
unittest.main()
if __name__ == "__main__":
main()
| mit | -3,590,336,263,947,979,000 | 33.910714 | 459 | 0.643478 | false | 2.734266 | true | false | false |
nop33/indico | indico/modules/events/contributions/models/fields.py | 2 | 5336 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii, text_to_repr
def _get_next_position(context):
"""Get the next contribution field position for the event."""
event_id = context.current_parameters['event_id']
res = db.session.query(db.func.max(ContributionField.position)).filter_by(event_id=event_id).one()
return (res[0] or 0) + 1
class ContributionField(db.Model):
__tablename__ = 'contribution_fields'
__table_args__ = (db.UniqueConstraint('event_id', 'legacy_id'),
{'schema': 'events'})
id = db.Column(
db.Integer,
primary_key=True
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
legacy_id = db.Column(
db.String,
nullable=True
)
position = db.Column(
db.Integer,
nullable=False,
default=_get_next_position
)
title = db.Column(
db.String,
nullable=False
)
description = db.Column(
db.Text,
nullable=False,
default=''
)
is_required = db.Column(
db.Boolean,
nullable=False,
default=False
)
is_active = db.Column(
db.Boolean,
nullable=False,
default=True
)
field_type = db.Column(
db.String,
nullable=True
)
field_data = db.Column(
JSON,
nullable=False,
default={}
)
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'contribution_fields',
order_by=position,
cascade='all, delete-orphan',
lazy='dynamic'
)
)
# relationship backrefs:
# - abstract_values (AbstractFieldValue.contribution_field)
# - contribution_values (ContributionFieldValue.contribution_field)
def _get_field(self, management=False):
from indico.modules.events.contributions import get_contrib_field_types
try:
impl = get_contrib_field_types()[self.field_type]
except KeyError:
return None
return impl(self, management=management)
@property
def field(self):
return self._get_field()
@property
def mgmt_field(self):
return self._get_field(management=True)
@property
def filter_choices(self):
return {x['id']: x['option'] for x in self.field_data.get('options', {})}
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'field_type', is_required=False, is_active=True, _text=self.title)
@locator_property
def locator(self):
return dict(self.event.locator, contrib_field_id=self.id)
class ContributionFieldValueBase(db.Model):
__abstract__ = True
#: The name of the backref on the `ContributionField`
contribution_field_backref_name = None
data = db.Column(
JSON,
nullable=False
)
@declared_attr
def contribution_field_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('events.contribution_fields.id', name='fk_{}_contribution_field'.format(cls.__tablename__)),
primary_key=True,
index=True
)
@declared_attr
def contribution_field(cls):
return db.relationship(
'ContributionField',
lazy=False,
backref=db.backref(
cls.contribution_field_backref_name,
cascade='all, delete-orphan',
lazy=True
)
)
@property
def friendly_data(self):
return self.contribution_field.field.get_friendly_value(self.data)
class ContributionFieldValue(ContributionFieldValueBase):
__tablename__ = 'contribution_field_values'
__table_args__ = {'schema': 'events'}
contribution_field_backref_name = 'contribution_values'
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
nullable=False,
primary_key=True
)
# relationship backrefs:
# - contribution (Contribution.field_values)
@return_ascii
def __repr__(self):
text = text_to_repr(self.data) if isinstance(self.data, unicode) else self.data
return format_repr(self, 'contribution_id', 'contribution_field_id', _text=text)
| gpl-3.0 | -5,259,591,984,329,933,000 | 27.688172 | 118 | 0.625 | false | 3.889213 | false | false | false |
HIPS/optofit | optofit/test/geweke_test.py | 1 | 6320 | __author__ = 'scott'
import numpy as np
# Set the random seed for reproducibility
seed = np.random.randint(2**16)
print "Seed: ", seed
np.random.seed(seed)
import matplotlib.pyplot as plt
from optofit.models.model import Model
from optofit.population.population import Population
from optofit.neuron.neuron import Neuron
from optofit.neuron.compartment import Compartment, CalciumCompartment
from optofit.neuron.channels import LeakChannel, NaChannel, KdrChannel, Ca3KdrChannel, Ca3KaChannel, Ca3NaChannel, Ca3CaChannel, Ca3KahpChannel, Ca3KcChannel
from optofit.simulation.stimulus import PeriodicStepStimulusPattern, DirectCompartmentCurrentInjection
from optofit.simulation.simulate import simulate
from optofit.observation.observable import NewDirectCompartmentVoltage, IndependentObservations, LinearFluorescence
from optofit.plotting.plotting import plot_latent_compartment_state, plot_latent_compartment_V_and_I
from optofit.inference.fitting import fit_mcmc
from optofit.models.hyperparameters import hypers
def make_model():
"""Make a model of a single compartment neuron with a handful of channels and a directly
observable voltage
"""
model = Model()
# The population object doesn't do anything yet, but eventually it could support
# synapses between neurons
population = Population('population', model)
# Explicitly build the neuron
neuron = Neuron('neuron', population)
# The single compartment corresponds to the cell body
body = Compartment('body', neuron)
# body = CalciumCompartment('body', neuron)
# Add a few channels
# body.add_channel(LeakChannel('leak', body))
# body.add_channel(NaChannel('na', body))
body.add_channel(KdrChannel('kdr', body))
# ca3kdr = Ca3KdrChannel('ca3kdr', body)
# ca3ka = Ca3KaChannel('ca3ka', body)
# ca3na = Ca3NaChannel('ca3na', body)
# ca3ca = Ca3CaChannel('ca3ca', body)
# ca3kahp = Ca3KahpChannel('ca3kahp', body)
# ca3kc = Ca3KcChannel('ca3kc', body)
#
#body.add_channel(ca3kdr)
#body.add_channel(ca3ka)
#body.add_channel(ca3na)
#body.add_channel(ca3ca)
#body.add_channel(ca3kahp)
#body.add_channel(ca3kc)
# Now connect all the pieces of the neuron together
neuron.add_compartment(body, None)
population.add_neuron(neuron)
model.add_population(population)
# Create the observation model
observation = IndependentObservations('observations', model)
body_voltage = NewDirectCompartmentVoltage('body voltage', model, body)
# body_fluorescence = LinearFluorescence('body fluorescence' , model, body)
# observation.add_observation(body_fluorescence)
observation.add_observation(body_voltage)
model.add_observation(observation)
return model
# Instantiate the true model
true_model = make_model()
# Create a stimulus for the neuron
# Stimulate the neuron by injecting a current pattern
stim_on = 2.0
stim_off = 50.0
stim_on_dur = .5
stim_off_dur = 1.5
stim_I = 500.0
stim_pattern = PeriodicStepStimulusPattern(stim_on, stim_off, stim_on_dur, stim_off_dur, stim_I)
stim = DirectCompartmentCurrentInjection(true_model.population.neurons[0].compartments[0], stim_pattern)
# Set the recording duration
t_start = 0
t_stop = 0.2
dt = 0.1
t = np.arange(t_start, t_stop, dt)
# Simulate the model to create synthetic data
data_sequence = simulate(true_model, t, stim)
true_model.add_data_sequence(data_sequence)
# Plot the true and observed voltage
plt.ion()
fig = plt.figure(figsize=(8,6))
# axs = plot_latent_compartment_state(t, z, true_model.population.neurons[0].compartments[0])
axs = plot_latent_compartment_V_and_I(t, data_sequence,
true_model.population.neurons[0].compartments[0],
true_model.observation.observations[0],)
i = {'i' : 0}
# Add a callback to update the plots
def plot_sample(m):
plt.gcf().clf()
# latent = m.data_sequences[0].latent
# plot_latent_compartment_state(t, m.data_sequences[0].latent,
# m.data_sequences[0].states,
# m.population.neurons[0].compartments[0])
axs = plot_latent_compartment_V_and_I(t, m.data_sequences[0],
m.population.neurons[0].compartments[0],
m.observation.observations[0])
print '%d: g_leak: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[0].g.value)
print '%d: g_na: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[1].g.value)
print '%d: g_kdr: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[2].g.value)
fig.suptitle('Iteration: %d' % i['i'])
i['i'] += 1
plt.pause(0.001)
def print_g_leak(m):
if np.mod(i['i'], 1) == 0:
# print '%d: g_leak: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[0].g.value)
# print '%d: g_na: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[1].g.value)
print '%d: g_kdr: %f' % (i['i'], m.population.neurons[0].compartments[0].channels[0].g.value)
i['i'] += 1
# Generic fitting code will enumerate the components of the model and determine
# which MCMC updates to use.
raw_input("Press enter to begin MCMC")
print "Running particle MCMC"
# samples = fit_mcmc(true_model, N_samples=1000, callback=plot_sample, geweke=True)
samples = fit_mcmc(true_model, N_samples=10000, callback=print_g_leak, print_interval=10, geweke=True)
# Plot the results
import scipy.stats
def plot_channel(samples, index, name, a, b, xlim=None):
gs = np.array([m.population.neurons[0].compartments[0].channels[index].g.value for m in samples])
plt.figure()
_,bins,_ = plt.hist(gs, 50, normed=True, alpha=0.5)
if xlim is None:
plt.plot(bins, scipy.stats.gamma.pdf(bins, a, scale=b))
else:
xx = np.linspace(xlim[0], xlim[1])
plt.plot(xx, scipy.stats.gamma.pdf(xx, a, scale=1.0/b))
plt.title('$g_{%s}' % name)
# plot_channel(samples, 0, 'leak', hypers['a_g_leak'].value, hypers['b_g_leak'].value, (1e-4,3))
# plot_channel(samples, 1, 'na', hypers['a_g_na'].value, hypers['b_g_na'].value, (1,30))
plot_channel(samples, 0, 'kdr', hypers['a_g_kdr'].value, hypers['b_g_kdr'].value, (1,14))
plt.ioff()
plt.show()
| gpl-2.0 | -4,441,891,441,626,008,600 | 38.254658 | 157 | 0.68038 | false | 3.013829 | false | false | false |
nicolashainaux/mathmaker | mathmaker/lib/old_style_sheet/exercise/question/Q_RightTriangle.py | 1 | 23978 | # -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import random
from decimal import Decimal
from string import ascii_uppercase as alphabet
from mathmakerlib.calculus.unit import LENGTH_UNITS
from mathmaker.lib import shared
from mathmaker.lib.constants import pythagorean
from mathmaker.lib.constants.numeration import (PRECISION, HUNDREDTH, TENTH,
UNIT, THOUSANDTH,
TEN_THOUSANDTH)
from .Q_Structure import Q_Structure
from mathmaker.lib.core.base_calculus import Item, Sum
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.calculus import Equation, Equality
from mathmaker.lib.core.geometry import RightTriangle
AVAILABLE_Q_KIND_VALUES = {'pythagorean_theorem': ['calculate_hypotenuse',
'calculate_one_leg'],
'converse_of_pythagorean_theorem': ['default'],
'contrapositive_of_pythagorean_theorem':
['default'],
'cosinus': ['calculate_hypotenuse',
'calculate_one_leg',
'calculate_angle'],
'sinus': ['calculate_hypotenuse',
'calculate_one_leg',
'calculate_angle'],
'tangente': ['calculate_hypotenuse',
'calculate_one_leg',
'calculate_angle']}
# ------------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------------
##
# @class Q_RightTriangle
# @brief All questions about the right triangle
class Q_RightTriangle(Q_Structure):
# --------------------------------------------------------------------------
##
# @brief Constructor.
# @options
# @return One instance of question.Q_RightTriangle
def __init__(self, q_kind='default_nothing', **options):
self.derived = True
# The call to the mother class __init__() method will set the
# fields matching optional arguments which are so far:
# self.q_kind, self.q_subkind
# plus self.options (modified)
Q_Structure.__init__(self,
q_kind, AVAILABLE_Q_KIND_VALUES,
**options)
# The purpose of this next line is to get the possibly modified
# value of **options
options = self.options
# Set the default values of the different options
use_pythagorean_triples = False
if (('use_pythagorean_triples' in options
and options['use_pythagorean_triples'])
or (self.q_kind == 'converse_of_pythagorean_theorem')):
# __
use_pythagorean_triples = True
use_decimals = True
if 'use_decimals' in options and not options['use_decimals']:
use_decimals = False
self.round_to = ""
if 'round_to' in options and options['round_to'] in PRECISION:
self.round_to = options['round_to']
if not use_pythagorean_triples:
if self.round_to == "":
if use_decimals:
self.round_to = HUNDREDTH
else:
self.round_to = TENTH
self.use_pythagorean_triples = use_pythagorean_triples
self.figure_in_the_text = True
if ('figure_in_the_text' in options
and not options['figure_in_the_text']):
# __
self.figure_in_the_text = False
rotation_option = 'no'
if 'rotate_around_barycenter' in options:
rotation_option = options['rotate_around_barycenter']
self.final_unit = ""
if ('final_unit' in options
and options['final_unit'] in LENGTH_UNITS):
# __
self.final_unit = options['final_unit']
sides_units = [self.final_unit,
self.final_unit,
self.final_unit]
# Later, allow to use a different length unit for the sides
# than the final expected unit ; allow different units for different
# sides (for instance giving a list in option 'sides_units')...
# So far we will do with only ONE unit
# if 'sides_units' in options \
# and options['sides_units'] in LENGTH_UNITS:
# # __
# sides_units = options['sides_units']
self.right_triangle = None
self.unknown_side = None
self.known_sides = []
# Now set some randomly values
letters = [elt for elt in alphabet]
random.shuffle(letters)
vertices_names = (letters.pop(), letters.pop(), letters.pop())
# Here you can begin to write code for the different
# q_kinds & q_subkinds
if self.q_kind == 'pythagorean_theorem':
sides_values = [None, None, None]
if use_pythagorean_triples:
triples = pythagorean.ALL_TRIPLES_5_100
if use_decimals:
triples = pythagorean.ALL_TRIPLES_5_100 \
+ pythagorean.TRIPLES_101_200_WO_TEN_MULTIPLES
sides_values = random.choice(triples)
if use_decimals:
sides_values = \
[Decimal(str(Decimal(sides_values[0]) / 10)),
Decimal(str(Decimal(sides_values[1]) / 10)),
Decimal(str(Decimal(sides_values[2]) / 10))]
if self.q_subkind == 'calculate_hypotenuse':
sides_values[2] = ""
sides_units[2] = ""
else:
# case: self.q_subkind == 'calculate_one_leg'
leg0_or_1 = random.choice([0, 1])
sides_values[leg0_or_1] = ""
sides_units[leg0_or_1] = ""
else:
# NO pythagorean triples.
# The two generated values must NOT match any pythagorean
# triple
if use_decimals:
min_side_value = 5
max_side_value = 200
else:
min_side_value = 5
max_side_value = 100
if self.q_subkind == 'calculate_hypotenuse':
first_leg = random.randint(min_side_value, max_side_value)
# we will take the leg values between
# at least 25% and at most 150% of the length of first leg
# (and smaller than max_side_value)
second_leg_values = []
for i in range(int(first_leg * 1.5)):
if (i + int(first_leg * 0.25) <= 1.5 * first_leg
and i + int(first_leg * 0.25) <= max_side_value):
# __
second_leg_values += [i + int(first_leg * 0.25)]
second_leg_unauthorized_values = \
pythagorean.get_legs_matching_given_leg(first_leg)
second_leg_possible_values = \
list(set(second_leg_values)
- set(second_leg_unauthorized_values))
if random.choice([True, False]):
sides_values = \
[first_leg,
random.choice(second_leg_possible_values),
""]
sides_units[2] = ""
else:
sides_values = \
[random.choice(second_leg_possible_values),
first_leg,
""]
sides_units[2] = ""
else:
# case: self.q_subkind == 'calculate_one_leg'
hypotenuse = random.randint(min_side_value, max_side_value)
# we will take the leg values between
# at least 25% and at most 90% of the length of hypotenuse
# to avoid "weird" cases (with a very subtle difference
# between the given values and the one to calculate)
leg_values = []
for i in range(int(hypotenuse * 0.9)):
if i + int(hypotenuse * 0.25) <= 0.9 * hypotenuse:
leg_values += [i + int(hypotenuse * 0.25)]
leg_unauthorized_values = \
pythagorean\
.get_legs_matching_given_hypotenuse(hypotenuse)
leg_possible_values = list(set(leg_values)
- set(leg_unauthorized_values))
if random.choice([True, False]):
sides_values = ["",
random.choice(leg_possible_values),
hypotenuse]
sides_units[0] = ""
else:
sides_values = [random.choice(leg_possible_values),
"",
hypotenuse]
sides_units[1] = ""
self.right_triangle = \
RightTriangle((vertices_names,
'sketch'),
rotate_around_isobarycenter=rotation_option)
self.right_triangle.leg[0].label = Value(sides_values[0],
unit=sides_units[0])
self.right_triangle.leg[1].label = Value(sides_values[1],
unit=sides_units[1])
self.right_triangle.hypotenuse.label = Value(sides_values[2],
unit=sides_units[2])
for side in self.right_triangle.side:
if side.label.raw_value == "":
self.unknown_side = side.clone()
else:
self.known_sides += [side.clone()]
elif self.q_kind in ['converse_of_pythagorean_theorem',
'contrapositive_of_pythagorean_theorem']:
# __
sides_values = [None, None, None]
triples = list(pythagorean.ALL_TRIPLES_5_100)
if use_decimals:
triples += list(pythagorean.TRIPLES_101_200_WO_TEN_MULTIPLES)
sides_values = random.choice(triples)
if self.q_kind == 'contrapositive_of_pythagorean_theorem':
# We'll change exactly one value to be sure the triplet
# is NOT pythagorean
if random.choice([True, False]):
# We will decrease the lowest value
max_delta = int(0.1 * sides_values[0])
min_delta = 1
if min_delta > max_delta:
max_delta = min_delta
chosen_delta = random.choice(
[i + min_delta
for i in range(max_delta - min_delta + 1)])
sides_values = [sides_values[0] - chosen_delta,
sides_values[1],
sides_values[2]]
else:
# We will increase the highest value
max_delta = int(0.1 * sides_values[2])
min_delta = 1
if min_delta > max_delta:
max_delta = min_delta
chosen_delta = random.choice(
[i + min_delta
for i in range(max_delta - min_delta + 1)])
sides_values = [sides_values[0],
sides_values[1],
sides_values[2] + chosen_delta]
if use_decimals:
sides_values = [Decimal(str(Decimal(sides_values[0]) / 10)),
Decimal(str(Decimal(sides_values[1]) / 10)),
Decimal(str(Decimal(sides_values[2]) / 10))]
self.right_triangle = \
RightTriangle((vertices_names,
'sketch'),
rotate_around_isobarycenter=rotation_option)
self.right_triangle.leg[0].label = Value(sides_values[0],
unit=sides_units[0])
self.right_triangle.leg[1].label = Value(sides_values[1],
unit=sides_units[1])
self.right_triangle.hypotenuse.label = Value(sides_values[2],
unit=sides_units[2])
self.right_triangle.right_angle.mark = ""
# --------------------------------------------------------------------------
##
# @brief Returns the text of the question as a str
def text_to_str(self):
PRECISION_IDIOMS = {UNIT: _("to the unit"),
TENTH: _("to the tenth"),
HUNDREDTH: _("to the hundreth"),
THOUSANDTH: _("to the thousandth"),
TEN_THOUSANDTH: _("to the ten thousandth")}
M = shared.machine
result = self.displayable_number
if self.q_kind == 'pythagorean_theorem':
if self.figure_in_the_text:
result += M.insert_picture(self.right_triangle)
else:
result += _("The triangle {triangle_name} has a right \
angle in {right_vertex}.")\
.format(triangle_name=str(self.right_triangle.name),
right_vertex=str(self.right_triangle.vertex[1]
.name))
result += " " + str(self.known_sides[0].length_name) \
+ " = " \
+ self.known_sides[0].label.into_str(display_unit=True)\
+ ". " \
+ str(self.known_sides[1].length_name) \
+ " = " \
+ self.known_sides[1].label.into_str(display_unit=True)\
+ ". " + M.write_new_line()
result += _("Calculate the length of {this_side}.")\
.format(this_side=self.unknown_side.name)
if self.final_unit != "":
result += " " + _("Give the result in {this_unit}.")\
.format(this_unit=self.final_unit)
if self.round_to != "":
result += " " + _("Round the result {at_this_precision}.")\
.format(at_this_precision=PRECISION_IDIOMS[self.round_to])
elif self.q_kind in ['converse_of_pythagorean_theorem',
'contrapositive_of_pythagorean_theorem']:
# __
if self.figure_in_the_text:
result += M.insert_picture(self.right_triangle)
else:
sides_copy = [self.right_triangle.side[0].clone(),
self.right_triangle.side[1].clone(),
self.right_triangle.side[2].clone()]
random.shuffle(sides_copy)
side0, side1, side2 = sides_copy
result += _("{triangle_name} is a triangle such as "
"{side_length0} = {nb0}, {side_length1} = {nb1} "
"and {side_length2} = {nb2}")\
.format(triangle_name=str(self.right_triangle.name),
side_length0=str(side0.length_name),
nb0=side0.label.into_str(display_unit=True),
side_length1=str(side1.length_name),
nb1=side1.label.into_str(display_unit=True),
side_length2=str(side2.length_name),
nb2=side2.label.into_str(display_unit=True))
result += _("Is it a right triangle ? Prove your answer and if "
"the triangle is right, give the name of the right "
"angle.")
result += M.write_new_line()
return result + M.write_new_line()
# --------------------------------------------------------------------------
##
# @brief Returns the answer of the question as a str
def answer_to_str(self):
M = shared.machine
if self.q_kind == 'pythagorean_theorem':
# Resolution (and the part with the figure will be dealed later)
result = _("The triangle {triangle_name} has a right angle in "
"{right_vertex}.")\
.format(triangle_name=str(self.right_triangle.name),
right_vertex=str(self.right_triangle.vertex[1].name))
result += M.write_new_line()
result += _("Then by Pythagoras theorem") + ":"
pyth_eq = self.right_triangle.pythagorean_substequality()
result += M.write_math_style1(pyth_eq.into_str())
if self.use_pythagorean_triples:
result += M.write(Equation(pyth_eq.substitute())
.auto_resolution(
dont_display_equations_name=True,
pythagorean_mode=True,
unit=self.final_unit,
underline_result=True))
else:
result += M.write(Equation(pyth_eq.substitute())
.auto_resolution(
dont_display_equations_name=True,
decimal_result=self.round_to,
pythagorean_mode=True,
unit=self.final_unit,
underline_result=True))
if self.figure_in_the_text:
return self.displayable_number + result
else:
content = [self.displayable_number
+ _("Sketch") + ":"
+ M.write_new_line()
+ M.insert_picture(self.right_triangle),
result]
return M.write_layout((1, 2), [9, 9], content)
elif self.q_kind in ['converse_of_pythagorean_theorem',
'contrapositive_of_pythagorean_theorem']:
# __
hyp_equality = Equality([Item(('+',
self.right_triangle.
hypotenuse.length_name,
2)),
Item(('+',
self.right_triangle.
hypotenuse.label.raw_value,
2))])
hyp_equality_step2 = Equality([Item(('+',
self.right_triangle.
hypotenuse.length_name,
2)),
Item(Item(('+',
self.right_triangle.
hypotenuse.label.raw_value,
2)).evaluate())])
legs_equality = Equality([
Sum([Item(('+',
self.right_triangle.leg[0].length_name,
2)),
Item(('+',
self.right_triangle.leg[1].length_name,
2))]),
Sum([Item(('+',
self.right_triangle.leg[0].label.raw_value,
2)),
Item(('+',
self.right_triangle.leg[1].label.raw_value,
2))])])
legs_equality_step2 = Equality([
Sum([Item(('+',
self.right_triangle.leg[0].length_name,
2)),
Item(('+',
self.right_triangle.leg[1].length_name,
2))]),
Item(Sum([Item(('+',
self.right_triangle.leg[0].label.raw_value,
2)),
Item(('+',
self.right_triangle.leg[1].label.raw_value,
2))]).evaluate())])
result = _("On one hand:") + M.write_new_line()
result += M.write_math_style1(hyp_equality.into_str())
result += M.write_math_style1(hyp_equality_step2.into_str())
result += _("On the other hand:") + M.write_new_line()
result += M.write_math_style1(legs_equality.into_str())
result += M.write_math_style1(legs_equality_step2.into_str())
result += _("Hence:")
if self.q_kind == 'converse_of_pythagorean_theorem':
result += M.write_math_style1(
self.right_triangle.pythagorean_equality().into_str())
result += _("So, by the converse of the pythagorean theorem,")
# result += M.write_new_line()
result += " "
result += _("{triangle_name} has a right angle "
"in {right_vertex}.")\
.format(triangle_name=str(self.right_triangle.name),
right_vertex=str(
self.right_triangle.vertex[1].name))
elif self.q_kind == 'contrapositive_of_pythagorean_theorem':
result += M.write_math_style1(
self.right_triangle.pythagorean_equality(
equal_signs=['neq']).into_str())
result += _("So, by the contrapositive of the pythagorean "
"theorem,")
result += " "
result += _("{triangle_name} has no right angle.")\
.format(triangle_name=str(self.right_triangle.name))
if self.figure_in_the_text:
return self.displayable_number + result
else:
content = [self.displayable_number
+ _("Sketch") + ":"
+ M.write_new_line()
+ M.insert_picture(self.right_triangle),
result]
return M.write_layout((1, 2), [6, 12], content)
| gpl-3.0 | -7,726,776,038,957,097,000 | 42.755474 | 80 | 0.449662 | false | 4.570721 | false | false | false |
MOLSSI-BSE/basis_set_exchange | basis_set_exchange/writers/dalton.py | 1 | 2999 | '''
Conversion of basis sets to Dalton format
'''
from .. import lut, manip, sort, misc, printing
def write_dalton(basis):
'''Converts a basis set to Dalton format
'''
s = '! Basis = {}\n\n'.format(basis['name'])
basis = manip.make_general(basis, False, True)
basis = sort.sort_basis(basis, False)
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
# Electron Basis
if electron_elements:
for z in electron_elements:
data = basis['elements'][z]
#sym = lut.element_sym_from_Z(z, True)
elname = lut.element_name_from_Z(z).upper()
cont_string = misc.contraction_string(data)
s += 'a {}\n'.format(z)
s += '! {} {}\n'.format(elname, cont_string)
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
nprim = len(exponents)
ngen = len(coefficients)
am = shell['angular_momentum']
amchar = lut.amint_to_char(am, hij=True)
s += '! {} functions\n'.format(amchar)
# Is this a bug in the original EMSL?
#s += '{} {} 1.00\n'.format(sym, r, nprim)
s += '{} {} {}\n'.format('H', nprim, ngen)
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s += printing.write_matrix([exponents, *coefficients], point_places, convert_exp=False)
# Write out ECP
if ecp_elements:
s += '\n\nECP\n'
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, normalize=True)
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += '{} nelec {}\n'.format(sym, data['ecp_electrons'])
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
am = pot['angular_momentum']
amchar = lut.amint_to_char(am).upper()
if am[0] == max_ecp_am:
s += '{} ul\n'.format(sym)
else:
s += '{} {}\n'.format(sym, amchar)
point_places = [0, 9, 32]
s += printing.write_matrix([rexponents, gexponents, *coefficients], point_places, convert_exp=False)
s += 'END\n'
return s
| bsd-3-clause | 6,519,890,812,113,413,000 | 35.13253 | 116 | 0.520173 | false | 3.587321 | false | false | false |
schocco/mds-web | apps/muni_scales/api.py | 1 | 5813 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from tastypie import fields
from tastypie.authentication import MultiAuthentication
from tastypie.authorization import Authorization
from tastypie.bundle import Bundle
from tastypie.exceptions import NotFound
from tastypie.resources import Resource, ModelResource
from tastypie.validation import CleanedDataFormValidation
from apps.mds_auth.authorization import ReadAllSessionAuthentication, ReadAllTokenAuthentication
from apps.muni_scales.api_authorization import UXXAuthorization
from apps.muni_scales.fields import MscaleFieldMixin
from apps.muni_scales.forms import UDHscaleForm, UXCscaleForm
from apps.muni_scales.models import UDHscale, UXCscale
from apps.muni_scales.mscale import Mscale, MSCALES
class MscaleField(fields.ApiField, MscaleFieldMixin):
'''
A field that accepts an Mscale Resource but stores the integer value in the db.
'''
dehydrated_type = 'apps.muni_scales.mscale.Mscale'
help_text = 'an mscale object'
def convert(self, value):
if value is None:
return None
return self.to_mscale(value)
def hydrate(self, bundle):
'''
Prepare data before saving to the model.
'''
#check if value present
if bundle.data.has_key(self.instance_name):
value = bundle.data[self.instance_name]
mscale = self.to_mscale(value)
return mscale.number
else:
return None
def dehydrate(self, bundle, **kwargs):
'''
Prepare data for serialization before sending to the client.
'''
return self.convert(bundle.obj.__getattribute__(self.instance_name))
class MscaleResource(Resource):
'''
A read-only Mscale resource.
'''
id = fields.DecimalField(attribute='number')
underground = fields.CharField(attribute='underground')
slope = fields.CharField(attribute='slope')
obstacles = fields.ListField(attribute='obstacles')
characteristics = fields.ListField(attribute='characteristics')
class Meta:
resource_name = 'mscales'
object_class = Mscale
authorization = Authorization()
allowed_methods = ['get']
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>[0-9]+)/$" %
self._meta.resource_name,
self.wrap_view('dispatch_detail'),
name="api_dispatch_detail"),
]
def apply_sorting(self, obj_list, options=None):
"""
sorts by number (always ascending)
"""
return sorted(obj_list, key=lambda m: m.number)
def detail_uri_kwargs(self, bundle_or_obj):
kwargs = {}
if isinstance(bundle_or_obj, Bundle):
kwargs['pk'] = bundle_or_obj.obj.number
else:
kwargs['pk'] = bundle_or_obj.number
return kwargs
def get_object_list(self, request):
return MSCALES.values()
def obj_get_list(self, request=None, **kwargs):
# TODO: proper filtering
return self.get_object_list(request)
def obj_get(self, request=None, **kwargs):
try:
pk = float(kwargs['pk'])
return MSCALES[pk]
except KeyError:
raise NotFound("Invalid lookup ID provided.")
except ValueError:
raise NotFound()
class ScaleCalcMixin(object):
'''
Adds endpoint for score calculation.
'''
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/calculate/$" %
self._meta.resource_name, self.wrap_view('get_score'), name="api_calc_score"),
]
def get_score(self, request, **kwargs):
'''
Return the score for the calculation
'''
scale = self.__class__()
bundle = scale.build_bundle(data=request.POST, request=request)
scale_obj = scale.full_hydrate(bundle).obj
errors = scale_obj.full_clean()
if errors:
return self.create_response(request, errors)
score = scale_obj.get_score()
return self.create_response(request, score)
class UDHResource(ScaleCalcMixin, ModelResource):
'''
UDH rating
'''
max_difficulty = MscaleField(attribute="max_difficulty")#fields.ToOneField(MscaleResource, attribute="max_difficulty")
avg_difficulty = MscaleField(attribute="avg_difficulty")#fields.ToOneField(MscaleResource, attribute="avg_difficulty")
score = fields.DictField(attribute='get_score', readonly=True, use_in="detail")
trail = fields.ToOneField("apps.trails.api.TrailResource", "trail", related_name="udhscale", blank=True)
class Meta:
queryset = UDHscale.objects.all()
resource_name = 'udh-scale'
validation = CleanedDataFormValidation(form_class = UDHscaleForm)
always_return_data = True
authentication = MultiAuthentication(ReadAllSessionAuthentication(), ReadAllTokenAuthentication())
authorization = UXXAuthorization()
class UXCResource(ScaleCalcMixin, ModelResource):
'''
UXC Rating
'''
max_difficulty = MscaleField(attribute="max_difficulty")
avg_difficulty = MscaleField(attribute="avg_difficulty")
score = fields.DictField(attribute='get_score', readonly=True, use_in="detail")
trail = fields.ToOneField("apps.trails.api.TrailResource", "trail", related_name="uxcscale", blank=True)
class Meta:
queryset = UXCscale.objects.all()
resource_name = 'uxc-scale'
always_return_data = True
validation = CleanedDataFormValidation(form_class = UXCscaleForm)
authentication = MultiAuthentication(ReadAllSessionAuthentication(), ReadAllTokenAuthentication())
authorization = UXXAuthorization()
| mit | 3,969,057,971,859,929,600 | 34.018072 | 122 | 0.653707 | false | 4.059358 | false | false | false |
s20121035/rk3288_android5.1_repo | external/lldb/utils/misc/grep-svn-log.py | 2 | 2461 | #!/usr/bin/env python
"""
Greps and returns the first svn log entry containing a line matching the regular
expression pattern passed as the only arg.
Example:
svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h$'
"""
import fileinput, re, sys, StringIO
# Separator string for "svn log -v" output.
separator = '-' * 72
usage = """Usage: grep-svn-log.py line-pattern
Example:
svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h'"""
class Log(StringIO.StringIO):
"""Simple facade to keep track of the log content."""
def __init__(self):
self.reset()
def add_line(self, a_line):
"""Add a line to the content, if there is a previous line, commit it."""
global separator
if self.prev_line != None:
print >> self, self.prev_line
self.prev_line = a_line
self.separator_added = (a_line == separator)
def del_line(self):
"""Forget about the previous line, do not commit it."""
self.prev_line = None
def reset(self):
"""Forget about the previous lines entered."""
StringIO.StringIO.__init__(self)
self.prev_line = None
def finish(self):
"""Call this when you're finished with populating content."""
if self.prev_line != None:
print >> self, self.prev_line
self.prev_line = None
def grep(regexp):
# The log content to be written out once a match is found.
log = Log()
LOOKING_FOR_MATCH = 0
FOUND_LINE_MATCH = 1
state = LOOKING_FOR_MATCH
while 1:
line = sys.stdin.readline()
if not line:
return
line = line.splitlines()[0]
if state == FOUND_LINE_MATCH:
# At this state, we keep on accumulating lines until the separator
# is encountered. At which point, we can return the log content.
if line == separator:
log.finish()
print log.getvalue()
return
log.add_line(line)
elif state == LOOKING_FOR_MATCH:
if line == separator:
log.reset()
log.add_line(line)
# Update next state if necessary.
if regexp.search(line):
state = FOUND_LINE_MATCH
def main():
if len(sys.argv) != 2:
print usage
sys.exit(0)
regexp = re.compile(sys.argv[1])
grep(regexp)
sys.stdin.close()
if __name__ == '__main__':
main()
| gpl-3.0 | 546,846,880,806,010,430 | 27.952941 | 80 | 0.572938 | false | 3.77454 | false | false | false |
JohanComparat/nbody-npt-functions | bin/bin_MD/DF_degrade_resolution.py | 1 | 2873 | # cd pySU/pyMultidark/trunk/bin/fortranfile-0.2.1/
import numpy as n
import os
from os.path import join
from astropy.io import fits
import time
import fortranfile
import cPickle
DFdir = join("/data2", "users", "gustavo", "BigMD", "1Gpc_3840_Planck1_New", "DENSFIELDS")
mockDir = "/data1/DATA/eBOSS/Multidark-box-mocks/v1.0/parts/"
inFiles = n.array(["dmdens_cic_104.dat", "dmdens_cic_101.dat", "dmdens_cic_097.dat", "dmdens_cic_087.dat"])
bins = n.hstack((0,n.logspace(-3, 4, 1000)))
for infi in inFiles:
print infi
DFfile = join(DFdir,infi)
f = fortranfile.FortranFile(DFfile)
gridx, gridy, gridz = f.readInts()
Ntot = gridx/2
res0 = n.empty((Ntot, len(bins)-1))
NS = n.arange(Ntot)
for kk in NS:
print kk, time.time()
DFa = f.readReals()
DFb = f.readReals()
DFaR = DFa.reshape((gridx, gridx))
DFbR = DFb.reshape((gridx, gridx))
DF = n.mean(n.array([DFaR,DFbR]), axis=0)
DFdg = n.array([ n.array([ n.mean([DF[2*i][2*j:2*j+2], DF[2*i+1][2*j:2*j+2]]) for j in NS]) for i in NS])
res0[kk] = n.histogram(n.hstack((DFdg)), bins=bins)[0]
f.close()
path_to_outputCat = join(mockDir,infi[:-4] + "_DF_dg2_hist.dat")
f=open(path_to_outputCat, 'w')
cPickle.dump( [bins, n.sum(res0, axis=0)], f )
f.close()
sys.exit()
inFiles = n.array(["vx_cic_104.dat", "vx_cic_101.dat", "dmdens_cic_097.dat", "dmdens_cic_087.dat"])
inFiles = n.array(["vx_cic_104.dat", "vx_cic_101.dat", "vx_cic_097.dat", "vx_cic_087.dat", "vy_cic_104.dat", "vy_cic_101.dat", "vy_cic_097.dat", "vy_cic_087.dat", "vz_cic_104.dat", "vz_cic_101.dat", "vz_cic_097.dat", "vz_cic_087.dat"])
bins = n.arange(-2000.,2000., 5.)
for infi in inFiles:
print infi
DFfile = join(DFdir,infi)
f = fortranfile.FortranFile(DFfile)
gridx, gridy, gridz = f.readInts()
res0 = n.empty((gridx, len(bins)-1))
res1 = n.empty((gridx, len(bins)-1))
resH = n.empty((gridx, len(bins)-1, len(bins)-1))
for kk in range(gridx):
DF = f.readReals()
i = n.arange(1, gridx-1, 1)
j = n.arange(1, gridx-1, 1)
DF0 = DF[n.hstack((n.outer(i,j)))]
N1 = n.transpose([ n.hstack((n.outer(i-1,j-1))), n.hstack((n.outer(i,j-1))), n.hstack((n.outer(i-1,j))), n.hstack((n.outer(i+1,j+1))), n.hstack((n.outer(i+1,j))), n.hstack((n.outer(i,j+1))), n.hstack((n.outer(i+1,j+1))), n.hstack((n.outer(i-1,j+1))) ])
# N1 = n.transpose([ (i-1) + gridx * (j -1), (i) + gridx * (j -1), (i-1) + gridx * (j), (i+1) + gridx * (j +1), (i+1) + gridx * (j ), (i) + gridx * (j +1), (i+1) + gridx * (j -1), (i-1) + gridx * (j +1) ])
DF1 = n.array([ n.mean(DF[el]) for el in N1 ])
res0[kk] = n.histogram(DF0,bins=bins)[0]
res1[kk] = n.histogram(DF1,bins=bins)[0]
resH[kk] = n.histogram2d(DF0, DF1, bins)[0]
f.close()
path_to_outputCat = join(mockDir,infi[:-4] + "_DF0DF1hist.dat")
f=open(path_to_outputCat, 'w')
cPickle.dump([bins,n.sum(res0,axis=0), n.sum(res1,axis=0), n.sum(resH,axis=0)],f)
f.close()
| cc0-1.0 | 3,952,391,942,574,257,000 | 38.356164 | 254 | 0.616777 | false | 2.060976 | false | false | false |
widdowquinn/find_differential_primers | diagnostic_primers/scripts/pdp_script.py | 1 | 2333 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""pdp_script.py
Implements the pdp script for finding differential primers
(c) The James Hutton Institute 2017-2019
Author: Leighton Pritchard
Contact: leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2017-2019 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import time
from diagnostic_primers import __version__
from diagnostic_primers.scripts import parsers
from diagnostic_primers.scripts.logger import build_logger
def run_pdp_main(argv=None, logger=None):
"""Main process for pdp script"""
# If we need to (i.e. a namespace isn't passed), parse the command-line
if argv is None:
args = parsers.parse_cmdline()
else:
args = parsers.parse_cmdline(argv)
# Catch execution with no arguments
if len(sys.argv) == 1:
sys.stderr.write("pdp version: {0}\n".format(__version__))
return 0
# Set up logging
time0 = time.time()
if logger is None:
logger = build_logger("pdp", args)
# Run the subcommand
returnval = args.func(args, logger)
logger.info("Completed. Time taken: %.3f", (time.time() - time0))
return returnval
| mit | -2,280,028,092,503,700,000 | 30.958904 | 77 | 0.743678 | false | 3.787338 | false | false | false |
cheral/orange3-text | orangecontrib/text/widgets/owwordenrichment.py | 1 | 7520 | import numpy as np
from AnyQt.QtWidgets import QTreeWidget, QTreeView, QTreeWidgetItem
from Orange.data import Table
from Orange.widgets import gui
from Orange.widgets.settings import Setting
from Orange.widgets.widget import OWWidget, Msg
from orangecontrib.text.util import np_sp_sum
from orangecontrib.text.stats import false_discovery_rate, hypergeom_p_values
class OWWordEnrichment(OWWidget):
# Basic widget info
name = "Word Enrichment"
description = "Word enrichment analysis for selected documents."
icon = "icons/SetEnrichment.svg"
priority = 60
# Input/output
inputs = [("Selected Data", Table, "set_data_selected"),
("Data", Table, "set_data"),]
want_main_area = True
class Error(OWWidget.Error):
no_words_overlap = Msg('No words overlap!')
empty_selection = Msg('Selected data is empty!')
all_selected = Msg('All examples can not be selected!')
# Settings
filter_by_p = Setting(False)
filter_p_value = Setting(0.01)
filter_by_fdr = Setting(True)
filter_fdr_value = Setting(0.2)
def __init__(self):
super().__init__()
# Init data
self.data = None
self.selected_data = None
self.selected_data_transformed = None # used for transforming the 'selected data' into the 'data' domain
self.words = []
self.p_values = []
self.fdr_values = []
# Info section
fbox = gui.widgetBox(self.controlArea, "Info")
self.info_all = gui.label(fbox, self, 'Cluster words:')
self.info_sel = gui.label(fbox, self, 'Selected words:')
self.info_fil = gui.label(fbox, self, 'After filtering:')
# Filtering settings
fbox = gui.widgetBox(self.controlArea, "Filter")
hbox = gui.widgetBox(fbox, orientation=0)
self.chb_p = gui.checkBox(hbox, self, "filter_by_p", "p-value",
callback=self.filter_and_display,
tooltip="Filter by word p-value")
self.spin_p = gui.doubleSpin(hbox, self, 'filter_p_value',
1e-4, 1, step=1e-4, labelWidth=15,
callback=self.filter_and_display,
callbackOnReturn=True,
tooltip="Max p-value for word")
self.spin_p.setEnabled(self.filter_by_p)
hbox = gui.widgetBox(fbox, orientation=0)
self.chb_fdr = gui.checkBox(hbox, self, "filter_by_fdr", "FDR",
callback=self.filter_and_display,
tooltip="Filter by word FDR")
self.spin_fdr = gui.doubleSpin(hbox, self, 'filter_fdr_value',
1e-4, 1, step=1e-4, labelWidth=15,
callback=self.filter_and_display,
callbackOnReturn=True,
tooltip="Max p-value for word")
self.spin_fdr.setEnabled(self.filter_by_fdr)
gui.rubber(self.controlArea)
# Word's list view
self.cols = ['Word', 'p-value', 'FDR']
self.sig_words = QTreeWidget()
self.sig_words.setColumnCount(len(self.cols))
self.sig_words.setHeaderLabels(self.cols)
self.sig_words.setSortingEnabled(True)
self.sig_words.setSelectionMode(QTreeView.ExtendedSelection)
self.sig_words.sortByColumn(2, 0) # 0 is ascending order
for i in range(len(self.cols)):
self.sig_words.resizeColumnToContents(i)
self.mainArea.layout().addWidget(self.sig_words)
def set_data(self, data=None):
self.data = data
def set_data_selected(self, data=None):
self.selected_data = data
def handleNewSignals(self):
self.check_data()
def check_data(self):
self.Error.clear()
if isinstance(self.data, Table) and \
isinstance(self.selected_data, Table):
if len(self.selected_data) == 0:
self.Error.empty_selection()
self.clear()
return
self.selected_data_transformed = Table.from_table(
self.data.domain, self.selected_data)
if np_sp_sum(self.selected_data_transformed.X) == 0:
self.Error.no_words_overlap()
self.clear()
elif len(self.data) == len(self.selected_data):
self.Error.all_selected()
self.clear()
else:
self.apply()
else:
self.clear()
def clear(self):
self.sig_words.clear()
self.info_all.setText('Cluster words:')
self.info_sel.setText('Selected words:')
self.info_fil.setText('After filtering:')
def filter_enabled(self, b):
self.chb_p.setEnabled(b)
self.chb_fdr.setEnabled(b)
self.spin_p.setEnabled(b)
self.spin_fdr.setEnabled(b)
def filter_and_display(self):
self.spin_p.setEnabled(self.filter_by_p)
self.spin_fdr.setEnabled(self.filter_by_fdr)
self.sig_words.clear()
count = 0
if self.words:
for word, pval, fval in zip(self.words, self.p_values, self.fdr_values):
if (not self.filter_by_p or pval <= self.filter_p_value) and \
(not self.filter_by_fdr or fval <= self.filter_fdr_value):
it = EATreeWidgetItem(word, pval, fval, self.sig_words)
self.sig_words.addTopLevelItem(it)
count += 1
for i in range(len(self.cols)):
self.sig_words.resizeColumnToContents(i)
self.info_all.setText('Cluster words: {}'.format(len(self.selected_data_transformed.domain.attributes)))
self.info_sel.setText('Selected words: {}'.format(np.count_nonzero(np_sp_sum(self.selected_data_transformed.X, axis=0))))
if not self.filter_by_p and not self.filter_by_fdr:
self.info_fil.setText('After filtering:')
self.info_fil.setEnabled(False)
else:
self.info_fil.setEnabled(True)
self.info_fil.setText('After filtering: {}'.format(count))
def progress(self, p):
self.progressBarSet(p)
def apply(self):
self.clear()
self.progressBarInit()
self.filter_enabled(False)
self.words = [i.name for i in self.selected_data_transformed.domain.attributes]
self.p_values = hypergeom_p_values(self.data.X,
self.selected_data_transformed.X,
callback=self.progress)
self.fdr_values = false_discovery_rate(self.p_values)
self.filter_and_display()
self.filter_enabled(True)
self.progressBarFinished()
fp = lambda score: "%0.5f" % score if score > 10e-3 else "%0.1e" % score
fpt = lambda score: "%0.9f" % score if score > 10e-3 else "%0.5e" % score
class EATreeWidgetItem(QTreeWidgetItem):
def __init__(self, word, p_value, f_value, parent):
super().__init__(parent)
self.data = [word, p_value, f_value]
self.setText(0, word)
self.setText(1, fp(p_value))
self.setToolTip(1, fpt(p_value))
self.setText(2, fp(f_value))
self.setToolTip(2, fpt(f_value))
def __lt__(self, other):
col = self.treeWidget().sortColumn()
return self.data[col] < other.data[col]
| bsd-2-clause | -4,776,932,693,328,571,000 | 37.762887 | 129 | 0.574335 | false | 3.708087 | false | false | false |
xiaohan2012/capitalization-restoration | cap_transform.py | 1 | 3211 | import nltk
from ground_truth import (ARTICLES, PREPOSITIONS, CONJUNCTIONS)
from operator import itemgetter
def make_capitalized_title(title = None, title_words = None):
"""
>>> make_capitalized_title(title = "This translation app helps professionals traveling in China and Japan")
['This', 'Translation', 'App', 'Helps', 'Professionals', 'Traveling', 'in', 'China', 'and', 'Japan']
>>> make_capitalized_title(title = "Russia to see surge of investments if sanctions lifted: VTB Bank Head")
['Russia', 'to', 'See', 'Surge', 'of', 'Investments', 'if', 'Sanctions', 'Lifted', ':', 'VTB', 'Bank', 'Head']
>>> make_capitalized_title(title = "CIS FMs hold summit in Belarus")
['CIS', 'FMs', 'Hold', 'Summit', 'in', 'Belarus']
"""
trans_words = []
if title_words:
words = title_words
elif title:
words = nltk.word_tokenize(title)
else:
raise ValueError("Receive nothing..")
for i, word in enumerate(words):
if i == 0:
trans_words.append(word if word[0] == word[0].upper() else word.capitalize())
elif (word in ARTICLES or word in PREPOSITIONS or word in CONJUNCTIONS):
trans_words.append(word)
elif word[0] == word[0].upper(): #already capitalized
trans_words.append(word)
else:
trans_words.append(word.capitalize())
return trans_words
def make_uppercase_title(title_words):
"""make the title uppercase
>>> make_uppercase_title(["This", "translation", "app", "helps", "professionals", "traveling", "in", "China", "and", "Japan"])
['THIS', 'TRANSLATION', 'APP', 'HELPS', 'PROFESSIONALS', 'TRAVELING', 'IN', 'CHINA', 'AND', 'JAPAN']
"""
words = []
for w in title_words:
words.append(w.upper())
return words
def make_lowercase_title(title_words):
"""make the title lowercase
>>> make_lowercase_title(["This", "translation", "app", "helps", "professionals", "traveling", "in", "China", "and", "Japan"])
['this', 'translation', 'app', 'helps', 'professionals', 'traveling', 'in', 'china', 'and', 'japan']
"""
words = []
for w in title_words:
words.append(w.lower())
return words
def transform_data(data, sent_transform_func):
"""
Transform the data on the sentence level
>>> input = [[(u'The', 'IC'), (u'Sun', 'IC'), (u'Life', 'IC'), (u'Building', 'IC'), (u'receives', 'AL'), (u'LEED', 'AU'), (u'Silver', 'IC'), (u'Certification', 'IC')]]
>>> transform_data(input, make_capitalized_title)
[[(u'The', 'IC'), (u'Sun', 'IC'), (u'Life', 'IC'), (u'Building', 'IC'), (u'Receives', 'AL'), (u'LEED', 'AU'), (u'Silver', 'IC'), (u'Certification', 'IC')]]
>>> transform_data(input, make_lowercase_title)
[[(u'the', 'IC'), (u'sun', 'IC'), (u'life', 'IC'), (u'building', 'IC'), (u'receives', 'AL'), (u'leed', 'AU'), (u'silver', 'IC'), (u'certification', 'IC')]]
"""
assert callable(sent_transform_func)
new_data = []
for instance in data:
new_data.append(
zip(sent_transform_func(title_words = map(itemgetter(0), instance)),
map(itemgetter(1), instance))
)
return new_data
| gpl-2.0 | -5,132,698,211,757,617,000 | 40.701299 | 171 | 0.582685 | false | 3.141879 | false | false | false |
internetarchive/bookserver | bookserver/catalog/Catalog.py | 1 | 2008 | #!/usr/bin/env python
"""
Copyright(c)2009 Internet Archive. Software license AGPL version 3.
This file is part of bookserver.
bookserver is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
bookserver is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with bookserver. If not, see <http://www.gnu.org/licenses/>.
The bookserver source is hosted at http://github.com/internetarchive/bookserver/
"""
class Catalog:
"""
Catalog class init
"""
def __init__(self,
title = 'Internet Archive OPDS',
urn = 'urn:x-internet-archive:bookserver:catalog',
url = 'http://bookserver.archive.org/catalog/',
datestr = '1970-01-01T00:00:00Z',
author = 'Internet Archive',
authorUri = 'http://www.archive.org',
crawlableUrl = None
):
self._entries = []
self._opensearch = None
self._navigation = None
self._title = title
self._urn = urn
self._url = url
self._datestr = datestr
self._author = author
self._authorUri = authorUri
self._crawlableUrl = crawlableUrl
def addEntry(self, entry):
self._entries.append(entry)
def addNavigation(self, nav):
self._navigation = nav
def addOpenSearch(self, opensearch):
self._opensearch = opensearch
def getEntries(self):
return self._entries
| agpl-3.0 | 6,544,399,357,708,317,000 | 32.466667 | 84 | 0.608566 | false | 4.374728 | false | false | false |
zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/112_WalkOfFate/__init__.py | 1 | 2208 | # Rewritten by RayzoR
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "112_WalkOfFate"
# ~~~~~ npcId list: ~~~~~
Livina = 30572
Karuda = 32017
# ~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~ itemId list: ~~~~~~
EnchantD = 956
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAdvEvent (self,event,npc,player) :
st = player.getQuestState(qn)
if not st: return
htmltext = event
cond = st.getInt("cond")
if event == "32017-02.htm" and cond == 1 :
st.giveItems(57,22308)
st.giveItems(EnchantD,1)
st.addExpAndSp(112876,5774)
st.exitQuest(False)
st.playSound("ItemSound.quest_finish")
elif event == "30572-02.htm" :
st.playSound("ItemSound.quest_accept")
st.setState(STARTED)
st.set("cond","1")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><head><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
state = st.getState()
npcId = npc.getNpcId()
cond = st.getInt("cond")
if state == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif state == CREATED :
if npcId == Livina :
if player.getLevel() >= 20 :
htmltext = "30572-01.htm"
else:
htmltext = "30572-00.htm"
st.exitQuest(1)
elif state == STARTED :
if npcId == Livina :
htmltext = "30572-03.htm"
elif npcId == Karuda :
htmltext = "32017-01.htm"
return htmltext
QUEST = Quest(112,qn,"Walk of Fate")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(Livina)
QUEST.addTalkId(Livina)
QUEST.addTalkId(Karuda)
| gpl-3.0 | -3,607,683,265,188,356,000 | 28.837838 | 159 | 0.597826 | false | 3.053942 | false | false | false |
flockchat/pyflock | flockos/apis/chat.py | 1 | 1384 | # coding: utf-8
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import call_api
def fetch_messages(token, chat, uids, **kwargs):
"""
This method makes a synchronous HTTP request.
:param str token: (required)
:param str chat: (required)
:param list[str] uids: (required)
:return: response dict
"""
params = locals()
for key, val in iteritems(params['kwargs']):
params[key] = val
del params['kwargs']
resource_path = '/chat.fetchMessages'.replace('{format}', 'json')
response = call_api(resource_path, params=params)
return response
def send_message(token, to, text, **kwargs):
"""
This method makes a synchronous HTTP request.
:param str token: (required)
:param str to: (required)
:param str text: (required)
:param str on_behalf_of:
:param list[str] visible_to:
:param str flockml:
:param str notification:
:param list[str] mentions:
:param SendAs send_as:
:param list[Attachment] attachments:
:return: response dict
"""
params = locals()
for key, val in iteritems(params['kwargs']):
params[key] = val
del params['kwargs']
resource_path = '/chat.sendMessage'.replace('{format}', 'json')
response = call_api(resource_path, params=params)
return response
| apache-2.0 | -1,932,706,288,706,491,400 | 25.113208 | 69 | 0.632948 | false | 3.812672 | false | false | false |
LighthouseUK/koalacore | koalacore/api.py | 1 | 16622 | # -*- coding: utf-8 -*-
"""
koala.api
~~~~~~~~~~~~~~~~~~
Contains base implementations for building an internal project API
:copyright: (c) 2015 Lighthouse
:license: LGPL
"""
from blinker import signal
from google.appengine.ext import deferred
__author__ = 'Matt Badger'
# TODO: remove the deferred library dependency; extend the BaseAPI in an App Engine specific module to include deferred.
# TODO: it is possible that these methods will fail and thus their result will be None. Passing this in a signal may
# cause other functions to throw exceptions. Check the return value before processing the post_ signals?
# Result should always be the first argument to the post_ signals. That way the receivers can check the value before
# continuing execution.
class BaseAPI(object):
_api_name = ''
_api_model = None
_datastore_interface = None
_search_interface = None
@classmethod
def new(cls, **kwargs):
return cls._api_model(**kwargs)
@classmethod
def insert(cls, resource_object, auth_uid=None, **kwargs):
if signal('pre_insert').has_receivers_for(cls):
signal('pre_insert').send(cls, resource_object=resource_object, auth_uid=auth_uid, **kwargs)
resource_uid = cls._datastore_interface.insert(resource_object=resource_object, **kwargs)
deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_insert').has_receivers_for(cls):
signal('post_insert').send(cls, result=resource_uid, resource_uid=resource_uid,
resource_object=resource_object, auth_uid=auth_uid, **kwargs)
return resource_uid
@classmethod
def get(cls, resource_uid, **kwargs):
if signal('pre_get').has_receivers_for(cls):
signal('pre_get').send(cls, resource_uid=resource_uid, **kwargs)
resource = cls._datastore_interface.get(resource_uid=resource_uid)
if signal('post_get').has_receivers_for(cls):
signal('post_get').send(cls, result=resource, resource_uid=resource_uid, **kwargs)
return resource
@classmethod
def update(cls, resource_object, auth_uid=None, **kwargs):
if signal('pre_update').has_receivers_for(cls):
signal('pre_update').send(cls, resource_object=resource_object, auth_uid=auth_uid, **kwargs)
resource_uid = cls._datastore_interface.update(resource_object=resource_object, **kwargs)
deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_update').has_receivers_for(cls):
signal('post_update').send(cls, result=resource_uid, resource_uid=resource_uid,
resource_object=resource_object, auth_uid=auth_uid, **kwargs)
return resource_uid
@classmethod
def patch(cls, resource_uid, delta_update, auth_uid=None, **kwargs):
if signal('pre_patch').has_receivers_for(cls):
signal('pre_patch').send(cls, resource_uid=resource_uid, delta_update=delta_update, auth_uid=auth_uid,
**kwargs)
resource_uid = cls._datastore_interface.patch(resource_uid=resource_uid, delta_update=delta_update, **kwargs)
deferred.defer(cls._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_patch').has_receivers_for(cls):
signal('post_patch').send(cls, result=resource_uid, resource_uid=resource_uid, delta_update=delta_update,
auth_uid=auth_uid, **kwargs)
return resource_uid
@classmethod
def delete(cls, resource_uid, auth_uid=None, **kwargs):
if signal('pre_delete').has_receivers_for(cls):
signal('pre_delete').send(cls, resource_uid=resource_uid, auth_uid=auth_uid, **kwargs)
cls._datastore_interface.delete(resource_uid=resource_uid, **kwargs)
deferred.defer(cls._delete_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_delete').has_receivers_for(cls):
signal('post_delete').send(cls, result=None, resource_uid=resource_uid, auth_uid=auth_uid, **kwargs)
@classmethod
def search(cls, query_string, **kwargs):
if signal('pre_search').has_receivers_for(cls):
signal('pre_search').send(cls, query_string=query_string, **kwargs)
search_result = cls._search_interface.search(query_string=query_string, **kwargs)
if signal('post_search').has_receivers_for(cls):
signal('post_search').send(cls, result=search_result, query_string=query_string, **kwargs)
return search_result
@classmethod
def _update_search_index(cls, resource_uid, **kwargs):
resource = cls.get(resource_uid=resource_uid)
cls._search_interface.insert(resource_object=resource, **kwargs)
@classmethod
def _delete_search_index(cls, resource_uid, **kwargs):
cls._search_interface.delete(resource_object_uid=resource_uid, **kwargs)
class BaseSubAPI(object):
_api_name = ''
_parent_api = None
_allowed_patch_keys = set()
@classmethod
def _parse_patch_keys(cls, delta_update):
delta_keys = set(delta_update.keys())
unauthorized_keys = delta_keys - cls._allowed_patch_keys
if unauthorized_keys:
raise ValueError(u'Cannot perform patch as "{}" are unauthorized keys'.format(unauthorized_keys))
@classmethod
def patch(cls, resource_uid, delta_update, **kwargs):
cls._parse_patch_keys(delta_update=delta_update)
if signal('pre_patch').has_receivers_for(cls):
signal('pre_patch').send(cls, resource_uid=resource_uid, delta_update=delta_update, **kwargs)
resource_uid = cls._parent_api._datastore_interface.patch(resource_uid=resource_uid, delta_update=delta_update,
**kwargs)
deferred.defer(cls._parent_api._update_search_index, resource_uid=resource_uid, _queue='search-index-update')
if signal('post_patch').has_receivers_for(cls):
signal('post_patch').send(cls, result=resource_uid, resource_uid=resource_uid, delta_update=delta_update,
**kwargs)
return resource_uid
class BaseResourceProperty(object):
"""A data descriptor that sets and returns values normally but also includes a title attribute and assorted filters.
You can inherit from this class to create custom property types
"""
_name = None
_default = None
title = None
_attributes = ['_name', '_default', 'title']
_positional = 1 # Only name is a positional argument.
def __init__(self, name=None, default=None, title=''):
self._name = name # name should conform to python class attribute naming conventions
self._default = default
self.title = title
def __repr__(self):
"""Return a compact unambiguous string representation of a property."""
args = []
cls = self.__class__
for i, attr in enumerate(self._attributes):
val = getattr(self, attr)
if val is not getattr(cls, attr):
if isinstance(val, type):
s = val.__name__
else:
s = repr(val)
if i >= cls._positional:
if attr.startswith('_'):
attr = attr[1:]
s = '%s=%s' % (attr, s)
args.append(s)
s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
return s
def __get__(self, entity, unused_cls=None):
"""Descriptor protocol: get the value from the entity."""
if entity is None:
return self # __get__ called on class
return entity._values.get(self._name, self._default)
def __set__(self, entity, value):
"""Descriptor protocol: set the value on the entity."""
entity._values[self._name] = value
def _fix_up(self, cls, code_name):
"""Internal helper called to tell the property its name.
This is called by _fix_up_properties() which is called by
MetaModel when finishing the construction of a Model subclass.
The name passed in is the name of the class attribute to which the
Property is assigned (a.k.a. the code name). Note that this means
that each Property instance must be assigned to (at most) one
class attribute. E.g. to declare three strings, you must call
StringProperty() three times, you cannot write
foo = bar = baz = StringProperty()
"""
if self._name is None:
self._name = code_name
def _has_value(self, entity, unused_rest=None):
"""Internal helper to ask if the entity has a value for this Property."""
return self._name in entity._values
class ResourceProperty(BaseResourceProperty):
_attributes = BaseResourceProperty._attributes + ['_immutable', '_unique', '_strip', '_lower']
def __init__(self, immutable=False, unique=False, track_revisions=True, strip_whitespace=True,
force_lowercase=False, **kwargs):
super(ResourceProperty, self).__init__(**kwargs)
self._immutable = immutable
self._unique = unique
self._track_revisions = track_revisions
self._strip = strip_whitespace
self._lower = force_lowercase
def __set__(self, entity, value):
"""Descriptor protocol: set the value on the entity."""
if entity._init_complete:
if self._immutable:
raise AssertionError('"{}" is immutable.'.format(self._name))
if self._strip:
if value is not None:
if hasattr(value, 'strip'):
value = value.strip()
elif isinstance(value, list):
try:
value = [item.strip() for item in value]
except AttributeError:
# The value cannot simply be stripped. Custom formatting should be used in a dedicated method.
pass
elif isinstance(value, set):
value_list = list(value)
try:
value = set([item.strip() for item in value_list])
except AttributeError:
# The value cannot simply be stripped. Custom formatting should be used in a dedicated method.
pass
if self._lower:
if value is not None:
if hasattr(value, 'lower'):
value = value.lower()
elif isinstance(value, list):
try:
value = [item.lower() for item in value]
except AttributeError:
# The value cannot simply be lowered. Custom formatting should be used in a dedicated method.
pass
if entity._init_complete:
if self._unique:
entity._uniques_modified.append(self._name)
if self._track_revisions:
if self._name in entity._history:
entity._history[self._name] = (entity._history[self._name][0], value)
else:
entity._history[self._name] = (getattr(entity, self._name, None), value)
super(ResourceProperty, self).__set__(entity=entity, value=value)
class ComputedResourceProperty(BaseResourceProperty):
_attributes = BaseResourceProperty._attributes + ['_compute_function']
def __init__(self, compute_function, **kwargs):
super(ComputedResourceProperty, self).__init__(**kwargs)
self._compute_function = compute_function
def __get__(self, entity, unused_cls=None):
"""Descriptor protocol: get the value from the entity."""
if entity is None:
return self # __get__ called on class
return self._compute_function(entity)
class MetaModel(type):
"""Metaclass for Model.
This exists to fix up the properties -- they need to know their name.
This is accomplished by calling the class's _fix_properties() method.
Note: This class is derived from Google's NDB MetaModel (line 2838 in model.py)
"""
def __init__(cls, name, bases, classdict):
super(MetaModel, cls).__init__(name, bases, classdict)
cls._fix_up_properties()
def __repr__(cls):
props = []
for _, prop in sorted(cls._properties.iteritems()):
props.append('%s=%r' % (prop._code_name, prop))
return '%s<%s>' % (cls.__name__, ', '.join(props))
class BaseResource(object):
"""
Base resource object. You have to implement some of the functionality yourself.
You must call super(Resource, self).__init__() first in your init method.
Immutable properties must be set within init otherwise it makes it impossible to set initial values.
If a property is required then make sure that you check it during init and throw an exception.
"""
__metaclass__ = MetaModel
_properties = None
_uniques = None
def __init__(self, **kwargs):
self._init_complete = False
self._values = {}
self._uniques_modified = []
self._history = {}
self._set_attributes(kwargs)
self._init_complete = True
def _set_attributes(self, kwds):
"""Internal helper to set attributes from keyword arguments.
Expando overrides this.
"""
cls = self.__class__
for name, value in kwds.iteritems():
prop = getattr(cls, name) # Raises AttributeError for unknown properties.
if not isinstance(prop, BaseResourceProperty):
raise TypeError('Cannot set non-property %s' % name)
prop.__set__(self, value)
def __repr__(self):
"""Return an unambiguous string representation of an entity."""
args = []
for prop in self._properties.itervalues():
if prop._has_value(self):
val = prop.__get__(self)
if val is None:
rep = 'None'
else:
rep = val
args.append('%s=%s' % (prop._name, rep))
args.sort()
s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
return s
def _as_dict(self):
"""Return a dict containing the entity's property values.
"""
return self._values.copy()
as_dict = _as_dict
@classmethod
def _fix_up_properties(cls):
"""Fix up the properties by calling their _fix_up() method.
Note: This is called by MetaModel, but may also be called manually
after dynamically updating a model class.
"""
cls._properties = {} # Map of {name: Property}
cls._uniques = [] # Map of {name: Property}
if cls.__module__ == __name__: # Skip the classes in *this* file.
return
for name in set(dir(cls)):
attr = getattr(cls, name, None)
if isinstance(attr, BaseResourceProperty):
if name.startswith('_'):
raise TypeError('ModelAttribute %s cannot begin with an underscore '
'character. _ prefixed attributes are reserved for '
'temporary Model instance values.' % name)
attr._fix_up(cls, name)
cls._properties[attr._name] = attr
try:
if attr._unique:
cls._uniques.append(attr._name)
except AttributeError:
pass
class Resource(BaseResource):
"""
Default implementation of a resource. It handles uid, created and updated properties. The latter two are simply
timestamps.
Due to the way these objects are used, the properties cannot be mandatory. For example, the uid may be set by the
datastore on insert. Same goes for the timestamps.
"""
# name=None, default=None, title='', immutable=False, unique=False, track_revisions=True, strip_whitespace=True, force_lowercase=False
uid = ResourceProperty(title=u'UID', immutable=True, track_revisions=False)
created = ResourceProperty(title=u'Created', immutable=True, track_revisions=False)
updated = ResourceProperty(title=u'Updated', immutable=True, track_revisions=False)
| lgpl-3.0 | -1,690,080,187,786,669,800 | 39.541463 | 138 | 0.60089 | false | 4.310685 | false | false | false |
runefriborg/pycsp | test/unix/check.py | 1 | 2240 | import sys
sys.path.insert(0, "../..")
from pycsp.parallel import *
def print_state(received, poison, retire):
sys.stdout.write("Received: " + str(received) + "\n")
if poison:
sys.stdout.write("Poisoned\n")
if retire:
sys.stdout.write("Retired\n")
sys.stdout.flush()
@process
def Assert(cin, name = "", count = 0, minimum = 0, vocabulary = [], ordered = False, quit_on_count = False, debug = False):
received = []
poison = False
retire = False
while True:
try:
val = cin()
if debug:
sys.stdout.write("Debug: "+str(val)+"\n")
sys.stdout.flush()
received.append(val)
except ChannelPoisonException:
poison = True
break
except ChannelRetireException:
retire = True
break
if quit_on_count and len(received) == count:
break
error = ""
if (len(received) < minimum):
error += "Wrong number of values: "+str(len(received))+"\n"
error += "Expected the minimum number of values: "+str(minimum)+"\n"
if count:
if minimum:
if (len(received) > count):
error += "Wrong number of values: "+str(len(received))+"\n"
error += "Expected a maximum number of values: "+str(count)+"\n"
else:
if not (len(received) == count):
error += "Wrong number of values: "+str(len(received))+"\n"
error += "Expected number of values: "+str(count)+"\n"
if vocabulary:
for i in range(len(received)):
if received[i] not in vocabulary:
error += "Value "+ str(received[i]) + " not in vocabulary\n"
if (ordered):
for i in range(len(received)):
if received[i] != vocabulary[i % len(vocabulary)]:
error += "Value "+ str(received[i]) + " != " + str(vocabulary[i % len(vocabulary)])+" in vocabulary\n"
if error:
sys.stdout.write(name+"\n")
sys.stdout.write(error)
print_state(received, poison, retire)
else:
sys.stdout.write("OK - "+ name+ "\n")
| mit | 4,462,060,319,707,481,000 | 31.941176 | 123 | 0.519196 | false | 3.929825 | false | false | false |
hynekcer/django | tests/admin_inlines/models.py | 276 | 6824 | """
Testing of admin inline formsets.
"""
from __future__ import unicode_literals
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super(NonAutoPKBook, self).save(*args, **kwargs)
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
| bsd-3-clause | -4,537,177,001,815,998,000 | 24.750943 | 93 | 0.722743 | false | 3.522974 | false | false | false |
UkrainianTrickingCommunity/website | logsys/views.py | 1 | 2432 |
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.core.context_processors import csrf
import django.contrib.auth.decorators as decorators
import django.contrib.auth as auth
import django.contrib.auth.forms as auth_forms
import django.contrib.auth.models as auth_models
import json
@decorators.login_required
def logout(request):
if request.user.is_authenticated():
auth.logout(request)
return redirect('/')
def reset_password(request):
return render(request, 'logsys/reset-password.html', {})
def login(request):
args = {}
args.update(csrf(request))
if request.POST:
auth_form = auth_forms.AuthenticationForm(request, data=request.POST)
if auth_form.is_valid():
auth_form.clean()
auth.login(request, auth_form.get_user())
return redirect('/')
else:
args['auth_error_p'] = True
if request.POST['username']:
args['tricker_id_prev_try'] = request.POST['username']
return render(request, 'logsys/login.html', args)
def register(request):
args = {}
args.update(csrf(request))
if request.POST:
new_user_form = auth_forms.UserCreationForm(request.POST)
if new_user_form.is_valid():
new_user = new_user_form.save(commit=False)
new_user.email = request.POST['email']
new_user.first_name = request.POST.get('first-name', '')
new_user.last_name = request.POST.get('last-name', '')
new_user.save()
auth.login(
request,
auth.authenticate(
username=new_user_form.cleaned_data['username'],
password=new_user_form.cleaned_data['password1']
)
)
return redirect('/')
else:
pass
# TODO reject incorrect registration
return render(request, 'logsys/register.html', args)
def is_username_already_engaged(request, username):
response = {'engaged': False}
if auth_models.User.objects.filter(username=username):
response['engaged'] = True
return HttpResponse(json.dumps(response))
def is_email_already_engaged(request, email):
response = {'engaged': False}
if auth.models.User.objects.filter(email=email):
response['engaged'] = True
return HttpResponse(json.dumps(response))
| gpl-2.0 | 9,127,130,040,032,611,000 | 31.426667 | 77 | 0.628289 | false | 4 | false | false | false |
MattBroach/Django-Merged-Inlines | merged_inlines/admin.py | 1 | 2288 | from django.contrib import admin
class MergedInlineAdmin(admin.ModelAdmin):
# optional field ordering variable
merged_field_order = None
merged_inline_order = 'id'
# Edited Change_Form Template with one inline form
change_form_template = 'admin/change_form_merged_inlines.html'
class Media:
js = ('admin/js/merged_inlines.js',)
# iterates over all the inline_formsets and collects them into a lists,
# that are then sent to the
# change_view as extra context
def render_change_form(
self, request, context, add=False,
change=False, form_url='', obj=None):
inline_admin_formsets = context['inline_admin_formsets']
all_forms = []
all_fields = []
i = 0
for formset in inline_admin_formsets:
for form in formset:
form.verbose_name = form.form._meta.model._meta.verbose_name.title()
all_forms.append((form, {}))
for fieldset in form:
for line in fieldset:
for field in line:
if (field.field.name, field.field.label) not in all_fields and not field.field.is_hidden:
all_fields.append(
(field.field.name, field.field.label)
)
all_forms[i][1][field.field.name] = field
i += 1
# Sort the forms based on given field.
end = len(all_forms)-1
all_forms.sort(
key=lambda x: getattr(
x[0].form.instance,
self.merged_inline_order
) if getattr(
x[0].form.instance,
self.merged_inline_order) is not None else end)
# Sort the fields based in merged_inline_order, if given
if self.merged_field_order is not None:
all_fields.sort(key=lambda x: self.merged_field_order.index(x[0]))
extra_context = {}
extra_context['all_fields'] = all_fields
extra_context['all_forms'] = all_forms
context.update(extra_context)
return super(MergedInlineAdmin, self).render_change_form(
request, context, add, change, form_url, obj)
| mit | -3,833,279,934,971,811,000 | 36.508197 | 117 | 0.555507 | false | 4.221402 | false | false | false |
google/brain-tokyo-workshop | AttentionAgent/tasks/gym_task.py | 1 | 6712 | import cv2
import gin
import gym
from gym import spaces
import numpy as np
import os
import tasks.abc_task
import time
import car_racing_variants
from takecover_variants.doom_take_cover import DoomTakeCoverEnv
class GymTask(tasks.abc_task.BaseTask):
"""OpenAI gym tasks."""
def __init__(self):
self._env = None
self._render = False
self._logger = None
def create_task(self, **kwargs):
raise NotImplementedError()
def seed(self, seed):
if isinstance(self, TakeCoverTask):
self._env.game.set_seed(seed)
else:
self._env.seed(seed)
def reset(self):
return self._env.reset()
def step(self, action, evaluate):
return self._env.step(action)
def close(self):
self._env.close()
def _process_reward(self, reward, done, evaluate):
return reward
def _process_action(self, action):
return action
def _process_observation(self, observation):
return observation
def _overwrite_terminate_flag(self, reward, done, step_cnt, evaluate):
return done
def _show_gui(self):
if hasattr(self._env, 'render'):
self._env.render()
def roll_out(self, solution, evaluate):
ob = self.reset()
ob = self._process_observation(ob)
if hasattr(solution, 'reset'):
solution.reset()
start_time = time.time()
rewards = []
done = False
step_cnt = 0
while not done:
action = solution.get_output(inputs=ob, update_filter=not evaluate)
action = self._process_action(action)
ob, r, done, _ = self.step(action, evaluate)
ob = self._process_observation(ob)
if self._render:
self._show_gui()
step_cnt += 1
done = self._overwrite_terminate_flag(r, done, step_cnt, evaluate)
step_reward = self._process_reward(r, done, evaluate)
rewards.append(step_reward)
time_cost = time.time() - start_time
actual_reward = np.sum(rewards)
if hasattr(self, '_logger') and self._logger is not None:
self._logger.info(
'Roll-out time={0:.2f}s, steps={1}, reward={2:.2f}'.format(
time_cost, step_cnt, actual_reward))
return actual_reward
@gin.configurable
class TakeCoverTask(GymTask):
"""VizDoom take cover task."""
def __init__(self):
super(TakeCoverTask, self).__init__()
self._float_text_env = False
self._text_img_path = '/opt/app/takecover_variants/attention_agent.png'
def create_task(self, **kwargs):
if 'render' in kwargs:
self._render = kwargs['render']
if 'logger' in kwargs:
self._logger = kwargs['logger']
modification = 'original'
if 'modification' in kwargs:
modification = kwargs['modification']
if modification == 'text':
self._float_text_env = True
self._logger.info('modification: {}'.format(modification))
self._env = DoomTakeCoverEnv(modification)
return self
def _process_observation(self, observation):
if not self._float_text_env:
return observation
img = cv2.imread(self._text_img_path, cv2.IMREAD_GRAYSCALE)
h, w = img.shape
full_color_patch = np.ones([h, w], dtype=np.uint8) * 255
zero_patch = np.zeros([h, w], dtype=np.uint8)
x = 150
y = 30
mask = (img == 0)
observation[y:(y+h), x:(x+w), 0][mask] = zero_patch[mask]
observation[y:(y+h), x:(x+w), 1][mask] = zero_patch[mask]
observation[y:(y+h), x:(x+w), 2][mask] = full_color_patch[mask]
observation[y:(y+h), x:(x+w), 0][~mask] = zero_patch[~mask]
observation[y:(y+h), x:(x+w), 1][~mask] = full_color_patch[~mask]
observation[y:(y+h), x:(x+w), 2][~mask] = full_color_patch[~mask]
return observation
def _process_action(self, action):
# Follow the code in world models.
action_to_apply = [0] * 43
threshold = 0.3333
if action > threshold:
action_to_apply[10] = 1
if action < -threshold:
action_to_apply[11] = 1
return action_to_apply
def set_video_dir(self, video_dir):
from gym.wrappers import Monitor
self._env = Monitor(
env=self._env,
directory=video_dir,
video_callable=lambda x: True
)
@gin.configurable
class CarRacingTask(GymTask):
"""Gym CarRacing-v0 task."""
def __init__(self):
super(CarRacingTask, self).__init__()
self._max_steps = 0
self._neg_reward_cnt = 0
self._neg_reward_cap = 0
self._action_high = np.array([1., 1., 1.])
self._action_low = np.array([-1., 0., 0.])
def _process_action(self, action):
return (action * (self._action_high - self._action_low) / 2. +
(self._action_high + self._action_low) / 2.)
def reset(self):
ob = super(CarRacingTask, self).reset()
self._neg_reward_cnt = 0
return ob
def _overwrite_terminate_flag(self, reward, done, step_cnt, evaluate):
if evaluate:
return done
if reward < 0:
self._neg_reward_cnt += 1
else:
self._neg_reward_cnt = 0
too_many_out_of_tracks = 0 < self._neg_reward_cap < self._neg_reward_cnt
too_many_steps = 0 < self._max_steps <= step_cnt
return done or too_many_out_of_tracks or too_many_steps
def create_task(self, **kwargs):
if 'render' in kwargs:
self._render = kwargs['render']
if 'out_of_track_cap' in kwargs:
self._neg_reward_cap = kwargs['out_of_track_cap']
if 'max_steps' in kwargs:
self._max_steps = kwargs['max_steps']
if 'logger' in kwargs:
self._logger = kwargs['logger']
env_string = 'CarRacing-v0'
if 'modification' in kwargs:
if kwargs['modification'] == 'color':
env_string = 'CarRacingColor-v0'
elif kwargs['modification'] == 'bar':
env_string = 'CarRacingBar-v0'
elif kwargs['modification'] == 'blob':
env_string = 'CarRacingBlob-v0'
self._logger.info('env_string: {}'.format(env_string))
self._env = gym.make(env_string)
return self
def set_video_dir(self, video_dir):
from gym.wrappers import Monitor
self._env = Monitor(
env=self._env,
directory=video_dir,
video_callable=lambda x: True
)
| apache-2.0 | -87,645,405,447,678,300 | 30.810427 | 80 | 0.56183 | false | 3.671772 | false | false | false |
dasadc/conmgr | support/qgen.py | 1 | 15383 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 DA Symposium 2017
# All rights reserved.
#
"""
アルゴリズムデザインコンテスト2017の問題データと正解データを、ランダムに作成する。
"""
from __future__ import print_function
import numpy as np
import random
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../server')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), './lib')))
from nlcheck import NLCheck
import nldraw2
import openpyxl
from openpyxl import Workbook
_size = (3,3,1) # x,y,z
size = None
nlines = 999
retry = 2
debug = False
verbose = False
newline = '\n' # 改行コード
template_move = 'newsud' # 移動方向(6方向)を表す文字列
template_move0 = 'news'*10 + 'ud' # 垂直移動よりも、水平移動を(10倍)優先する
template_move1 = template_move0 + 'G'*20 # 直進(G)は(20倍)優先する
class dotdict(dict):
"""
dot.notation access to dictionary attributes
https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
unit_vec_xyz = dotdict({ 'n': ( 0, -1, 0),
'e': ( 1, 0, 0),
'w': (-1, 0, 0),
's': ( 0, 1, 0),
'u': ( 0, 0, 1),
'd': ( 0, 0, -1) })
class Ban:
"""
盤のデータ構造
"""
def __init__(self, x, y, z):
self.size = dotdict({'x': x, 'y': y, 'z': z})
self.xmat = np.zeros((z+2, y+2, x+2), dtype=np.integer)
def get_size(self):
return self.size
def get(self, x,y,z):
return self.xmat[z+1, y+1, x+1]
def get_xyz(self, xyz):
x,y,z = xyz
return self.xmat[z+1, y+1, x+1]
def set(self, x,y,z, val):
self.xmat[z+1, y+1, x+1] = val
def set_xyz(self, xyz, val):
x,y,z = xyz
self.xmat[z+1, y+1, x+1] = val
def print(self):
print(self.xmat[1:-1, 1:-1, 1:-1])
def zyx1_to_xyz(self, zyx1):
return (zyx1[2]-1, zyx1[1]-1, zyx1[0]-1)
def find_zero_random(self, dont_use=[]):
"値が0の座標を、ランダムに返す。リストdont_useに含まれる座標は、選ばない。"
cand = []
for k1, v in np.ndenumerate(self.xmat):
if self.inside_zyx1(k1):
xyz = self.zyx1_to_xyz(k1)
if (v == 0) and (xyz not in dont_use):
cand.append(k1)
if len(cand) == 0:
return False
i = random.randint(0, len(cand)-1)
return self.zyx1_to_xyz(cand[i])
def inside(self, xyz):
"座標xyzが、盤の中にあるか?"
x, y, z = xyz
if ((0 <= x and x < self.size.x) and
(0 <= y and y < self.size.y) and
(0 <= z and z < self.size.z)):
return True
else:
return False
def inside_zyx1(self, zyx1):
"(+1されている)座標zyx1が、盤の中にあるか?"
z = zyx1[0]-1
y = zyx1[1]-1
x = zyx1[2]-1
return self.inside((x,y,z))
def move_xyz_to(self, xyz, move):
"座標xyzから、move(=n,e,w,s,u,d)の方向に移動した座標を返す"
uv = unit_vec_xyz[move]
return (xyz[0] + uv[0], xyz[1] + uv[1], xyz[2] + uv[2])
def rip_line(self, number):
"線numberを、引き剥がす"
indexes = np.where(self.xmat == number)
n = len(indexes[0])
#print('rip_line', number, n)
#self.print()
for j in range(0, n):
z = indexes[0][j]
y = indexes[1][j]
x = indexes[2][j]
#print(x,y,z)
self.xmat[z,y,x] = 0
#self.print()
def empty_cells(self):
"空白マスの数を返す"
indexes = np.where(self.xmat[1:-1, 1:-1, 1:-1] == 0)
return len(indexes[0])
def neighbors(self, xyz):
"セルxyzの隣接セルの数値を取り出す"
x, y, z = xyz
return dotdict({ 'n': self.get(x, y-1, z), # north
'e': self.get(x+1, y, z), # east
'w': self.get(x-1, y, z), # west
's': self.get(x, y+1, z), # south
'u': self.get(x, y, z+1), # upstairs
'd': self.get(x, y, z-1)}) # downstairs
def A_data(self):
"回答テキストを作る"
out = 'SIZE %dX%dX%d%s' % (self.size.x, self.size.y, self.size.z, newline)
for z in range(0, self.size.z):
out += 'LAYER %d%s' % (z+1, newline)
for y in range(0, self.size.y):
row = ''
for x in range(0, self.size.x):
num = self.get_xyz((x,y,z))
row += '%02d' % num
if x < self.size.x -1:
row += ','
out += row + newline
return out
def vector_char(a, b):
"""
a から bへのベクトルを、n,e,w,s,u,dで求める。aとbは隣接していること。
"""
ba = (b[0]-a[0], b[1]-a[1], b[2]-a[2])
for k,v in unit_vec_xyz.iteritems():
if ba == v:
return k
raise Exception('vector not found')
def draw_line_next(ban, number=0, prev=None, curr=None):
"""
直前に引いた線は、prevからcurrだったとき、
セルcurrから、セルnext_xyzへ、1マスだけ、線を引く。
next_xyzは、ランダムに決定する。
#
# prev curr next_xyz
# ● ● ○
#
#
"""
neig = ban.neighbors(curr)
# sは、候補となる方角の文字(n,e,w,s,u,d)で構成された文字列。このあとシャッフルする
if prev == None:
s = template_move0
else:
s = template_move1
vec_char = vector_char(prev, curr)
s = s.replace('G', vec_char)
if debug: print('0: s=', s)
# 隣接セル(n,e,w,s,u,d)に、線が引けるか?事前にチェックする
for i in range(0, len(template_move)):
vec_char = template_move[i]
next_xyz = ban.move_xyz_to(curr, vec_char)
if debug: print('curr=', curr, ' vec_char=', vec_char, ' next_xyz=', next_xyz)
drawable = True
if not ban.inside(next_xyz):
drawable = False # 盤からはみ出た
elif ban.get_xyz(next_xyz) != 0:
drawable = False # すでに線が引かれている
else:
# next_xyzの隣接セルで、番号がnumberのセルの個数を数える
next_neigh = ban.neighbors(next_xyz)
same_number = 0
for j in range(0, len(template_move)):
if next_neigh[template_move[j]] == number:
same_number += 1
if 2 <= same_number:
# 2以上あるということは、ループができるということ(そのはず)
drawable = False
if not drawable:
s = s.replace(vec_char, '') # 候補から削除
if debug: print('1: s=', s)
if len(s) == 0:
return curr, None # もう線を引けない
rs = ''.join(random.sample(s, len(s))) # sをシャフルしてrsに
vec_char = rs[0]
if debug: print('vec_char=', vec_char)
next_xyz = ban.move_xyz_to(curr, vec_char)
ban.set_xyz(next_xyz, number)
prev = curr
curr = next_xyz
return prev, curr
def draw_line(ban, number, max_retry=1, dont_use=[], Q_data={}):
"""
線numberを、ランダムに引いてみる。
"""
trial = 0
if debug: print('number=', number)
while trial < max_retry:
trial += 1
#print('dont_use=', dont_use)
start = ban.find_zero_random(dont_use) # 始点をランダムに決定
end = None
if debug: print('start=', start)
if start is False:
return False
ban.set_xyz(start, number)
line_length = 0
prev = None
curr = start
while curr is not None:
line_length += 1
if debug: print('prev=', prev, ' curr=', curr)
if debug: ban.print()
prev, curr = draw_line_next(ban, prev=prev, curr=curr, number=number)
if curr != None:
end = curr
if line_length == 1:
# 1マスも引けなかった。1マスだけの線はありえないので、消す。
# startの値はタプルなので、copyしなくてよいはず
if debug: print('clear start=', start)
ban.set_xyz(start, 0)
dont_use.append(start)
trial -= 1 # この場合、trial回数は数えないことにする
elif (line_length <= 2) and (trial < max_retry): # 短い線は、おもしろくないので、引き直す
if verbose: print('rip LINE#%d' % number)
ban.rip_line(number)
else:
# 線が引けた
Q_data[number] = {'start': start, 'end': end, 'length': line_length}
return True
# リトライする
if verbose:
print('retry %d/%d LINE#%d, #dont_use=%d' % (trial, max_retry, number, len(dont_use)))
# 線が引けなかった
return False
def generate(x,y,z, num_lines=0, max_retry=1, Q_data={}, dont_use=[]):
"""
盤サイズ(x,y,z)のときの、解答データと問題データを、ランダムに生成する。
線の本数は、最大でnum_linesまでとする。
"""
ban = Ban(x,y,z)
for line in range(1, 1+num_lines):
if draw_line(ban, line, max_retry=max_retry, dont_use=dont_use, Q_data=Q_data) == False:
return line-1, ban
return num_lines, ban
def Q_text(Q_data):
"問題データのテキストを生成する。"
size = Q_data['size']
out = 'SIZE %dX%dX%d%s' % (size[0], size[1], size[2], newline)
num_lines = Q_data['line_num']
out += 'LINE_NUM %d%s' % (num_lines, newline)
for j in range(1, 1+num_lines):
s = Q_data[j]['start']
e = Q_data[j]['end']
out += 'LINE#%d (%d,%d,%d) (%d,%d,%d)%s' % (j, s[0],s[1],s[2]+1, e[0],e[1],e[2]+1, newline)
return out
def excel(ban, basename):
"Excelファイル(.xlsx)に書き出す。"
wb = Workbook()
bgYellow = openpyxl.styles.PatternFill(patternType='solid', fgColor='FFFFFF00')
bgIndex = openpyxl.styles.PatternFill(patternType='solid', fgColor='FFBBFFF6')
size = ban.get_size()
for z in range(0, size.z):
if z == 0:
wsl = wb.active
else:
wsl = wb.create_sheet()
wsl.title = '%s.%d' % (basename, z+1)
wsl['B1'] = u'行'
wsl['B2'] = u'列'
wsl['C1'] = 'A'
wsl['E1'] = ' / '
wsl['G1'] = u'層'
for cell in ['A1', 'A2', 'C1', 'D1', 'F1']:
wsl[cell].fill = bgYellow
wsl['A1'].value = size.x
wsl['A2'].value = size.y
wsl['D1'].value = z+1
wsl['F1'].value = size.z
for y in range(0, size.y):
for x in range(0, size.x):
num = ban.get_xyz((x,y,z))
wsl.cell(row=4+y, column=2+x).value = num
# Y座標
i = 0
for y in range(4, 4+size.y):
wsl.cell(row=y, column=1).value = i
wsl.cell(row=y, column=1).fill = bgIndex
i += 1
# X座標
i = 0
for x in range(2, 2+size.x):
wsl.cell(row=3, column=x).value = i
wsl.cell(row=3, column=x).fill = bgIndex
i += 1
# 列の幅
for x in range(1, 1+size.x+1):
wsl.column_dimensions[openpyxl.utils.get_column_letter(x)].width = 3.5
wb.save(filename=basename+'.xlsx')
def run(x,y,z, num_lines=0, max_retry=1, basename=None):
"""
指定されたサイズ、線数の問題データと正解データを自動生成して、ファイルbasename*.txtに書き出す。
@param x,y,z 盤のサイズ
@param num_lines 線の本数
@param basename 出力先ファイル名。問題ファイルはbasename_adc.txt、正解ファイルはbasename_adc_sol.txtになる。
"""
Q = {'size': (x, y, z)}
num_lines, ban = generate(x, y, z, num_lines=num_lines, max_retry=max_retry, Q_data=Q)
Q['line_num'] = num_lines
Q['empty_cells'] = ban.empty_cells()
print('number of lines:', Q['line_num'])
print('number of empty cells:', Q['empty_cells'])
#if verbose: ban.print()
#if verbose: print('Q=', Q)
txtQ = Q_text(Q)
txtA = ban.A_data()
# nlcheckする
nlc = NLCheck()
q = nlc.read_input_str(txtQ)
a = nlc.read_target_str(txtA)
#nlc.verbose = verbose
judges = nlc.check( q, a )
print("judges = ", judges)
# 描画する
nldraw2.setup_font('nonexistent') # あとで考える
images = nldraw2.draw(q, a, nlc)
for num, img in enumerate(images):
ifile = "%s.%d.gif" % (basename, num+1) # 層の番号は1から始まる
img.save(ifile, 'gif')
print(ifile)
if 1 < len(images):
nldraw2.merge_images(images).save(basename+'.gif', 'gif')
# QとAを出力する
if basename is None:
print(txtQ)
print(txtA)
else:
qfile = '%s_adc.txt' % basename
with open(qfile, 'w') as f:
f.write(txtQ)
afile = '%s_adc_sol.txt' % basename
with open(afile, 'w') as f:
f.write(txtA)
excel(ban, basename)
def test1():
"動作確認"
x,y,z = _size
ban = Ban(x,y,z)
ban.set(0,0,0, 1)
ban.set(1,0,0, 2)
ban.set(0,1,0, 3)
ban.set(x-1, y-1, z-1, 1)
ban.set(x-2, y-1, z-1, 2)
ban.set(x-1, y-2, z-1, 3)
ban.print()
def main():
global size, nlines, debug, verbose
import argparse
parser = argparse.ArgumentParser(description='NumberLink Q generator')
parser.add_argument('-d', '--debug', action='store_true', default=debug, help='enable debug (default: %(default)s)')
parser.add_argument('-v', '--verbose', action='store_true', default=verbose, help='verbose output (default: %(default)s)')
parser.add_argument('-x', metavar='X', default=_size[0], type=int, help='size X (default: %(default)s)')
parser.add_argument('-y', metavar='Y', default=_size[1], type=int, help='size Y (default: %(default)s)')
parser.add_argument('-z', metavar='Z', default=_size[2], type=int, help='size Z (default: %(default)s)')
parser.add_argument('-l', '--lines', metavar='N', default=nlines, type=int, help='max number of lines (default: %(default)s)')
parser.add_argument('-r', '--retry', metavar='N', default=retry, type=int, help='max number of retry (default: %(default)s)')
parser.add_argument('-o', '--output', metavar='FILE', help='output file')
#parser.add_argument('--test1', action='store_true', help='run test1')
args = parser.parse_args()
debug = args.debug
verbose = args.verbose
#if args.test1: test1()
run(args.x, args.y, args.z, num_lines=args.lines, basename=args.output, max_retry=args.retry)
if __name__ == "__main__":
main()
| bsd-3-clause | 9,050,501,921,223,880,000 | 31.071594 | 130 | 0.518615 | false | 2.508037 | false | false | false |
miyyer/rmn | util.py | 1 | 2018 | import theano, cPickle, h5py, lasagne, random, csv, gzip
import numpy as np
import theano.tensor as T
# convert csv into format readable by rmn code
def load_data(span_path, metadata_path):
x = csv.DictReader(gzip.open(span_path, 'rb'))
wmap, cmap, bmap = cPickle.load(open(metadata_path, 'rb'))
max_len = -1
revwmap = dict((v,k) for (k,v) in wmap.iteritems())
revbmap = dict((v,k) for (k,v) in enumerate(bmap))
revcmap = dict((v,k) for (k,v) in cmap.iteritems())
span_dict = {}
for row in x:
text = row['Words'].split()
if len(text) > max_len:
max_len = len(text)
key = '___'.join([row['Book'], row['Char 1'], row['Char 2']])
if key not in span_dict:
span_dict[key] = []
span_dict[key].append([wmap[w] for w in text])
span_data = []
for key in span_dict:
book, c1, c2 = key.split('___')
book = np.array([revbmap[book], ]).astype('int32')
chars = np.array([revcmap[c1], revcmap[c2]]).astype('int32')
# convert spans to numpy matrices
spans = span_dict[key]
s = np.zeros((len(spans), max_len)).astype('int32')
m = np.zeros((len(spans), max_len)).astype('float32')
for i in range(len(spans)):
curr_span = spans[i]
s[i][:len(curr_span)] = curr_span
m[i][:len(curr_span)] = 1.
span_data.append([book, chars, s, m])
return span_data, max_len, wmap, cmap, bmap
def generate_negative_samples(num_traj, span_size, negs, span_data):
inds = np.random.randint(0, num_traj, negs)
neg_words = np.zeros((negs, span_size)).astype('int32')
neg_masks = np.zeros((negs, span_size)).astype('float32')
for index, i in enumerate(inds):
rand_ind = np.random.randint(0, len(span_data[i][2]))
neg_words[index] = span_data[i][2][rand_ind]
neg_masks[index] = span_data[i][3][rand_ind]
return neg_words, neg_masks
| mit | 8,096,248,828,738,338,000 | 35.690909 | 106 | 0.561447 | false | 3.062215 | false | false | false |
anksp21/Community-Zenpacks | ZenPacks.community.DellEqualLogicMon/ZenPacks/community/DellEqualLogicMon/DellEqualLogicVolume.py | 2 | 3866 | ################################################################################
#
# This program is part of the DellEqualLogicMon Zenpack for Zenoss.
# Copyright (C) 2010 Eric Enns.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
from Globals import DTMLFile, InitializeClass
from Products.ZenModel.OSComponent import *
from Products.ZenRelations.RelSchema import *
from Products.ZenModel.ZenossSecurity import *
from DellEqualLogicComponent import *
from Products.ZenUtils.Utils import convToUnits
from Products.ZenUtils.Utils import prepId
import logging
log = logging.getLogger("zen.DellEqualLogicVolume")
def manage_addVolume(context, id, userCreated, REQUEST=None):
svid = prepId(id)
sv = DellEqualLogicVolume(svid)
context._setObject(svid, sv)
sv = context._getOb(svid)
if userCreated: sv.setUserCreatedFlag()
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(context.absolute_url()+'/manage_main')
return sv
class DellEqualLogicVolume(OSComponent, DellEqualLogicComponent):
portal_type = meta_type = 'DellEqualLogicVolume'
caption = ""
volumeProvisionedSize = 0
volumeReservedSize = 0
thinProvisioned = 2
state = "OK"
_properties = OSComponent._properties + (
{'id':'caption', 'type':'string', 'mode':'w'},
{'id':'state', 'type':'string', 'mode':'w'},
{'id':'volumeProvisionedSize', 'type':'int', 'mode':'w'},
{'id':'volumeReservedSize', 'type':'int', 'mode':'w'},
{'id':'thinProvisioned', 'type':'int', 'mode':'w'},
)
_relations = OSComponent._relations + (
("os", ToOne(
ToManyCont,
"ZenPacks.community.DellEqualLogicMon.DellEqualLogicDevice.DellEqualLogicDeviceOS",
"volumes")),
)
factory_type_information = (
{
'id' : 'Volume',
'meta_type' : 'Volume',
'description' : """Arbitrary device grouping class""",
'icon' : 'StoragePool_icon.gif',
'product' : 'ZenModel',
'factory' : 'manage_addVolume',
'immediate_view' : 'viewDellEqualLogicVolume',
'actions' :
(
{ 'id' : 'status'
, 'name' : 'Status'
, 'action' : 'viewDellEqualLogicVolume'
, 'permissions' : (ZEN_VIEW,)
},
{ 'id' : 'events'
, 'name' : 'Events'
, 'action' : 'viewEvents'
, 'permissions' : (ZEN_VIEW, )
},
{ 'id' : 'perfConf'
, 'name' : 'Template'
, 'action' : 'objTemplates'
, 'permissions' : (ZEN_CHANGE_DEVICE, )
},
{ 'id' : 'viewHistory'
, 'name' : 'Modifications'
, 'action' : 'viewHistory'
, 'permissions' : (ZEN_VIEW_MODIFICATIONS,)
},
)
},
)
def reservedSize(self):
return self.volumeReservedSize or 0
def reservedSizeString(self):
return convToUnits(self.reservedSize(), divby=1024)
def provisionedSize(self):
return self.volumeProvisionedSize or 0
def provisionedSizeString(self):
return convToUnits(self.provisionedSize(), divby=1024)
def isThinProvisioned(self):
if (self.thinProvisioned == 1):
return "true"
else:
return "false"
# def getRRDNames(self):
# return ['Volume_Occupancy']
InitializeClass(DellEqualLogicVolume)
| gpl-2.0 | -4,979,848,674,276,066,000 | 32.617391 | 95 | 0.53104 | false | 3.981462 | false | false | false |
jodal/mopidy-gmusic | mopidy_gmusic/playlists.py | 1 | 4447 | from __future__ import unicode_literals
import logging
import operator
from mopidy import backend
from mopidy.models import Playlist, Ref
logger = logging.getLogger(__name__)
class GMusicPlaylistsProvider(backend.PlaylistsProvider):
def __init__(self, *args, **kwargs):
super(GMusicPlaylistsProvider, self).__init__(*args, **kwargs)
self._radio_stations_as_playlists = (
self.backend.config['gmusic']['radio_stations_as_playlists'])
self._radio_stations_count = (
self.backend.config['gmusic']['radio_stations_count'])
self._radio_tracks_count = (
self.backend.config['gmusic']['radio_tracks_count'])
self._playlists = {}
def as_list(self):
refs = [
Ref.playlist(uri=pl.uri, name=pl.name)
for pl in self._playlists.values()]
return sorted(refs, key=operator.attrgetter('name'))
def get_items(self, uri):
playlist = self._playlists.get(uri)
if playlist is None:
return None
return [Ref.track(uri=t.uri, name=t.name) for t in playlist.tracks]
def create(self, name):
pass # TODO
def delete(self, uri):
pass # TODO
def lookup(self, uri):
return self._playlists.get(uri)
def refresh(self):
playlists = {}
# add thumbs up playlist
tracks = []
for track in self.backend.session.get_promoted_songs():
trackId = None
if 'trackId' in track:
trackId = track['trackId']
elif 'storeId' in track:
trackId = track['storeId']
if trackId:
tracks += self.backend.library.lookup(
'gmusic:track:' + trackId)
if len(tracks) > 0:
uri = 'gmusic:playlist:promoted'
playlists[uri] = Playlist(uri=uri, name='Promoted', tracks=tracks)
# load user playlists
for playlist in self.backend.session.get_all_user_playlist_contents():
tracks = []
for track in playlist['tracks']:
if not track['deleted']:
tracks += self.backend.library.lookup('gmusic:track:' +
track['trackId'])
uri = 'gmusic:playlist:' + playlist['id']
playlists[uri] = Playlist(uri=uri,
name=playlist['name'],
tracks=tracks)
# load shared playlists
for playlist in self.backend.session.get_all_playlists():
if playlist.get('type') == 'SHARED':
tracks = []
tracklist = self.backend.session.get_shared_playlist_contents(
playlist['shareToken'])
for track in tracklist:
tracks += self.backend.library.lookup('gmusic:track:' +
track['trackId'])
uri = 'gmusic:playlist:' + playlist['id']
playlists[uri] = Playlist(uri=uri,
name=playlist['name'],
tracks=tracks)
l = len(playlists)
logger.info('Loaded %d playlists from Google Music', len(playlists))
# load radios as playlists
if self._radio_stations_as_playlists:
logger.info('Starting to loading radio stations')
stations = self.backend.session.get_radio_stations(
self._radio_stations_count)
for station in stations:
tracks = []
tracklist = self.backend.session.get_station_tracks(
station['id'], self._radio_tracks_count)
for track in tracklist:
tracks += self.backend.library.lookup('gmusic:track:' +
track['nid'])
uri = 'gmusic:playlist:' + station['id']
playlists[uri] = Playlist(uri=uri,
name=station['name'],
tracks=tracks)
logger.info('Loaded %d radios from Google Music',
len(playlists) - l)
self._playlists = playlists
backend.BackendListener.send('playlists_loaded')
def save(self, playlist):
pass # TODO
| apache-2.0 | -6,140,924,827,346,596,000 | 37.336207 | 79 | 0.515179 | false | 4.473843 | false | false | false |
robertavram/Linux-Server-Configuration | FlaskApp/secret_keys.py | 1 | 1363 | CSRF_SECRET_KEY, SESSION_KEY = "0h97kel3aq17853645odikh97kel3aq4vndtonignnobfjh", "3aq4vnd4vndtonignnt801785onignnob"
# Google APIs
GOOGLE_APP_ID = '768017853645-odikh97kel3aq4vndtonignnobfjhkea.apps.googleusercontent.com'
GOOGLE_APP_SECRET = 'gb2X0NdP36xF-2kmj_S2IN3U'
#GOOGLE_REDIRECT_URI = 'http://localhost:5000/auth/google/callback'
#GOOGLE_REDIRECT_URI = 'http://www.flutterhub.com/auth/google/callback'
GOOGLE_REDIRECT_URI = 'http://52.27.185.214/auth/google/callback'
# Facebook auth apis
FB_APP_ID = '382093888646657'
FB_APP_SECRET = '2ba3373b14a801141d26c32bf9c9b205'
#FB_REDIRECT_URI = "http://localhost:5000/auth/facebook/callback"
#FB_REDIRECT_URI = "http://www.flutterhub.com/auth/facebook/callback"
FB_REDIRECT_URI = "http://52.27.185.214/auth/facebook/callback"
# Key/secret for both LinkedIn OAuth 1.0a and OAuth 2.0
# https://www.linkedin.com/secure/developer
LINKEDIN_KEY = 'consumer key'
LINKEDIN_SECRET = 'consumer secret'
# https://manage.dev.live.com/AddApplication.aspx
# https://manage.dev.live.com/Applications/Index
WL_CLIENT_ID = 'client id'
WL_CLIENT_SECRET = 'client secret'
# https://dev.twitter.com/apps
TWITTER_CONSUMER_KEY = 'oauth1.0a consumer key'
TWITTER_CONSUMER_SECRET = 'oauth1.0a consumer secret'
# https://foursquare.com/developers/apps
FOURSQUARE_CLIENT_ID = 'client id'
FOURSQUARE_CLIENT_SECRET = 'client secret'
| apache-2.0 | -3,014,239,941,012,855,300 | 40.30303 | 117 | 0.775495 | false | 2.606119 | false | false | false |
davelab6/html2markdown | html2markdown.py | 1 | 38624 | #!/usr/bin/python
#
# html2markdown
# Copyright 2005 Dale Sedivec
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# XXX
# TODO:
# * Change constant names to upper case.
# * Test wrapping of HTML in Markdown source with long attributes that
# have whitespace in their contents.
# * Should probably put non-breaking spaces in the middle of a
# Markdown image markup.
# * Stop all the interpolation and concatenation operations and take
# advantage of buffers more (use write not +)
# * In code, do a consistency check WRT indentation on continued
# statements.
# * Look at inline HTML in indented block elements (block quote, list,
# maybe code block)
# * Test CLI.
# * Check through for classes that are too big (refactoring)
# * Write test for <li>[whitespace]<p>...</p></li>. I'm not sure that
# Markdown will ever generate this, but it still looks likely to
# happen in hand-written HTML.
# * Make test with numeric entity to make sure handle_charref is
# implemented.
# * It's possible that (almost) everywhere we do an isinstance()
# check, we should really be doing some kind of hasFeature() check,
# hasFeature() being a method we implement? More flexible.
from HTMLParser import HTMLParser
from StringIO import StringIO
import logging
import textwrap
import re
import string
import inspect
import sys
from itertools import repeat, chain
WRAP_AT_COLUMN = 70
# XXX This is kind of dumb, really, since certain types of syntax
# demand certain types of indents. To parameterize this, we should
# probably find all indent instances, change them to this variable,
# then see what breaks with one indent or the other and hard code that
# particular indent.
MARKDOWN_INDENT = " "
log = logging.getLogger("html2markdown")
try:
any
except NameError:
def any(items):
for item in items:
if item:
return True
return False
def all(items):
for item in items:
if not item:
return False
return True
# XXX TEST this is not tested? Plus it probably doesn't belong here.
# At least document it.
# def getMyCaller(): #pragma: no cover
# try:
# callerFrame = inspect.getouterframes(inspect.currentframe())[2]
# return "%s:%d" % (callerFrame[3], callerFrame[2])
# finally:
# del callerFrame
class Box (object):
def __init__(self):
self.parent = None
def render(self, writer):
raise NotImplementedError("you must overload this") #pragma: no cover
width = property(fget=lambda self: self.parent.width)
class ContainerBox (Box):
def __init__(self):
super(ContainerBox, self).__init__()
self.children = []
def addChild(self, child):
self.children.append(child)
child.parent = self
def makeChild(self, childClass):
child = childClass()
self.addChild(child)
return child
class CompositeBox (ContainerBox):
def __init__(self, addNewLines=True):
super(CompositeBox, self).__init__()
self.__addNewLineAfterChild = []
self.__addNewLines = addNewLines
def addChild(self, child):
super(CompositeBox, self).addChild(child)
self.__addNewLineAfterChild.append(self.__addNewLines)
def insertNewLineAfterChild(self, childIndex):
assert childIndex >= 0, childIndex
self.__addNewLineAfterChild[childIndex] = True
def insertNewLineBeforeLastChild(self):
self.__addNewLineAfterChild[-2] = True
def render(self, writer):
if self.children:
assert len(self.__addNewLineAfterChild) == len(self.children)
addNewLine = iter(self.__addNewLineAfterChild)
self.children[0].render(writer)
for child in self.children[1:]:
if addNewLine.next():
writer("\n")
child.render(writer)
class RootBox (CompositeBox):
# Override the property set in a superclass. (XXX Is this the
# cleanest way to do this?)
width = None
def __init__(self, width):
super(RootBox, self).__init__()
self.width = width
def ijoin(iterable, joinString):
"""Yields joinString between items from iterable.
s.join(i) == "".join(ijoin(i, s))
"""
iterator = iter(iterable)
yield iterator.next()
for item in iterator:
yield joinString
yield item
class TextBox (Box):
def __init__(self):
self.__lines = [StringIO()]
def addText(self, text):
self.__lines[-1].write(text)
def addLineBreak(self):
self.__lines.append(StringIO())
def _iterLines(self):
for line in self.__lines:
yield line.getvalue()
def render(self, writer):
for string in ijoin(self._iterLines(), " \n"):
writer(string)
if string[-1] != "\n":
writer("\n")
class iterAllButLast (object):
def __init__(self, iterable):
self._iterator = iter(iterable)
def __iter__(self):
lastItem = self._iterator.next()
for item in self._iterator:
yield lastItem
lastItem = item
self.last = lastItem
class WrappedTextBox (TextBox):
__wordBoundaryRegexp = re.compile(r'(\s+)')
def render(self, writer):
def fill(line, lastLineSuffix=""):
return self.__fill(line, self.width, lastLineSuffix, writer)
lines = iterAllButLast(self._iterLines())
for line in lines:
writer(fill(line, " "))
writer(fill(lines.last))
# XXX REFACTOR I'd say refactor this, but right now I don't see a
# particularly clean way to do it.
#
# There should be a way, though. All this code seems so verbose,
# if not needlessly complex.
def __fill(self, text, width, lastLineSuffix, writer):
log.debug("fill text=%r suffix=%r" % (text, lastLineSuffix))
words = self.__splitTextIntoWordsAndSpaces(text)
firstSpace, firstWord = words.pop(0)
linePosition = self.__writeFirstWordOnLine(firstWord, writer)
for spaceBefore, word in words:
spaceLen = len(spaceBefore)
wordLen = len(word)
if (linePosition + spaceLen + wordLen) > width:
writer("\n")
self.__writeFirstWordOnLine(word, writer)
linePosition = wordLen
else:
writer(spaceBefore)
writer(word)
linePosition += spaceLen + wordLen
writer(lastLineSuffix)
writer("\n")
# The second grouping prevents **strong** from tripping this
# regular expression.
__beginningOfLineTokens = re.compile(r"^([0-9]+\.|[*+-]([^*]|$)|#)")
def __writeFirstWordOnLine(self, word, writer):
"""Writes the first word using writer, adding escaping if needed.
Markdown assigns special meaning to certain tokens when they
appear at the beginning of a line. We have to esacpe these
special characters if they happen to appear at the beginning
of a line after a paragraph is wrapped. This function will
return the total number of characters written, which might be
bigger than len(word) if an escape character is added.
"""
wordLen = len(word)
tokenMatch = self.__beginningOfLineTokens.search(word)
if tokenMatch:
matchEndPosition = tokenMatch.end(1)
log.debug("word=%r matchEndPosition=%r" % (word, matchEndPosition))
writer(word[0:matchEndPosition - 1])
writer("\\")
writer(word[matchEndPosition - 1:])
return wordLen + 1
else:
log.debug("word=%r no match" % (word,));
writer(word)
return wordLen
def __splitTextIntoWordsAndSpaces(self, text):
"""
Builds and returns a list of tuples in the form (space
before word, word), where the spaces and words are determined
by splitting text on word boundaries. This is used primarily
by the fill() method.
"""
log.debug("splitTextIntoWordsAndSpaces: text=%r" % (text,))
parts = self.__wordBoundaryRegexp.split(text)
log.debug("splitTextIntoWordsAndSpaces: normalizing %r" % (parts,))
self.__normalizeSplitTextParts(parts)
log.debug("splitTextIntoWordsAndSpaces: after normalizing %r"
% (parts,))
words = []
lastWord = ""
for spaceBefore, word in zip(parts[::2], parts[1::2]):
spaceBefore = self.__normalizeWordSpacing(spaceBefore, lastWord)
words.append((spaceBefore, word))
lastWord = word
return words
def __normalizeWordSpacing(self, spaceBefore, precedingWord):
# If the input is "foo.\nbar" you'll end up with "foo. bar"
# even if you separate your sentences with two spaces. I'm
# not inclined to do anything to fix this until someone really
# bitches about it. Also, two spaces are "safer" than one in
# the case of (for example) "Mr.\nSmith".
if spaceBefore[0:2] == " " and precedingWord[-1] in ".?!:":
spaceBefore = " "
else:
spaceBefore = " "
return spaceBefore
def __normalizeSplitTextParts(self, parts):
"""
This method makes sure that the parts list is a list of space,
word, space, word, space, word, ... The first element in the
list will always be the empty string (an empty space).
This method is used by the wrapping code.
"""
if parts[0] == "":
del parts[1]
else:
parts.insert(0, "")
if parts[-1] == "":
del parts[-2:]
assert (len(parts) % 2) == 0, "List normalizing failed: %r" % (parts,)
class IndentedBox (ContainerBox):
def __init__(self, indent, firstLineIndent=None):
super(IndentedBox, self).__init__()
self.__indentLength = len(indent)
self.__subsequentLineIndent = indent
if firstLineIndent is not None:
assert len(firstLineIndent) == self.__indentLength
self.__firstLineIndent = firstLineIndent
else:
self.__firstLineIndent = indent
def render(self, writer):
childRendering = StringIO()
self.__renderChildren(childRendering.write)
self.__rewindFile(childRendering)
self.__renderLinesFromFile(childRendering, writer)
def __renderLinesFromFile(self, childRendering, writer):
indentGenerator = chain([self.__firstLineIndent],
repeat(self.__subsequentLineIndent))
for line in childRendering:
indent = indentGenerator.next()
if self.__isBlankLine(line):
indent = indent.rstrip()
writer(indent)
writer(line)
def __isBlankLine(self, line):
return not line.rstrip("\r\n")
def __rewindFile(self, childRendering):
childRendering.seek(0)
def __renderChildren(self, writer):
for child in self.children:
child.render(writer)
def _getWidth(self):
return super(IndentedBox, self).width - self.__indentLength
width = property(fget=_getWidth)
class RawTextBox (TextBox):
"""A TextBox whose contents shouldn't have Markdown elements escaped."""
pass
# Based on DOM. Should probably refer to this as MDDOM (Markdown
# DOM). I think I used "micro-DOM" somewhere else.
class Node (object):
def __init__(self):
self.parent = None
class ContainerNode (Node):
def __init__(self):
super(ContainerNode, self).__init__()
self.children = []
def makeChild(self, type):
child = type()
self.addChild(child)
return child
def addChild(self, child):
self.children.append(child)
child.parent = self
# An InlineNode is a Node that does not render to a Box, but rather
# modifies the Box inside which it occurs. Currently this is used to
# mark Nodes whose transformation requires a Box that supports
# addText().
class InlineNode (Node):
pass
# A TextContainer is a ContainerNode that may also hold
# TextRelatedNodes. The HTML parser will ignore text that occurs
# outside of a TextContainer.
class TextContainer (ContainerNode):
pass
class InlineTextContainer (InlineNode, TextContainer):
pass
class Text (InlineNode):
def __init__(self, text=""):
super(Node, self).__init__()
self.text = text
class Document (ContainerNode):
pass
class List (ContainerNode):
pass
class OrderedList (List):
def getChildIndex(self, child):
return self.children.index(child)
class UnorderedList (List):
pass
class ListItem (TextContainer):
def getItemNumber(self):
# This method is only valid when this is an item in an
# OrderedList. Obviously.
return self.parent.getChildIndex(self) + 1
class BlockQuote (ContainerNode):
pass
class Paragraph (TextContainer):
pass
class Preformatted (TextContainer):
pass
class HTML (TextContainer):
pass
class Code (InlineTextContainer):
pass
class Emphasized (InlineTextContainer):
pass
class Strong (InlineTextContainer):
pass
class LineBreak (InlineNode):
pass
class Image (InlineNode):
def __init__(self, url, alternateText=None, title=None):
super(Image, self).__init__()
self.url = url
self.alternateText = alternateText
self.title = title
class Heading (TextContainer):
def __init__(self, level):
super(Heading, self).__init__()
self.level = level
class HorizontalRule (Node):
pass
class Anchor (InlineTextContainer):
def __init__(self, url, title=None):
super(Anchor, self).__init__()
self.url = url
self.title = title
class UnknownInlineElement (InlineTextContainer):
def __init__(self, tag, attributes):
super(UnknownInlineElement, self).__init__()
self.tag = tag
self.attributes = attributes
class MarkdownTransformer (object):
__formattingCharactersRegexp = re.compile(r"((?<=\S)([*_])|([*_])(?=\S))")
def transform(self, document):
rootBox = RootBox(width=WRAP_AT_COLUMN)
self.__dispatchChildren(document, rootBox)
return rootBox
def __dispatch(self, node, parentBox):
log.debug("Dispatching node=%r parentBox=%r" % (node, parentBox))
if isinstance(node, List):
nodeTypeName = "List"
else:
nodeTypeName = type(node).__name__
getattr(self, "_transform" + nodeTypeName)(node, parentBox)
# self.__handlers[type(node)](self, node, parentBox)
def __dispatchChildren(self, node, parentBox):
self.__dispatchList(node.children, parentBox)
def __dispatchList(self, nodeList, parentBox):
for node in nodeList:
self.__dispatch(node, parentBox)
def _transformParagraph(self, node, parentBox):
box = parentBox.makeChild(WrappedTextBox)
self.__dispatchChildren(node, box)
def _transformBlockQuote(self, node, parentBox):
indentedBox = IndentedBox(indent="> ")
parentBox.addChild(indentedBox)
dividedBox = indentedBox.makeChild(CompositeBox)
self.__dispatchChildren(node, dividedBox)
def _transformPreformatted(self, node, parentBox):
indentedBox = IndentedBox(indent=MARKDOWN_INDENT)
parentBox.addChild(indentedBox)
textBox = indentedBox.makeChild(TextBox)
self.__dispatchChildren(node, textBox)
def _transformText(self, node, parentBox):
if isinstance(node.parent, (HTML, Preformatted, Code)) \
or isinstance(parentBox, RawTextBox):
text = node.text
else:
text = self.__escapeFormattingCharacters(node.text)
parentBox.addText(text)
def __escapeFormattingCharacters(self, data):
escapedData = data.replace("\\", "\\\\")
escapedData = self.__formattingCharactersRegexp.sub(r"\\\1",
escapedData)
return escapedData
def _transformList(self, node, parentBox):
box = CompositeBox(addNewLines=False)
parentBox.addChild(box)
self.__dispatchChildren(node, box)
self.__addExplicitParagraphsInList(node, box)
# XXX REFACTOR if you dare. The list code (here and ListItem
# processing) is nigh incomprehensible. Of course, I can't even
# figure out how to simplify this function since the way it
# figures out where to put explicit paragraphs is so arcane (and
# the rules for how to generate <p></p> are, shall we say,
# "tedious").
def __addExplicitParagraphsInList(self, node, box):
paragraphAnalysis = []
for listItem in node.children:
isSingleParagraph = False
if isinstance(listItem.children[0], Paragraph):
isSingleParagraph = True
for child in listItem.children[1:]:
if isinstance(child, List):
break
elif not isinstance(child, Text):
isSingleParagraph = False
break
paragraphAnalysis.append(isSingleParagraph)
log.debug("paragraphAnalysis=%r" % (paragraphAnalysis,))
consecutiveSingleParas = 0
for childIndex, isSingleParagraph in enumerate(paragraphAnalysis):
if isSingleParagraph:
consecutiveSingleParas += 1
if consecutiveSingleParas >= 2:
box.insertNewLineAfterChild(childIndex - 1)
else:
if consecutiveSingleParas == 1:
if any([ isinstance(n, List) for n
in node.children[childIndex - 1].children ]):
# A List node's children can only be
# ListItems, and a ListItem always generates
# an outer CompositeBox, so box.children are
# all CompositeBoxes.
box.children[childIndex - 1].insertNewLineAfterChild(0)
else:
box.insertNewLineBeforeLastChild()
consecutiveSingleParas = 0
# XXX Near exact copy of above code.
if consecutiveSingleParas == 1:
if any([ isinstance(n, List) for n
in node.children[childIndex].children ]):
box.children[childIndex].insertNewLineAfterChild(0)
else:
box.insertNewLineBeforeLastChild()
# XXX REFACTOR
def _transformListItem(self, node, parentBox):
BOX_AT_BULLET_LEVEL = 1
BOX_AT_LIST_ITEM_LEVEL = 2
outerBox = CompositeBox(addNewLines=False)
parentBox.addChild(outerBox)
# XXX This code to determine indents will have a tendency to
# not work right if you want to make MARKDOWN_INDENT = "\t"
# (for example).
bulletIndent = " "
if isinstance(node.parent, OrderedList):
number = "%d. " % (node.getItemNumber(),)
number = number + " " * (4 - len(number))
# XXX Should we use len(number) instead of 4 here? Are
# more than four spaces on continued lines fine with
# Markdown?
indentedBox = IndentedBox(firstLineIndent=number,
indent=bulletIndent)
else:
indentedBox = IndentedBox(firstLineIndent="* ",
indent=bulletIndent)
outerBox.addChild(indentedBox)
innerBox = indentedBox.makeChild(CompositeBox)
children = node.children[:]
# The first child has to be in the indent box that has the
# list bullet.
if isinstance(children[0], InlineNode):
# A ListItem that starts with text can only have text or
# nested lists under it. I think.
log.debug("List item dispatching text children")
textBox = innerBox.makeChild(WrappedTextBox)
while children and isinstance(children[0], InlineNode):
self.__dispatch(children.pop(0), textBox)
elif isinstance(children[0], List):
# Immediate sublist.
listIndentBox = IndentedBox(indent=MARKDOWN_INDENT)
innerBox.addChild(listIndentBox)
self.__dispatch(children.pop(0), listIndentBox)
else:
self.__dispatch(children.pop(0), innerBox)
innerBoxType = BOX_AT_BULLET_LEVEL
for child in children:
if isinstance(child, Text):
# Ignore whitespace that occurs between elements.
continue
elif isinstance(child, (Preformatted, List)):
if innerBoxType != BOX_AT_LIST_ITEM_LEVEL:
innerBox = IndentedBox(indent=MARKDOWN_INDENT)
outerBox.addChild(innerBox)
if isinstance(child, Preformatted):
outerBox.insertNewLineBeforeLastChild()
innerBoxType = BOX_AT_LIST_ITEM_LEVEL
else:
if innerBoxType != BOX_AT_BULLET_LEVEL:
indentedBox = IndentedBox(indent=bulletIndent)
outerBox.addChild(indentedBox)
outerBox.insertNewLineBeforeLastChild()
innerBox = indentedBox.makeChild(CompositeBox)
innerBoxType = BOX_AT_BULLET_LEVEL
self.__dispatch(child, innerBox)
# XXX Might want to factor out this pattern.
def _transformHTML(self, node, parentBox):
box = parentBox.makeChild(TextBox)
self.__dispatchChildren(node, box)
__backtickRegexp = re.compile("`+")
def _transformCode(self, node, parentBox):
contents = self.__renderChildren(node)
codeDelimiter = self.__makeCodeDelimiter(contents)
parentBox.addText(codeDelimiter)
if contents[0] == "`":
parentBox.addText(" ")
parentBox.addText(contents)
if contents[-1] == "`":
parentBox.addText(" ")
parentBox.addText(codeDelimiter)
def __makeCodeDelimiter(self, content):
"""Returns the correct number of backticks to set off string as code.
Markdown requires you to use at least one more backtick to
introduce/conclude a code span than there are backticks within
the code span. For example, if contents="foo ``date`` bar",
Markdown would require ``` to be used to begin/end the code
span for that string.
"""
matches = self.__backtickRegexp.findall(content)
if matches:
codeDelimiterLength = max([ len(m) for m in matches ]) + 1
else:
codeDelimiterLength = 1
return "`" * codeDelimiterLength
def _transformEmphasized(self, node, parentBox):
parentBox.addText("_")
self.__dispatchChildren(node, parentBox)
parentBox.addText("_")
def _transformLineBreak(self, node, parentBox):
parentBox.addLineBreak()
def _transformImage(self, node, parentBox):
parentBox.addText("![")
parentBox.addText(node.alternateText)
parentBox.addText("](")
parentBox.addText(node.url)
if node.title:
parentBox.addText(' "')
parentBox.addText(node.title)
parentBox.addText('"')
parentBox.addText(")")
def _transformHeading(self, node, parentBox):
box = parentBox.makeChild(TextBox)
box.addText("#" * node.level + " ")
self.__dispatchChildren(node, box)
box.addText(" " + node.level * "#")
def _transformHorizontalRule(self, node, parentBox):
box = parentBox.makeChild(TextBox)
box.addText("---")
def _transformAnchor(self, node, parentBox):
# Sometimes this renders the contents twice: once as "raw
# text" (no escaping of formatting characters) so we can match
# a URL that might have Markdown formatting characters in it
# (f.e. http://example.com/foo_bar_baz), and the second time
# with Markdown escaping if the contents aren't the same as
# the href.
linkContents = self.__renderChildren(node, boxType=RawTextBox)
url = node.url
isMailto = url.startswith("mailto:")
if linkContents == url or (isMailto and linkContents == url[7:]):
parentBox.addText("<")
parentBox.addText(linkContents)
parentBox.addText(">")
else:
parentBox.addText("[")
parentBox.addText(self.__renderChildren(node))
parentBox.addText("](")
parentBox.addText(url)
if node.title:
parentBox.addText(' "')
parentBox.addText(node.title)
parentBox.addText('"')
parentBox.addText(")")
def __renderChildren(self, node, boxType=TextBox):
textBox = boxType()
self.__dispatchChildren(node, textBox)
contents = StringIO()
textBox.render(contents.write)
return contents.getvalue().strip()
def _transformStrong(self, node, parentBox):
parentBox.addText("**")
self.__dispatchChildren(node, parentBox)
parentBox.addText("**")
def _transformUnknownInlineElement(self, node, parentBox):
write = parentBox.addText
write("<")
write(node.tag)
for name, value in node.attributes:
if '"' in value:
quotingChar = "'"
else:
quotingChar = '"'
write(" ")
write(name)
write('=')
write(quotingChar)
write(value)
write(quotingChar)
if node.children:
write(">")
self.__dispatchChildren(node, parentBox)
write("</")
write(node.tag)
write(">")
else:
write(" />")
# XXX TEST Should test this?
class LineNumberedBuffer (StringIO):
__eolRegexp = re.compile(r"(\r?\n)")
def __init__(self):
StringIO.__init__(self)
self.__linePositions = [0]
def write(self, string):
parts = self.__eolRegexp.split(string)
log.debug("LineNumberedBuffer write split parts=%r" % (parts,))
for part in parts:
StringIO.write(self, part)
if "\n" in part:
log.debug("new line at %d" % (self.tell(),))
self.__linePositions.append(self.tell())
log.debug("LineNumberedBuffer.write final pos=%d" % (self.tell(),))
def seekLinePosition(self, lineNumber, offset):
"""Seek to an offset from the start of line lineNumber.
The first line is 1, the first character on a line is 0. This
is in line with HTMLParser.getpos().
"""
position = self.__linePositions[lineNumber - 1] + offset
log.debug("seekLinePosition (%d,%d)=%d" % (lineNumber, offset,
position))
self.seek(position, 0)
log.debug("seekLinePosition tell=%d" % (self.tell(),))
assert self.tell() == position, "seekLinePosition failed"
# XXX Turn this into MDDOMParser, outputs MDDOM? Then you take the
# Document and ship it off to MarkdownTransformer. Should at least
# give this class a better name.
class MarkdownTranslator (HTMLParser):
__translatedEntities = {"amp": "&",
"lt": "<",
"gt": ">",
"quot": '"'}
__unsupportedBlockElements = ("dl", "div", "noscript", "form", "table",
"fieldset", "address")
def reset(self):
HTMLParser.reset(self)
self.__shouldOutputStack = [False]
self.__unsupportedElementDepth = 0
self.__unsupportedBlockStart = None
self.__input = LineNumberedBuffer()
self.__currentNode = Document()
def feed(self, text):
self.__input.write(text)
HTMLParser.feed(self, text)
def handle_starttag(self, tag, attrs):
if self.__unsupportedElementDepth:
self.__unsupportedElementDepth += 1
elif tag == "code" \
and isinstance(self.__currentNode,
Preformatted) \
and len(self.__currentNode.children) == 0:
# Special case: ignore <code> immediately following <pre>.
# Markdown emits <pre><code>...</code></pre> for a
# preformatted text block.
#
# XXX In the interest of moving to just a DOM HTML parser,
# I think I support moving this logic to
# MarkdownTransformer.
pass
else:
# XXX REFACTOR
element = None
handler = self.__recognizedTags.get(tag)
if handler:
if not isinstance(handler, type):
element = handler(self, tag, attrs)
isBlock = handler.isBlock
elif attrs:
isBlock = not issubclass(handler, InlineNode)
else:
element = self.__currentNode.makeChild(handler)
else:
isBlock = tag in self.__unsupportedBlockElements
if not element and not isBlock:
element = UnknownInlineElement(tag, attrs)
self.__currentNode.addChild(element)
if element:
self.__currentNode = element
self.__shouldOutputStack.append(isinstance(element,
TextContainer))
else:
self.__enterUnsupportedBlockElement()
def handle_endtag(self, tag):
log.debug("Leaving tag=%r" % (tag,))
if self.__unsupportedElementDepth:
log.debug("Leaving unsupported element")
self.__leaveUnsupportedElement()
elif tag == "code" and isinstance(self.__currentNode,
Preformatted):
# Special case for </code></pre>. See similar exception
# in handle_starttag() for explanation.
pass
else:
log.debug("Leaving element")
self.__leaveElement()
def __enterUnsupportedBlockElement(self):
self.__unsupportedElementDepth = 1
self.__unsupportedBlockStart = self.getpos()
# XXX REFACTOR
def __leaveUnsupportedElement(self):
self.__unsupportedElementDepth -= 1
log.debug("unsupportedBlockDepth=%r"
% (self.__unsupportedElementDepth,))
if not self.__unsupportedElementDepth:
log.debug("Finished with unsupported block element");
log.debug("positions begin=%r end=%r"
% (self.__unsupportedBlockStart, self.getpos()))
html = self.__getUnsupportedBlockElementHTML()
htmlNode = self.__currentNode.makeChild(HTML)
htmlNode.addChild(Text(html))
self.__positionInputBufferAtEnd()
# XXX Maybe refactor -- or rename to something shorter (applies to
# all methods following this naming convention).
def __getUnsupportedBlockElementHTML(self):
"""Side effect: repositions self.__input."""
endPosition = self.__getEndOfTagPosition(self.getpos())
self.__input.seekLinePosition(*self.__unsupportedBlockStart)
startPosition = self.__input.tell()
htmlLength = endPosition - startPosition
log.debug("endPosition=%d startPosition=%d len=%d"
% (endPosition, startPosition, htmlLength))
html = StringIO()
html.write(self.__input.read(htmlLength))
html.write("\n")
return html.getvalue()
def __getEndOfTagPosition(self, startAt):
"""Side effect: repositions self.__input."""
self.__input.seekLinePosition(*startAt)
self.__searchInputForTagClose()
return self.__input.tell()
def __searchInputForTagClose(self):
# XXX expensive debugging statement
log.debug("searchInputForTagClose pos=%d input=%r"
% (self.__input.tell(),
self.__input.getvalue()))
while True:
nextCharacter = self.__input.read(1)
if not nextCharacter:
assert False, "premature tag end in input" #pragma: no cover
elif nextCharacter == ">":
break
def __positionInputBufferAtEnd(self):
self.__input.seek(0, 2)
def __leaveElement(self):
assert len(self.__shouldOutputStack) > 1
self.__shouldOutputStack.pop()
self.__currentNode = self.__currentNode.parent
# XXX REFACTOR
def _enterImg(self, tag, attributes):
if True not in map(lambda attr: attr[0] not in ("src", "alt", "title"),
attributes):
attributes = dict(attributes)
parameters = {"url": attributes["src"]}
if "alt" in attributes:
parameters["alternateText"] = attributes["alt"]
if "title" in attributes:
parameters["title"] = attributes["title"]
image = Image(**parameters)
self.__currentNode.addChild(image)
return image
else:
return None
_enterImg.isBlock = False
__numericEntityRegexp = re.compile("&#(x[0-9A-F]{2}|[0-9]{2,3});")
def __substituteNumericEntity(self, match):
return self.__translateNumericEntity(match.group(1))
def __translateNumericEntity(self, ref):
if ref[0] == "x":
value = int(ref[1:], 16)
else:
value = int(ref)
if self.__shouldDecodeNumericEntity(value):
return chr(value)
else:
return "&#%s;" % (ref,)
def __shouldDecodeNumericEntity(self, characterCode):
return 32 <= characterCode <= 126
def _enterA(self, tag, attributes):
if all([ attr[0] in ("href", "title") for attr in attributes ]):
attributes = dict(attributes)
# XXX REFACTOR This indentation/wrapping is ugly and looks
# unnecessary. Should think about reducing name lengths.
href = self.__numericEntityRegexp.sub(
self.__substituteNumericEntity, attributes["href"])
anchor = Anchor(href, title=attributes.get("title", None))
self.__currentNode.addChild(anchor)
return anchor
else:
return None
_enterA.isBlock = False
# XXX TEST <h*> with attributes.
def _enterHeading(self, tag, attributes):
level = int(tag[1:])
heading = Heading(level)
self.__currentNode.addChild(heading)
return heading
_enterHeading.isBlock = True
def __shouldOutput(self):
return self.__shouldOutputStack[-1]
def handle_data(self, data):
if self.__shouldOutput():
log.debug("output %r" % (data,))
self.__currentNode.addChild(Text(data))
def handle_entityref(self, name):
log.debug("entity=%r" % (name,))
if not self.__unsupportedElementDepth:
if name in self.__translatedEntities:
self.handle_data(self.__translatedEntities[name])
else:
self.handle_data("&%s;" % (name,))
def handle_charref(self, ref):
if not self.__unsupportedElementDepth:
self.handle_data(self.__translateNumericEntity(ref))
# XXX some day we should probably change this interface to write
# to a file, or to a callable
def getOutput(self):
assert isinstance(self.__currentNode, Document), `self.__currentNode`
log.debug(self.__renderTreeForDebug(self.__currentNode))
box = MarkdownTransformer().transform(self.__currentNode)
log.debug(self.__renderTreeForDebug(box))
result = StringIO()
box.render(result.write)
return result.getvalue()
# XXX OPTIMIZE Could short-circuit this code when debug is off, as
# an alternative to not calling it (log.debug("%s" %
# (__renderTreeForDebug(),))).
def __renderTreeForDebug(self, node):
result = StringIO()
result.write("(%s" % (node.__class__.__name__,))
if hasattr(node, "children"):
for child in node.children:
result.write(" ")
result.write(self.__renderTreeForDebug(child))
result.write(")")
return result.getvalue()
__recognizedTags = {"p": Paragraph,
"blockquote": BlockQuote,
"ol": OrderedList,
"ul": UnorderedList,
"li": ListItem,
"code": Code,
"em": Emphasized,
"pre": Preformatted,
"br": LineBreak,
"img": _enterImg,
"hr": HorizontalRule,
"a": _enterA,
"strong": Strong}
for level in range(1, 10):
__recognizedTags["h%d" % (level,)] = _enterHeading
def html2markdown(html):
return html2markdown_file(StringIO(html))
def html2markdown_file(inputFile):
translator = MarkdownTranslator()
for line in inputFile:
translator.feed(line)
translator.close()
return 0, translator.getOutput()
if __name__ == "__main__": #pragma: no cover
logging.basicConfig()
if len(sys.argv) > 1:
inputFile = open(sys.argv[1], "r")
else:
inputFile = sys.stdin
status, output = html2markdown_file(inputFile)
if status == 0:
sys.stdout.write(output)
sys.exit(status)
| gpl-2.0 | 6,418,462,569,879,237,000 | 35.130964 | 79 | 0.594061 | false | 4.293941 | false | false | false |
LLNL/spack | var/spack/repos/builtin/packages/py-joblib/package.py | 5 | 1234 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJoblib(PythonPackage):
"""Python function as pipeline jobs"""
homepage = "http://packages.python.org/joblib/"
url = "https://pypi.io/packages/source/j/joblib/joblib-0.14.0.tar.gz"
import_modules = [
'joblib', 'joblib.externals', 'joblib.externals.cloudpickle',
'joblib.externals.loky', 'joblib.externals.loky.backend'
]
version('0.14.0', sha256='6fcc57aacb4e89451fd449e9412687c51817c3f48662c3d8f38ba3f8a0a193ff')
version('0.13.2', sha256='315d6b19643ec4afd4c41c671f9f2d65ea9d787da093487a81ead7b0bac94524')
version('0.11', sha256='7b8fd56df36d9731a83729395ccb85a3b401f62a96255deb1a77220c00ed4085')
version('0.10.3', sha256='29b2965a9efbc90a5fe66a389ae35ac5b5b0c1feabfc7cab7fd5d19f429a071d')
version('0.10.2', sha256='3123553bdad83b143428033537c9e1939caf4a4d8813dade6a2246948c94494b')
version('0.10.0', sha256='49b3a0ba956eaa2f077e1ebd230b3c8d7b98afc67520207ada20a4d8b8efd071')
depends_on('py-setuptools', when='@0.14:', type='build')
| lgpl-2.1 | -6,032,124,726,619,497,000 | 44.703704 | 96 | 0.757699 | false | 2.45328 | false | false | false |
EclipseXuLu/DataHouse | DataHouse/crawler/master_avatar_crawler.py | 1 | 1477 | import os
import requests
URL_TEMPLATE = "http://yjsjy.hust.edu.cn/Uploadfiles/StudentPhoto/%s.jpg"
SAVE_TO_DIR_ROOT = "D:/HUST"
def mkdirs_if_not_exist(dir_name):
"""
create new folder if not exist
:param dir_name:
:return:
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def crawl_avatar(avatar_url):
response = requests.get(avatar_url, timeout=20)
if response.status_code != 404:
avatar_filename = avatar_url.split('/')[-1]
year = avatar_filename[0:4]
college = avatar_filename[4:7]
mkdirs_if_not_exist(os.path.join(SAVE_TO_DIR_ROOT, year, college))
with open(os.path.join(SAVE_TO_DIR_ROOT, year, college, avatar_filename), mode='wb') as f:
f.write(response.content)
f.flush()
f.close()
print('{0} has been downloaded...'.format(avatar_filename))
if __name__ == '__main__':
for year in [2008, 2009, 2010, 2011, 2012]:
for college in [_ for _ in range(301, 320)]:
for i in range(200):
if i < 10:
idx = str(year) + str(college) + "01000" + str(i)
elif 10 <= i < 100:
idx = str(year) + str(college) + "0100" + str(i)
else:
idx = str(year) + str(college) + "010" + str(i)
try:
crawl_avatar(URL_TEMPLATE % str(idx))
except:
pass
| mit | -7,978,295,527,017,499,000 | 29.142857 | 98 | 0.526066 | false | 3.334086 | false | false | false |
unclechu/pi-pedalboard | server.py | 1 | 5528 | #!/usr/bin/env python3
# pedalboard server
import socket
from threading import Thread
from gpiozero import Button
from signal import pause
from time import sleep, time
from radio import Radio
TCP_IP = '0.0.0.0'
TCP_PORT = 31415
ENC = 'UTF-8'
NEW_PRESS_DELAY = 0.3 # in seconds
CONNECTIONS_LIMIT = 5
buttons_map = [
(1, 2),
(2, 3),
(3, 4),
(4, 17),
(5, 27),
(6, 22),
(7, 10),
(8, 9),
(9, 11)
]
class BtnsThread(Thread):
is_dead = True
buttons = None
def __init__(self, radio):
self.is_dead = False
self.radio = radio
self.last_press_time = 0
self.is_released = True
super().__init__()
def __del__(self):
if self.is_dead: return
print('Stopping listening for buttons…')
if self.buttons is not None:
for btn in self.buttons:
btn[1].when_pressed = None
btn[1].when_released = None
del self.buttons
del self.radio
del self.last_press_time
del self.is_released
del self.is_dead
def pressed(self, n):
def f():
if time() - (self.last_press_time + NEW_PRESS_DELAY) <= 0: return
print('Pressed button #%d' % n)
self.last_press_time = time()
self.is_released = False
self.radio.trigger('button pressed', n=n)
return f
def released(self, n):
def f():
if self.is_released: return
print('Released button #%d' % n)
self.is_released = True
self.radio.trigger('button released', n=n)
return f
def run(self):
self.buttons = [(x[0], Button(x[1])) for x in buttons_map]
for btn in self.buttons:
btn[1].when_pressed = self.pressed(btn[0])
btn[1].when_released = self.released(btn[0])
print('Started buttons listening')
class SocketThread(Thread):
is_dead = True
def __init__(self, radio, conn, addr):
self.is_dead = False
self.radio = radio
self.conn = conn
self.addr = addr
self.radio.trigger('add connection', connection=self)
self.radio.on('close connections', self.__del__)
super().__init__()
def __del__(self):
if self.is_dead: return
self.radio.off('close connections', self.__del__)
self.radio.off('button pressed', self.send_pressed, soft=True)
self.radio.off('button released', self.send_released, soft=True)
self.conn.close()
self.radio.trigger('remove connection', connection=self)
print('Connection lost for:', self.addr)
del self.radio
del self.conn
del self.addr
del self.is_dead
def send_pressed(self, n):
try:
self.conn.send(bytes('button pressed|%d' % n, ENC))
print('Sent about button pressed to', self.addr)
except BrokenPipeError:
self.__del__()
def send_released(self, n):
try:
self.conn.send(bytes('button released|%d' % n, ENC))
print('Sent about button released to', self.addr)
except BrokenPipeError:
self.__del__()
def run(self):
print('Address connected:', self.addr)
self.radio.on('button pressed', self.send_pressed)
self.radio.on('button released', self.send_released)
class ConnectionsHandler:
is_dead = True
def __init__(self, radio):
self.is_dead = False
self.connections = []
self.radio = radio
self.radio.reply('opened connections count', self.get_connections_count)
self.radio.on('add connection', self.register_connection)
self.radio.on('remove connection', self.unregister_connection)
print('Started connections handling')
def __del__(self):
if self.is_dead: return
self.radio.stopReplying(
'opened connections count',
self.get_connections_count
)
self.radio.off('add connection', self.register_connection)
self.radio.off('remove connection', self.unregister_connection)
for conn in self.connections:
conn.__del__()
del conn
print('Stopped connections handling')
del self.connections
del self.radio
del self.is_dead
def register_connection(self, connection):
for conn in self.connections:
if conn == connection:
raise Exception('Connection already registered')
self.connections.append(connection)
def unregister_connection(self, connection):
new_connections = []
for conn in self.connections:
if conn != connection:
new_connections.append(conn)
if len(new_connections) == len(self.connections):
raise Exception('Connection not found to unregister')
elif len(new_connections) != len(self.connections) - 1:
raise Exception('More than one connection to unregister')
else:
self.connections = new_connections
def get_connections_count(self):
return len(self.connections)
radio = Radio()
btns = BtnsThread(radio)
btns.start()
conn_handler = ConnectionsHandler(radio)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((TCP_IP, TCP_PORT))
s.listen(CONNECTIONS_LIMIT)
try:
print('Starting listening for socket connections…')
while True:
conn, addr = s.accept()
SocketThread(radio, conn, addr).start()
except (KeyboardInterrupt, SystemExit):
print('Exiting… Closing all connections…')
radio.trigger('close connections')
while True:
conns_count = radio.request('opened connections count')
if conns_count == 0: break
sleep(0.1)
conn_handler.__del__()
del conn_handler
btns.__del__()
del btns
radio.__del__()
del radio
s.shutdown(socket.SHUT_RDWR)
print('Done')
| gpl-3.0 | 5,056,181,114,897,626,000 | 23.317181 | 76 | 0.644565 | false | 3.552124 | false | false | false |
tobi2006/mysds | export/views.py | 1 | 84393 | from datetime import date
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import (
HttpResponse, HttpResponseRedirect, HttpResponseForbidden)
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.templatetags.static import static
from random import shuffle
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, LETTER, landscape, portrait
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.platypus import (
SimpleDocTemplate, Table, TableStyle, Paragraph, Spacer,
BaseDocTemplate, Frame, PageTemplate, Image)
from reportlab.platypus.flowables import PageBreak
from database.views import is_teacher, is_admin, is_student
from database.models import *
from feedback.models import *
from feedback.categories import *
from anonymous_marking.models import *
from mysds.unisettings import *
# The different marking categories are in feedback/categories.py
# Helper Functions
def logo():
"""Returns the university logo, unless it is not available"""
styles = getSampleStyleSheet()
url = "https://cccu.tobiaskliem.de/static/images/cccu.jpg"
try:
image = Image(url, 2.45*inch, 1*inch)
except IOError:
image = Paragraph(
"Canterbury Christ Church University", styles['Heading1'])
return image
def bold(string):
"""Adds <b> tags around a string"""
bold_string = '<b>' + string + '</b>'
return bold_string
def heading(string, headingstyle='Heading2'):
"""Returns a proper paragraph for the header line"""
styles = getSampleStyleSheet()
tmp = '<para alignment = "center">' + string + '</para>'
result = Paragraph(tmp, styles[headingstyle])
return result
def formatted_date(raw_date):
"""Returns a proper date string
This returns a string of the date in British Format.
If the date field was left blank, an empty string is returned.
"""
if raw_date is None:
result = ''
else:
result = (
str(raw_date.day) + '/' + str(raw_date.month) + '/' +
str(raw_date.year))
return result
def two_markers(marker1, marker2):
"""Returns a string containing two markers, sorted alphabetically"""
marker_1_sort = marker1.last_name + "/" + marker1.first_name
marker_2_sort = marker2.last_name + "/" + marker2.first_name
markers = [marker_1_sort, marker_2_sort]
markers.sort()
marker_1_list = markers[0].split("/")
marker_2_list = markers[1].split("/")
marker_1_return = marker_1_list[1] + ' ' + marker_1_list[0]
marker_2_return = marker_2_list[1] + ' ' + marker_2_list[0]
result = marker_1_return + ' / ' + marker_2_return
return result
def paragraph(string):
"""Returns a paragraph with normal style"""
styles = getSampleStyleSheet()
return Paragraph(string, styles['Normal'])
def bold_paragraph(string):
"""Returns a paragraph with bold formatting"""
styles = getSampleStyleSheet()
tmp = bold(string)
return Paragraph(tmp, styles['Normal'])
def get_title(module, assessment):
assessment_title_string = module.get_assessment_title(assessment)
assessment_title_string = assessment_title_string.replace("/", "or")
return assessment_title_string
# Different marksheets
def essay_sheet(student, module, assessment):
"""Marksheet for Essays
This is the standard marksheet for CCCU Law, including a marking grid
with four different categories
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading('Law Undergraduate Assessment Sheet: Essay')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Submission Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
if module.get_assessment_max_wordcount(assessment):
tmp = (
str(module.get_assessment_max_wordcount(assessment)) +
' Words max.')
else:
tmp = ''
word_count = [
paragraph('Word Count'),
Spacer(1, 3),
bold_paragraph(tmp)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['ESSAY']['i_1'])
category_2 = paragraph(CATEGORIES['ESSAY']['i_2'])
category_3 = paragraph(CATEGORIES['ESSAY']['i_3'])
category_4 = paragraph(CATEGORIES['ESSAY']['i_4'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date, ''],
[assessment_title, '', word_count, '', ''],
[criteria, category_1, category_2, category_3, category_4]]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (3, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BACKGROUND', (0, 3), (-1, 3), colors.lightgrey),
('BACKGROUND', (0, 4), (0, 9), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Mark'),
Paragraph(mark, styles['Heading1'])],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
last_data = [[marked_by_table, '', '', mark_table, '']]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, -1), (-1, -1))])
)
elements.append(last_table)
return elements
def legal_problem_sheet(student, module, assessment):
"""Marksheet for Legal Problem Questions
This is the standard marksheet for CCCU Law, including a marking grid
with four different categories
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading('Law Undergraduate Assessment Sheet: Legal Problem')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Submission Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
if module.get_assessment_max_wordcount(assessment):
tmp = (
str(module.get_assessment_max_wordcount(assessment)) +
' Words max.')
else:
tmp = ''
word_count = [
paragraph('Word Count'),
Spacer(1, 3),
bold_paragraph(tmp)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_1'])
category_2 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_2'])
category_3 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_3'])
category_4 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_4'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date, ''],
[assessment_title, '', word_count, '', ''],
[criteria, category_1, category_2, category_3, category_4]]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (3, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BACKGROUND', (0, 3), (-1, 3), colors.lightgrey),
('BACKGROUND', (0, 4), (0, 9), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Mark'),
Paragraph(mark, styles['Heading1'])],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
last_data = [[marked_by_table, '', '', mark_table, '']]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, -1), (-1, -1))])
)
elements.append(last_table)
return elements
def presentation_sheet(student, module, assessment):
"""Marksheet for Oral Presentations
This is the standard marksheet for individual presentations at
CCCU Law, including a marking grid with X different categories
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading('Law Undergraduate Assessment Sheet: Oral Presentation')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Presentation Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['PRESENTATION']['i_1'])
category_2 = paragraph(CATEGORIES['PRESENTATION']['i_2'])
category_3 = paragraph(CATEGORIES['PRESENTATION']['i_3'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date],
[assessment_title, '', '', ''],
[criteria, category_1, category_2, category_3]]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (0, 2), (-1, 2)),
('BACKGROUND', (0, 3), (-1, 3), colors.lightgrey),
('BACKGROUND', (0, 4), (0, 9), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Mark'),
Paragraph(mark, styles['Heading1'])
],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
last_data = [[marked_by_table, '', '', mark_table, '']]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, -1), (-1, -1))])
)
elements.append(last_table)
return elements
def essay_legal_problem_sheet(student, module, assessment):
"""Marksheet for a cross between Essay and legal problem
This consists of the essay marksheet combined with the legal problem grid
and two different comment sections
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading(
'Law Undergraduate Assessment Sheet: Essay / Legal Problem')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Submission Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
if module.get_assessment_max_wordcount(assessment):
tmp = (
str(module.get_assessment_max_wordcount(assessment)) +
' Words max.')
else:
tmp = ''
word_count = [
paragraph('Word Count'),
Spacer(1, 3),
bold_paragraph(tmp)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['ESSAY']['i_1'])
category_2 = paragraph(CATEGORIES['ESSAY']['i_2'])
category_3 = paragraph(CATEGORIES['ESSAY']['i_3'])
category_4 = paragraph(CATEGORIES['ESSAY']['i_4'])
category_5 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_1'])
category_6 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_2'])
category_7 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_3'])
category_8 = paragraph(CATEGORIES['LEGAL_PROBLEM']['i_4'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date, ''],
[assessment_title, '', word_count, '', '']]
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (3, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
elements.append(Spacer(1, 5))
subtitle = Paragraph('Feedback for Part (a): Essay', styles['Heading3'])
elements.append(subtitle)
elements.append(Spacer(1, 5))
data = [[criteria, category_1, category_2, category_3, category_4]]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BACKGROUND', (0, 1), (0, -1), colors.lightgrey),
('ALIGN', (1, 1), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
elements.append(Spacer(1, 5))
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
part_1_mark_data = [[
Paragraph('Mark for part(a)', styles['Heading4']),
Paragraph(str(marksheet.part_1_mark), styles['Heading4'])]]
part_1_mark_table = Table(part_1_mark_data)
part_1_mark_table.setStyle(
TableStyle([
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(part_1_mark_table)
elements.append(PageBreak())
heading_2 = Paragraph(
'Feedback for Part (b): Legal Problem', styles['Heading3'])
elements.append(heading_2)
elements.append(Spacer(1, 4))
data_2 = [[criteria, category_5, category_6, category_7, category_8]]
row = ['80 +']
if marksheet.category_mark_5 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 80:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['70 - 79']
if marksheet.category_mark_5 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 79:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['60 - 69']
if marksheet.category_mark_5 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 69:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['50 - 59']
if marksheet.category_mark_5 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 59:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['40 - 49']
if marksheet.category_mark_5 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 49:
row.append('X')
else:
row.append(' ')
data_2.append(row)
row = ['Under 40']
if marksheet.category_mark_5 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_6 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_7 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_8 == 39:
row.append('X')
else:
row.append(' ')
data_2.append(row)
t_2 = Table(data_2)
t_2.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BACKGROUND', (0, 1), (0, -1), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(t_2)
elements.append(Spacer(1, 5))
comments_2 = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist_2 = marksheet.comments_2.split('\n')
for line in feedbacklist_2:
if line != "":
p = paragraph(line)
comments_2.append(p)
comments_2.append(Spacer(1, 4))
for comment in comments_2:
elements.append(comment)
part_2_mark_data = [[
Paragraph('Mark for part(b)', styles['Heading4']),
Paragraph(str(marksheet.part_2_mark), styles['Heading4'])
]]
part_2_mark_table = Table(part_2_mark_data)
part_2_mark_table.setStyle(
TableStyle([
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(part_2_mark_table)
elements.append(Spacer(1, 10))
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Final Mark for (a) and (b)'),
Paragraph(mark, styles['Heading1'])
],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
last_data = [[marked_by_table, '', '', mark_table, '']]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, -1), (-1, -1))])
)
elements.append(last_table)
return elements
def online_test_court_report_sheet(student, module, assessment):
"""Marksheet for Online Test / Court Report
This is a custom marksheet that allows to combine a mark for an online
test with a court report. Essentially, it is the essay marksheet with
a few extra points.
"""
styles = getSampleStyleSheet()
elements = []
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment)
assessment_title = bold(module.get_assessment_title(assessment))
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 5))
title = heading(
'Law Undergraduate Assessment Sheet: Online Test / Court Report')
elements.append(title)
elements.append(Spacer(1, 5))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph(module.title)]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(marksheet.submission_date)
submission_date = [
paragraph('Submission Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
assessment_title = [
paragraph('Assessment Title'),
Spacer(1, 3),
paragraph(assessment_title)]
if module.get_assessment_max_wordcount(assessment):
tmp = (
str(module.get_assessment_max_wordcount(assessment)) +
' Words max.')
else:
tmp = ''
word_count = [
paragraph('Word Count'),
Spacer(1, 3),
bold_paragraph(tmp)]
criteria = paragraph('Criteria')
category_1 = paragraph(CATEGORIES['ESSAY']['i_1'])
category_2 = paragraph(CATEGORIES['ESSAY']['i_2'])
category_3 = paragraph(CATEGORIES['ESSAY']['i_3'])
category_4 = paragraph(CATEGORIES['ESSAY']['i_4'])
data = [
[last_name, '', first_name, ''],
[module_title, '', module_code, submission_date, ''],
[assessment_title, '', word_count, '', ''],
[criteria, category_1, category_2, category_3, category_4]
]
row = ['80 +']
if marksheet.category_mark_1 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 80:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 80:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['70 - 79']
if marksheet.category_mark_1 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 79:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 79:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['60 - 69']
if marksheet.category_mark_1 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 69:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 69:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['50 - 59']
if marksheet.category_mark_1 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 59:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 59:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['40 - 49']
if marksheet.category_mark_1 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 49:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 49:
row.append('X')
else:
row.append(' ')
data.append(row)
row = ['Under 40']
if marksheet.category_mark_1 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_2 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_3 == 39:
row.append('X')
else:
row.append(' ')
if marksheet.category_mark_4 == 39:
row.append('X')
else:
row.append(' ')
data.append(row)
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 0)),
('SPAN', (2, 0), (-1, 0)),
('SPAN', (0, 1), (1, 1)),
('SPAN', (3, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BACKGROUND', (0, 3), (-1, 3), colors.lightgrey),
('BACKGROUND', (0, 4), (0, 9), colors.lightgrey),
('ALIGN', (1, 4), (-1, -1), 'CENTER'),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)
])
)
elements.append(t)
comments = [
bold_paragraph('General Comments'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
comments.append(p)
comments.append(Spacer(1, 4))
for comment in comments:
elements.append(comment)
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Combined Mark'),
Paragraph(mark, styles['Heading1'])
],
['', '']
]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
court = 'Mark for Court Report: ' + str(marksheet.part_1_mark)
online = 'Mark for On Line Test: ' + str(marksheet.part_2_mark)
last_data = [
['', '', paragraph(court)],
['', '', paragraph(online)],
[marked_by_table, '', '', mark_table]]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (1, 1)),
('SPAN', (2, 0), (3, 0)),
('SPAN', (2, 1), (3, 1)),
('SPAN', (0, -1), (2, -1))
])
)
elements.append(last_table)
return elements
def negotiation_written_sheet(student, module, assessment):
"""Marksheet for the assessment 'Negotiation / Written Submission'
This is an assessment that includes a group component and is therefore
a little more complex.
"""
elements = []
styles = getSampleStyleSheet()
performance = Performance.objects.get(student=student, module=module)
marksheet = Marksheet.objects.get(
student=student, module=module, assessment=assessment
)
group_no = performance.group_assessment_group
group_feedback = GroupMarksheet.objects.get(
module=module, assessment=assessment, group_no=group_no
)
mark = str(performance.get_assessment_result(assessment))
elements.append(logo())
elements.append(Spacer(1, 3))
title = heading(
'Law Undergraduate Assessment Sheet: Negotiation Study', 'Heading3'
)
elements.append(title)
elements.append(Spacer(1, 3))
last_name = [
bold_paragraph('Student family name'),
Spacer(1, 3),
bold_paragraph(student.last_name)]
first_name = [
paragraph('First name'),
Spacer(1, 3),
bold_paragraph(student.first_name)]
module_title = [
paragraph('Module Title'),
Spacer(1, 3),
bold_paragraph('ELIM')]
module_code = [
paragraph('Module Code'),
Spacer(1, 3),
bold_paragraph(module.code)]
tmp = formatted_date(group_feedback.submission_date)
submission_date = [
paragraph('Presentation Date'),
Spacer(1, 3),
bold_paragraph(tmp)]
tmp = str(performance.seminar_group) + '/' + str(group_no)
group_number = [
paragraph('Seminar/LAU Group'),
Spacer(1, 3),
bold_paragraph(tmp)]
individual_category_1 = bold_paragraph(
CATEGORIES['NEGOTIATION_WRITTEN']['i_1'])
individual_category_2 = bold_paragraph(
CATEGORIES['NEGOTIATION_WRITTEN']['i_2'])
individual_category_3 = bold_paragraph(
CATEGORIES['NEGOTIATION_WRITTEN']['i_3'])
individual_category_4 = bold_paragraph(
CATEGORIES['NEGOTIATION_WRITTEN']['i_4'])
group_category_1 = bold_paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['g_1'])
group_category_2 = bold_paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['g_2'])
group_category_3 = bold_paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['g_3'])
group_category_4 = bold_paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['g_4'])
deduction_explanation = (
paragraph(CATEGORIES['NEGOTIATION_WRITTEN']['i_4_helptext']))
marker = marksheet.marker
if marksheet.second_first_marker:
marker2 = marksheet.second_first_marker
tmp = two_markers(marker, marker2)
else:
tmp = marker.first_name + ' ' + marker.last_name
marking_date = formatted_date(marksheet.marking_date)
marked_by = [
[paragraph('Marked by'), bold_paragraph(tmp)],
[paragraph('Date'), bold_paragraph(marking_date)]]
marked_by_table = Table(marked_by)
mark = [
[
paragraph('Mark'),
Paragraph(mark, styles['Heading1'])
],
['', '']]
mark_table = Table(mark)
mark_table.setStyle(TableStyle([('SPAN', (1, 0), (1, 1))]))
table_header_1 = bold_paragraph('Part 1: Assessed Negotiation')
table_header_2 = bold_paragraph('Marks Available')
table_header_3 = bold_paragraph('Marks Awarded')
part_1_subheader = bold_paragraph('1. Individual Work')
part_2_subheader = bold_paragraph('2. Group Work')
sub_total_1_string = bold_paragraph('Sub-Total Part 1')
sub_total_1 = 0
if marksheet.category_mark_1_free is not None:
sub_total_1 += marksheet.category_mark_1_free
if group_feedback.category_mark_1_free is not None:
sub_total_1 += group_feedback.category_mark_1_free
if group_feedback.category_mark_2_free is not None:
sub_total_1 += group_feedback.category_mark_2_free
table_header_4 = bold_paragraph(
'Part 2: Individual and Written Submission'
)
sub_total_2_string = paragraph('Sub-Total Part 2')
sub_total_2 = 0
if marksheet.category_mark_2_free is not None:
sub_total_2 += marksheet.category_mark_2_free
if marksheet.category_mark_3_free is not None:
sub_total_2 += marksheet.category_mark_3_free
if group_feedback.category_mark_3_free is not None:
sub_total_2 += group_feedback.category_mark_3_free
if group_feedback.category_mark_4_free is not None:
sub_total_2 += group_feedback.category_mark_4_free
deductions_h_1 = bold_paragraph('Deductions possible')
deductions_h_2 = bold_paragraph('Deductions incurred')
i_mark_1 = str(marksheet.category_mark_1_free)
i_mark_2 = str(marksheet.category_mark_2_free)
i_mark_3 = str(marksheet.category_mark_3_free)
i_mark_4 = str(marksheet.category_mark_4_free)
g_mark_1 = str(group_feedback.category_mark_1_free)
g_mark_2 = str(group_feedback.category_mark_2_free)
g_mark_3 = str(group_feedback.category_mark_3_free)
g_mark_4 = str(group_feedback.category_mark_4_free)
data = [[last_name, first_name, group_number, ''],
[module_title, module_code, submission_date, ''],
['', '', '', ''],
['', '', table_header_2, table_header_3],
[table_header_1, '', '', ''],
[part_1_subheader, '', '', ''],
[individual_category_1, '', '40', i_mark_1],
[part_2_subheader, '', '', ''],
[group_category_1, '', '10', g_mark_1],
[group_category_2, '', '10', g_mark_2],
[sub_total_1_string, '', '60', sub_total_1],
[table_header_4, '', '', ''],
[part_1_subheader, '', '', ''],
[individual_category_2, '', '10', i_mark_2],
[individual_category_3, '', '10', i_mark_3],
[part_2_subheader, '', '', ''],
[group_category_3, '', '10', g_mark_3],
[group_category_4, '', '10', g_mark_4],
[sub_total_2_string, '', '40', sub_total_2],
[individual_category_4, '', deductions_h_1, deductions_h_2],
[deduction_explanation, '', '12', i_mark_4]
]
t = Table(data)
t.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (-2, 0), (-1, 0)),
('SPAN', (-2, 1), (-1, 1)),
('SPAN', (0, 2), (-1, 2)),
('BOX', (0, 0), (-1, 1), 0.25, colors.black),
('SPAN', (0, 3), (1, 3)),
('SPAN', (0, 4), (1, 4)),
('SPAN', (0, 5), (1, 5)),
('SPAN', (0, 6), (1, 6)),
('SPAN', (0, 7), (1, 7)),
('SPAN', (0, 8), (1, 8)),
('SPAN', (0, 9), (1, 9)),
('BACKGROUND', (0, 10), (-1, 10), colors.lightgrey),
('SPAN', (0, 10), (1, 10)),
('SPAN', (0, 11), (1, 11)),
('SPAN', (0, 12), (1, 12)),
('SPAN', (0, 13), (1, 13)),
('SPAN', (0, 14), (1, 14)),
('SPAN', (0, 15), (1, 15)),
('SPAN', (0, 16), (1, 16)),
('SPAN', (0, 17), (1, 17)),
('SPAN', (0, 18), (1, 18)),
('SPAN', (0, 19), (1, 19)),
('SPAN', (0, 20), (1, 20)),
('BACKGROUND', (0, 18), (-1, 18), colors.lightgrey),
('BOX', (0, 3), (-1, -1), 0.25, colors.black)])
)
elements.append(t)
elements.append(PageBreak())
# Individual Comments
individual_comments = [
bold_paragraph('Comment on <u>Individual</u> Work for part 1 and 2'),
Spacer(1, 4)]
feedbacklist = marksheet.comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
individual_comments.append(p)
individual_comments.append(Spacer(1, 4))
# Group Comments
group_comments = [
bold_paragraph('Comment on <u>Group</u> Work for part 1 and 2'),
Spacer(1, 4)]
feedbacklist = group_feedback.group_comments.split('\n')
for line in feedbacklist:
if line != "":
p = paragraph(line)
group_comments.append(p)
group_comments.append(Spacer(1, 4))
# Final table
last_data = [
[individual_comments, '', '', ''],
[group_comments, '', '', ''],
[marked_by_table, '', mark_table, '']
]
last_table = Table(last_data)
last_table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('SPAN', (0, 0), (-1, 0)),
('SPAN', (0, 1), (-1, 1)),
('SPAN', (0, 2), (1, 2)),
('SPAN', (2, 2), (-1, 2)),
('BACKGROUND', (0, -1), (-1, -1), colors.lightgrey)])
)
elements.append(last_table)
return elements
# Functions called from website
@login_required
def export_feedback_sheet(request, code, year, assessment, student_id):
"""Will export either one or multiple feedback sheets.
This needs to be given the student id or the string 'all' if
you want all marksheets for the assessment. It will only work if
the person requesting is a teacher, an admin or the student the
marksheet is about.
"""
module = Module.objects.get(code=code, year=year)
assessment_title = get_title(module, assessment)
assessment_type = module.get_marksheet_type(assessment)
if student_id == 'all':
if is_teacher(request.user) or is_admin(request.user):
response = HttpResponse(mimetype='application/pdf')
first_part = module.title.replace(' ', '_')
second_part = assessment_title.replace(' ', '_')
filename_string = (
'attachment; filename=' + first_part +
'_' + second_part + '_-_all_marksheets.pdf')
all_students = module.student_set.all()
documentlist = []
students = [] # Only the students where feedback has been entered
for student in all_students:
try:
performance = Marksheet.objects.get(
student=student, module=module,
assessment=assessment)
students.append(student)
except Marksheet.DoesNotExist:
pass
for student in students:
if assessment_type == 'ESSAY':
elements = essay_sheet(student, module, assessment)
elif assessment_type == 'LEGAL_PROBLEM':
elements = legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'PRESENTATION':
elements = presentation_sheet(student, module, assessment)
elif assessment_type == 'ESSAY_LEGAL_PROBLEM':
elements = essay_legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'ONLINE_TEST_COURT_REPORT':
elements = online_test_court_report_sheet(
student, module, assessment)
elif assessment_type == 'NEGOTIATION_WRITTEN':
elements = negotiation_written_sheet(
student, module, assessment)
for element in elements:
documentlist.append(element)
documentlist.append(PageBreak())
response['Content-Disposition'] = filename_string
document = SimpleDocTemplate(response)
document.setAuthor = 'Canterbury Christ Church University'
document.build(documentlist)
return response
else:
return HttpResponseForbidden()
else:
student = Student.objects.get(student_id=student_id)
own_marksheet = False # Just for the filename
allowed = False
if is_teacher(request.user) or is_admin(request.user):
allowed = True
elif is_student(request.user):
if student.belongs_to == request.user:
own_marksheet = True
allowed = True
if allowed:
module = Module.objects.get(code=code, year=year)
response = HttpResponse(mimetype='application/pdf')
assessment_title_string = get_title(module, assessment)
if own_marksheet:
first_part = module.title.replace(' ', '_')
second_part = assessment_title_string.replace(' ', '_')
filename_string = (
'attachment; filename=' + first_part + '_' +
second_part + '_Marksheet.pdf'
)
else:
ln = student.last_name.replace(' ', '_')
fn = student.first_name.replace(' ', '_')
filename_string = (
'attachment; filename=' + ln + '_' + fn + '.pdf'
)
response['Content-Disposition'] = filename_string
document = SimpleDocTemplate(response)
document.setAuthor = 'Canterbury Christ Church University'
if assessment_type == 'ESSAY':
elements = essay_sheet(student, module, assessment)
elif assessment_type == 'LEGAL_PROBLEM':
elements = legal_problem_sheet(
student, module, assessment
)
elif assessment_type == 'PRESENTATION':
elements = presentation_sheet(student, module, assessment)
elif assessment_type == 'ESSAY_LEGAL_PROBLEM':
elements = essay_legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'ONLINE_TEST_COURT_REPORT':
elements = online_test_court_report_sheet(
student, module, assessment)
elif assessment_type == 'NEGOTIATION_WRITTEN':
elements = negotiation_written_sheet(
student, module, assessment)
document.build(elements)
return response
else:
return HttpResponseForbidden()
@login_required
@user_passes_test(is_teacher)
def export_attendance_sheet(request, code, year):
"""Returns attendance sheets for a module."""
response = HttpResponse(mimetype='application/pdf')
response['Content-Disposition'] = (
'attachment; filename=attendance_sheet.pdf')
document = SimpleDocTemplate(response)
elements = []
module = Module.objects.get(code=code, year=year)
styles = getSampleStyleSheet()
next_year = str(module.year + 1)
heading = (
module.title + " (" + module.code + ") " + str(module.year) +
"/" + next_year)
performances = Performance.objects.filter(module=module)
no_of_seminar_groups = 0
for performance in performances:
if performance.seminar_group > no_of_seminar_groups:
no_of_seminar_groups = performance.seminar_group
counter = 0
while counter < no_of_seminar_groups:
counter += 1
subheading = "Seminar Group " + str(counter)
elements.append(Paragraph(heading, styles['Heading1']))
elements.append(Paragraph(subheading, styles['Heading2']))
elements.append(Spacer(1, 20))
data = []
header = ['Name']
column = 0
last_week = module.last_session + 1
no_teaching = module.no_teaching_in.split(",")
for week in range(module.first_session, last_week):
strweek = str(week)
if strweek not in no_teaching:
header.append(strweek)
data.append(header)
performances = Performance.objects.filter(
module=module, seminar_group=counter)
for performance in performances:
row = [performance.student]
for week in performance.attendance:
if week == '1':
row.append(u'\u2713')
elif week == 'e':
row.append('e')
else:
row.append(' ')
data.append(row)
table = Table(data)
table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BACKGROUND', (0, 0), (0, -1), colors.lightgrey),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(table)
elements.append(PageBreak())
document.build(elements)
return response
@login_required
@user_passes_test(is_admin)
def export_all_anonymous_exam_marks(request, year):
"""Gives an overview of all anonymous marks in the year"""
modules = Module.objects.filter(year=year)
modules_to_use = []
for module in modules:
if module.exam_value:
marks = AnonymousMarks.objects.filter(module=module)
for mark in marks:
if mark.exam:
modules_to_use.append(module)
break
response = HttpResponse(mimetype='application/pdf')
response['Content-Disposition'] = (
'attachment; filename=anonymous_exam_marks.pdf')
doc = BaseDocTemplate(response)
elements = []
styles = getSampleStyleSheet()
frame1 = Frame(
doc.leftMargin, doc.bottomMargin, doc.width/2-6,
doc.height, id='col1')
frame2 = Frame(
doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
d = formatted_date(date.today())
datenow = "Exported from MySDS, the CCCU Law DB on " + d
for module in modules_to_use:
heading = (
"Anonymous Marks for " + module.title + " (" +
str(module.year) + "/" + str(module.year + 1) + ")")
elements.append(Paragraph(heading, styles['Heading2']))
elements.append(Spacer(1, 20))
data = []
header = ['Exam ID', 'Exam Mark']
data.append(header)
marks = AnonymousMarks.objects.filter(module=module)
for mark in marks:
row = [mark.exam_id, mark.exam]
data.append(row)
table = Table(data, repeatRows=1)
table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(table)
elements.append(paragraph(datenow))
elements.append(PageBreak())
doc.addPageTemplates([PageTemplate(id='TwoCol', frames=[frame1, frame2])])
doc.build(elements)
return response
@login_required
@user_passes_test(is_teacher)
def export_anonymous_marks(request, code, year, assessment):
"""Gives an overview of anonymous marks for an assessment"""
module = Module.objects.get(code=code, year=year)
response = HttpResponse(mimetype='application/pdf')
module_string = module.title.replace(" ", "_")
filename_string = 'attachment; filename='
filename_string += module_string
filename_string += '.pdf'
response['Content-Disposition'] = filename_string
doc = BaseDocTemplate(response)
elements = []
styles = getSampleStyleSheet()
frame1 = Frame(
doc.leftMargin, doc.bottomMargin, doc.width/2-6,
doc.height, id='col1')
frame2 = Frame(
doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
d = formatted_date(date.today())
datenow = "Exported from MySDS, the CCCU Law DB on " + d
heading = (
"Anonymous Marks for " + module.title + " (" +
str(module.year) + "/" + str(module.year + 1) + ") - ")
if assessment == 'exam':
heading += "Exam"
else:
assessment = int(assessment)
heading += module.get_assessment_title(assessment)
elements.append(Paragraph(heading, styles['Heading2']))
elements.append(Spacer(1, 20))
data = []
header = ['Exam ID', 'Mark']
data.append(header)
marks = AnonymousMarks.objects.filter(module=module)
for mark in marks:
row = [mark.exam_id, mark.get_assessment_result(assessment)]
data.append(row)
table = Table(data, repeatRows=1)
table.setStyle(
TableStyle([
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)])
)
elements.append(table)
elements.append(Spacer(1, 20))
elements.append(paragraph(datenow))
doc.addPageTemplates([PageTemplate(id='TwoCol', frames=[frame1, frame2])])
doc.build(elements)
return response
@login_required
@user_passes_test(is_teacher)
def export_marks(request, code, year):
"""Gives a useful sheet of all marks for the module.
Students will be highlighted if they failed the module, or if a QLD
student failed a component in a Foundational module
"""
module = Module.objects.get(code=code, year=year)
response = HttpResponse(mimetype='application/pdf')
filename = module.title.replace(" ", "_")
filename += "_Marks_" + str(module.year) + ".pdf"
responsestring = 'attachment; filename=' + filename
response['Content-Disposition'] = responsestring
doc = SimpleDocTemplate(response)
doc.pagesize = landscape(A4)
elements = []
styles = getSampleStyleSheet()
d = formatted_date(date.today())
datenow = "Exported from MySDS, the CCCU Law DB on " + d
modulestring = (
module.title + ' (' + module.code + ') ' + str(module.year) + '/' +
str(module.year + 1)
)
heading = "Marks for " + modulestring
elements.append(Paragraph(heading, styles['Heading2']))
elements.append(Spacer(1, 20))
data = []
header = ['ID', 'Student', ' Programme', 'QLD']
assessment_range = []
if module.assessment_1_value:
title = (
module.assessment_1_title.strip() +
' (' +
str(module.assessment_1_value) +
'%)'
)
assessment_range.append('1')
header.append(paragraph(title))
if module.assessment_2_value:
title = (
module.assessment_2_title.strip() +
' (' +
str(module.assessment_2_value) +
'%)'
)
assessment_range.append('2')
header.append(paragraph(title))
if module.assessment_3_value:
title = (
module.assessment_3_title.strip() +
' (' +
str(module.assessment_3_value) +
'%)'
)
assessment_range.append('3')
header.append(paragraph(title))
if module.assessment_4_value:
title = (
module.assessment_4_title.strip() +
' (' +
str(module.assessment_4_value) +
'%)'
)
assessment_range.append('4')
header.append(paragraph(title))
if module.assessment_5_value:
title = (
module.assessment_5_title.strip() +
' (' +
str(module.assessment_5_value) +
'%)'
)
assessment_range.append('5')
header.append(paragraph(title))
if module.assessment_6_value:
title = (
module.assessment_6_title.strip() +
' (' +
str(module.assessment_6_value) +
'%)'
)
assessment_range.append('6')
header.append(paragraph(title))
if module.exam_value:
title = (
'Exam (' +
str(module.exam_value) +
'%)'
)
assessment_range.append('exam')
header.append(paragraph(title))
header.append('Total')
header.append('Notes')
data.append(header)
performances = Performance.objects.filter(module=module)
counter = 0
highlight = []
# This needs to be replaced once model changes
ls = Course.objects.get(
title='BSc (Hons) Legal Studies / Sport And Exercise Science')
llb = Course.objects.get(
title='LLB (Hons) Bachelor Of Law')
business = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With Business Studies')
ac = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With Criminology')
fi = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With Forensic Investigation')
ir = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With International Relations')
soc = Course.objects.get(
title='LLB (Hons) Bachelor Of Law With Sociology')
# <<<
for performance in performances:
counter += 1
student = (
performance.student.last_name + ", " +
performance.student.short_first_name()
)
row = [performance.student.student_id, paragraph(student)]
# This needs to be replaced once model changes
if performance.student.course == llb:
course = 'LLB'
elif performance.student.course == business:
course = 'LLB/Business'
elif performance.student.course == ac:
course = 'LLB/AC'
elif performance.student.course == fi:
course = 'LLB/FI'
elif performance.student.course == ir:
course = 'LLB/IR'
elif performance.student.course == soc:
course = 'LLB/Sociology'
elif performance.student.course == ls:
course = 'LS/Sport'
else:
course = ''
row.append(course)
# <<<
if performance.student.qld:
row.append(u'\u2713')
else:
row.append(' ')
notes = ''
if performance.average < PASSMARK:
highlight_yellow = True
else:
highlight_yellow = False
highlight_red = False
for assessment in assessment_range:
concession = performance.get_concession(assessment)
assessment_title = module.get_assessment_title(assessment)
assessment_title = assessment_title.strip()
granted_or_pending = False
if concession == 'G':
granted_or_pending = True
if assessment == 'exam':
if len(notes) == 0:
notes = 'Sit exam'
else:
notes += ', sit exam'
else:
if len(notes) == 0:
notes = 'Submit ' + assessment_title
else:
notes += ', submit ' + assessment_title
if concession == 'P':
granted_or_pending = True
if assessment == 'exam':
if len(notes) == 0:
notes = 'Concession for exam pending'
else:
notes += ', concession for exam pending'
else:
if len(notes) == 0:
notes = (
'Concession for ' +
assessment_title +
' pending')
else:
notes += (
', concession for ' +
assessment_title +
' pending')
if performance.get_assessment_result(assessment):
row.append(performance.get_assessment_result(assessment))
if module.is_foundational and performance.student.qld:
if (performance.get_assessment_result(assessment)
< PASSMARK):
if not granted_or_pending:
if assessment == 'exam':
if len(notes) == 0:
notes = 'Resit exam'
else:
notes += ', resit exam'
else:
if len(notes) == 0:
notes = 'Resubmit ' + assessment_title
else:
notes += ', resubmit ' + assessment_title
if not highlight_yellow:
highlight_red = True
elif performance.average < PASSMARK:
if (performance.get_assessment_result(assessment)
< PASSMARK):
if not granted_or_pending:
if assessment == 'exam':
if len(notes) == 0:
notes = 'Resit exam'
else:
notes += ', resit exam'
else:
if len(notes) == 0:
notes = 'Reubmit ' + assessment_title
else:
notes += ', resubmit ' + assessment_title
else:
row.append('-')
if module.is_foundational and performance.student.qld:
if (performance.get_assessment_result(assessment)
< PASSMARK):
if not granted_or_pending:
if assessment == 'exam':
if len(notes) == 0:
notes = 'Resit exam'
else:
notes += ', resit exam'
else:
if len(notes) == 0:
notes = 'Resubmit ' + assessment_title
else:
notes += ', resubmit ' + assessment_title
if not highlight_yellow:
highlight_red = True
elif performance.average < PASSMARK:
if (performance.get_assessment_result(assessment)
< PASSMARK):
if not granted_or_pending:
if assessment == 'exam':
if len(notes) == 0:
notes = 'Resit exam'
else:
notes += ', resit exam'
else:
if len(notes) == 0:
notes = 'Reubmit ' + assessment_title
else:
notes += ', resubmit ' + assessment_title
if performance.average:
row.append(performance.average)
else:
row.append('-')
highlight_yellow = True
notes_paragraph = paragraph(notes)
row.append(notes_paragraph)
data.append(row)
if highlight_yellow:
highlight.append((counter, 'y'))
if highlight_red:
highlight.append((counter, 'r'))
table = Table(data, repeatRows=1)
tablestyle = [
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.grey),
('BOX', (0, 0), (-1, -1), 0.25, colors.black)
]
for item in highlight:
if item[1] == 'r':
tablestyle.append(
('BACKGROUND', (0, item[0]), (-1, item[0]), colors.red)
)
if item[1] == 'y':
tablestyle.append(
('BACKGROUND', (0, item[0]), (-1, item[0]), colors.yellow)
)
table.setStyle(TableStyle(tablestyle))
elements.append(table)
elements.append(Spacer(1, 20))
elements.append(paragraph(datenow))
elements.append(PageBreak())
doc.build(elements)
return response
def sample_pack(request, code, year):
"""Prepares a nice sample pack for the external examiner"""
module = Module.objects.get(code=code, year=year)
response = HttpResponse(mimetype='application/pdf')
tmp = module.title.replace(" ", "_")
filename_string = 'attachment; filename=' + tmp + '_examiners_pack.pdf'
response['Content-Disposition'] = filename_string
document = SimpleDocTemplate(response)
elements = []
styles = getSampleStyleSheet()
performances = list(Performance.objects.filter(module=module))
samplesize = sample_size(len(performances))
per_range = round(samplesize / 5) # Fail, 40s, 50s, 60s, 70 +
sample = {}
for assessment in module.get_assessment_range():
shuffle(performances) # Make sure the marks are from all over
add = []
first = []
two_one = []
two_two = []
third = []
fail = []
leftover = [] # Needed if there are less than per_range in one
complete = False
for performance in performances:
mark = performance.get_assessment_result(assessment)
if mark:
if mark > 69:
if len(first) < per_range:
first.append(performance)
else:
leftover.append(performance)
elif mark > 59:
if len(two_one) < per_range:
two_one.append(performance)
else:
leftover.append(performance)
elif mark > 49:
if len(two_two) < per_range:
two_two.append(performance)
else:
leftover.append(performance)
elif mark > 39:
if len(third) < per_range:
third.append(performance)
else:
leftover.append(performance)
else:
if len(fail) < per_range:
fail.append(performance)
else:
leftover.append(performance)
this_sample = first + two_one + two_two + third + fail
while len(this_sample) < samplesize:
this_sample.append(leftover.pop())
this_sample.sort(
key=lambda x: x.get_assessment_result(assessment),
reverse=True)
sample[assessment] = this_sample
title = heading('Checklist, not part of the pack')
elements.append(title)
assessment_string = (
'Assessments (at the end, together with the marksheets included in ' +
'this bundle)')
data = [
[
bold_paragraph('Make sure to add the following to this pack'),
'', '', ''],
['The module handbook (after the title page)', '', '', ''],
[bold_paragraph(assessment_string), '', '', '']
]
headline = [0, 2]
only_one = [1]
counter = 2
for assessment in module.get_assessment_range():
if module.get_assessment_title(assessment) == 'Exam':
blind = True
else:
blind = False
newline = True
counter += 1
title = bold_paragraph(module.get_assessment_title(assessment))
headline.append(counter)
data.append([title, '', '', ''])
counter += 1
title = paragraph(
'Instructions for ' + module.get_assessment_title(assessment))
data.append([title, '', '', ''])
only_one.append(counter)
this_sample = sample[assessment]
for performance in this_sample:
if newline:
print "True"
counter += 1
if blind:
first_column = performance.student.exam_id
print first_column
else:
first_column = performance.student.__unicode__()
newline = False
else:
if blind:
data.append(
[
first_column,
'',
performance.student.exam_id,
''
])
else:
data.append(
[
first_column,
'',
performance.student.__unicode__(),
''
])
newline = True
t = Table(data, colWidths=(200, 20, 200, 20))
style = [
('BOX', (0, 1), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 1), (-1, -1), 0.25, colors.black),
]
for line in headline:
style.append(('SPAN', (0, line), (-1, line)))
for line in only_one:
style.append(('SPAN', (0, line), (-2, line)))
# for line in checkboxline:
# style.append(('BOX', (-1, line), (-1, line)))
t.setStyle(TableStyle(style))
elements.append(t)
# Title page
elements.append(PageBreak())
elements.append(Spacer(1, 100))
elements.append(logo())
elements.append(Spacer(1, 80))
title = heading(module.__unicode__(), 'Heading1')
elements.append(title)
elements.append(Spacer(1, 40))
if len(module.eligible) == 1:
tmp = 'Year ' + module.eligible
elif len(module.eligible) == 2:
tmp = 'Years ' + module.eligible[0] + ' and ' + module.eligible[1]
else:
tmp = (
'Years ' +
module.eligible[0] +
', ' +
module.eligible[1] +
' and ' +
module.eligible[2]
)
level = heading(tmp)
elements.append(level)
elements.append(Spacer(1, 40))
subtitle = heading('Exam Board Sample Pack')
elements.append(subtitle)
elements.append(PageBreak())
# Statistics page
title = heading('Module Marks')
elements.append(title)
elements.append(Spacer(1, 20))
no_of_first = 0
no_of_two_one = 0
no_of_two_two = 0
no_of_third = 0
no_of_fail = 0
for performance in performances:
result = performance.average
if result:
if result > 69:
no_of_first += 1
elif result > 59:
no_of_two_one += 1
elif result > 49:
no_of_two_two += 1
elif result > 39:
no_of_third += 1
else:
no_of_fail += 1
first_f = float(no_of_first)
two_one_f = float(no_of_two_one)
two_two_f = float(no_of_two_two)
third_f = float(no_of_third)
fail_f = float(no_of_fail)
first_percent = round(((first_f / len(performances)) * 100), 1)
two_one_percent = round(((two_one_f / len(performances)) * 100), 1)
two_two_percent = round(((two_two_f / len(performances)) * 100), 1)
third_percent = round(((third_f / len(performances)) * 100), 1)
fail_percent = round(((fail_f / len(performances)) * 100), 1)
data = []
data.append(['Range', 'Amount', 'Percentage'])
data.append(['70 +', no_of_first, first_percent])
data.append(['60-69', no_of_two_one, two_one_percent])
data.append(['50-59', no_of_two_two, two_two_percent])
data.append(['40-49', no_of_third, third_percent])
data.append(['Fail', no_of_fail, fail_percent])
t = Table(data)
style = [
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
]
t.setStyle(TableStyle(style))
elements.append(t)
elements.append(PageBreak())
for assessment in module.get_assessment_range():
this_sample = sample[assessment]
assessment_type = module.get_marksheet_type(assessment)
if assessment_type:
for performance in this_sample:
student = performance.student
if assessment_type == 'ESSAY':
marksheet = essay_sheet(student, module, assessment)
elif assessment_type == 'LEGAL_PROBLEM':
marksheet = legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'PRESENTATION':
marksheet = presentation_sheet(student, module, assessment)
elif assessment_type == 'ESSAY_LEGAL_PROBLEM':
marksheet = essay_legal_problem_sheet(
student, module, assessment)
elif assessment_type == 'ONLINE_TEST_COURT_REPORT':
marksheet = online_test_court_report_sheet(
student, module, assessment)
elif assessment_type == 'NEGOTIATION_WRITTEN':
marksheet = negotiation_written_sheet(
student, module, assessment)
else:
marksheet = False
if marksheet:
for element in marksheet:
elements.append(element)
elements.append(PageBreak())
document.build(elements)
return response
| gpl-3.0 | -2,231,097,893,349,251,000 | 33.815594 | 79 | 0.533682 | false | 3.724152 | false | false | false |
pdl30/pyngspipe | setup.py | 1 | 1055 | import os
from setuptools import setup, find_packages
setup(name='pyngspipe',
version='0.0.1',
description='pyngspipe is a pipeline for processing GEO NGS datasets based on the pyrnatools/pychiptools packages',
author='Patrick Lombard',
author_email='ptk.lmb55@gmail.com',
packages=find_packages(),
scripts=['scripts/pyngs_pipe.py', 'scripts/pyngs_report.py'],
package_data={"pyngspipe":['data/*']},
install_requires=['pysam', 'pybedtools'],
license='GPLv3',
platforms='any',
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.7',
'Environment :: Console',
],
long_description="""
pyngspipe is a pipeline for processing GEO NGS datasets based on the pyrnatools/pychiptools packages
Contact
=============
If you have any questions or comments about pyngspipe, please feel free to contact me via
eMail: ptk.lmb55@gmail.com
""",
)
| gpl-2.0 | 147,680,674,970,054,800 | 31.96875 | 121 | 0.653081 | false | 3.754448 | false | false | false |
JaredKerim-Mozilla/leaderboard-server | leaderboard/fxa/authenticator.py | 2 | 3147 | import re
from django.conf import settings
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.authentication import (
get_authorization_header,
BaseAuthentication,
)
from leaderboard.fxa.client import FXAClientMixin, FXAException
from leaderboard.contributors.models import Contributor
# A regex which matches against a Bearer token
# http://self-issued.info/docs/draft-ietf-oauth-v2-bearer.html#authz-header
FXA_ACCESS_TOKEN_RE = re.compile('Bearer\s+(?P<token>[a-zA-Z0-9._~+\/\-=]+)')
class OAuthTokenAuthentication(FXAClientMixin, BaseAuthentication):
"""
Simple token based authentication for OAuth v2.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Bearer ". For example:
Authorization: Bearer 401f7ac837da42b97f613d789819ff93537bee6a
http://self-issued.info/docs/draft-ietf-oauth-v2-bearer.html#authz-header
"""
def authenticate(self, request):
auth_header = get_authorization_header(request)
if not auth_header:
msg = 'Missing token header.'
raise AuthenticationFailed(msg)
match = FXA_ACCESS_TOKEN_RE.match(auth_header)
if not match:
msg = 'Invalid token header. Must match: `Bearer <token>`.'
raise AuthenticationFailed(msg)
access_token = match.groupdict()['token']
try:
verify_data = self.fxa_client.verify_token(access_token)
except FXAException, e:
msg = (
'Unable to verify access token '
'with Firefox Accounts: {}'
).format(e)
raise AuthenticationFailed(msg)
client_id = verify_data.get('client_id', None)
if client_id != settings.FXA_CLIENT_ID:
msg = (
'Provided access token is not '
'valid for use with this service.'
)
raise AuthenticationFailed(msg)
fxa_uid = verify_data.get('user', None)
if fxa_uid is None:
msg = 'Unable to retrieve Firefox Accounts user id.'
raise AuthenticationFailed(msg)
try:
contributor = Contributor.objects.get(fxa_uid=fxa_uid)
except Contributor.DoesNotExist:
msg = 'No contributor found.'
raise AuthenticationFailed(msg)
try:
profile_data = self.fxa_client.get_profile_data(access_token)
except FXAException, e:
msg = (
'Unable to retrieve profile '
'data from Firefox Accounts: {}'
).format(e)
raise AuthenticationFailed(msg)
display_name = profile_data.get('displayName', None)
if display_name is not None and display_name != contributor.name:
contributor.name = display_name
contributor.save()
return (
contributor,
{
'access_token': access_token,
'profile_data': profile_data,
},
)
def authenticate_header(self, request):
return 'Token'
| mpl-2.0 | 5,073,975,312,316,815,000 | 31.443299 | 79 | 0.614236 | false | 4.168212 | false | false | false |
knowsis/django | tests/text/tests.py | 81 | 4256 | # coding: utf-8
from __future__ import unicode_literals
from django.test import TestCase
from django.utils.encoding import iri_to_uri, force_text
from django.utils.functional import lazy
from django.utils.http import (cookie_date, http_date,
urlquote, urlquote_plus, urlunquote, urlunquote_plus)
from django.utils import six
from django.utils.text import get_text_list, smart_split
from django.utils.translation import override
lazystr = lazy(force_text, six.text_type)
class TextTests(TestCase):
"""
Tests for stuff in django.utils.text and other text munging util functions.
"""
def test_get_text_list(self):
self.assertEqual(get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
self.assertEqual(get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
self.assertEqual(get_text_list(['a', 'b'], 'and'), 'a and b')
self.assertEqual(get_text_list(['a']), 'a')
self.assertEqual(get_text_list([]), '')
with override('ar'):
self.assertEqual(get_text_list(['a', 'b', 'c']), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.',
['This', 'is', '"a person"', 'test.']),
('This is "a person\'s" test.',
['This', 'is', '"a person\'s"', 'test.']),
('This is "a person\\"s" test.',
['This', 'is', '"a person\\"s"', 'test.']),
('"a \'one',
['"a', "'one"]),
('all friends\' tests',
['all', 'friends\'', 'tests']),
('url search_page words="something else"',
['url', 'search_page', 'words="something else"']),
("url search_page words='something else'",
['url', 'search_page', "words='something else'"]),
('url search_page words "something else"',
['url', 'search_page', 'words', '"something else"']),
('url search_page words-"something else"',
['url', 'search_page', 'words-"something else"']),
('url search_page words=hello',
['url', 'search_page', 'words=hello']),
('url search_page words="something else',
['url', 'search_page', 'words="something', 'else']),
("cut:','|cut:' '",
["cut:','|cut:' '"]),
(lazystr("a b c d"), # Test for #20231
['a', 'b', 'c', 'd']),
]
for test, expected in testdata:
self.assertEqual(list(smart_split(test)), expected)
def test_urlquote(self):
self.assertEqual(urlquote('Paris & Orl\xe9ans'),
'Paris%20%26%20Orl%C3%A9ans')
self.assertEqual(urlquote('Paris & Orl\xe9ans', safe="&"),
'Paris%20&%20Orl%C3%A9ans')
self.assertEqual(
urlunquote('Paris%20%26%20Orl%C3%A9ans'),
'Paris & Orl\xe9ans')
self.assertEqual(
urlunquote('Paris%20&%20Orl%C3%A9ans'),
'Paris & Orl\xe9ans')
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans'),
'Paris+%26+Orl%C3%A9ans')
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans', safe="&"),
'Paris+&+Orl%C3%A9ans')
self.assertEqual(
urlunquote_plus('Paris+%26+Orl%C3%A9ans'),
'Paris & Orl\xe9ans')
self.assertEqual(
urlunquote_plus('Paris+&+Orl%C3%A9ans'),
'Paris & Orl\xe9ans')
def test_cookie_date(self):
t = 1167616461.0
self.assertEqual(cookie_date(t), 'Mon, 01-Jan-2007 01:54:21 GMT')
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT')
def test_iri_to_uri(self):
self.assertEqual(iri_to_uri('red%09ros\xe9#red'),
'red%09ros%C3%A9#red')
self.assertEqual(iri_to_uri('/blog/for/J\xfcrgen M\xfcnster/'),
'/blog/for/J%C3%BCrgen%20M%C3%BCnster/')
self.assertEqual(iri_to_uri('locations/%s' % urlquote_plus('Paris & Orl\xe9ans')),
'locations/Paris+%26+Orl%C3%A9ans')
def test_iri_to_uri_idempotent(self):
self.assertEqual(iri_to_uri(iri_to_uri('red%09ros\xe9#red')),
'red%09ros%C3%A9#red')
| bsd-3-clause | -7,975,211,482,157,299,000 | 39.504762 | 90 | 0.540089 | false | 3.261503 | true | false | false |
JoshAshby/Fla.gr | app/controllers/flags/view.py | 1 | 1582 | #!/usr/bin/env python
"""
fla.gr controller for editing flags
For more information, see: https://github.com/JoshAshby/
http://xkcd.com/353/
Josh Ashby
2013
http://joshashby.com
joshuaashby@joshashby.com
"""
from seshat.route import autoRoute
from utils.baseHTMLObject import baseHTMLObject
from views.flags.flagViewTmpl import flagViewTmpl
from views.partials.flags.flagViewTmpl import flagViewTmpl as flagViewTmplPartial
import models.couch.flag.flagModel as fm
import models.couch.user.userModel as um
@autoRoute()
class flagsView(baseHTMLObject):
_title = "view flag"
def GET(self):
"""
"""
flagid = self.env["members"][0]
flag = fm.flagORM.getByID(flagid)
if not flag.visibility and flag.userID != self.session.id:
self.session.pushAlert("This is a private flag! Sorry but we \
can't let you see it.", "Hold it.", "error")
self.head = ("303 SEE OTHER", [("location", "/flags")])
return
flag.format()
flag["joineduserID"] = um.userORM.getByID(flag.userID)
view = flagViewTmpl(searchList=[self.tmplSearchList])
if self.env["cfg"].enableModalFlagDeletes:
view.scripts = ["handlebars_1.0.min",
"jquery.json-2.4.min",
"adminModal.flagr",
"editForm.flagr",
"deleteFlagModal.flagr"]
flagsTmpl = flagViewTmplPartial(searchList=[self.tmplSearchList])
flagsTmpl.flag = flag
view.flag = str(flagsTmpl)
return view
| mit | 9,159,416,367,571,800,000 | 26.754386 | 81 | 0.631479 | false | 3.476923 | false | false | false |
rclement/yodel | demo/custom_filter_design.py | 1 | 3658 | import yodel.analysis
import yodel.filter
import yodel.complex
import yodel.conversion
import matplotlib.pyplot as plt
def frequency_response(response):
size = len(response)
freq_response_real = [0] * size
freq_response_imag = [0] * size
fft = yodel.analysis.FFT(size)
fft.forward(response, freq_response_real, freq_response_imag)
return freq_response_real, freq_response_imag
def amplitude_response(spec_real, spec_imag, db=True):
size = len(spec_real)
amp = [0] * size
for i in range(0, size):
amp[i] = yodel.complex.modulus(spec_real[i], spec_imag[i])
if db:
amp[i] = yodel.conversion.lin2db(amp[i])
return amp
def phase_response(spec_real, spec_imag, degrees=True):
size = len(spec_real)
pha = [0] * size
for i in range(0, size):
pha[i] = yodel.complex.phase(spec_real[i], spec_imag[i])
if degrees:
pha[i] = (pha[i] * 180.0 / math.pi)
return pha
class CustomFilterDesigner:
def __init__(self):
self.samplerate = 48000
self.framesize = 256
self.frsize = int((self.framesize/2)+1)
self.custom_fr = [1] * self.frsize
self.hzscale = [(i*self.samplerate) / (2.0*self.frsize) for i in range(0, self.frsize)]
self.flt = yodel.filter.Custom(self.samplerate, self.framesize)
self.pressed = None
self.update_filter()
self.create_plot()
def update_filter(self):
self.flt.design(self.custom_fr, False)
fr_re, fr_im = frequency_response(self.flt.ir)
self.fft_fr = amplitude_response(fr_re, fr_im, False)
def create_plot(self):
self.fig = plt.figure()
self.cid = self.fig.canvas.mpl_connect('button_press_event', self.onpress)
self.cid = self.fig.canvas.mpl_connect('button_release_event', self.onrelease)
self.cid = self.fig.canvas.mpl_connect('motion_notify_event', self.onmotion)
self.ax_custom_fr = self.fig.add_subplot(111)
self.ax_custom_fr.set_title('Custom Filter Design')
self.plot_custom_fr, = self.ax_custom_fr.plot(self.hzscale, self.custom_fr, 'r', label='Desired Frequency Response')
self.plot_fft_fr, = self.ax_custom_fr.plot(self.hzscale, self.fft_fr[0:self.frsize], 'b', label='Actual Frequency Response')
self.ax_custom_fr.legend()
self.ax_custom_fr.grid()
self.rescale_plot()
def rescale_plot(self):
self.ax_custom_fr.set_ylim(-1, 5)
plt.draw()
def onpress(self, event):
if event.inaxes != self.ax_custom_fr:
return
self.pressed = (event.xdata, event.ydata)
xpos = int(event.xdata * 2.0 * self.frsize / self.samplerate)
ypos = max(event.ydata, 0)
if xpos >= 0 and xpos < self.frsize:
self.custom_fr[xpos] = ypos
self.update_filter()
self.plot_custom_fr.set_ydata(self.custom_fr)
self.plot_fft_fr.set_ydata(self.fft_fr[0:self.frsize])
self.rescale_plot()
def onrelease(self, event):
self.pressed = None
def onmotion(self, event):
if self.pressed != None and event.xdata != None and event.ydata != None:
xpos = int(event.xdata * 2.0 * self.frsize / self.samplerate)
ypos = max(event.ydata, 0)
if xpos >= 0 and xpos < self.frsize:
self.custom_fr[xpos] = ypos
self.update_filter()
self.plot_custom_fr.set_ydata(self.custom_fr)
self.plot_fft_fr.set_ydata(self.fft_fr[0:self.frsize])
self.rescale_plot()
cfd = CustomFilterDesigner()
plt.show()
| mit | 2,789,876,935,842,057,700 | 32.254545 | 132 | 0.609076 | false | 3.137221 | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/healthcareservice.py | 1 | 11073 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/HealthcareService) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class HealthcareService(domainresource.DomainResource):
""" The details of a healthcare service available at a location.
"""
resource_type = "HealthcareService"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.active = None
""" Whether this healthcareservice is in active use.
Type `bool`. """
self.appointmentRequired = None
""" If an appointment is required for access to this service.
Type `bool`. """
self.availabilityExceptions = None
""" Description of availability exceptions.
Type `str`. """
self.availableTime = None
""" Times the Service Site is available.
List of `HealthcareServiceAvailableTime` items (represented as `dict` in JSON). """
self.category = None
""" Broad category of service being performed or delivered.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.characteristic = None
""" Collection of characteristics (attributes).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.comment = None
""" Additional description and/or any specific issues not covered
elsewhere.
Type `str`. """
self.coverageArea = None
""" Location(s) service is inteded for/available to.
List of `FHIRReference` items referencing `Location` (represented as `dict` in JSON). """
self.eligibility = None
""" Specific eligibility requirements required to use the service.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.eligibilityNote = None
""" Describes the eligibility conditions for the service.
Type `str`. """
self.endpoint = None
""" Technical endpoints providing access to services operated for the
location.
List of `FHIRReference` items referencing `Endpoint` (represented as `dict` in JSON). """
self.extraDetails = None
""" Extra details about the service that can't be placed in the other
fields.
Type `str`. """
self.identifier = None
""" External identifiers for this item.
List of `Identifier` items (represented as `dict` in JSON). """
self.location = None
""" Location(s) where service may be provided.
List of `FHIRReference` items referencing `Location` (represented as `dict` in JSON). """
self.name = None
""" Description of service as presented to a consumer while searching.
Type `str`. """
self.notAvailable = None
""" Not available during this time due to provided reason.
List of `HealthcareServiceNotAvailable` items (represented as `dict` in JSON). """
self.photo = None
""" Facilitates quick identification of the service.
Type `Attachment` (represented as `dict` in JSON). """
self.programName = None
""" Program Names that categorize the service.
List of `str` items. """
self.providedBy = None
""" Organization that provides this service.
Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
self.referralMethod = None
""" Ways that the service accepts referrals.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.serviceProvisionCode = None
""" Conditions under which service is available/offered.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.specialty = None
""" Specialties handled by the HealthcareService.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.telecom = None
""" Contacts related to the healthcare service.
List of `ContactPoint` items (represented as `dict` in JSON). """
self.type = None
""" Type of service that may be delivered or performed.
List of `CodeableConcept` items (represented as `dict` in JSON). """
super(HealthcareService, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareService, self).elementProperties()
js.extend([
("active", "active", bool, False, None, False),
("appointmentRequired", "appointmentRequired", bool, False, None, False),
("availabilityExceptions", "availabilityExceptions", str, False, None, False),
("availableTime", "availableTime", HealthcareServiceAvailableTime, True, None, False),
("category", "category", codeableconcept.CodeableConcept, False, None, False),
("characteristic", "characteristic", codeableconcept.CodeableConcept, True, None, False),
("comment", "comment", str, False, None, False),
("coverageArea", "coverageArea", fhirreference.FHIRReference, True, None, False),
("eligibility", "eligibility", codeableconcept.CodeableConcept, False, None, False),
("eligibilityNote", "eligibilityNote", str, False, None, False),
("endpoint", "endpoint", fhirreference.FHIRReference, True, None, False),
("extraDetails", "extraDetails", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("location", "location", fhirreference.FHIRReference, True, None, False),
("name", "name", str, False, None, False),
("notAvailable", "notAvailable", HealthcareServiceNotAvailable, True, None, False),
("photo", "photo", attachment.Attachment, False, None, False),
("programName", "programName", str, True, None, False),
("providedBy", "providedBy", fhirreference.FHIRReference, False, None, False),
("referralMethod", "referralMethod", codeableconcept.CodeableConcept, True, None, False),
("serviceProvisionCode", "serviceProvisionCode", codeableconcept.CodeableConcept, True, None, False),
("specialty", "specialty", codeableconcept.CodeableConcept, True, None, False),
("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
("type", "type", codeableconcept.CodeableConcept, True, None, False),
])
return js
from . import backboneelement
class HealthcareServiceAvailableTime(backboneelement.BackboneElement):
""" Times the Service Site is available.
A collection of times that the Service Site is available.
"""
resource_type = "HealthcareServiceAvailableTime"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.allDay = None
""" Always available? e.g. 24 hour service.
Type `bool`. """
self.availableEndTime = None
""" Closing time of day (ignored if allDay = true).
Type `FHIRDate` (represented as `str` in JSON). """
self.availableStartTime = None
""" Opening time of day (ignored if allDay = true).
Type `FHIRDate` (represented as `str` in JSON). """
self.daysOfWeek = None
""" mon | tue | wed | thu | fri | sat | sun.
List of `str` items. """
super(HealthcareServiceAvailableTime, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareServiceAvailableTime, self).elementProperties()
js.extend([
("allDay", "allDay", bool, False, None, False),
("availableEndTime", "availableEndTime", fhirdate.FHIRDate, False, None, False),
("availableStartTime", "availableStartTime", fhirdate.FHIRDate, False, None, False),
("daysOfWeek", "daysOfWeek", str, True, None, False),
])
return js
class HealthcareServiceNotAvailable(backboneelement.BackboneElement):
""" Not available during this time due to provided reason.
The HealthcareService is not available during this period of time due to
the provided reason.
"""
resource_type = "HealthcareServiceNotAvailable"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Reason presented to the user explaining why time not available.
Type `str`. """
self.during = None
""" Service not availablefrom this date.
Type `Period` (represented as `dict` in JSON). """
super(HealthcareServiceNotAvailable, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareServiceNotAvailable, self).elementProperties()
js.extend([
("description", "description", str, False, None, True),
("during", "during", period.Period, False, None, False),
])
return js
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
| bsd-3-clause | -2,152,641,763,683,152,100 | 40.47191 | 113 | 0.624311 | false | 4.477558 | false | false | false |
twilio/twilio-python | twilio/rest/verify/v2/__init__.py | 1 | 1682 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.version import Version
from twilio.rest.verify.v2.form import FormList
from twilio.rest.verify.v2.service import ServiceList
from twilio.rest.verify.v2.verification_attempt import VerificationAttemptList
class V2(Version):
def __init__(self, domain):
"""
Initialize the V2 version of Verify
:returns: V2 version of Verify
:rtype: twilio.rest.verify.v2.V2.V2
"""
super(V2, self).__init__(domain)
self.version = 'v2'
self._forms = None
self._services = None
self._verification_attempts = None
@property
def forms(self):
"""
:rtype: twilio.rest.verify.v2.form.FormList
"""
if self._forms is None:
self._forms = FormList(self)
return self._forms
@property
def services(self):
"""
:rtype: twilio.rest.verify.v2.service.ServiceList
"""
if self._services is None:
self._services = ServiceList(self)
return self._services
@property
def verification_attempts(self):
"""
:rtype: twilio.rest.verify.v2.verification_attempt.VerificationAttemptList
"""
if self._verification_attempts is None:
self._verification_attempts = VerificationAttemptList(self)
return self._verification_attempts
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Verify.V2>'
| mit | 5,366,185,485,770,542,000 | 25.28125 | 82 | 0.585612 | false | 3.902552 | false | false | false |
acutesoftware/faceswap | faceswap.py | 18 | 7474 | #!/usr/bin/python
# Copyright (c) 2015 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This is the code behind the Switching Eds blog post:
http://matthewearl.github.io/2015/07/28/switching-eds-with-python/
See the above for an explanation of the code below.
To run the script you'll need to install dlib (http://dlib.net) including its
Python bindings, and OpenCV. You'll also need to obtain the trained model from
sourceforge:
http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
Unzip with `bunzip2` and change `PREDICTOR_PATH` to refer to this file. The
script is run like so:
./faceswap.py <head image> <face image>
If successful, a file `output.jpg` will be produced with the facial features
from `<head image>` replaced with the facial features from `<face image>`.
"""
import cv2
import dlib
import numpy
import sys
PREDICTOR_PATH = "/home/matt/dlib-18.16/shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)
# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
NOSE_POINTS + MOUTH_POINTS,
]
# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
class TooManyFaces(Exception):
pass
class NoFaces(Exception):
pass
def get_landmarks(im):
rects = detector(im, 1)
if len(rects) > 1:
raise TooManyFaces
if len(rects) == 0:
raise NoFaces
return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])
def annotate_landmarks(im, landmarks):
im = im.copy()
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
cv2.putText(im, str(idx), pos,
fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
fontScale=0.4,
color=(0, 0, 255))
cv2.circle(im, pos, 3, color=(0, 255, 255))
return im
def draw_convex_hull(im, points, color):
points = cv2.convexHull(points)
cv2.fillConvexPoly(im, points, color=color)
def get_face_mask(im, landmarks):
im = numpy.zeros(im.shape[:2], dtype=numpy.float64)
for group in OVERLAY_POINTS:
draw_convex_hull(im,
landmarks[group],
color=1)
im = numpy.array([im, im, im]).transpose((1, 2, 0))
im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)
return im
def transformation_from_points(points1, points2):
"""
Return an affine transformation [s * R | T] such that:
sum ||s*R*p1,i + T - p2,i||^2
is minimized.
"""
# Solve the procrustes problem by subtracting centroids, scaling by the
# standard deviation, and then using the SVD to calculate the rotation. See
# the following for more details:
# https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
points1 = points1.astype(numpy.float64)
points2 = points2.astype(numpy.float64)
c1 = numpy.mean(points1, axis=0)
c2 = numpy.mean(points2, axis=0)
points1 -= c1
points2 -= c2
s1 = numpy.std(points1)
s2 = numpy.std(points2)
points1 /= s1
points2 /= s2
U, S, Vt = numpy.linalg.svd(points1.T * points2)
# The R we seek is in fact the transpose of the one given by U * Vt. This
# is because the above formulation assumes the matrix goes on the right
# (with row vectors) where as our solution requires the matrix to be on the
# left (with column vectors).
R = (U * Vt).T
return numpy.vstack([numpy.hstack(((s2 / s1) * R,
c2.T - (s2 / s1) * R * c1.T)),
numpy.matrix([0., 0., 1.])])
def read_im_and_landmarks(fname):
im = cv2.imread(fname, cv2.IMREAD_COLOR)
im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,
im.shape[0] * SCALE_FACTOR))
s = get_landmarks(im)
return im, s
def warp_im(im, M, dshape):
output_im = numpy.zeros(dshape, dtype=im.dtype)
cv2.warpAffine(im,
M[:2],
(dshape[1], dshape[0]),
dst=output_im,
borderMode=cv2.BORDER_TRANSPARENT,
flags=cv2.WARP_INVERSE_MAP)
return output_im
def correct_colours(im1, im2, landmarks1):
blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
blur_amount = int(blur_amount)
if blur_amount % 2 == 0:
blur_amount += 1
im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
# Avoid divide-by-zero errors.
im2_blur += 128 * (im2_blur <= 1.0)
return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
im2_blur.astype(numpy.float64))
im1, landmarks1 = read_im_and_landmarks(sys.argv[1])
im2, landmarks2 = read_im_and_landmarks(sys.argv[2])
M = transformation_from_points(landmarks1[ALIGN_POINTS],
landmarks2[ALIGN_POINTS])
mask = get_face_mask(im2, landmarks2)
warped_mask = warp_im(mask, M, im1.shape)
combined_mask = numpy.max([get_face_mask(im1, landmarks1), warped_mask],
axis=0)
warped_im2 = warp_im(im2, M, im1.shape)
warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)
output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
cv2.imwrite('output.jpg', output_im)
| mit | -4,521,035,938,372,268,500 | 33.127854 | 101 | 0.647444 | false | 3.140336 | false | false | false |
dan-passaro/django-recommend | src/django_recommend/models.py | 1 | 11423 | # coding: utf-8
"""Models for item-to-item collaborative filtering."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core import exceptions
from django.db import models
from django.db.models import signals as model_signals
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
import django_recommend
from . import conf
NO_RELATED_NAME = '+' # Try to clarify obscure Django syntax.
def respect_purge_setting(*args):
"""Raise or delete related objects based on settings.
This is a when_missing handler for
ObjectSimilarityQueryset.get_instances_for.
"""
if conf.settings.RECOMMEND_PURGE_MISSING_DATA:
django_recommend.forget_object(*args)
else:
raise
class ObjectSimilarityQueryset(models.QuerySet):
"""The custom manager used for the ObjectSimilarity class."""
def get_instances_for(self, obj, when_missing=respect_purge_setting):
"""Get the instances in this queryset that are not `obj`.
Returns a list.
when_missing:
a callback function to execute when an instance that should be
suggested is not present in the database (i.e. get() raises
ObjectDoesNotExist). This function will be called with two
parameters: the content type id, and the object id.
The default callback propagates the underlying ObjectDoesNotExist
exception.
If this method does not raise an exception, the triggering object
is simply ignored and not included in the result list. For this
reason it's possible for a queryset of 5 objects to only return,
say, 4 instances, if one of the objects referred to in an
ObjectSimilarity is in fact no longer present in the database.
"""
ctype = ContentType.objects.get_for_model(obj)
def get_object_from_ctype(contenttype, target_id):
"""The builtin method of doing this breaks with multiple DBs."""
return contenttype.model_class().objects.get(pk=target_id)
def get_object_params(sim_obj, num):
"""Get the content_type and PK of an object from sim_obj."""
prefix = 'object_{}_'.format(num)
target_id = getattr(sim_obj, prefix + 'id')
target_ctype = getattr(sim_obj, prefix + 'content_type')
return target_ctype, target_id
def get_other_object_params(sim_obj):
"""Get the content type and pk of the other object in sim_obj."""
same_id_as_1 = sim_obj.object_1_id == obj.pk
same_ctype_as_1 = sim_obj.object_1_content_type == ctype
if same_id_as_1 and same_ctype_as_1:
return get_object_params(sim_obj, 2)
return get_object_params(sim_obj, 1)
instances = []
for sim in self:
other_ctype, other_pk = get_other_object_params(sim)
try:
inst = get_object_from_ctype(other_ctype, other_pk)
except exceptions.ObjectDoesNotExist:
when_missing(other_ctype.pk, other_pk)
else:
instances.append(inst)
return instances
def __build_query(self, qset):
"""Get a lookup to match qset objects as either object_1 or object_2.
qset is any Django queryset.
"""
model = qset.model
ctype = ContentType.objects.get_for_model(model)
# Prevent cross-db joins
if qset.db != self.db:
ids = qset.values_list('id', flat=True)
# Forces the DB query to happen early
qset = list(ids)
lookup = ((Q(object_1_content_type=ctype) & Q(object_1_id__in=qset)) |
(Q(object_2_content_type=ctype) & Q(object_2_id__in=qset)))
return lookup
def exclude_objects(self, qset):
"""Exclude all similarities that include the given objects.
qset is a queryset of model instances to exclude. These should be the
types of objects stored in ObjectSimilarity/UserScore, **not**
ObjectSimilarity/UserScore themselves.
"""
return self.exclude(self.__build_query(qset))
def filter_objects(self, qset):
"""Find all similarities that include the given objects.
qset is a queryset of model instances to include. These should be the
types of objects stored in ObjectSimilarity/UserScore, **not**
ObjectSimilarity/UserScore themselves.
"""
return self.filter(self.__build_query(qset))
@python_2_unicode_compatible
class ObjectSimilarity(models.Model): # pylint: disable=model-missing-unicode
"""Similarity between two Django objects."""
object_1_id = models.IntegerField()
object_1_content_type = models.ForeignKey(ContentType,
related_name=NO_RELATED_NAME)
object_1 = GenericForeignKey('object_1_content_type', 'object_1_id')
object_2_id = models.IntegerField()
object_2_content_type = models.ForeignKey(ContentType,
related_name=NO_RELATED_NAME)
object_2 = GenericForeignKey('object_2_content_type', 'object_2_id')
# The actual similarity rating
score = models.FloatField()
objects = ObjectSimilarityQueryset.as_manager()
class Meta:
index_together = (
('object_1_id', 'object_1_content_type'),
('object_2_id', 'object_2_content_type'),
)
ordering = ['-score']
unique_together = (
'object_1_id', 'object_1_content_type', 'object_2_id',
'object_2_content_type',
)
def clean(self):
if (self.object_1_id == self.object_2_id and
self.object_1_content_type == self.object_2_content_type):
raise ValidationError('An object cannot be similar to itself.')
def save(self, *args, **kwargs):
self.full_clean()
super(ObjectSimilarity, self).save(*args, **kwargs)
@classmethod
def set(cls, obj_a, obj_b, score):
"""Set the similarity between obj_a and obj_b to score.
Returns the created ObjectSimilarity instance.
"""
# Always store the lower PKs as object_1, so the pair
# (object_1, object_2) has a distinct ordering, to prevent duplicate
# data.
def sort_key(obj):
"""Get a sortable tuple representing obj."""
return (ContentType.objects.get_for_model(obj).pk, obj.pk)
obj_a_key = sort_key(obj_a)
obj_b_key = sort_key(obj_b)
if obj_a_key < obj_b_key:
obj_1, obj_2 = obj_a, obj_b
else:
obj_1, obj_2 = obj_b, obj_a
inst_lookup = dict(
object_1_content_type=ContentType.objects.get_for_model(obj_1),
object_1_id=obj_1.pk,
object_2_content_type=ContentType.objects.get_for_model(obj_2),
object_2_id=obj_2.pk,
)
# Save space by not storing scores of 0.
if score == 0:
ObjectSimilarity.objects.filter(**inst_lookup).delete()
sim = None
else:
kwargs = dict(inst_lookup)
kwargs['defaults'] = {'score': score}
sim, _ = ObjectSimilarity.objects.update_or_create(**kwargs)
return sim
def __str__(self):
return '{}, {}: {}'.format(self.object_1_id, self.object_2_id,
self.score)
@python_2_unicode_compatible
class UserScore(models.Model):
"""Store a user's rating of an object.
"Rating" doesn't necessarily need to be e.g. 1-10 points or 1-5 star voting
system. It is often easy to treat e.g. object view as 1 point and object
bookmarking as 5 points, for example. This is called 'implicit feedback.'
"""
object_id = models.IntegerField()
object_content_type = models.ForeignKey(ContentType)
object = GenericForeignKey('object_content_type', 'object_id')
user = models.CharField(max_length=255, db_index=True)
score = models.FloatField()
class Meta:
index_together = ('object_id', 'object_content_type')
unique_together = ('object_id', 'object_content_type', 'user')
def save(self, *args, **kwargs):
self.full_clean()
super(UserScore, self).save(*args, **kwargs)
@classmethod
def __user_str(cls, user_or_str):
"""Coerce user_or_str params into a string."""
try:
user_id = user_or_str.pk
except AttributeError:
return user_or_str
return 'user:{}'.format(user_id)
@classmethod
def set(cls, user_or_str, obj, score):
"""Store the score for the given user and given object.
Returns the created UserScore instance.
"""
user = cls.__user_str(user_or_str)
ctype = ContentType.objects.get_for_model(obj)
inst_lookup = dict(
user=user, object_id=obj.pk, object_content_type=ctype)
if score:
kwargs = dict(inst_lookup)
kwargs['defaults'] = {'score': score}
inst, _ = cls.objects.update_or_create(**kwargs)
else:
inst = None
cls.objects.filter(**inst_lookup).delete()
return inst
@classmethod
def setdefault(cls, user_or_str, obj, score):
"""Store the user's score only if there's no existing score."""
user = cls.__user_str(user_or_str)
ctype = ContentType.objects.get_for_model(obj)
cls.objects.get_or_create(
user=user, object_id=obj.pk, object_content_type=ctype,
defaults={'score': score}
)
@classmethod
def get(cls, user_or_str, obj):
"""Get the score that user gave to obj.
Returns the actual score value, not the UserScore instance.
"Unrated" objects return 0.
"""
user = cls.__user_str(user_or_str)
ctype = ContentType.objects.get_for_model(obj)
try:
inst = cls.objects.get(user=user, object_id=obj.pk,
object_content_type=ctype)
return inst.score
except cls.DoesNotExist:
return 0
@classmethod
def scores_for(cls, obj):
"""Get all scores for the given object.
Returns a dictionary, not a queryset.
"""
ctype = ContentType.objects.get_for_model(obj)
scores = cls.objects.filter(object_content_type=ctype,
object_id=obj.pk)
return {score.user: score.score for score in scores}
def __str__(self):
return '{}, {}: {}'.format(self.user, self.object_id, self.score)
def call_handler(*args, **kwargs):
"""Proxy for the signal handler defined in tasks.
Prevents a circular import problem.
"""
from . import tasks
tasks.signal_handler(*args, **kwargs)
model_signals.post_save.connect(call_handler, UserScore,
dispatch_uid="recommend_post_save")
model_signals.post_delete.connect(call_handler, UserScore,
dispatch_uid="recommend_post_save")
| mit | -730,400,907,116,461,400 | 33.303303 | 79 | 0.60597 | false | 3.996851 | false | false | false |
romanoid/buck | python-dsl/buck_parser/buck.py | 2 | 77780 | # Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import abc
import collections
import contextlib
import functools
import imp
import inspect
import json
import optparse
import os
import os.path
import platform
import re
import sys
import time
import traceback
import types
from pathlib import Path, PurePath
from select import select as _select
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Pattern,
Set,
Tuple,
TypeVar,
Union,
)
import pywatchman
from pywatchman import WatchmanError
from six import PY3, iteritems, itervalues, string_types
# Python 2.6, 2.7, use iterator filter from Python 3
from six.moves import builtins, filter
from .deterministic_set import DeterministicSet
from .glob_internal import glob_internal
from .glob_watchman import SyncCookieState, glob_watchman
from .json_encoder import BuckJSONEncoder
from .module_whitelist import ImportWhitelistManager
from .profiler import Profiler, Tracer, emit_trace, scoped_trace, traced
from .select_support import SelectorList, SelectorValue
from .struct import create_struct_class, struct
from .util import (
Diagnostic,
cygwin_adjusted_path,
get_caller_frame,
is_in_dir,
is_special,
)
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
# Those tagged with @provide_as_native_rule will be present unless
# explicitly disabled by parser.native_rules_enabled_in_build_files
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is a object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
#
# "cell_name" - The cell name the build file is in.
BUILD_FUNCTIONS = [] # type: List[Callable]
NATIVE_FUNCTIONS = [] # type: List[Callable]
# Wait this many seconds on recv() or send() in the pywatchman client
# if not otherwise specified in .buckconfig
DEFAULT_WATCHMAN_QUERY_TIMEOUT = 60.0 # type: float
# Globals that should not be copied from one module into another
_HIDDEN_GLOBALS = {"include_defs", "load"} # type: Set[str]
ORIGINAL_IMPORT = builtins.__import__
_LOAD_TARGET_PATH_RE = re.compile(
r"^(?P<root>(?P<cell>@?[\w\-.]+)?//)?(?P<package>.*):(?P<target>.*)$"
) # type: Pattern[str]
# matches anything equivalent to recursive glob on all dirs
# e.g. "**/", "*/**/", "*/*/**/"
_RECURSIVE_GLOB_PATTERN = re.compile("^(\*/)*\*\*/") # type: Pattern[str]
class AbstractContext(object):
"""Superclass of execution contexts."""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def includes(self):
# type: () -> Set[str]
raise NotImplementedError()
@abc.abstractproperty
def used_configs(self):
# type: () -> Dict[str, Dict[str, str]]
raise NotImplementedError()
@abc.abstractproperty
def used_env_vars(self):
# type: () -> Dict[str, str]
raise NotImplementedError()
@abc.abstractproperty
def diagnostics(self):
# type: () -> List[Diagnostic]
raise NotImplementedError()
def merge(self, other):
# type: (AbstractContext) -> None
"""Merge the context of an included file into the current context.
:param AbstractContext other: the include context to merge.
:rtype: None
"""
self.includes.update(other.includes)
self.diagnostics.extend(other.diagnostics)
self.used_configs.update(other.used_configs)
self.used_env_vars.update(other.used_env_vars)
class BuildFileContext(AbstractContext):
"""The build context used when processing a build file."""
def __init__(
self,
project_root,
base_path,
path,
dirname,
cell_name,
allow_empty_globs,
ignore_paths,
watchman_client,
watchman_watch_root,
watchman_project_prefix,
sync_cookie_state,
watchman_glob_stat_results,
watchman_use_glob_generator,
implicit_package_symbols,
):
self.globals = {}
self._includes = set()
self._used_configs = collections.defaultdict(dict)
self._used_env_vars = {}
self._diagnostics = []
self.rules = {}
self.project_root = project_root
self.base_path = base_path
self.path = path
self.cell_name = cell_name
self.dirname = dirname
self.allow_empty_globs = allow_empty_globs
self.ignore_paths = ignore_paths
self.watchman_client = watchman_client
self.watchman_watch_root = watchman_watch_root
self.watchman_project_prefix = watchman_project_prefix
self.sync_cookie_state = sync_cookie_state
self.watchman_glob_stat_results = watchman_glob_stat_results
self.watchman_use_glob_generator = watchman_use_glob_generator
self.implicit_package_symbols = implicit_package_symbols
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
class IncludeContext(AbstractContext):
"""The build context used when processing an include."""
def __init__(self, cell_name, path):
# type: (str, str) -> None
"""
:param cell_name: a cell name of the current context. Note that this cell name can be
different from the one BUCK file is evaluated in, since it can load extension files
from other cells, which should resolve their loads relative to their own location.
"""
self.cell_name = cell_name
self.path = path
self.globals = {}
self._includes = set()
self._used_configs = collections.defaultdict(dict)
self._used_env_vars = {}
self._diagnostics = []
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
# Generic context type that should be used in places where return and parameter
# types are the same but could be either of the concrete contexts.
_GCT = TypeVar("_GCT", IncludeContext, BuildFileContext)
LoadStatement = Dict[str, Union[str, Dict[str, str]]]
BuildInclude = collections.namedtuple("BuildInclude", ["cell_name", "path"])
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
Note that while the function is specified via the constructor, the build
environment must be assigned after construction, for the build environment
currently being used.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
# type: (Callable) -> None
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({"build_env": self.build_env})
try:
return self.func(*args, **updated_kwargs)
except TypeError:
missing_args, extra_args = get_mismatched_args(
self.func, args, updated_kwargs
)
if missing_args or extra_args:
name = "[missing]"
if "name" in updated_kwargs:
name = updated_kwargs["name"]
elif len(args) > 0:
# Optimistically hope that name is the first arg. It generally is...
name = args[0]
raise IncorrectArgumentsException(
self.func.func_name, name, missing_args, extra_args
)
raise
HostInfoOs = collections.namedtuple(
"HostInfoOs", ["is_linux", "is_macos", "is_windows", "is_freebsd", "is_unknown"]
)
HostInfoArch = collections.namedtuple(
"HostInfoArch",
[
"is_aarch64",
"is_arm",
"is_armeb",
"is_i386",
"is_mips",
"is_mips64",
"is_mipsel",
"is_mipsel64",
"is_powerpc",
"is_ppc64",
"is_unknown",
"is_x86_64",
],
)
HostInfo = collections.namedtuple("HostInfo", ["os", "arch"])
__supported_oses = {
"darwin": "macos",
"windows": "windows",
"linux": "linux",
"freebsd": "freebsd",
} # type: Dict[str, str]
# Pulled from com.facebook.buck.util.environment.Architecture.java as
# possible values. amd64 and arm64 are remapped, but they may not
# actually be present on most systems
__supported_archs = {
"aarch64": "aarch64",
"arm": "arm",
"armeb": "armeb",
"i386": "i386",
"mips": "mips",
"mips64": "mips64",
"mipsel": "mipsel",
"mipsel64": "mipsel64",
"powerpc": "powerpc",
"ppc64": "ppc64",
"unknown": "unknown",
"x86_64": "x86_64",
"amd64": "x86_64",
"arm64": "aarch64",
} # type: Dict[str, str]
def host_info(platform_system=platform.system, platform_machine=platform.machine):
host_arch = __supported_archs.get(platform_machine().lower(), "unknown")
host_os = __supported_oses.get(platform_system().lower(), "unknown")
return HostInfo(
os=HostInfoOs(
is_linux=(host_os == "linux"),
is_macos=(host_os == "macos"),
is_windows=(host_os == "windows"),
is_freebsd=(host_os == "freebsd"),
is_unknown=(host_os == "unknown"),
),
arch=HostInfoArch(
is_aarch64=(host_arch == "aarch64"),
is_arm=(host_arch == "arm"),
is_armeb=(host_arch == "armeb"),
is_i386=(host_arch == "i386"),
is_mips=(host_arch == "mips"),
is_mips64=(host_arch == "mips64"),
is_mipsel=(host_arch == "mipsel"),
is_mipsel64=(host_arch == "mipsel64"),
is_powerpc=(host_arch == "powerpc"),
is_ppc64=(host_arch == "ppc64"),
is_unknown=(host_arch == "unknown"),
is_x86_64=(host_arch == "x86_64"),
),
)
_cached_host_info = host_info()
def get_mismatched_args(func, actual_args, actual_kwargs):
argspec = inspect.getargspec(func)
required_args = set()
all_acceptable_args = []
for i, arg in enumerate(argspec.args):
if i < (len(argspec.args) - len(argspec.defaults)):
required_args.add(arg)
all_acceptable_args.append(arg)
extra_kwargs = set(actual_kwargs) - set(all_acceptable_args)
for k in set(actual_kwargs) - extra_kwargs:
all_acceptable_args.remove(k)
not_supplied_args = all_acceptable_args[len(actual_args) :]
missing_args = [arg for arg in not_supplied_args if arg in required_args]
return missing_args, sorted(list(extra_kwargs))
class IncorrectArgumentsException(TypeError):
def __init__(self, func_name, name_arg, missing_args, extra_args):
self.missing_args = missing_args
self.extra_args = extra_args
message = "Incorrect arguments to %s with name %s:" % (func_name, name_arg)
if missing_args:
message += " Missing required args: %s" % (", ".join(missing_args),)
if extra_args:
message += " Extra unknown kwargs: %s" % (", ".join(extra_args),)
super(IncorrectArgumentsException, self).__init__(message)
class BuildFileFailError(Exception):
pass
def provide_as_native_rule(func):
# type: (Callable) -> Callable
NATIVE_FUNCTIONS.append(func)
return func
def provide_for_build(func):
# type: (Callable) -> Callable
BUILD_FUNCTIONS.append(func)
return func
def add_rule(rule, build_env):
# type: (Dict, BuildFileContext) -> None
"""Record a rule in the current context.
This should be invoked by rule functions generated by the Java code.
:param dict rule: dictionary of the rule's fields.
:param build_env: the current context.
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `{}()` at the top-level of an included file.".format(
rule["buck.type"]
)
# Include the base path of the BUCK file so the reader consuming this
# output will know which BUCK file the rule came from.
if "name" not in rule:
raise ValueError("rules must contain the field 'name'. Found %s." % rule)
rule_name = rule["name"]
if not isinstance(rule_name, string_types):
raise ValueError("rules 'name' field must be a string. Found %s." % rule_name)
if rule_name in build_env.rules:
raise ValueError(
"Duplicate rule definition '%s' found. Found %s and %s"
% (rule_name, rule, build_env.rules[rule_name])
)
rule["buck.base_path"] = build_env.base_path
build_env.rules[rule_name] = rule
@traced(stats_key="Glob")
def glob(
includes, excludes=None, include_dotfiles=False, build_env=None, search_base=None
):
# type: (List[str], Optional[List[str]], bool, BuildFileContext, str) -> List[str]
if excludes is None:
excludes = []
assert isinstance(
build_env, BuildFileContext
), "Cannot use `glob()` at the top-level of an included file."
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(
includes, string_types
), "The first argument to glob() must be a list of strings."
assert not isinstance(
excludes, string_types
), "The excludes argument must be a list of strings."
if search_base is None:
search_base = Path(build_env.dirname)
if build_env.dirname == build_env.project_root and any(
_RECURSIVE_GLOB_PATTERN.match(pattern) for pattern in includes
):
fail(
"Recursive globs are prohibited at top-level directory", build_env=build_env
)
results = None
if not includes:
results = []
elif build_env.watchman_client:
results = glob_watchman(
includes,
excludes,
include_dotfiles,
build_env.base_path,
build_env.watchman_watch_root,
build_env.watchman_project_prefix,
build_env.sync_cookie_state,
build_env.watchman_client,
build_env.diagnostics,
build_env.watchman_glob_stat_results,
build_env.watchman_use_glob_generator,
)
if results:
# glob should consistently return paths of type str, but
# watchman client returns unicode in Python 2 instead.
# Extra check is added to make this conversion resilient to
# watchman API changes.
results = [
res.encode("utf-8") if not isinstance(res, str) else res
for res in results
]
if results is None:
results = glob_internal(
includes,
excludes,
build_env.ignore_paths,
include_dotfiles,
search_base,
build_env.project_root,
)
assert build_env.allow_empty_globs or results, (
"glob(includes={includes}, excludes={excludes}, include_dotfiles={include_dotfiles}) "
+ "returned no results. (allow_empty_globs is set to false in the Buck "
+ "configuration)"
).format(includes=includes, excludes=excludes, include_dotfiles=include_dotfiles)
return results
def merge_maps(*header_maps):
result = {}
for header_map in header_maps:
for key in header_map:
if key in result and result[key] != header_map[key]:
assert False, (
"Conflicting header files in header search paths. "
+ '"%s" maps to both "%s" and "%s".'
% (key, result[key], header_map[key])
)
result[key] = header_map[key]
return result
def single_subdir_glob(
dirpath, glob_pattern, excludes=None, prefix=None, build_env=None, search_base=None
):
if excludes is None:
excludes = []
results = {}
files = glob(
[os.path.join(dirpath, glob_pattern)],
excludes=excludes,
build_env=build_env,
search_base=search_base,
)
for f in files:
if dirpath:
key = f[len(dirpath) + 1 :]
else:
key = f
if prefix:
# `f` is a string, but we need to create correct platform-specific Path.
# This method is called by tests for both posix style paths and
# windows style paths.
# When running tests, search_base is always set
# and happens to have the correct platform-specific Path type.
cls = PurePath if not search_base else type(search_base)
key = str(cls(prefix) / cls(key))
results[key] = f
return results
def subdir_glob(
glob_specs, excludes=None, prefix=None, build_env=None, search_base=None
):
"""
Given a list of tuples, the form of (relative-sub-directory, glob-pattern),
return a dict of sub-directory relative paths to full paths. Useful for
defining header maps for C/C++ libraries which should be relative the given
sub-directory.
If prefix is not None, prepends it it to each key in the dictionary.
"""
if excludes is None:
excludes = []
results = []
for dirpath, glob_pattern in glob_specs:
results.append(
single_subdir_glob(
dirpath, glob_pattern, excludes, prefix, build_env, search_base
)
)
return merge_maps(*results)
def _get_package_name(func_name, build_env=None):
"""The name of the package being evaluated.
For example, in the BUCK file "some/package/BUCK", its value will be
"some/package".
If the BUCK file calls a function defined in a *.bzl file, package_name()
will return the package of the calling BUCK file. For example, if there is
a BUCK file at "some/package/BUCK" and "some/other/package/ext.bzl"
extension file, when BUCK file calls a function inside of ext.bzl file
it will still return "some/package" and not "some/other/package".
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
assert isinstance(build_env, BuildFileContext), (
"Cannot use `%s()` at the top-level of an included file." % func_name
)
return build_env.base_path
@provide_for_build
def get_base_path(build_env=None):
"""Get the base path to the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
return _get_package_name("get_base_path", build_env=build_env)
@provide_for_build
def package_name(build_env=None):
"""The name of the package being evaluated.
For example, in the BUCK file "some/package/BUCK", its value will be
"some/package".
If the BUCK file calls a function defined in a *.bzl file, package_name()
will return the package of the calling BUCK file. For example, if there is
a BUCK file at "some/package/BUCK" and "some/other/package/ext.bzl"
extension file, when BUCK file calls a function inside of ext.bzl file
it will still return "some/package" and not "some/other/package".
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
return _get_package_name("package_name", build_env=build_env)
@provide_for_build
def fail(message, attr=None, build_env=None):
"""Raises a parse error.
:param message: Error message to display for the user.
The object is converted to a string.
:param attr: Optional name of the attribute that caused the error.
"""
attribute_prefix = "attribute " + attr + ": " if attr is not None else ""
msg = attribute_prefix + str(message)
raise BuildFileFailError(msg)
@provide_for_build
def get_cell_name(build_env=None):
"""Get the cell name of the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "cell". The return value will be "" if
the build file does not have a cell
:rtype: str
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `get_cell_name()` at the top-level of an included file."
return build_env.cell_name
@provide_for_build
def select(conditions, no_match_message=None, build_env=None):
"""Allows to provide a configurable value for an attribute"""
return SelectorList([SelectorValue(conditions, no_match_message)])
@provide_as_native_rule
def repository_name(build_env=None):
"""
Get the repository (cell) name of the build file that was initially
evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "@cell". The return value will be "@" if
the build file is in the main (standalone) repository.
:rtype: str
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `repository_name()` at the top-level of an included file."
return "@" + build_env.cell_name
@provide_as_native_rule
def rule_exists(name, build_env=None):
"""
:param name: name of the build rule
:param build_env: current build environment
:return: True if a rule with provided name has already been defined in
current file.
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `rule_exists()` at the top-level of an included file."
return name in build_env.rules
def flatten_list_of_dicts(list_of_dicts):
"""Flatten the given list of dictionaries by merging l[1:] onto
l[0], one at a time. Key/Value pairs which appear in later list entries
will override those that appear in earlier entries
:param list_of_dicts: the list of dict objects to flatten.
:return: a single dict containing the flattened list
"""
return_value = {}
for d in list_of_dicts:
for k, v in iteritems(d):
return_value[k] = v
return return_value
@provide_for_build
def flatten_dicts(*args, **_):
"""Flatten the given list of dictionaries by merging args[1:] onto
args[0], one at a time.
:param *args: the list of dict objects to flatten.
:param **_: ignore the build_env kwarg
:return: a single dict containing the flattened list
"""
return flatten_list_of_dicts(args)
@provide_for_build
def depset(elements, build_env=None):
"""Creates an instance of sets with deterministic iteration order.
:param elements: the list of elements constituting the returned depset.
:rtype: DeterministicSet
"""
return DeterministicSet(elements)
GENDEPS_SIGNATURE = re.compile(
r"^#@# GENERATED FILE: DO NOT MODIFY ([a-f0-9]{40}) #@#\n$"
)
class BuildFileProcessor(object):
"""Handles the processing of a single build file.
:type _current_build_env: AbstractContext | None
"""
SAFE_MODULES_CONFIG = {
"os": ["environ", "getenv", "path", "sep", "pathsep", "linesep"],
"os.path": [
"basename",
"commonprefix",
"dirname",
"isabs",
"join",
"normcase",
"relpath",
"split",
"splitdrive",
"splitext",
"sep",
"pathsep",
],
"pipes": ["quote"],
}
def __init__(
self,
project_root,
cell_roots,
cell_name,
build_file_name,
allow_empty_globs,
watchman_client,
watchman_glob_stat_results,
watchman_use_glob_generator,
project_import_whitelist=None,
implicit_includes=None,
extra_funcs=None,
configs=None,
env_vars=None,
ignore_paths=None,
disable_implicit_native_rules=False,
warn_about_deprecated_syntax=True,
):
if project_import_whitelist is None:
project_import_whitelist = []
if implicit_includes is None:
implicit_includes = []
if extra_funcs is None:
extra_funcs = []
if configs is None:
configs = {}
if env_vars is None:
env_vars = {}
if ignore_paths is None:
ignore_paths = []
self._include_cache = {}
self._current_build_env = None
self._sync_cookie_state = SyncCookieState()
self._project_root = project_root
self._cell_roots = cell_roots
self._cell_name = cell_name
self._build_file_name = build_file_name
self._implicit_includes = implicit_includes
self._allow_empty_globs = allow_empty_globs
self._watchman_client = watchman_client
self._watchman_glob_stat_results = watchman_glob_stat_results
self._watchman_use_glob_generator = watchman_use_glob_generator
self._configs = configs
self._env_vars = env_vars
self._ignore_paths = ignore_paths
self._disable_implicit_native_rules = disable_implicit_native_rules
self._warn_about_deprecated_syntax = warn_about_deprecated_syntax
lazy_global_functions = {}
lazy_native_functions = {}
for func in BUILD_FUNCTIONS + extra_funcs:
func_with_env = LazyBuildEnvPartial(func)
lazy_global_functions[func.__name__] = func_with_env
for func in NATIVE_FUNCTIONS:
func_with_env = LazyBuildEnvPartial(func)
lazy_native_functions[func.__name__] = func_with_env
self._global_functions = lazy_global_functions
self._native_functions = lazy_native_functions
self._native_module_class_for_extension = self._create_native_module_class(
self._global_functions, self._native_functions
)
self._native_module_class_for_build_file = self._create_native_module_class(
self._global_functions,
[] if self._disable_implicit_native_rules else self._native_functions,
)
self._import_whitelist_manager = ImportWhitelistManager(
import_whitelist=self._create_import_whitelist(project_import_whitelist),
safe_modules_config=self.SAFE_MODULES_CONFIG,
path_predicate=lambda path: is_in_dir(path, self._project_root),
)
# Set of helpers callable from the child environment.
self._default_globals_for_extension = self._create_default_globals(False, False)
self._default_globals_for_implicit_include = self._create_default_globals(
False, True
)
self._default_globals_for_build_file = self._create_default_globals(True, False)
def _create_default_globals(self, is_build_file, is_implicit_include):
# type: (bool) -> Dict[str, Callable]
return {
"include_defs": functools.partial(self._include_defs, is_implicit_include),
"add_build_file_dep": self._add_build_file_dep,
"read_config": self._read_config,
"implicit_package_symbol": self._implicit_package_symbol,
"allow_unsafe_import": self._import_whitelist_manager.allow_unsafe_import,
"glob": self._glob,
"subdir_glob": self._subdir_glob,
"load": functools.partial(self._load, is_implicit_include),
"struct": struct,
"provider": self._provider,
"host_info": self._host_info,
"native": self._create_native_module(is_build_file=is_build_file),
}
def _create_native_module(self, is_build_file):
"""
Creates a native module exposing built-in Buck rules.
This module allows clients to refer to built-in Buck rules using
"native.<native_rule>" syntax in their build files. For example,
"native.java_library(...)" will use a native Java library rule.
:return: 'native' module struct.
"""
native_globals = {}
self._install_builtins(native_globals, force_native_rules=not is_build_file)
assert "glob" not in native_globals
assert "host_info" not in native_globals
assert "implicit_package_symbol" not in native_globals
assert "read_config" not in native_globals
native_globals["glob"] = self._glob
native_globals["host_info"] = self._host_info
native_globals["implicit_package_symbol"] = self._implicit_package_symbol
native_globals["read_config"] = self._read_config
return (
self._native_module_class_for_build_file(**native_globals)
if is_build_file
else self._native_module_class_for_extension(**native_globals)
)
@staticmethod
def _create_native_module_class(global_functions, native_functions):
"""
Creates a native module class.
:return: namedtuple instance for native module
"""
return collections.namedtuple(
"native",
list(global_functions)
+ list(native_functions)
+ ["glob", "host_info", "read_config", "implicit_package_symbol"],
)
def _wrap_env_var_read(self, read, real):
"""
Return wrapper around function that reads an environment variable so
that the read is recorded.
"""
@functools.wraps(real)
def wrapper(varname, *arg, **kwargs):
self._record_env_var(varname, read(varname))
return real(varname, *arg, **kwargs)
# Save the real function for restoration.
wrapper._real = real
return wrapper
@contextlib.contextmanager
def _with_env_interceptor(self, read, obj, *attrs):
"""
Wrap a function, found at `obj.attr`, that reads an environment
variable in a new function which records the env var read.
"""
orig = []
for attr in attrs:
real = getattr(obj, attr)
wrapped = self._wrap_env_var_read(read, real)
setattr(obj, attr, wrapped)
orig.append((attr, real))
try:
yield
finally:
for attr, real in orig:
setattr(obj, attr, real)
@contextlib.contextmanager
def with_env_interceptors(self):
"""
Install environment variable read interceptors into all known ways that
a build file can access the environment.
"""
# Use a copy of the env to provide a function to get at the low-level
# environment. The wrappers will use this when recording the env var.
read = dict(os.environ).get
# Install interceptors into the main ways a user can read the env.
with self._with_env_interceptor(
read, os.environ, "__contains__", "__getitem__", "get"
):
yield
@staticmethod
def _merge_explicit_globals(src, dst, whitelist=None, whitelist_mapping=None):
# type: (types.ModuleType, Dict[str, Any], Tuple[str], Dict[str, str]) -> None
"""Copy explicitly requested global definitions from one globals dict to another.
If whitelist is set, only globals from the whitelist will be pulled in.
If whitelist_mapping is set, globals will be exported under the name of the keyword. For
example, foo="bar" would mean that a variable with name "bar" in imported file, will be
available as "foo" in current file.
"""
if whitelist is not None:
for symbol in whitelist:
if symbol not in src.__dict__:
raise KeyError('"%s" is not defined in %s' % (symbol, src.__name__))
dst[symbol] = src.__dict__[symbol]
if whitelist_mapping is not None:
for exported_name, symbol in iteritems(whitelist_mapping):
if symbol not in src.__dict__:
raise KeyError('"%s" is not defined in %s' % (symbol, src.__name__))
dst[exported_name] = src.__dict__[symbol]
def _merge_globals(self, mod, dst):
# type: (types.ModuleType, Dict[str, Any]) -> None
"""Copy the global definitions from one globals dict to another.
Ignores special attributes and attributes starting with '_', which
typically denote module-level private attributes.
"""
keys = getattr(mod, "__all__", mod.__dict__.keys())
for key in keys:
# Block copying modules unless they were specified in '__all__'
block_copying_module = not hasattr(mod, "__all__") and isinstance(
mod.__dict__[key], types.ModuleType
)
if (
not key.startswith("_")
and key not in _HIDDEN_GLOBALS
and not block_copying_module
):
dst[key] = mod.__dict__[key]
def _update_functions(self, build_env):
"""
Updates the build functions to use the given build context when called.
"""
for function in itervalues(self._global_functions):
function.build_env = build_env
for function in itervalues(self._native_functions):
function.build_env = build_env
def _install_builtins(self, namespace, force_native_rules=False):
"""
Installs the build functions, by their name, into the given namespace.
"""
for name, function in iteritems(self._global_functions):
namespace[name] = function.invoke
if not self._disable_implicit_native_rules or force_native_rules:
for name, function in iteritems(self._native_functions):
namespace[name] = function.invoke
@contextlib.contextmanager
def with_builtins(self, namespace):
"""
Installs the build functions for the duration of a `with` block.
"""
original_namespace = namespace.copy()
self._install_builtins(namespace)
try:
yield
finally:
namespace.clear()
namespace.update(original_namespace)
def _resolve_include(self, name):
# type: (str) -> BuildInclude
"""Resolve the given include def name to a BuildInclude metadata."""
match = re.match(r"^([A-Za-z0-9_]*)//(.*)$", name)
if match is None:
raise ValueError(
"include_defs argument {} should be in the form of "
"//path or cellname//path".format(name)
)
cell_name = match.group(1)
relative_path = match.group(2)
if len(cell_name) > 0:
cell_root = self._cell_roots.get(cell_name)
if cell_root is None:
raise KeyError(
"include_defs argument {} references an unknown cell named {} "
"known cells: {!r}".format(name, cell_name, self._cell_roots)
)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(cell_root, relative_path)),
)
else:
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(self._project_root, relative_path)),
)
def _get_load_path(self, label):
# type: (str) -> BuildInclude
"""Resolve the given load function label to a BuildInclude metadata."""
match = _LOAD_TARGET_PATH_RE.match(label)
if match is None:
raise ValueError(
"load label {} should be in the form of "
"//path:file or cellname//path:file".format(label)
)
cell_name = match.group("cell")
if cell_name:
if cell_name.startswith("@"):
cell_name = cell_name[1:]
elif self._warn_about_deprecated_syntax:
self._emit_warning(
'{} has a load label "{}" that uses a deprecated cell format. '
'"{}" should instead be "@{}".'.format(
self._current_build_env.path, label, cell_name, cell_name
),
"load function",
)
else:
cell_name = self._current_build_env.cell_name
relative_path = match.group("package")
file_name = match.group("target")
label_root = match.group("root")
if not label_root:
# relative include. e.g. :foo.bzl
if "/" in file_name:
raise ValueError(
"Relative loads work only for files in the same directory. "
+ "Please use absolute label instead ([cell]//pkg[/pkg]:target)."
)
callee_dir = os.path.dirname(self._current_build_env.path)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(callee_dir, file_name)),
)
elif cell_name:
cell_root = self._cell_roots.get(cell_name)
if cell_root is None:
raise KeyError(
"load label {} references an unknown cell named {} "
"known cells: {!r}".format(label, cell_name, self._cell_roots)
)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(
os.path.join(cell_root, relative_path, file_name)
),
)
else:
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(
os.path.join(self._project_root, relative_path, file_name)
),
)
def _read_config(self, section, field, default=None):
# type: (str, str, Any) -> Any
"""
Lookup a setting from `.buckconfig`.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Lookup the value and record it in this build file's context.
key = section, field
value = self._configs.get(key)
if value is not None and not isinstance(value, str):
# Python 2 returns unicode values from parsed JSON configs, but
# only str types should be exposed to clients
value = value.encode("utf-8")
# replace raw values to avoid decoding for frequently used configs
self._configs[key] = value
build_env.used_configs[section][field] = value
# If no config setting was found, return the default.
if value is None:
return default
return value
def _implicit_package_symbol(self, symbol, default=None):
# type: (str, Any) -> Any
"""
Gives access to a symbol that has been implicitly loaded for the package of the
build file that is currently being evaluated. If the symbol was not present,
`default` will be returned.
"""
build_env = self._current_build_env
return build_env.implicit_package_symbols.get(symbol, default)
def _glob(
self,
includes,
excludes=None,
include_dotfiles=False,
search_base=None,
exclude=None,
):
assert exclude is None or excludes is None, (
"Mixing 'exclude' and 'excludes' attributes is not allowed. Please replace your "
"exclude and excludes arguments with a single 'excludes = %r'."
% (exclude + excludes)
)
excludes = excludes or exclude
build_env = self._current_build_env # type: BuildFileContext
return glob(
includes,
excludes=excludes,
include_dotfiles=include_dotfiles,
search_base=search_base,
build_env=build_env,
)
def _subdir_glob(self, glob_specs, excludes=None, prefix=None, search_base=None):
build_env = self._current_build_env
return subdir_glob(
glob_specs,
excludes=excludes,
prefix=prefix,
search_base=search_base,
build_env=build_env,
)
def _record_env_var(self, name, value):
# type: (str, Any) -> None
"""
Record a read of an environment variable.
This method is meant to wrap methods in `os.environ` when called from
any files or includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Lookup the value and record it in this build file's context.
build_env.used_env_vars[name] = value
def _called_from_project_file(self):
# type: () -> bool
"""
Returns true if the function was called from a project file.
"""
frame = get_caller_frame(skip=[__name__])
filename = inspect.getframeinfo(frame).filename
return is_in_dir(filename, self._project_root)
def _include_defs(self, is_implicit_include, name, namespace=None):
# type: (bool, str, Optional[str]) -> None
"""Pull the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._resolve_include(name)
inner_env, mod = self._process_include(build_include, is_implicit_include)
# Look up the caller's stack frame and merge the include's globals
# into it's symbol table.
frame = get_caller_frame(skip=["_functools", __name__])
if namespace is not None:
# If using a fresh namespace, create a fresh module to populate.
fresh_module = imp.new_module(namespace)
fresh_module.__file__ = mod.__file__
self._merge_globals(mod, fresh_module.__dict__)
frame.f_globals[namespace] = fresh_module
else:
self._merge_globals(mod, frame.f_globals)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
def _load(self, is_implicit_include, name, *symbols, **symbol_kwargs):
# type: (bool, str, *str, **str) -> None
"""Pull the symbols from the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
assert symbols or symbol_kwargs, "expected at least one symbol to load"
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._get_load_path(name)
inner_env, module = self._process_include(build_include, is_implicit_include)
# Look up the caller's stack frame and merge the include's globals
# into it's symbol table.
frame = get_caller_frame(skip=["_functools", __name__])
BuildFileProcessor._merge_explicit_globals(
module, frame.f_globals, symbols, symbol_kwargs
)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
def _load_package_implicit(self, build_env, package_implicit_load):
"""
Updates `build_env` to contain all symbols from `package_implicit_load`
Args:
build_env: The build environment on which to modify includes /
implicit_package_symbols properties
package_implicit_load: A dictionary with "load_path", the first part of the
a `load` statement, and "load_symbols", a dictionary
that works like the **symbols attribute of `load`
"""
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._get_load_path(package_implicit_load["load_path"])
inner_env, module = self._process_include(build_include, True)
# Validate that symbols that are requested explicitly by config are present
# in the .bzl file
for key, value in iteritems(package_implicit_load["load_symbols"]):
try:
build_env.implicit_package_symbols[key] = getattr(module, value)
except AttributeError:
raise BuildFileFailError(
"Could not find symbol '{}' in implicitly loaded extension '{}'".format(
value, package_implicit_load["load_path"]
)
)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
@staticmethod
def _provider(doc="", fields=None):
# type: (str, Union[List[str], Dict[str, str]]) -> Callable
"""Creates a declared provider factory.
The return value of this function can be used to create "struct-like"
values. Example:
SomeInfo = provider()
def foo():
return 3
info = SomeInfo(x = 2, foo = foo)
print(info.x + info.foo()) # prints 5
Optional fields can be used to restrict the set of allowed fields.
Example:
SomeInfo = provider(fields=["data"])
info = SomeInfo(data="data") # valid
info = SomeInfo(foo="bar") # runtime exception
"""
if fields:
return create_struct_class(fields)
return struct
def _add_build_file_dep(self, name):
# type: (str) -> None
"""
Explicitly specify a dependency on an external file.
For instance, this can be used to specify a dependency on an external
executable that will be invoked, or some other external configuration
file.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
cell_name, path = self._resolve_include(name)
build_env.includes.add(path)
@staticmethod
def _host_info():
return _cached_host_info
@contextlib.contextmanager
def _set_build_env(self, build_env):
# type: (AbstractContext) -> Iterator[None]
"""Set the given build context as the current context, unsetting it upon exit."""
old_env = self._current_build_env
self._current_build_env = build_env
self._update_functions(self._current_build_env)
try:
yield
finally:
self._current_build_env = old_env
self._update_functions(self._current_build_env)
def _emit_warning(self, message, source):
# type: (str, str) -> None
"""
Add a warning to the current build_env's diagnostics.
"""
if self._current_build_env is not None:
self._current_build_env.diagnostics.append(
Diagnostic(
message=message, level="warning", source=source, exception=None
)
)
@staticmethod
def _create_import_whitelist(project_import_whitelist):
# type: (List[str]) -> Set[str]
"""
Creates import whitelist by joining the global whitelist with the project specific one
defined in '.buckconfig'.
"""
global_whitelist = [
"copy",
"re",
"functools",
"itertools",
"json",
"hashlib",
"types",
"string",
"ast",
"__future__",
"collections",
"operator",
"fnmatch",
"copy_reg",
]
return set(global_whitelist + project_import_whitelist)
def _file_access_wrapper(self, real):
"""
Return wrapper around function so that accessing a file produces warning if it is
not a known dependency.
"""
@functools.wraps(real)
def wrapper(filename, *arg, **kwargs):
# Restore original 'open' because it is used by 'inspect.currentframe()' in
# '_called_from_project_file()'
with self._wrap_file_access(wrap=False):
if self._called_from_project_file():
path = os.path.abspath(filename)
if path not in self._current_build_env.includes:
dep_path = "//" + os.path.relpath(path, self._project_root)
warning_message = (
"Access to a non-tracked file detected! {0} is not a ".format(
path
)
+ "known dependency and it should be added using 'add_build_file_dep' "
+ "function before trying to access the file, e.g.\n"
+ "'add_build_file_dep('{0}')'\n".format(dep_path)
+ "The 'add_build_file_dep' function is documented at "
+ "https://buckbuild.com/function/add_build_file_dep.html\n"
)
self._emit_warning(warning_message, "sandboxing")
return real(filename, *arg, **kwargs)
# Save the real function for restoration.
wrapper._real = real
return wrapper
@contextlib.contextmanager
def _wrap_fun_for_file_access(self, obj, attr, wrap=True):
"""
Wrap a function to check if accessed files are known dependencies.
"""
real = getattr(obj, attr)
if wrap:
# Don't wrap again
if not hasattr(real, "_real"):
wrapped = self._file_access_wrapper(real)
setattr(obj, attr, wrapped)
elif hasattr(real, "_real"):
# Restore real function if it was wrapped
setattr(obj, attr, real._real)
try:
yield
finally:
setattr(obj, attr, real)
def _wrap_file_access(self, wrap=True):
"""
Wrap 'open' so that they it checks if accessed files are known dependencies.
If 'wrap' is equal to False, restore original function instead.
"""
return self._wrap_fun_for_file_access(builtins, "open", wrap)
@contextlib.contextmanager
def _build_file_sandboxing(self):
"""
Creates a context that sandboxes build file processing.
"""
with self._wrap_file_access():
with self._import_whitelist_manager.allow_unsafe_import(False):
yield
@traced(stats_key="Process")
def _process(self, build_env, path, is_implicit_include, package_implicit_load):
# type: (_GCT, str, bool, Optional[LoadStatement]) -> Tuple[_GCT, types.ModuleType]
"""Process a build file or include at the given path.
:param build_env: context of the file to process.
:param path: target-like path to the file to process.
:param is_implicit_include: whether the file being processed is an implicit include, or was
included from an implicit include.
:package_implicit_load: if provided, a dictionary containing the path to
load for this given package, and the symbols to load
from that .bzl file.
:returns: build context (potentially different if retrieved from cache) and loaded module.
"""
if isinstance(build_env, IncludeContext):
default_globals = (
self._default_globals_for_implicit_include
if is_implicit_include
else self._default_globals_for_extension
)
else:
default_globals = self._default_globals_for_build_file
emit_trace(path)
# Install the build context for this input as the current context.
with self._set_build_env(build_env):
# Don't include implicit includes if the current file being
# processed is an implicit include
if not is_implicit_include:
for include in self._implicit_includes:
build_include = self._resolve_include(include)
inner_env, mod = self._process_include(build_include, True)
self._merge_globals(mod, default_globals)
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
if package_implicit_load:
self._load_package_implicit(build_env, package_implicit_load)
# Build a new module for the given file, using the default globals
# created above.
module = imp.new_module(path)
module.__file__ = path
module.__dict__.update(default_globals)
# We don't open this file as binary, as we assume it's a textual source
# file.
with scoped_trace("IO", stats_key="IO"):
with self._wrap_file_access(wrap=False):
with open(path, "r") as f:
contents = f.read()
with scoped_trace("Compile", stats_key="Compile"):
# Enable absolute imports. This prevents the compiler from
# trying to do a relative import first, and warning that
# this module doesn't exist in sys.modules.
future_features = absolute_import.compiler_flag
code = compile(contents, path, "exec", future_features, 1)
# Execute code with build file sandboxing
with self._build_file_sandboxing():
exec(code, module.__dict__)
return build_env, module
def _process_include(self, build_include, is_implicit_include):
# type: (BuildInclude, bool) -> Tuple[AbstractContext, types.ModuleType]
"""Process the include file at the given path.
:param build_include: build include metadata (cell_name and path).
:param is_implicit_include: whether the file being processed is an implicit include, or was
included from an implicit include.
"""
# First check the cache.
cached = self._include_cache.get(build_include.path)
if cached is not None:
return cached
build_env = IncludeContext(
cell_name=build_include.cell_name, path=build_include.path
)
build_env, mod = self._process(
build_env,
build_include.path,
is_implicit_include=is_implicit_include,
package_implicit_load=None,
)
self._include_cache[build_include.path] = build_env, mod
return build_env, mod
def _process_build_file(
self, watch_root, project_prefix, path, package_implicit_load
):
# type: (str, str, str, Optional[LoadStatement]) -> Tuple[BuildFileContext, types.ModuleType]
"""Process the build file at the given path."""
# Create the build file context, including the base path and directory
# name of the given path.
relative_path_to_build_file = os.path.relpath(path, self._project_root).replace(
"\\", "/"
)
len_suffix = -len(self._build_file_name) - 1
base_path = relative_path_to_build_file[:len_suffix]
dirname = os.path.dirname(path)
build_env = BuildFileContext(
self._project_root,
base_path,
path,
dirname,
self._cell_name,
self._allow_empty_globs,
self._ignore_paths,
self._watchman_client,
watch_root,
project_prefix,
self._sync_cookie_state,
self._watchman_glob_stat_results,
self._watchman_use_glob_generator,
{},
)
return self._process(
build_env,
path,
is_implicit_include=False,
package_implicit_load=package_implicit_load,
)
def process(
self, watch_root, project_prefix, path, diagnostics, package_implicit_load
):
# type: (str, Optional[str], str, List[Diagnostic], Optional[LoadStatement]) -> List[Dict[str, Any]]
"""Process a build file returning a dict of its rules and includes."""
build_env, mod = self._process_build_file(
watch_root,
project_prefix,
os.path.join(self._project_root, path),
package_implicit_load=package_implicit_load,
)
# Initialize the output object to a map of the parsed rules.
values = list(itervalues(build_env.rules))
# Add in tracked included files as a special meta rule.
values.append({"__includes": [path] + sorted(build_env.includes)})
# Add in tracked used config settings as a special meta rule.
values.append({"__configs": build_env.used_configs})
# Add in used environment variables as a special meta rule.
values.append({"__env": build_env.used_env_vars})
diagnostics.extend(build_env.diagnostics)
return values
class InvalidSignatureError(Exception):
pass
def format_traceback(tb):
formatted = []
for entry in traceback.extract_tb(tb):
(filename, line_number, function_name, text) = entry
formatted.append(
{
"filename": filename,
"line_number": line_number,
"function_name": function_name,
"text": text,
}
)
return formatted
def format_exception_info(exception_info):
(exc_type, exc_value, exc_traceback) = exception_info
formatted = {
"type": exc_type.__name__,
"value": str(exc_value),
"traceback": format_traceback(exc_traceback),
}
if exc_type is SyntaxError:
formatted["filename"] = exc_value.filename
formatted["lineno"] = exc_value.lineno
formatted["offset"] = exc_value.offset
formatted["text"] = exc_value.text
return formatted
def encode_result(values, diagnostics, profile):
# type: (List[Dict[str, object]], List[Diagnostic], Optional[str]) -> str
result = {
"values": [
{k: v for k, v in iteritems(value) if v is not None} for value in values
]
}
json_encoder = BuckJSONEncoder()
if diagnostics:
encoded_diagnostics = []
for d in diagnostics:
encoded = {"message": d.message, "level": d.level, "source": d.source}
if d.exception:
encoded["exception"] = format_exception_info(d.exception)
encoded_diagnostics.append(encoded)
result["diagnostics"] = encoded_diagnostics
if profile is not None:
result["profile"] = profile
try:
return json_encoder.encode(result)
except Exception as e:
# Try again without the values
result["values"] = []
if "diagnostics" not in result:
result["diagnostics"] = []
result["diagnostics"].append(
{
"message": str(e),
"level": "fatal",
"source": "parse",
"exception": format_exception_info(sys.exc_info()),
}
)
return json_encoder.encode(result)
def process_with_diagnostics(build_file_query, build_file_processor, to_parent):
start_time = time.time()
build_file = build_file_query.get("buildFile")
watch_root = build_file_query.get("watchRoot")
project_prefix = build_file_query.get("projectPrefix")
package_implicit_load = build_file_query.get("packageImplicitLoad")
build_file = cygwin_adjusted_path(build_file)
watch_root = cygwin_adjusted_path(watch_root)
if project_prefix is not None:
project_prefix = cygwin_adjusted_path(project_prefix)
diagnostics = []
values = []
try:
values = build_file_processor.process(
watch_root,
project_prefix,
build_file,
diagnostics=diagnostics,
package_implicit_load=package_implicit_load,
)
except BaseException as e:
# sys.exit() don't emit diagnostics.
if e is not SystemExit:
if isinstance(e, WatchmanError):
source = "watchman"
message = e.msg
else:
source = "parse"
message = str(e)
diagnostics.append(
Diagnostic(
message=message,
level="fatal",
source=source,
exception=sys.exc_info(),
)
)
raise
finally:
java_process_send_result(to_parent, values, diagnostics, None)
end_time = time.time()
return end_time - start_time
def java_process_send_result(to_parent, values, diagnostics, profile_result):
"""Sends result to the Java process"""
data = encode_result(values, diagnostics, profile_result)
if PY3:
# in Python 3 write expects bytes instead of string
data = data.encode("utf-8")
to_parent.write(data)
to_parent.flush()
def silent_excepthook(exctype, value, tb):
# We already handle all exceptions by writing them to the parent, so
# no need to dump them again to stderr.
pass
def _optparse_store_kv(option, opt_str, value, parser):
"""Optparse option callback which parses input as K=V, and store into dictionary.
:param optparse.Option option: Option instance
:param str opt_str: string representation of option flag
:param str value: argument value
:param optparse.OptionParser parser: parser instance
"""
result = value.split("=", 1)
if len(result) != 2:
raise optparse.OptionError(
"Expected argument of to be in the form of X=Y".format(opt_str), option
)
(k, v) = result
# Get or create the dictionary
dest_dict = getattr(parser.values, option.dest)
if dest_dict is None:
dest_dict = {}
setattr(parser.values, option.dest, dest_dict)
dest_dict[k] = v
# Inexplicably, this script appears to run faster when the arguments passed
# into it are absolute paths. However, we want the "buck.base_path" property
# of each rule to be printed out to be the base path of the build target that
# identifies the rule. That means that when parsing a BUCK file, we must know
# its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to
# the project root. It must be followed by one or more absolute paths to
# BUCK files under the project root. If no paths to BUCK files are
# specified, then it will traverse the project root for BUCK files, excluding
# directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUCK files will be printed
# to stdout encoded in JSON. That means that printing out other information
# for debugging purposes will break the JSON encoding, so be careful!
def main():
# Our parent expects to read JSON from our stdout, so if anyone
# uses print, buck will complain with a helpful "but I wanted an
# array!" message and quit. Redirect stdout to stderr so that
# doesn't happen. Actually dup2 the file handle so that writing
# to file descriptor 1, os.system, and so on work as expected too.
# w instead of a mode is used because of https://bugs.python.org/issue27805
to_parent = os.fdopen(os.dup(sys.stdout.fileno()), "wb")
os.dup2(sys.stderr.fileno(), sys.stdout.fileno())
parser = optparse.OptionParser()
parser.add_option(
"--project_root", action="store", type="string", dest="project_root"
)
parser.add_option(
"--cell_root",
action="callback",
type="string",
dest="cell_roots",
metavar="NAME=PATH",
help="Cell roots that can be referenced by includes.",
callback=_optparse_store_kv,
default={},
)
parser.add_option("--cell_name", action="store", type="string", dest="cell_name")
parser.add_option(
"--build_file_name", action="store", type="string", dest="build_file_name"
)
parser.add_option(
"--allow_empty_globs",
action="store_true",
dest="allow_empty_globs",
help="Tells the parser not to raise an error when glob returns no results.",
)
parser.add_option(
"--use_watchman_glob",
action="store_true",
dest="use_watchman_glob",
help="Invokes `watchman query` to get lists of files instead of globbing in-process.",
)
parser.add_option(
"--watchman_use_glob_generator",
action="store_true",
dest="watchman_use_glob_generator",
help="Uses Watchman glob generator to speed queries",
)
parser.add_option(
"--watchman_glob_stat_results",
action="store_true",
dest="watchman_glob_stat_results",
help="Invokes `stat()` to sanity check result of `watchman query`.",
)
parser.add_option(
"--watchman_socket_path",
action="store",
type="string",
dest="watchman_socket_path",
help="Path to Unix domain socket/named pipe as returned by `watchman get-sockname`.",
)
parser.add_option(
"--watchman_query_timeout_ms",
action="store",
type="int",
dest="watchman_query_timeout_ms",
help="Maximum time in milliseconds to wait for watchman query to respond.",
)
parser.add_option("--include", action="append", dest="include")
parser.add_option("--config", help="BuckConfig settings available at parse time.")
parser.add_option("--ignore_paths", help="Paths that should be ignored.")
parser.add_option(
"--quiet",
action="store_true",
dest="quiet",
help="Stifles exception backtraces printed to stderr during parsing.",
)
parser.add_option(
"--profile", action="store_true", help="Profile every buck file execution"
)
parser.add_option(
"--build_file_import_whitelist",
action="append",
dest="build_file_import_whitelist",
)
parser.add_option(
"--disable_implicit_native_rules",
action="store_true",
help="Do not allow native rules in build files, only included ones",
)
parser.add_option(
"--warn_about_deprecated_syntax",
action="store_true",
help="Warn about deprecated syntax usage.",
)
(options, args) = parser.parse_args()
# Even though project_root is absolute path, it may not be concise. For
# example, it might be like "C:\project\.\rule".
#
# Under cygwin, the project root will be invoked from buck as C:\path, but
# the cygwin python uses UNIX-style paths. They can be converted using
# cygpath, which is necessary because abspath will treat C:\path as a
# relative path.
options.project_root = cygwin_adjusted_path(options.project_root)
project_root = os.path.abspath(options.project_root)
cell_roots = {
k: os.path.abspath(cygwin_adjusted_path(v))
for k, v in iteritems(options.cell_roots)
}
watchman_client = None
if options.use_watchman_glob:
client_args = {"sendEncoding": "json", "recvEncoding": "json"}
if options.watchman_query_timeout_ms is not None:
# pywatchman expects a timeout as a nonnegative floating-point
# value in seconds.
client_args["timeout"] = max(
0.0, options.watchman_query_timeout_ms / 1000.0
)
else:
client_args["timeout"] = DEFAULT_WATCHMAN_QUERY_TIMEOUT
if options.watchman_socket_path is not None:
client_args["sockpath"] = options.watchman_socket_path
client_args["transport"] = "local"
watchman_client = pywatchman.client(**client_args)
configs = {}
if options.config is not None:
with open(options.config, "rb") as f:
for section, contents in iteritems(json.load(f)):
for field, value in iteritems(contents):
configs[(section, field)] = value
ignore_paths = []
if options.ignore_paths is not None:
with open(options.ignore_paths, "rb") as f:
ignore_paths = [make_glob(i) for i in json.load(f)]
build_file_processor = BuildFileProcessor(
project_root,
cell_roots,
options.cell_name,
options.build_file_name,
options.allow_empty_globs,
watchman_client,
options.watchman_glob_stat_results,
options.watchman_use_glob_generator,
project_import_whitelist=options.build_file_import_whitelist or [],
implicit_includes=options.include or [],
configs=configs,
ignore_paths=ignore_paths,
disable_implicit_native_rules=options.disable_implicit_native_rules,
warn_about_deprecated_syntax=options.warn_about_deprecated_syntax,
)
# While processing, we'll write exceptions as diagnostic messages
# to the parent then re-raise them to crash the process. While
# doing so, we don't want Python's default unhandled exception
# behavior of writing to stderr.
orig_excepthook = None
if options.quiet:
orig_excepthook = sys.excepthook
sys.excepthook = silent_excepthook
# Process the build files with the env var interceptors and builtins
# installed.
with build_file_processor.with_env_interceptors():
with build_file_processor.with_builtins(builtins.__dict__):
processed_build_file = []
profiler = None
if options.profile:
profiler = Profiler(True)
profiler.start()
Tracer.enable()
for build_file in args:
query = {
"buildFile": build_file,
"watchRoot": project_root,
"projectPrefix": project_root,
}
duration = process_with_diagnostics(
query, build_file_processor, to_parent
)
processed_build_file.append(
{"buildFile": build_file, "duration": duration}
)
# From https://docs.python.org/2/using/cmdline.html :
#
# Note that there is internal buffering in file.readlines()
# and File Objects (for line in sys.stdin) which is not
# influenced by this option. To work around this, you will
# want to use file.readline() inside a while 1: loop.
for line in wait_and_read_build_file_query():
if line == "":
break
build_file_query = json.loads(line)
if build_file_query.get("command") == "report_profile":
report_profile(options, to_parent, processed_build_file, profiler)
else:
duration = process_with_diagnostics(
build_file_query, build_file_processor, to_parent
)
processed_build_file.append(
{
"buildFile": build_file_query["buildFile"],
"duration": duration,
}
)
if options.quiet:
sys.excepthook = orig_excepthook
# Python tries to flush/close stdout when it quits, and if there's a dead
# pipe on the other end, it will spit some warnings to stderr. This breaks
# tests sometimes. Prevent that by explicitly catching the error.
try:
to_parent.close()
except IOError:
pass
def wait_build_file_query():
_select([sys.stdin], [], [])
def wait_and_read_build_file_query():
def default_wait():
return
wait = default_wait
if sys.platform != "win32":
# wait_build_file_query() is useful to attribute time waiting for queries.
# Since select.select() is not supported on Windows, we currently don't have
# a reliable way to measure it on this platform. Then, we skip it.
wait = wait_build_file_query
while True:
wait()
line = sys.stdin.readline()
if not line:
return
yield line
def report_profile(options, to_parent, processed_build_file, profiler):
if options.profile:
try:
profiler.stop()
profile_result = profiler.generate_report()
extra_result = "Total: {:.2f} sec\n\n\n".format(profiler.total_time)
extra_result += "# Parsed {} files".format(len(processed_build_file))
processed_build_file.sort(
key=lambda current_child: current_child["duration"], reverse=True
)
# Only show the top ten buck files
if len(processed_build_file) > 10:
processed_build_file = processed_build_file[:10]
extra_result += ", {} slower BUCK files:\n".format(
len(processed_build_file)
)
else:
extra_result += "\n"
for info in processed_build_file:
extra_result += "Parsed {}: {:.2f} sec \n".format(
info["buildFile"], info["duration"]
)
extra_result += "\n\n"
profile_result = extra_result + profile_result
profile_result += Tracer.get_all_traces_and_reset()
java_process_send_result(to_parent, [], [], profile_result)
except Exception:
trace = traceback.format_exc()
print(str(trace))
raise
else:
java_process_send_result(to_parent, [], [], None)
def make_glob(pat):
# type: (str) -> str
if is_special(pat):
return pat
return pat + "/**"
# import autogenerated rule instances for effect.
try:
import generated_rules
except ImportError:
# If running directly or python tests of this code, this is not an error.
sys.stderr.write("Failed to load buck generated rules module.\n")
| apache-2.0 | 5,986,481,487,547,775,000 | 35.159926 | 108 | 0.600231 | false | 4.17118 | true | false | false |
yapdns/yapdns-client | vendor/github.com/elastic/beats/filebeat/tests/system/test_fields.py | 6 | 2499 | from filebeat import BaseTest
import os
import socket
"""
Tests for the custom fields functionality.
"""
class Test(BaseTest):
def test_custom_fields(self):
"""
Tests that custom fields show up in the output dict.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/test.log",
fields={"hello": "world", "number": 2}
)
with open(self.working_dir + "/test.log", "w") as f:
f.write("test message\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=1))
filebeat.check_kill_and_wait()
output = self.read_output()
doc = output[0]
assert doc["fields.hello"] == "world"
assert doc["fields.number"] == 2
def test_custom_fields_under_root(self):
"""
Tests that custom fields show up in the output dict under
root when fields_under_root option is used.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/test.log",
fields={
"hello": "world",
"type": "log2",
"timestamp": "2"
},
fieldsUnderRoot=True
)
with open(self.working_dir + "/test.log", "w") as f:
f.write("test message\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=1))
filebeat.check_kill_and_wait()
output = self.read_output()
doc = output[0]
print doc
assert doc["hello"] == "world"
assert doc["type"] == "log2"
assert doc["timestamp"] == 2
assert "fields" not in doc
def test_beat_fields(self):
"""
Checks that it's possible to set a custom shipper name. Also
tests that beat.hostname has values.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/test.log",
shipperName="testShipperName"
)
with open(self.working_dir + "/test.log", "w") as f:
f.write("test message\n")
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=1))
filebeat.check_kill_and_wait()
output = self.read_output()
doc = output[0]
assert doc["beat.name"] == "testShipperName"
assert doc["beat.hostname"] == socket.gethostname()
assert "fields" not in doc
| mit | 4,191,973,657,852,844,500 | 28.75 | 68 | 0.55102 | false | 3.815267 | true | false | false |
aldro61/kover | core/kover/learning/experiments/experiment_scm.py | 1 | 31666 | #!/usr/bin/env python
"""
Kover: Learn interpretable computational phenotyping models from k-merized genomic data
Copyright (C) 2015 Alexandre Drouin
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import numpy as np
from collections import defaultdict
from copy import deepcopy
from functools import partial
from itertools import product
from math import exp, log as ln, pi
from multiprocessing import Pool, cpu_count
from scipy.misc import comb
from ...dataset.ds import KoverDataset
from ..common.models import ConjunctionModel, DisjunctionModel
from ..common.rules import LazyKmerRuleList, KmerRuleClassifications
from ..learners.scm import SetCoveringMachine
from ...utils import _duplicate_last_element, _unpack_binary_bytes_from_ints, _parse_kmer_blacklist
from ..experiments.metrics import _get_binary_metrics
def _predictions(model, kmer_matrix, train_example_idx, test_example_idx, progress_callback=None):
"""Computes predictions by loading only the columns of the kmer matrix that are targetted by the model.
Parameters
----------
model: BaseModel
The model used for predicting.
kmer_matrix: BaseRuleClassifications
The matrix containing the classifications of each rule on each learning example.
train_example_idx: array-like, dtype=uint
The index of the rows of kmer_matrix corresponding to the training examples.
test_example_idx: array-like, dtype=uint
The index of the rows of kmer_matrix corresponding to the testing examples.
progress_callback: function with arguments task, percent_completed
A callback function used to keep track of the task's completion.
"""
if progress_callback is None:
progress_callback = lambda t, p: None
progress_callback("Testing", 0.0)
# We use h5py to load only the columns of the k-mer matrix targeted by the model. The indices passed to h5py need
# to be sorted. We change the kmer_idx of the rules in the model to be 0 ... n_rules, with the rule that initially had
# the smallest kmer_idx pointing to 0 and the one with the largest kmer_idx pointing to n_rules. We then load only
# the appropriate columns and apply the readdressed model to the data (in RAM).
columns_to_load = []
readdressed_model = deepcopy(model)
for i, rule_idx in enumerate(np.argsort([r.kmer_index for r in model.rules])):
rule = readdressed_model.rules[rule_idx]
columns_to_load.append(rule.kmer_index)
rule.kmer_index = i
# Load the columns targeted by the model and make predictions using the readdressed model
X = _unpack_binary_bytes_from_ints(kmer_matrix[:, columns_to_load])
train_predictions = readdressed_model.predict(X[train_example_idx])
progress_callback("Testing", 1.0 * len(train_example_idx) / (len(train_example_idx) + len(test_example_idx)))
test_predictions = readdressed_model.predict(X[test_example_idx])
progress_callback("Testing", 1.0)
return train_predictions, test_predictions
def _cv_score_hp(hp_values, max_rules, dataset_file, split_name, rule_blacklist):
model_type = hp_values[0]
p = hp_values[1]
dataset = KoverDataset(dataset_file)
folds = dataset.get_split(split_name).folds
rules = LazyKmerRuleList(dataset.kmer_sequences, dataset.kmer_by_matrix_column)
rule_classifications = KmerRuleClassifications(dataset.kmer_matrix, dataset.genome_count)
def _iteration_callback(iteration_infos, tmp_model, test_predictions_by_model_length, test_example_idx):
tmp_model.add(iteration_infos["selected_rule"])
_, test_predictions = _predictions(tmp_model, dataset.kmer_matrix, [], test_example_idx)
test_predictions_by_model_length.append(test_predictions)
def _tiebreaker(best_utility_idx, rule_risks, model_type):
logging.debug("There are %d candidate rules." % len(best_utility_idx))
tie_rule_risks = rule_risks[best_utility_idx]
if model_type == "conjunction":
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.min())]
else:
# Use max instead of min, since in the disjunction case the risks = 1.0 - conjunction risks (inverted ys)
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.max())]
return result
fold_score_by_model_length = np.zeros((len(folds), max_rules))
for i, fold in enumerate(folds):
logging.debug("Fold: %s" % fold.name)
rule_risks = np.hstack((fold.unique_risk_by_kmer[...],
fold.unique_risk_by_anti_kmer[...])) # Too bad that we need to load each time. Maybe invert the loops (all hp for each fold)
train_example_idx = fold.train_genome_idx
test_example_idx = fold.test_genome_idx
positive_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 1].reshape(-1)
negative_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 0].reshape(-1)
tiebreaker = partial(_tiebreaker, rule_risks=rule_risks, model_type=model_type)
test_predictions_by_model_length = []
tmp_model = ConjunctionModel() if model_type == "conjunction" else DisjunctionModel()
iteration_callback = partial(_iteration_callback,
tmp_model=tmp_model,
test_predictions_by_model_length=test_predictions_by_model_length,
test_example_idx=test_example_idx)
predictor = SetCoveringMachine(model_type=model_type, p=p, max_rules=max_rules)
predictor.fit(rules=rules,
rule_classifications=rule_classifications,
positive_example_idx=positive_example_idx,
negative_example_idx=negative_example_idx,
rule_blacklist=rule_blacklist,
tiebreaker=tiebreaker,
iteration_callback=iteration_callback)
test_predictions_by_model_length = np.array(_duplicate_last_element(test_predictions_by_model_length, max_rules))
fold_score_by_model_length[i] = _get_binary_metrics(predictions=test_predictions_by_model_length,
answers=dataset.phenotype.metadata[test_example_idx])["risk"]
score_by_model_length = np.mean(fold_score_by_model_length, axis=0)
best_score_idx = np.argmin(score_by_model_length)
best_hp_score = score_by_model_length[best_score_idx]
best_model_length = best_score_idx + 1
return (model_type, p, best_model_length), best_hp_score
def _cross_validation(dataset_file, split_name, model_types, p_values, max_rules, rule_blacklist,
n_cpu, progress_callback, warning_callback, error_callback):
"""
Returns the best parameter combination and its cv score
"""
n_hp_combinations = len(model_types) * len(p_values)
logging.debug("There are %d hyperparameter combinations to try." % n_hp_combinations)
logging.debug("Using %d CPUs." % n_cpu)
pool = Pool(processes=n_cpu)
hp_eval_func = partial(_cv_score_hp,
dataset_file=dataset_file,
split_name=split_name,
max_rules=max_rules,
rule_blacklist=rule_blacklist)
best_hp_score = 1.0
best_hp = {"model_type": None, "p": None, "max_rules": None}
n_completed = 0.0
progress_callback("Cross-validation", 0.0)
for hp, score in pool.imap_unordered(hp_eval_func, product(model_types, p_values)):
n_completed += 1
progress_callback("Cross-validation", n_completed / n_hp_combinations)
if (not np.allclose(score, best_hp_score) and score < best_hp_score) or \
(np.allclose(score, best_hp_score) and hp[2] < best_hp["max_rules"]) or \
(np.allclose(score, best_hp_score) and hp[2] == best_hp["max_rules"] and not np.allclose(hp[1], best_hp["p"]) and \
abs(1.0 - hp[1]) < abs(1.0 - best_hp["p"])):
best_hp["model_type"] = hp[0]
best_hp["p"] = hp[1]
best_hp["max_rules"] = hp[2]
best_hp_score = score
return best_hp_score, best_hp
def _full_train(dataset, split_name, model_type, p, max_rules, max_equiv_rules, rule_blacklist, random_generator, progress_callback):
full_train_progress = {"n_rules": 0.0}
def _iteration_callback(iteration_infos, model_type, equivalent_rules):
full_train_progress["n_rules"] += 1
progress_callback("Training", full_train_progress["n_rules"] / max_rules)
# Ensure that there are no more equivalent rules than the specified maximum
if len(iteration_infos["equivalent_rules_idx"]) > max_equiv_rules:
logging.debug("There are more equivalent rules than the allowed maximum. Subsampling %d rules." % max_equiv_rules)
random_idx = random_generator.choice(len(iteration_infos["equivalent_rules_idx"]), max_equiv_rules,
replace=False)
random_idx.sort()
iteration_infos["equivalent_rules_idx"] = iteration_infos["equivalent_rules_idx"][random_idx]
# Adjust and store the equivalent rule indices
if model_type == "disjunction":
n_kmers = rule_classifications.shape[1] / 2
iteration_infos["equivalent_rules_idx"] += n_kmers
iteration_infos["equivalent_rules_idx"] %= (2 * n_kmers)
equivalent_rules.append(iteration_infos["equivalent_rules_idx"])
else:
equivalent_rules.append(iteration_infos["equivalent_rules_idx"])
def _tiebreaker(best_utility_idx, rule_risks, model_type):
logging.debug("There are %d candidate rules." % len(best_utility_idx))
tie_rule_risks = rule_risks[best_utility_idx]
if model_type == "conjunction":
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.min())]
else:
# Use max instead of min, since in the disjunction case the risks = 1.0 - conjunction risks (inverted ys)
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.max())]
return result
rules = LazyKmerRuleList(dataset.kmer_sequences, dataset.kmer_by_matrix_column)
rule_classifications = KmerRuleClassifications(dataset.kmer_matrix, dataset.genome_count)
split = dataset.get_split(split_name)
train_example_idx = split.train_genome_idx
positive_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 1].reshape(-1)
negative_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 0].reshape(-1)
model_equivalent_rules = []
predictor = SetCoveringMachine(model_type=model_type, p=p, max_rules=max_rules)
progress_callback("Training", 0)
predictor.fit(rules=rules,
rule_classifications=rule_classifications,
positive_example_idx=positive_example_idx,
negative_example_idx=negative_example_idx,
rule_blacklist=rule_blacklist,
tiebreaker=partial(_tiebreaker,
rule_risks=np.hstack((split.unique_risk_by_kmer[...],
split.unique_risk_by_anti_kmer[...])),
model_type=model_type),
iteration_callback=partial(_iteration_callback,
model_type=model_type,
equivalent_rules=model_equivalent_rules))
return predictor.model, predictor.rule_importances, model_equivalent_rules
def _bound(train_predictions, train_answers, train_example_idx, model, delta, max_genome_size, rule_classifications):
# Construct the smallest possible compression set (Chvatal greedy approx for minimum set cover)
logging.debug("Constructing the compression set.")
presence_by_example = rule_classifications.get_columns([r.kmer_index for r in model])[train_example_idx]
compression_set = []
while presence_by_example.shape[1] != 0:
score = presence_by_example.sum(axis=1)
best_example_relative_idx = np.argmax(score)
compression_set.append(best_example_relative_idx)
presence_by_example = presence_by_example[:, presence_by_example[best_example_relative_idx] == 0]
logging.debug("The compression set contains %d examples." % len(compression_set))
# Compute the bound value
logging.debug("Computing the bound value.")
h_card = float(len(model))
Z_card = float(len(compression_set) * max_genome_size)
m = float(len(train_answers))
mz = float(len(compression_set))
r = float((train_predictions != train_answers).sum() - (train_predictions[compression_set] != train_answers[compression_set]).sum())
return 1.0 - exp((-1.0 / (m - mz - r)) * (ln(comb(m, mz, exact=True)) +
ln(comb(m - mz, r, exact=True)) +
h_card * ln(2 * Z_card) +
ln(pi**6 *
(h_card + 1)**2 *
(r + 1)**2 *
(mz + 1)**2 /
(216 * delta))))
def _bound_score_hp(hp_values, max_rules, dataset_file, split_name, max_equiv_rules, rule_blacklist,
bound_delta, bound_max_genome_size, random_generator):
model_type = hp_values[0]
p = hp_values[1]
dataset = KoverDataset(dataset_file)
rules = LazyKmerRuleList(dataset.kmer_sequences, dataset.kmer_by_matrix_column)
rule_classifications = KmerRuleClassifications(dataset.kmer_matrix, dataset.genome_count)
def _iteration_callback(iteration_infos, tmp_model, train_example_idx, train_answers, score_by_length,
model_by_length, equivalent_rules, rule_importances, rule_classifications):
tmp_model.add(iteration_infos["selected_rule"])
model_by_length.append(deepcopy(tmp_model))
rule_importances.append(iteration_infos["rule_importances"])
# Store equivalent rules
# Ensure that there are no more equivalent rules than the specified maximum
if len(iteration_infos["equivalent_rules_idx"]) > max_equiv_rules:
logging.debug("There are more equivalent rules than the allowed maximum. Subsampling %d rules." % max_equiv_rules)
random_idx = random_generator.choice(len(iteration_infos["equivalent_rules_idx"]), max_equiv_rules,
replace=False)
random_idx.sort()
iteration_infos["equivalent_rules_idx"] = iteration_infos["equivalent_rules_idx"][random_idx]
# Adjust and store the equivalent rule indices
if model_type == "disjunction":
n_kmers = rule_classifications.shape[1] / 2
iteration_infos["equivalent_rules_idx"] += n_kmers
iteration_infos["equivalent_rules_idx"] %= (2 * n_kmers)
equivalent_rules.append(iteration_infos["equivalent_rules_idx"])
else:
equivalent_rules.append(iteration_infos["equivalent_rules_idx"])
# Compute the bound value for the current model length
_, train_predictions = _predictions(tmp_model, dataset.kmer_matrix, [], train_example_idx)
score_by_length[iteration_infos["iteration_number"] - 1] = _bound(train_predictions=train_predictions,
train_answers=train_answers,
train_example_idx=train_example_idx,
model=tmp_model,
delta=bound_delta,
max_genome_size=bound_max_genome_size,
rule_classifications=rule_classifications)
def _tiebreaker(best_utility_idx, rule_risks, model_type):
logging.debug("There are %d candidate rules." % len(best_utility_idx))
tie_rule_risks = rule_risks[best_utility_idx]
if model_type == "conjunction":
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.min())]
else:
# Use max instead of min, since in the disjunction case the risks = 1.0 - conjunction risks (inverted ys)
result = best_utility_idx[np.isclose(tie_rule_risks, tie_rule_risks.max())]
return result
split = dataset.get_split(split_name)
rule_risks = np.hstack((split.unique_risk_by_kmer[...],
split.unique_risk_by_anti_kmer[...]))
train_example_idx = split.train_genome_idx
positive_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 1].reshape(-1)
negative_example_idx = train_example_idx[dataset.phenotype.metadata[train_example_idx] == 0].reshape(-1)
train_answers = dataset.phenotype.metadata[train_example_idx]
tiebreaker = partial(_tiebreaker, rule_risks=rule_risks, model_type=model_type)
tmp_model = ConjunctionModel() if model_type == "conjunction" else DisjunctionModel()
score_by_length = np.ones(max_rules)
model_by_length = []
equivalent_rules = []
rule_importances = []
iteration_callback = partial(_iteration_callback,
tmp_model=tmp_model,
train_example_idx=train_example_idx,
train_answers=train_answers,
score_by_length=score_by_length,
model_by_length=model_by_length,
equivalent_rules=equivalent_rules,
rule_importances=rule_importances,
rule_classifications=rule_classifications)
predictor = SetCoveringMachine(model_type=model_type, p=p, max_rules=max_rules)
predictor.fit(rules=rules,
rule_classifications=rule_classifications,
positive_example_idx=positive_example_idx,
negative_example_idx=negative_example_idx,
rule_blacklist=rule_blacklist,
tiebreaker=tiebreaker,
iteration_callback=iteration_callback,
iteration_rule_importances=True)
best_score_idx = np.argmin(score_by_length)
best_hp_score = score_by_length[best_score_idx]
best_model = model_by_length[best_score_idx]
best_rule_importances = rule_importances[best_score_idx]
best_equivalent_rules = equivalent_rules[: best_score_idx + 1]
best_model_length = best_score_idx + 1
return (model_type, p, best_model_length), best_hp_score, best_model, best_rule_importances, best_equivalent_rules
def _bound_selection(dataset_file, split_name, model_types, p_values, max_rules, max_equiv_rules, rule_blacklist,
bound_delta, bound_max_genome_size, n_cpu, random_generator, progress_callback, warning_callback,
error_callback):
n_hp_combinations = len(model_types) * len(p_values)
logging.debug("There are %d hyperparameter combinations to try." % n_hp_combinations)
logging.debug("Using %d CPUs." % n_cpu)
pool = Pool(processes=n_cpu)
hp_eval_func = partial(_bound_score_hp,
dataset_file=dataset_file,
split_name=split_name,
max_rules=max_rules,
max_equiv_rules=max_equiv_rules,
rule_blacklist=rule_blacklist,
bound_delta=bound_delta,
bound_max_genome_size=bound_max_genome_size,
random_generator=random_generator)
best_hp_score = 1.0
best_hp = {"model_type": None, "p": None, "max_rules": None}
n_completed = 0.0
progress_callback("Bound selection", 0.0)
for hp, score, model, rule_importances, equiv_rules in pool.imap_unordered(hp_eval_func,
product(model_types, p_values)):
n_completed += 1
progress_callback("Bound selection", n_completed / n_hp_combinations)
if (score < best_hp_score) or \
(score == best_hp_score and hp[2] < best_hp["max_rules"]) or \
(score == best_hp_score and hp[2] == best_hp["max_rules"] and abs(1.0 - hp[1]) < abs(1.0 - best_hp["p"])):
best_hp["model_type"] = hp[0]
best_hp["p"] = hp[1]
best_hp["max_rules"] = hp[2]
best_hp_score = score
best_model = model
best_equiv_rules = equiv_rules
best_rule_importances = rule_importances
return best_hp_score, best_hp, best_model, best_rule_importances, best_equiv_rules
def _find_rule_blacklist(dataset_file, kmer_blacklist_file, warning_callback):
"""
Finds the index of the rules that must be blacklisted.
"""
dataset = KoverDataset(dataset_file)
# Find all rules to blacklist
rule_blacklist = []
if kmer_blacklist_file is not None:
kmers_to_blacklist = _parse_kmer_blacklist(kmer_blacklist_file, dataset.kmer_length)
if kmers_to_blacklist:
# XXX: the k-mers are assumed to be upper-cased in the dataset
kmer_sequences = dataset.kmer_sequences[...].tolist()
kmer_by_matrix_column = dataset.kmer_by_matrix_column[...].tolist() # XXX: each k-mer is there only once (see wiki)
n_kmers = len(kmer_sequences)
kmers_not_found = []
for k in kmers_to_blacklist:
k = k.upper()
try:
presence_rule_idx = kmer_by_matrix_column.index(kmer_sequences.index(k))
absence_rule_idx = presence_rule_idx + n_kmers
rule_blacklist += [presence_rule_idx, absence_rule_idx]
except ValueError:
kmers_not_found.append(k)
if(len(kmers_not_found) > 0):
warning_callback("The following kmers could not be found in the dataset: " + ", ".join(kmers_not_found))
return rule_blacklist
def learn_SCM(dataset_file, split_name, model_type, p, kmer_blacklist_file,max_rules, max_equiv_rules,
parameter_selection, n_cpu, random_seed, authorized_rules, bound_delta=None, bound_max_genome_size=None,
progress_callback=None, warning_callback=None, error_callback=None):
"""
parameter_selection: bound, cv, none (use first value of each if multiple)
"""
# Execution callback functions
if warning_callback is None:
warning_callback = lambda w: logging.warning(w)
if error_callback is None:
def normal_raise(exception):
raise exception
error_callback = normal_raise
if progress_callback is None:
progress_callback = lambda t, p: None
if n_cpu is None:
n_cpu = cpu_count()
random_generator = np.random.RandomState(random_seed)
model_type = np.unique(model_type)
p = np.unique(p)
logging.debug("Searching for blacklisted rules.")
rule_blacklist = _find_rule_blacklist(dataset_file=dataset_file,
kmer_blacklist_file=kmer_blacklist_file,
warning_callback=warning_callback)
dataset = KoverDataset(dataset_file)
# Score the hyperparameter combinations
# ------------------------------------------------------------------------------------------------------------------
if parameter_selection == "bound":
if bound_delta is None or bound_max_genome_size is None:
error_callback(Exception("Bound selection cannot be performed without delta and the maximum genome length."))
# For bound selection, there is no need to retrain the algorithm after selecting the best hyperparameters.
# The model is already obtained from all the training data. This is why we save the model here.
best_hp_score, \
best_hp, \
best_model, \
best_rule_importances, \
best_predictor_equiv_rules = _bound_selection(dataset_file=dataset_file,
split_name=split_name,
model_types=model_type,
p_values=p,
max_rules=max_rules,
rule_blacklist=rule_blacklist,
max_equiv_rules=max_equiv_rules,
bound_delta=bound_delta,
bound_max_genome_size=bound_max_genome_size,
n_cpu=n_cpu,
random_generator=random_generator,
progress_callback=progress_callback,
warning_callback=warning_callback,
error_callback =error_callback)
elif parameter_selection == "cv":
n_folds = len(dataset.get_split(split_name).folds)
if n_folds < 1:
error_callback(Exception("Cross-validation cannot be performed on a split with no folds."))
best_hp_score, best_hp = _cross_validation(dataset_file=dataset_file,
split_name=split_name,
model_types=model_type,
p_values=p,
max_rules=max_rules,
rule_blacklist=rule_blacklist,
n_cpu=n_cpu,
progress_callback=progress_callback,
warning_callback=warning_callback,
error_callback=error_callback)
else:
# Use the first value provided for each parameter
best_hp = {"model_type": model_type[0], "p": p[0], "max_rules": max_rules}
best_hp_score = None
# Use the best hyperparameters to train/test on the split
# ------------------------------------------------------------------------------------------------------------------
if parameter_selection == "bound":
model = best_model
equivalent_rules = best_predictor_equiv_rules
rule_importances = best_rule_importances
else:
model, rule_importances, \
equivalent_rules = _full_train(dataset=dataset,
split_name=split_name,
model_type=best_hp["model_type"],
p=best_hp["p"],
max_rules=best_hp["max_rules"],
max_equiv_rules=max_equiv_rules,
rule_blacklist=rule_blacklist,
random_generator=random_generator,
progress_callback=progress_callback)
split = dataset.get_split(split_name)
train_example_idx = split.train_genome_idx
test_example_idx = split.test_genome_idx
train_predictions, test_predictions = _predictions(model=model,
kmer_matrix=dataset.kmer_matrix,
train_example_idx=train_example_idx,
test_example_idx=test_example_idx,
progress_callback=progress_callback)
train_answers = dataset.phenotype.metadata[train_example_idx]
train_metrics = _get_binary_metrics(train_predictions, train_answers)
# No need to recompute the bound if bound selection was used
if parameter_selection == "bound":
train_metrics["bound"] = best_hp_score
else:
train_metrics["bound"] = _bound(train_predictions=train_predictions,
train_answers=train_answers,
train_example_idx=train_example_idx,
model=model,
delta=bound_delta,
max_genome_size=bound_max_genome_size,
rule_classifications=KmerRuleClassifications(dataset.kmer_matrix,
dataset.genome_count))
# Test metrics are computed only if there is a testing set
if len(test_example_idx) > 0:
test_answers = dataset.phenotype.metadata[test_example_idx]
test_metrics = _get_binary_metrics(test_predictions, test_answers)
else:
test_metrics = None
# Get the idx of the training/testing examples that are correctly/incorrectly classified by the model
classifications = defaultdict(list)
classifications["train_correct"] = dataset.genome_identifiers[train_example_idx[train_predictions == \
train_answers].tolist()].tolist() if train_metrics["risk"][0] < 1.0 else []
classifications["train_errors"] = dataset.genome_identifiers[train_example_idx[train_predictions != \
train_answers].tolist()].tolist() if train_metrics["risk"][0] > 0 else []
if len(test_example_idx) > 0:
classifications["test_correct"] = dataset.genome_identifiers[test_example_idx[test_predictions == \
test_answers].tolist()].tolist() if test_metrics["risk"][0] < 1.0 else []
classifications["test_errors"] = dataset.genome_identifiers[test_example_idx[test_predictions != \
test_answers].tolist()].tolist() if test_metrics["risk"][0] > 0 else []
# Convert the equivalent rule indexes to rule objects
rules = LazyKmerRuleList(dataset.kmer_sequences, dataset.kmer_by_matrix_column)
model_equivalent_rules = [[rules[i] for i in equiv_idx] for equiv_idx in equivalent_rules]
return best_hp, best_hp_score, train_metrics, test_metrics, model, rule_importances, \
model_equivalent_rules, classifications
| gpl-3.0 | 8,289,923,641,412,694,000 | 52.671186 | 156 | 0.58836 | false | 4.123177 | true | false | false |
affan2/django-notification | notification/backends/onsite.py | 1 | 3820 | from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.contrib.auth.models import User
from django.template import Context
from django.utils.translation import ugettext
from django.utils import translation
from django.utils import timezone
from notification import backends
class OnSiteBackend(backends.BaseBackend):
spam_sensitivity = 0
def can_send(self, user, notice_type):
can_send = super(OnSiteBackend, self).can_send(user, notice_type)
if can_send:
return True
return False
def deliver(self, recipient, sender, notice_type, extra_context):
from notification.models import Notice
if 'disallow_notice' in extra_context:
if 'onsite' in extra_context['disallow_notice']:
return
recipient = User.objects.get(id=recipient.id)
language_code = 'en'
if 'language_code' in extra_context.keys():
for language_tuple in settings.LANGUAGES:
if extra_context['language_code'] in language_tuple:
language_code = language_tuple[0]
break
else:
try:
language_code = recipient.user_profile.default_language
except ObjectDoesNotExist:
language_code = 'en'
translation.activate(language_code)
if 'target' in extra_context and hasattr(extra_context['target'], 'translations'):
from general.utils import switch_language
target = extra_context['target']
extra_context['target'] = switch_language(target, language_code)
if 'pm_message' in extra_context:
sender = extra_context['pm_message'].sender
target_url = self.get_target_url(extra_context, sender, recipient)
context = Context({})
context.update({
"recipient": recipient,
"sender": sender,
"notice": ugettext(notice_type.past_tense),
'default_profile_photo': settings.DEFAULT_PROFILE_PHOTO,
'target_url': target_url,
})
context.update(extra_context)
try:
messages = self.get_formatted_messages((
"full.html",
), context['app_label'], context)
except:
messages = self.get_formatted_messages((
"full.html",
), notice_type.label, context)
if sender.__class__.__name__ == 'Company':
sender = sender.admin_primary if sender.admin_primary else sender.created_by
if recipient.is_active:
create_notice = False
if settings.PRODUCTION_SETTING or settings.DEVELOPMENT_SERVER:
try:
notice_obj = Notice.objects.filter(
recipient=recipient,
notice_type=notice_type,
sender=sender,
target_url=target_url,
on_site=True,
site_id=settings.SITE_ID
).order_by('-added')[0]
except IndexError:
notice_obj = None
create_notice = True
if notice_obj and (timezone.now()-notice_obj.added).seconds/60 > settings.TIME_INTERVAL_BTW_TWO_NOTIFICATION:
create_notice = True
if create_notice:
Notice.objects.create(
recipient=recipient,
notice_type=notice_type,
sender=sender,
message=messages['full.html'],
target_url=target_url,
on_site=True,
site_id=settings.SITE_ID
)
| mit | -4,104,252,907,369,937,000 | 36.45098 | 125 | 0.554712 | false | 4.745342 | false | false | false |
richbs/colourlens | colourlens/utils.py | 1 | 6035 | import colorsys
import webcolors
from cooperhewitt import swatchbook
from colormath.color_objects import RGBColor
from decimal import Decimal
COLOURS = {
'RED': ((255, 0, 0), (340, 17), (10, 100), (40, 100)),
'ORANGE': ((252, 106, 8), (18, 45), None, (66, 100)),
'YELLOW': ((255, 255, 0), (46, 66), None, (76, 100)),
'LIME': ((0, 255, 0), (67, 165), (15, 100), (66, 100)),
'CYAN': ((0, 255, 255), (166, 201), (15, 100), (66, 100)),
'BLUE': ((0, 0, 255), (202, 260), None, (66, 100)),
'MAGENTA': ((255, 0, 255), (261, 339), None, (66, 100)),
'MAROON': ((128, 0, 0), (340, 17), (20, 100), (24, 65)),
'BROWN': ((107, 48, 2), (18, 45), None, (26, 65)),
'OLIVE': ((128, 128, 0), (46, 66), (26, 100), (26, 81)),
'GREEN': ((0, 128, 0), (67, 165), None, (18, 65)),
'TEAL': ((0, 128, 128), (166, 201), None, (33, 65)),
'NAVY': ((0, 0, 128), (202, 260), None, (18, 65)),
'PURPLE': ((128, 0, 128), (261, 339), None, (33, 65)),
}
GREYSCALE = {
'BLACK': ((0, 0, 0), (0, 359), (0, 100), (0, 17)),
'WHITE': ((255, 255, 255), (0, 359), (0, 5), (90, 100)),
'SILVER': ((192, 192, 192), (0, 359), (0, 10), (61, 89)),
'GREY': ((128, 128, 128), (0, 359), (0, 10), (26, 60)),
}
DEFAULT_SAT = (25, 100)
DEFAUL_VAL = (50, 100)
TWOPLACES = Decimal(10) ** -2
class ArtColour:
hsv = ()
rgb = ()
hex_value = ()
css = ()
ansi = ()
ansi_rgb = ()
ansi_hsv = ()
_color = None
GREY = False
distance = None
prominence = None
def __init__(self, r, g, b, prominence):
self.rgb = (r, g, b)
self.prominence = prominence
(self.red, self.blue, self.green) = (r, g, b)
self.hsv = self.rgb_to_hsv(r, g, b)
(self.hue, self.sat, self.val) = \
(self.hsv[0], self.hsv[1], self.hsv[2])
self.ansi = self.ansi_number(r, g, b)
self.ansi_rgb = self.rgb_reduce(r, g, b)
self.ansi_hsv = self.rgb_to_hsv(*self.ansi_rgb)
self.hex_value = None
self.nearest_hex = None
def rgb_to_hsv(self, r, g, b):
fracs = [ch/255.0 for ch in (r, g, b)]
hsv = colorsys.rgb_to_hsv(*fracs)
return (int(round(hsv[0] * 360)),
int(round(hsv[1] * 100)),
int(round(hsv[2] * 100)))
def hsv_to_rgb(self, h, s, v):
rgb = colorsys.hsv_to_rgb(h/360.0, s/100.0, v/100.0)
return (int(round(rgb[0] * 255)),
int(round(rgb[1] * 255)),
int(round(rgb[2] * 255)))
def rgb_reduce(self, r, g, b):
reduced_rgb = [int(6 * float(val) / 256)
* (256/6) for val in (r, g, b)]
return tuple(reduced_rgb)
def spin(self, deg):
return (deg + 180) % 360 - 180
@property
def color(self):
if self._color is None:
self._color = self._get_color()
return self._color
def _get_color(self):
self.nearest = None
self.shortest_distance = 100
chosen_name = None
for color_dict in (COLOURS, GREYSCALE):
for name, color in color_dict.iteritems():
desired_rgb = color[0]
target = RGBColor(*desired_rgb)
cdist = target.delta_e(RGBColor(*self.rgb), method="cmc")
if self.nearest is None or cdist < self.shortest_distance:
self.nearest = name
self.nearest_rgb = desired_rgb
self.shortest_distance = cdist
self.distance = cdist
# print 'Checking', name
(hue_lo, hue_hi) = color[1]
if hue_lo > hue_hi:
h = self.spin(self.hue)
hue_lo = self.spin(hue_lo)
hue_hi = self.spin(hue_hi)
else:
h = self.hue
sat_range = color[2] or DEFAULT_SAT
val_range = color[3] or DEFAUL_VAL
if h in range(hue_lo, hue_hi + 1) and \
self.sat in range(sat_range[0], sat_range[1] + 1) and \
self.val in range(val_range[0], val_range[1] + 1):
# TODO set up desirable hues, sat and b per named colour
target = RGBColor(*desired_rgb)
self.distance = cdist
chosen_name = name
self.nearest_hex = webcolors.rgb_to_hex(self.nearest_rgb)
return chosen_name
return None
def ansi_number(self, r, g, b):
'''
Convert an RGB colour to 256 colour ANSI graphics.
'''
grey = False
poss = True
step = 2.5
while poss: # As long as the colour could be grey scale
if r < step or g < step or b < step:
grey = r < step and g < step and b < step
poss = False
step += 42.5
if grey:
colour = 232 + int(float(sum([r, g, b]) / 33.0))
else:
colour = sum([16] + [int((6 * float(val) / 256)) * mod
for val, mod in ((r, 36), (g, 6), (b, 1))])
return colour
def hex_me_up(self):
self.hex_value = webcolors.rgb_to_hex(self.rgb)
snapped, colour_name = swatchbook.closest_delta_e('css3', self.hex_value)
snapped_rgb = webcolors.hex_to_rgb(snapped)
hsv = self.rgb_to_hsv(*snapped_rgb)
target = RGBColor(*snapped_rgb)
original = RGBColor(*self.rgb)
cdist = target.delta_e(original, method="cmc")
prom = Decimal(self.prominence).quantize(TWOPLACES)
dist = Decimal(cdist).quantize(TWOPLACES)
ELITE = False
self.css = {
'r': self.rgb[0],
'g': self.rgb[1],
'b': self.rgb[2],
'hue': hsv[0],
'hex': snapped,
'name': colour_name,
'distance': float(dist),
'prominence': float(prom),
'elite': ELITE,
}
return self.css
| mit | 8,238,642,064,376,963,000 | 31.621622 | 81 | 0.479867 | false | 3.106022 | false | false | false |
callowayproject/Transmogrify | transmogrify/tests/test_utils.py | 1 | 1632 | """
Test the utils
"""
from transmogrify import utils
def test_is_tool():
assert utils.is_tool('date') is True
assert utils.is_tool('foo') is False
def test_purge_security_hash():
from hashlib import sha1
from transmogrify.settings import SECRET_KEY
security_hash = sha1('PURGE' + SECRET_KEY).hexdigest()
assert utils.is_valid_security('PURGE', security_hash) is True
def test_get_cached_files():
import os
from transmogrify import settings
from transmogrify.core import Transmogrify
testdata = os.path.abspath(settings.BASE_PATH)
t = Transmogrify('/horiz_img_r300x300.jpg?debug')
t.save()
result = utils.get_cached_files('/horiz_img.jpg', document_root=testdata)
filenames = [x.replace(testdata, '') for x in result]
assert '/horiz_img_r300x300.jpg' in filenames
def test_settings_stuff():
from transmogrify import settings
assert settings.bool_from_env('FOO', False) is False
assert settings.bool_from_env('FOO', 'False') is False
assert settings.bool_from_env('FOO', 'false') is False
assert settings.bool_from_env('FOO', 'F') is False
assert settings.bool_from_env('FOO', 'f') is False
assert settings.bool_from_env('FOO', '0') is False
assert settings.bool_from_env('FOO', 'True')
assert settings.bool_from_env('FOO', 'true')
assert settings.bool_from_env('FOO', 'T')
assert settings.bool_from_env('FOO', 't')
assert settings.bool_from_env('FOO', '1')
assert settings.list_from_env("FOO", '1,2,3,4') == ['1', '2', '3', '4']
assert settings.lists_from_env("FOO", '1,2:3,4') == [['1', '2'], ['3', '4']]
| apache-2.0 | -6,749,814,629,902,662,000 | 33.723404 | 80 | 0.664828 | false | 3.168932 | true | false | false |
theakholic/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
trim = max(1, n/10)
return np.mean(sorted(t)[trim:n-trim])
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 | 3,049,585,497,129,932,000 | 27.995177 | 78 | 0.636096 | false | 3.566344 | false | false | false |
ZeitOnline/briefkasten | deployment/jailhost.py | 2 | 3037 | # coding: utf-8
from fabric import api as fab
from fabric.api import env, task
from bsdploy.fabfile_mfsbsd import bootstrap as mfsbootstrap
from ploy.common import yesno
from ploy.config import value_asbool
AV = None
# hide stdout by default
# from fabric.state import output
# output['stdout'] = False
@task
def bootstrap(**kw):
mfsbootstrap(**kw)
def get_vars():
global AV
if AV is None:
hostname = env.host_string.split('@')[-1]
AV = dict(hostname=hostname, **env.instances[hostname].get_ansible_variables())
return AV
@task
def reset_cleansers(confirm=True):
"""destroys all cleanser slaves and their rollback snapshots, as well as the initial master
snapshot - this allows re-running the jailhost deployment to recreate fresh cleansers."""
if value_asbool(confirm) and not yesno("""\nObacht!
This will destroy any existing and or currently running cleanser jails.
Are you sure that you want to continue?"""):
exit("Glad I asked...")
get_vars()
cleanser_count = AV['ploy_cleanser_count']
# make sure no workers interfere:
fab.run('ezjail-admin stop worker')
# stop and nuke the cleanser slaves
for cleanser_index in range(cleanser_count):
cindex = '{:02d}'.format(cleanser_index + 1)
fab.run('ezjail-admin stop cleanser_{cindex}'.format(cindex=cindex))
with fab.warn_only():
fab.run('zfs destroy tank/jails/cleanser_{cindex}@jdispatch_rollback'.format(cindex=cindex))
fab.run('ezjail-admin delete -fw cleanser_{cindex}'.format(cindex=cindex))
fab.run('umount -f /usr/jails/cleanser_{cindex}'.format(cindex=cindex))
fab.run('rm -rf /usr/jails/cleanser_{cindex}'.format(cindex=cindex))
with fab.warn_only():
# remove master snapshot
fab.run('zfs destroy -R tank/jails/cleanser@clonesource')
# restart worker and cleanser to prepare for subsequent ansible configuration runs
fab.run('ezjail-admin start worker')
fab.run('ezjail-admin stop cleanser')
fab.run('ezjail-admin start cleanser')
@task
def reset_jails(confirm=True, keep_cleanser_master=True):
""" stops, deletes and re-creates all jails.
since the cleanser master is rather large, that one is omitted by default.
"""
if value_asbool(confirm) and not yesno("""\nObacht!
This will destroy all existing and or currently running jails on the host.
Are you sure that you want to continue?"""):
exit("Glad I asked...")
reset_cleansers(confirm=False)
jails = ['appserver', 'webserver', 'worker']
if not value_asbool(keep_cleanser_master):
jails.append('cleanser')
with fab.warn_only():
for jail in jails:
fab.run('ezjail-admin delete -fw {jail}'.format(jail=jail))
# remove authorized keys for no longer existing key (they are regenerated for each new worker)
fab.run('rm /usr/jails/cleanser/usr/home/cleanser/.ssh/authorized_keys')
| bsd-3-clause | 8,950,279,406,958,018,000 | 35.590361 | 104 | 0.670728 | false | 3.519119 | false | false | false |
JoostvanPinxten/ConstraintPuzzler | gui/puzzlerepresentation/valuehexagon.py | 1 | 4856 | '''
Created on 30 dec. 2012
@author: Juice
'''
from PySide import QtGui, QtCore
from math import *
from structure.cell import Cell
from constraints import Constraint
def centerTextItem(text):
form = QtGui.QTextBlockFormat()
form.setAlignment(QtCore.Qt.AlignCenter)
cursor = text.textCursor()
cursor.select(QtGui.QTextCursor.Document)
cursor.mergeBlockFormat(form)
cursor.clearSelection()
class ValueHexagon(QtGui.QGraphicsPolygonItem):
def __init__(self, cell, cellSize, position, parent=None, edgeColor=QtCore.Qt.black):
# normalized hexagon
polygon = QtGui.QPolygonF(
[QtCore.QPointF(
cos(x*pi/3)+1,
sin(x*pi/3)+sqrt(3)/2
)*cellSize/2
for x in range(0,6)]
)
polygon.translate(position)
super(ValueHexagon, self).__init__(polygon, parent)
self.cell = cell
self.position = position
self.cellSize = cellSize
if isinstance(cell, Cell):
self.values = cell.getGrid().getPuzzle().getValues()
else:
self.values = []
self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
self.hintValueItemMap = {}
self.edgeColor = edgeColor
pen = QtGui.QPen()
pen.setColor(edgeColor)
pen.setWidth(2)
self.setPen(pen)
self.hintsEnabled = True
self.instantiateRepresentation()
self.updateRepresentation()
def mousePressEvent(self, event):
return QtGui.QGraphicsRectItem.mousePressEvent(self, event)
def instantiateRepresentation(self):
# for each value, instantiate the hints, hidden by default
for val in self.values:
# this is the static calculation for a block of 3z3
off_x = (((val-1) % 3) + 0.7) * (self.cellSize/4)
off_y = (floor((val-1) / 3) + 0.25) * (self.cellSize/4)
t = QtGui.QGraphicsTextItem(str(val))
t.setParentItem(self)
t.setPos(self.position.x()+ off_x, self.position.y() + off_y)
t.setOpacity(0)
self.hintValueItemMap[val] = t
# add a big text item to show the set value, hidden by default
val = self.cell.getValue() if isinstance(self.cell, Cell) else self.cell.getTotalValue()
self.valueTextItem = QtGui.QGraphicsTextItem(str())
self.valueTextItem.setParentItem(self)
self.valueTextItem.setPos(self.position.x(), self.position.y() + self.cellSize/6)
f = QtGui.QFont("Sans serif", self.cellSize/3 ,200)
if isinstance(self.cell, Cell):
if(self.cell.isInferred()):
f.setWeight(0)
else:
self.valueTextItem.setDefaultTextColor(QtCore.Qt.blue)
self.valueTextItem.setFont(f)
self.valueTextItem.setTextWidth(self.cellSize)
# align to center of cell
centerTextItem(self.valueTextItem)
self.valueTextItem.setOpacity(0)
def updateRepresentation(self):
val = self.cell.getValue() if isinstance(self.cell, Cell) else self.cell.getTotalValue()
if(val <> None):
# first hide all the hints
self.hideHints()
# show value text
self.valueTextItem.setOpacity(1)
self.valueTextItem.setPlainText(str(val))
# re-align to middle of cell
centerTextItem(self.valueTextItem)
f = self.valueTextItem.font()
if(isinstance(self.cell, Constraint) or self.cell.isInferred()):
f.setWeight(0)
self.valueTextItem.setDefaultTextColor(QtCore.Qt.black)
else:
f.setWeight(200)
self.valueTextItem.setDefaultTextColor(QtCore.Qt.blue)
else:
self.valueTextItem.setOpacity(0)
# show all the possible values
vals = self.cell.getPossibleValues()
numValProcessed = 0
for val in self.values:
if(numValProcessed >= 9):
break
numValProcessed += 1
if self.hintsEnabled and val in vals:
self.hintValueItemMap[val].setOpacity(1)
else:
self.hintValueItemMap[val].setOpacity(0)
pass
def setHintsEnabled(self, hintsEnabled):
self.hintsEnabled = hintsEnabled
self.updateRepresentation()
def hideHints(self):
for val in self.values:
self.hintValueItemMap[val].setOpacity(0)
| mit | 6,221,386,427,122,202,000 | 35.787879 | 96 | 0.563633 | false | 4.104818 | false | false | false |
vicnet/weboob | modules/myedenred/browser.py | 1 | 3412 | # -*- coding: utf-8 -*-
# Copyright(C) 2017 Théo Dorée
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.browser import LoginBrowser, URL, need_login
from weboob.exceptions import BrowserIncorrectPassword
from weboob.tools.capabilities.bank.transactions import merge_iterators
from .pages import LoginPage, AccountsPage, AccountDetailsPage, TransactionsPage
class MyedenredBrowser(LoginBrowser):
BASEURL = 'https://www.myedenred.fr'
login = URL(r'/ctr\?Length=7',
r'/ExtendedAccount/Logon', LoginPage)
accounts = URL(r'/$', AccountsPage)
accounts_details = URL(r'/ExtendedHome/ProductLine\?benId=(?P<token>\d+)', AccountDetailsPage)
transactions = URL('/Card/TransactionSet', TransactionsPage)
def __init__(self, *args, **kwargs):
super(MyedenredBrowser, self).__init__(*args, **kwargs)
self.docs = {}
def do_login(self):
self.login.go(data={'Email': self.username, 'Password': self.password, 'RememberMe': 'false',
'X-Requested-With': 'XMLHttpRequest', 'ReturnUrl': '/'})
self.accounts.go()
if self.login.is_here():
raise BrowserIncorrectPassword
@need_login
def iter_accounts(self):
for acc_id in self.accounts.stay_or_go().get_accounts_id():
yield self.accounts_details.go(headers={'X-Requested-With': 'XMLHttpRequest'},
token=acc_id).get_account()
@need_login
def iter_history(self, account):
def iter_transactions_by_type(type):
history = self.transactions.go(data={'command': 'Charger les 10 transactions suivantes',
'ErfBenId': account._product_token,
'ProductCode': account._product_type,
'SortBy': 'DateOperation',
'StartDate': '',
'EndDate': '',
'PageNum': 10,
'OperationType': type,
'failed': 'false',
'X-Requested-With': 'XMLHttpRequest'
})
return history.iter_transactions(subid=account.id)
if account.id not in self.docs:
iterator = merge_iterators(iter_transactions_by_type(type='Debit'), iter_transactions_by_type(type='Credit'))
self.docs[account.id] = list(iterator)
return self.docs[account.id]
| lgpl-3.0 | -382,926,120,379,920,060 | 44.466667 | 121 | 0.582111 | false | 4.440104 | false | false | false |
tommybobbins/7persecond | 7cell_brief5.py | 1 | 3293 | import pygame, sys, re
from time import sleep
from pygame.locals import *
from random import shuffle
clock = pygame.time.Clock()
FPS = 30
playtime = 0.0
counter = 0
pygame.font.init()
font = pygame.font.Font(None, 30)
batch_size = 7 # 7 Squares displayed (e.g 7 of 16*9 = 7/144 )
tiles = {}
sprite_currently_displayed = False
##### Adjust these sleep times to suit###################
sleep_time_for_none_icons = 1 # A sprite is not displayed
sleep_time_for_icons = 1 # A sprite is displayed
##########################################################
pygame.init()
#Framebuffer size: 1776 x 952
sizex=1776
sizey=952
xdivision = 16
ydivision = 9
xshuf = [i for i in range(xdivision*ydivision)]
unitx = sizex/xdivision
unity = sizey/ydivision
import os
ins = open( "sprite_positions.txt", "r" )
for line in ins:
print line
try:
m = re.search('^(\w+)\_(\d+)\_(\d+)\.png: (\d+), (\d+)',line)
except:
print ("Cannot match regexp on %s " % line)
(spritename, spritex, spritey, extentx, extenty) = (m.group(1), float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5)))
# print ("%s %f %f %i %i" % (spritename, spritex, spritey, extentx, extenty))
spriteboxx = int(spritex%xdivision)
spriteboxy = int(spritey%ydivision)
print ("spriteboxx = %i spriteboxy= %i" % (spriteboxx, spriteboxy))
spriteboxnumber = int((spriteboxy*xdivision)+spriteboxx)
print ("spriteboxnumber = %i " % spriteboxnumber)
tiles[spriteboxnumber] = ( spritename, spritex, spritey, extentx, extenty)
ins.close()
for key in tiles.keys():
( spritename, spritex, spritey, extentx, extenty) = tiles[key]
# print ("%i %s %i %i" % (key, spritename, spritex, spritey))
screen = pygame.display.set_mode((sizex, sizey))
background = pygame.image.load('data/plastic_reality_bg.png').convert()
im2= pygame.Surface(screen.get_size())
#im2.fill((0, 0, 0))
im2 = pygame.image.load('data/all_on_one_no_bg.png').convert_alpha()
screen.blit(background,(0,0))
pygame.display.flip()
while True:
milliseconds = clock.tick(FPS)
playtime += milliseconds / 1000.0
shuffle(xshuf)
for i in range(0,7):
random_value = xshuf[i]
print ("Random value %i " % random_value)
try:
( spritename, spritex, spritey, extentx, extenty) = tiles[random_value]
except:
spritename = False
if (spritename):
randomx = spritex
randomy = spritey
print ("%s %f,%f, %f, %f" % (spritename, randomx,randomy, extentx, extenty))
# screen.blit(background, (0, 0))
screen.blit(im2, (randomx, randomy), pygame.Rect(randomx, randomy, extentx, extenty))
#text_surface = font.render("FPS: %f Playtime: %f " % (clock.get_fps(),playtime), True, (255,255,255))
#screen.blit(text_surface, (10, 10))
pygame.display.flip()
# sleep(1)
#sleep(sleep_time_for_icons)
sprite_currently_displayed = True
else:
# print ('.')
# sleep(1)
sleep(sleep_time_for_none_icons)
if (sprite_currently_displayed == True):
screen.blit(background, (0, 0))
pygame.display.flip()
sprite_currently_displayed = False
| gpl-2.0 | 1,781,229,823,855,516,000 | 33.302083 | 143 | 0.605831 | false | 3.163305 | false | false | false |
sheepslinky/franklin | server/driver.py | 1 | 134244 | #!/usr/bin/python3
# vim: set foldmethod=marker fileencoding=utf8 :
# Python parts of the host side driver for Franklin. {{{
# Copyright 2014-2016 Michigan Technological University
# Copyright 2016 Bas Wijnen <wijnen@debian.org>
# Author: Bas Wijnen <wijnen@debian.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# }}}
show_own_debug = False
#show_own_debug = True
# Constants {{{
C0 = 273.15 # Conversion between K and °C
WAIT = object() # Sentinel for blocking functions.
NUM_SPACES = 3
# Space types
TYPE_CARTESIAN = 0
TYPE_DELTA = 1
TYPE_POLAR = 2
TYPE_EXTRUDER = 3
TYPE_FOLLOWER = 4
record_format = '=Bidddddddd' # type, tool, X, Y, Z, E, f, F, time, dist
# }}}
# Imports. {{{
import fhs
import websocketd
from websocketd import log
import serial
import time
import math
import struct
import os
import re
import sys
import wave
import sys
import io
import base64
import json
import fcntl
import select
import subprocess
import traceback
import protocol
import mmap
import random
import errno
import shutil
# }}}
config = fhs.init(packagename = 'franklin', config = { # {{{
'cdriver': None,
'allow-system': None,
'uuid': None,
'local': False,
'arc': True
})
# }}}
# Enable code trace. {{{
if False:
def trace(frame, why, arg):
if why == 'call':
code = frame.f_code
log('call: %d %s' % (code.co_firstlineno, code.co_name))
sys.settrace(trace)
# }}}
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
def dprint(x, data): # {{{
if show_own_debug:
log('%s: %s' % (x, ' '.join(['%02x' % c for c in data])))
# }}}
# Decorator for functions which block.
def delayed(f): # {{{
def ret(self, *a, **ka):
#log('delayed called with args %s,%s' % (repr(a), repr(ka)))
def wrap(id):
#log('wrap called with id %s' % (repr(id)))
return f(self, id, *a, **ka)
return (WAIT, wrap)
return ret
# }}}
# Call cdriver running on same machine.
class Driver: # {{{
def __init__(self):
#log(repr(config))
self.driver = subprocess.Popen((config['cdriver'],), stdin = subprocess.PIPE, stdout = subprocess.PIPE, close_fds = True)
fcntl.fcntl(self.driver.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
self.buffer = b''
def available(self):
return len(self.buffer) > 0
def write(self, data):
self.driver.stdin.write(data)
self.driver.stdin.flush()
def read(self, length):
while True:
if len(self.buffer) >= length:
ret = self.buffer[:length]
self.buffer = self.buffer[length:]
return ret
try:
r = os.read(self.driver.stdout.fileno(), 4096)
except IOError:
r = self.buffer[:length]
self.buffer = self.buffer[length:]
return r
except OSError as exc:
if exc.errno == errno.EAGAIN:
r = self.buffer[:length]
self.buffer = self.buffer[length:]
return r
raise
if r == b'':
log('EOF!')
self.close()
self.buffer += r
def close(self):
log('Closing machine driver; exiting.')
sys.exit(0)
def fileno(self):
return self.driver.stdout.fileno()
# }}}
# Reading and writing pins to and from ini files. {{{
def read_pin(machine, pin):
extra = 0
if pin.startswith('X'):
pin = pin[1:]
if pin == '':
return 0
else:
extra += 256
if pin.startswith('-'):
extra += 512
pin = pin[1:]
try:
pin = int(pin)
except:
log('incorrect pin %s' % pin)
return 0
if pin >= len(machine.pin_names):
machine.pin_names.extend([[0xf, '(Pin %d)' % i] for i in range(len(machine.pin_names), pin + 1)])
return pin + extra
def write_pin(pin):
if pin == 0:
return 'X'
ret = ''
if pin >= 512:
ret += '-'
pin -= 512
if pin >= 256:
pin -= 256
else:
ret = 'X' + ret
return ret + '%d' % pin
# }}}
class Machine: # {{{
# Internal stuff. {{{
def _read_data(self, data): # {{{
cmd, s, m, e, f = struct.unpack('=BLLLd', data[:21])
return cmd, s, m, f, e, data[21:]
# }}}
def _send(self, *data): # {{{
#log('writing to server: %s' % repr(data))
sys.stdout.write(json.dumps(data) + '\n')
sys.stdout.flush()
# }}}
def _refresh_queue(self):
if self.uuid is None:
return
spool = fhs.read_spool(self.uuid, dir = True, opened = False)
if spool is None:
return
gcode = os.path.join(spool, 'gcode')
audio = os.path.join(spool, 'audio')
probe = fhs.read_spool(os.path.join(self.uuid, 'probe' + os.extsep + 'bin'), text = False)
if probe is not None:
try:
# Map = [[targetx, targety, x0, y0, w, h], [nx, ny], [[...], [...], ...]]
size = struct.calcsize('@ddddddddLLd')
targetx, targety, x0, y0, w, h, sina, cosa, nx, ny, self.targetangle = struct.unpack('@ddddddddLLd', probe.read(size))
self.gcode_angle = math.sin(self.targetangle), math.cos(self.targetangle)
sina, cosa = self.gcode_angle
limits = [targetx, targety, x0, y0, w, h]
nums = [nx, ny, self.targetangle]
if not (0 < nx < 1000 and 0 < ny < 1000):
raise ValueError('probe map too large; probably invalid')
probes = [[None for x in range(nx + 1)] for y in range(ny + 1)]
for y in range(ny + 1):
for x in range(nx + 1):
probes[y][x] = struct.unpack('@d', probe.read(struct.calcsize('@d')))[0]
self.probemap = [limits, nums, probes]
except:
log('Failed to load probe map')
self._globals_update()
if os.path.isdir(gcode):
self.jobqueue = {}
for filename in os.listdir(gcode):
name, ext = os.path.splitext(filename)
if ext != os.extsep + 'bin':
log('skipping %s' % filename)
continue
try:
#log('opening %s' % filename)
with open(os.path.join(gcode, filename), 'rb') as f:
f.seek(-8 * 8, os.SEEK_END)
self.jobqueue[name] = struct.unpack('=' + 'd' * 8, f.read())
except:
traceback.print_exc()
log('failed to open gcode file %s' % os.path.join(gcode, filename))
sortable_queue = [(q, self.jobqueue[q]) for q in self.jobqueue]
sortable_queue.sort()
self._broadcast(None, 'queue', sortable_queue)
if os.path.isdir(audio):
self.audioqueue = {}
for filename in os.listdir(audio):
name, ext = os.path.splitext(filename)
if ext != os.extsep + 'bin':
log('skipping %s' % filename)
continue
try:
#log('opening audio %s' % filename)
self.audioqueue[name] = os.stat(os.path.join(audio, filename)).st_size
except:
traceback.print_exc()
log('failed to stat audio file %s' % os.path.join(audio, filename))
sortable_queue = list(self.audioqueue.keys())
sortable_queue.sort()
self._broadcast(None, 'audioqueue', sortable_queue)
def __init__(self, allow_system): # {{{
self.initialized = False
self.connected = False
self.uuid = config['uuid']
# Start a block because the next line has an accidental end marker. {{{
self.user_interface = '{Dv2m(Blocker:){Dv2m(No Connection:){dv3m{dv3m{dv3m[0:*Controls:{Dh60%{Dv12m{Dv5m{dh11m(Job Control:)(Buttons:)}(Position:)}{Dh85%(XY Map:)(Z Map:)}}{Dv4m(Abort:){Dv6m(Multipliers:){Dv2m(Gpios:){Dv9m(Temps:)(Temp Graph:)}}}}}Setup:{Dv2m(Save Profile:)[0:*Profile:(Profile Setup:)Probe:(Probe Setup:)Globals:(Globals Setup:)Axes:(Axis Setup:)Motors:(Motor Setup:)Type:{Dv3m(Type Setup:){Dh50%(Cartesian Setup:){Dh50%(Delta Setup:)(Polar Setup:)}}}Extruder:(Extruder Setup:)Follower:(Follower Setup:)GPIO:(Gpio Setup:)Temps:(Temp Setup:)]}](Confirmation:)}(Message:)}(State:)}}}'
self.pin_names = []
self.machine = Driver()
self.allow_system = allow_system
self.probemap = None
self.job_current = None
self.job_id = None
self.confirm_id = 0
self.confirm_message = None
self.confirm_axes = None
self.confirmer = None
self.position_valid = False
self.probing = False
self.probe_pending = False
self.parking = False
self.home_phase = None
self.home_target = None
self.home_cb = [False, self._do_home]
self.probe_cb = [False, None]
self.probe_speed = 3.
self.gcode_file = False
self.gcode_map = None
self.gcode_id = None
self.gcode_waiting = 0
self.audio_id = None
self.queue = []
self.queue_pos = 0
self.queue_info = None
self.confirm_waits = set()
self.gpio_waits = {}
self.total_time = [float('nan'), float('nan')]
self.resuming = False
self.flushing = False
self.debug_buffer = None
self.machine_buffer = ''
self.command_buffer = ''
self.bed_id = -1
self.fan_id = -1
self.spindle_id = -1
self.probe_dist = 1000
self.probe_offset = 0
self.probe_safe_dist = 10
self.num_probes = 1
self.unit_name = 'mm'
self.park_after_job = True
self.sleep_after_job = True
self.cool_after_job = True
self.spi_setup = []
# Set up state.
self.spaces = [self.Space(self, i) for i in range(NUM_SPACES)]
self.temps = []
self.gpios = []
self.probe_time_dist = [float('nan'), float('nan')]
self.sending = False
self.paused = False
self.limits = [{} for s in self.spaces]
self.wait = False
self.movewait = 0
self.movecb = []
self.tempcb = []
self.alarms = set()
self.targetx = 0.
self.targety = 0.
self.targetangle = 0.
self.zoffset = 0.
self.store_adc = False
self.temp_scale_min = 0
self.temp_scale_max = 250
self.multipliers = []
self.current_extruder = 0
try:
assert self.uuid is not None # Don't try reading if there is no uuid given.
with fhs.read_data(os.path.join(self.uuid, 'info' + os.extsep + 'txt')) as pfile:
self.name = pfile.readline().rstrip('\n')
self.profile = pfile.readline().rstrip('\n')
#log('profile is %s' % self.profile)
except:
#log("No default profile; using 'default'.")
self.name = self.uuid
self.profile = 'default'
profiles = self.list_profiles()
if self.profile not in profiles and len(profiles) > 0:
self.profile = profiles[0]
#log('Profile does not exist; using %s instead' % self.profile)
self.default_profile = self.profile
# Globals.
self.queue_length = 0
self.num_pins = 0
self.led_pin = 0
self.stop_pin = 0
self.probe_pin = 0
self.spiss_pin = 0
self.timeout = 0
self.bed_id = -1
self.fan_id = -1
self.spindle_id = -1
self.feedrate = 1
self.max_deviation = 0
self.max_v = float('inf')
self.current_extruder = 0
self.targetx = 0.
self.targety = 0.
self.targetangle = 0.
self.zoffset = 0.
# Other things don't need to be initialized, because num_* == 0.
# Fill job queue.
self.jobqueue = {}
self.audioqueue = {}
self._refresh_queue()
try:
self.load(update = False)
except:
log('Failed to import initial settings')
traceback.print_exc()
global show_own_debug
if show_own_debug is None:
show_own_debug = True
# }}}
# Constants. {{{
# Single-byte commands.
single = {'OK': b'\xb3', 'WAIT': b'\xad' }
# }}}
def _broadcast(self, *a): # {{{
self._send(None, 'broadcast', *a)
# }}}
def _close(self, notify = True): # {{{
log('disconnecting')
self.connected = False
if notify:
self._send(None, 'disconnect')
self._globals_update()
# }}}
def _machine_read(self, *a, **ka): # {{{
while True:
try:
return self.machine.read(*a, **ka)
except:
log('error reading')
traceback.print_exc()
sys.exit(0)
# }}}
def _machine_write(self, data): # {{{
#log('writing %s' % ' '.join(['%02x' % x for x in data]))
while True:
try:
self.machine.write(data)
return
except:
log('error writing')
traceback.print_exc()
sys.exit(0)
# }}}
def _command_input(self): # {{{
data = sys.stdin.read()
if data == '':
log('End of file detected on command input; exiting.')
sys.exit(0)
self.command_buffer += data
die = None
#log('cmd buf %s' % repr(self.command_buffer))
while '\n' in self.command_buffer:
pos = self.command_buffer.index('\n')
id, func, a, ka = json.loads(self.command_buffer[:pos])
self.command_buffer = self.command_buffer[pos + 1:]
try:
#log('command: %s(%s %s)' % (func, a, ka))
assert not any(func.startswith(x + '_') for x in ('benjamin', 'admin', 'expert', 'user'))
role = a.pop(0) + '_'
if hasattr(self, role + func):
func = role + func
elif role == 'benjamin_' and hasattr(self, 'admin_' + func):
func = 'admin_' + func
elif role in ('benjamin_', 'admin_') and hasattr(self, 'expert_' + func):
func = 'expert_' + func
ret = getattr(self, func)(*a, **ka)
if isinstance(ret, tuple) and len(ret) == 2 and ret[0] is WAIT:
# The function blocks; it will send its own reply later.
if ret[1] is WAIT:
# Special case: request to die.
die = id
else:
ret[1](id)
continue
except SystemExit:
sys.exit(0)
except:
log('error handling command input')
traceback.print_exc()
self._send(id, 'error', repr(sys.exc_info()))
continue
if ret != (WAIT, WAIT):
#log('returning %s' % repr(ret))
self._send(id, 'return', ret)
if die is not None:
self._send(die, 'return', None)
sys.exit(0)
# }}}
def _trigger_movewaits(self, num, done = True): # {{{
#traceback.print_stack()
#log('trigger %s' % repr(self.movecb))
#log('movecbs: %d/%d' % (num, self.movewait))
if self.movewait < num:
log('More cbs received than requested!')
self.movewait = 0
else:
#log('movewait %d/%d' % (num, self.movewait))
self.movewait -= num
if self.movewait == 0:
#log('running cbs: %s' % repr(self.movecb))
call_queue.extend([(x[1], [done]) for x in self.movecb])
self.movecb = []
if self.flushing and self.queue_pos >= len(self.queue):
#log('done flushing')
self.flushing = 'done'
#else:
# log('cb seen, but waiting for more')
# }}}
def _machine_input(self, reply = False): # {{{
while True:
if len(self.machine_buffer) == 0:
r = self._machine_read(1)
dprint('(1) read', r)
if r == b'':
return ('no data', None)
if r == self.single['WAIT']:
return ('wait', None)
if r == self.single['OK']:
return ('ok', None)
# Regular packet.
self.machine_buffer = r
packet_len = self.machine_buffer[0]
while True:
r = self._machine_read(packet_len - len(self.machine_buffer))
dprint('rest of packet read', r)
if r == '':
return (None, None)
self.machine_buffer += r
if len(self.machine_buffer) >= packet_len:
break
if not self.machine.available():
#log('waiting for more data (%d/%d)' % (len(self.machine_buffer), packet_len))
ret = select.select([self.machine], [], [self.machine], 1)
if self.machine not in ret[0]:
log('broken packet?')
return (None, None)
#log('writing ok')
self.machine.write(self.single['OK'])
cmd, s, m, f, e, data = self._read_data(self.machine_buffer[1:])
#log('received command: %s' % repr((cmd, s, m, f, e, data)))
self.machine_buffer = ''
# Handle the asynchronous events.
if cmd == protocol.rcommand['MOVECB']:
#log('movecb %d/%d (%d in queue)' % (s, self.movewait, len(self.movecb)))
self._trigger_movewaits(s)
continue
if cmd == protocol.rcommand['TEMPCB']:
self.alarms.add(s)
t = 0
while t < len(self.tempcb):
if self.tempcb[t][0] is None or self.tempcb[t][0] in self.alarms:
call_queue.append((self.tempcb.pop(t)[1], []))
else:
t += 1
continue
elif cmd == protocol.rcommand['CONTINUE']:
# Move continue.
self.wait = False
#log('resuming queue %d' % len(self.queue))
call_queue.append((self._do_queue, []))
if self.flushing is None:
self.flushing = False
continue
elif cmd == protocol.rcommand['LIMIT']:
if s < len(self.spaces) and m < len(self.spaces[s].motor):
self.limits[s][m] = f
#log('limit; %d waits' % e)
self._trigger_movewaits(self.movewait, False)
continue
elif cmd == protocol.rcommand['TIMEOUT']:
self.position_valid = False
call_queue.append((self._globals_update, ()))
for i, t in enumerate(self.temps):
if not math.isnan(t.value):
t.value = float('nan')
call_queue.append((self._temp_update, (i,)))
for i, g in enumerate(self.gpios):
if g.state != g.reset:
g.state = g.reset
call_queue.append((self._gpio_update, (i,)))
continue
elif cmd == protocol.rcommand['PINCHANGE']:
self.gpios[s].value = m
call_queue.append((self._gpio_update, (s,)))
if s in self.gpio_waits:
for id in self.gpio_waits[s]:
self._send(id, 'return', None)
del self.gpio_waits[s]
continue
elif cmd == protocol.rcommand['HOMED']:
call_queue.append((self._do_home, [True]))
continue
elif cmd == protocol.rcommand['DISCONNECT']:
self._close()
# _close returns after reconnect.
continue
elif cmd == protocol.rcommand['UPDATE_TEMP']:
if s < len(self.temps):
self.temps[s].value = f - C0
self._temp_update(s)
else:
log('Ignoring updated invalid temp %d' % s)
continue
elif cmd == protocol.rcommand['UPDATE_PIN']:
self.gpios[s].state = m
call_queue.append((self._gpio_update, (s,)))
continue
elif cmd == protocol.rcommand['CONFIRM']:
if s and self.probemap is not None:
self.probe_pending = True
call_queue.append((self.request_confirmation(data.decode('utf-8', 'replace') or 'Continue?')[1], (False,)))
continue
elif cmd == protocol.rcommand['PARKWAIT']:
def cb():
self._send_packet(bytes((protocol.command['RESUME'],)))
call_queue.append((self.park(cb = cb, abort = False)[1], (None,)))
continue
elif cmd == protocol.rcommand['FILE_DONE']:
call_queue.append((self._job_done, (True, 'completed')))
continue
elif cmd == protocol.rcommand['PINNAME']:
if s >= len(self.pin_names):
self.pin_names.extend([[0xf, '(Pin %d)' % i] for i in range(len(self.pin_names), s + 1)])
self.pin_names[s] = [data[0], data[1:].decode('utf-8', 'replace')] if len(data) >= 1 else [0, '']
#log('pin name {} = {}'.format(s, self.pin_names[s]))
continue
elif cmd == protocol.rcommand['CONNECTED']:
def sync():
# Get the machine state.
self._write_globals(update = False)
for i, s in enumerate(self.spaces):
self._send_packet(struct.pack('=BB', protocol.command['WRITE_SPACE_INFO'], i) + s.write_info())
for a in range(len(s.axis)):
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_AXIS'], i, a) + s.write_axis(a))
for m in range(len(s.motor)):
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_MOTOR'], i, m) + s.write_motor(m))
for i, t in enumerate(self.temps):
self._send_packet(struct.pack('=BB', protocol.command['WRITE_TEMP'], i) + t.write())
# Disable heater.
self.settemp(i, float('nan'), update = False)
# Disable heater alarm.
self.waittemp(i, None, None)
for i, g in enumerate(self.gpios):
self._send_packet(struct.pack('=BB', protocol.command['WRITE_GPIO'], i) + g.write())
# The machine may still be doing things. Pause it and send a move; this will discard the queue.
self.pause(True, False, update = False)
if self.spi_setup:
self._spi_send(self.spi_setup)
self.connected = True
self._globals_update()
call_queue.append((sync, ()))
continue
if reply:
return ('packet', (cmd, s, m, f, e, data))
log('unexpected packet %02x' % cmd)
raise AssertionError('Received unexpected reply packet')
# }}}
def _send_packet(self, data, move = False): # {{{
if len(data) + 2 >= 0x8000:
log('Message too long (%d >= %d)' % (len(data) + 2, 0x8000))
return
# Pack length as big endian, so first byte never has bit 7 set.
data = struct.pack('>H', len(data) + 2) + data
dprint('(1) writing', data);
self._machine_write(data)
if not move:
return
start_time = time.time()
while True:
if not self.machine.available():
ret = select.select([self.machine], [], [self.machine], 1)
if self.machine not in ret[0] and self.machine not in ret[2]:
# No response; keep waiting.
log('no response yet: %s' % repr(ret))
assert time.time() - start_time < 10
continue
ret = self._machine_input()
if ret[0] == 'wait':
#log('wait')
self.wait = True
return
elif ret[0] == 'ok':
return
#log('no response yet')
# }}}
def _get_reply(self, cb = False): # {{{
#traceback.print_stack()
while True:
if not self.machine.available():
ret = select.select([self.machine], [], [self.machine], 3)
if len(ret[0]) == 0 and len(ret[2]) == 0:
log('no reply received')
#traceback.print_stack()
continue
ret = self._machine_input(reply = True)
#log('reply input is %s' % repr(ret))
if ret[0] == 'packet' or (cb and ret[0] == 'no data'):
return ret[1]
#log('no response yet waiting for reply')
# }}}
def _read(self, cmd, channel, sub = None): # {{{
if cmd == 'SPACE':
info = self._read('SPACE_INFO', channel)
self.spaces[channel].type = struct.unpack('=B', info[:1])[0]
info = info[1:]
if self.spaces[channel].type == TYPE_CARTESIAN:
num_axes = struct.unpack('=B', info)[0]
num_motors = num_axes
elif self.spaces[channel].type == TYPE_DELTA:
self.spaces[channel].delta = [{}, {}, {}]
for a in range(3):
self.spaces[channel].delta[a]['axis_min'], self.spaces[channel].delta[a]['axis_max'], self.spaces[channel].delta[a]['rodlength'], self.spaces[channel].delta[a]['radius'] = struct.unpack('=dddd', info[32 * a:32 * (a + 1)])
self.spaces[channel].delta_angle = struct.unpack('=d', info[32 * 3:])[0]
num_axes = 3
num_motors = 3
elif self.spaces[channel].type == TYPE_POLAR:
self.spaces[channel].polar_max_r = struct.unpack('=d', info)[0]
num_axes = 3
num_motors = 3
elif self.spaces[channel].type == TYPE_EXTRUDER:
num_axes = struct.unpack('=B', info[:1])[0]
num_motors = num_axes
self.spaces[channel].extruder = []
for a in range(num_axes):
dx, dy, dz = struct.unpack('=ddd', info[1 + 24 * a:1 + 24 * (a + 1)])
self.spaces[channel].extruder.append({'dx': dx, 'dy': dy, 'dz': dz})
elif self.spaces[channel].type == TYPE_FOLLOWER:
num_axes = struct.unpack('=B', info[:1])[0]
num_motors = num_axes
self.spaces[channel].follower = []
for a in range(num_axes):
space, motor = struct.unpack('=BB', info[1 + 2 * a:1 + 2 * (a + 1)])
self.spaces[channel].follower.append({'space': space, 'motor': motor})
else:
log('invalid type %s' % repr(self.spaces[channel].type))
raise AssertionError('invalid space type')
return ([self._read('SPACE_AXIS', channel, axis) for axis in range(num_axes)], [self._read('SPACE_MOTOR', channel, motor) for motor in range(num_motors)])
if cmd == 'GLOBALS':
packet = struct.pack('=B', protocol.command['READ_' + cmd])
elif sub is not None and cmd.startswith('SPACE'):
packet = struct.pack('=BBB', protocol.command['READ_' + cmd], channel, sub)
else:
packet = struct.pack('=BB', protocol.command['READ_' + cmd], channel)
self._send_packet(packet)
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['DATA']
return data
# }}}
def _read_globals(self, update = True): # {{{
data = self._read('GLOBALS', None)
if data is None:
return False
self.queue_length, self.num_pins, num_temps, num_gpios = struct.unpack('=BBBB', data[:4])
self.led_pin, self.stop_pin, self.probe_pin, self.spiss_pin, self.timeout, self.bed_id, self.fan_id, self.spindle_id, self.feedrate, self.max_deviation, self.max_v, self.current_extruder, self.targetx, self.targety, self.targetangle, self.zoffset, self.store_adc = struct.unpack('=HHHHHhhhdddBdddd?', data[4:])
while len(self.temps) < num_temps:
self.temps.append(self.Temp(len(self.temps)))
if update:
data = self._read('TEMP', len(self.temps) - 1)
self.temps[-1].read(data)
self.temps = self.temps[:num_temps]
while len(self.gpios) < num_gpios:
self.gpios.append(self.Gpio(len(self.gpios)))
if update:
data = self._read('GPIO', len(self.gpios) - 1)
self.gpios[-1].read(data)
self.gpios = self.gpios[:num_gpios]
return True
# }}}
def _write_globals(self, nt = None, ng = None, update = True): # {{{
if nt is None:
nt = len(self.temps)
if ng is None:
ng = len(self.gpios)
dt = nt - len(self.temps)
dg = ng - len(self.gpios)
data = struct.pack('=BBHHHHHhhhdddBdddd?', nt, ng, self.led_pin, self.stop_pin, self.probe_pin, self.spiss_pin, int(self.timeout), self.bed_id, self.fan_id, self.spindle_id, self.feedrate, self.max_deviation, self.max_v, self.current_extruder, self.targetx, self.targety, self.targetangle, self.zoffset, self.store_adc)
self._send_packet(struct.pack('=B', protocol.command['WRITE_GLOBALS']) + data)
self._read_globals(update = True)
if update:
self._globals_update()
for t in range(dt):
self._temp_update(nt - dt + t)
for g in range(dg):
self._gpio_update(ng - dg + g)
return True
# }}}
def _mangle_spi(self): # {{{
ret = []
for bits, data in self.spi_setup:
ret.append('%d:%s' % (bits, ','.join('%02x' % x for x in data)))
return ';'.join(ret)
# }}}
def _unmangle_spi(self, data): # {{{
ret = []
if len(data) > 0:
for p in data.split(';'):
bits, data = p.split(':')
bits = int(bits)
data = [int(x, 16) for x in data.split(',')]
ret.append([bits, data])
return ret
# }}}
def _globals_update(self, target = None): # {{{
if not self.initialized:
return
self._broadcast(target, 'globals_update', [self.name, self.profile, len(self.temps), len(self.gpios), self.user_interface, self.pin_names, self.led_pin, self.stop_pin, self.probe_pin, self.spiss_pin, self.probe_dist, self.probe_offset, self.probe_safe_dist, self.bed_id, self.fan_id, self.spindle_id, self.unit_name, self.timeout, self.feedrate, self.max_deviation, self.max_v, self.targetx, self.targety, self.targetangle, self.zoffset, self.store_adc, self.park_after_job, self.sleep_after_job, self.cool_after_job, self._mangle_spi(), self.temp_scale_min, self.temp_scale_max, self.probemap, self.connected, not self.paused and (None if self.gcode_map is None and not self.gcode_file else True)])
# }}}
def _space_update(self, which, target = None): # {{{
if not self.initialized:
return
if which >= len(self.spaces):
# This can happen if this function is scheduled before changing the number of spaces.
return
self._broadcast(target, 'space_update', which, self.spaces[which].export())
# }}}
def _temp_update(self, which, target = None): # {{{
if not self.initialized:
return
if which >= len(self.temps):
# This can happen if this function is scheduled before changing the number of temps.
return
self._broadcast(target, 'temp_update', which, self.temps[which].export())
# }}}
def _gpio_update(self, which, target = None): # {{{
if not self.initialized:
return
if which >= len(self.gpios):
# This can happen if this function is scheduled before changing the number of gpios.
return
self._broadcast(target, 'gpio_update', which, self.gpios[which].export())
# }}}
def _gcode_close(self): # {{{
self.gcode_strings = []
self.gcode_map.close()
os.close(self.gcode_fd)
self.gcode_map = None
self.gcode_fd = -1
# }}}
def _job_done(self, complete, reason): # {{{
self._send_packet(struct.pack('=BBddBB', protocol.command['RUN_FILE'], 0, 0, 0, 0xff, 0))
if self.gcode_map is not None:
log(reason)
self._gcode_close()
self.gcode_file = False
#traceback.print_stack()
if self.queue_info is None and self.gcode_id is not None:
log('Job done (%d): %s' % (complete, reason))
self._send(self.gcode_id, 'return', (complete, reason))
self.gcode_id = None
if self.audio_id is not None:
log('Audio done (%d): %s' % (complete, reason))
self._send(self.audio_id, 'return', (complete, reason))
self.audio_id = None
if self.queue_info is None and self.job_current is not None:
if self.job_id is not None:
self._send(self.job_id, 'return', (complete, reason))
self.job_id = None
self.job_current = None
if complete:
self._finish_done()
while self.queue_pos < len(self.queue):
axes, e, f0, f1, v0, v1, which, single, rel = self.queue[self.queue_pos]
self.queue_pos += 1
if id is not None:
self._send(id, 'error', 'aborted')
self.queue = []
self.queue_pos = 0
if self.home_phase is not None:
#log('killing homer')
self.home_phase = None
self.expert_set_space(0, type = self.home_orig_type)
for a, ax in enumerate(self.spaces[0].axis):
self.expert_set_axis((0, a), min = self.home_limits[a][0], max = self.home_limits[a][1])
if self.home_cb in self.movecb:
self.movecb.remove(self.home_cb)
if self.home_id is not None:
self._send(self.home_id, 'return', None)
if self.probe_cb in self.movecb:
#log('killing prober')
self.movecb.remove(self.probe_cb)
self.probe_cb[1](None)
self._globals_update()
# }}}
def _finish_done(self): # {{{
if self.cool_after_job:
for t in range(len(self.temps)):
self.settemp(t, float('nan'))
def maybe_sleep():
if self.sleep_after_job:
self.sleep()
if self.park_after_job:
self.park(cb = maybe_sleep)[1](None)
else:
maybe_sleep()
# }}}
def _unpause(self): # {{{
if self.gcode_file:
self._send_packet(bytes((protocol.command['RESUME'],))) # Just in case.
if self.queue_info is None:
return
#log('doing resume to %d/%d' % (self.queue_info[0], len(self.queue_info[2])))
self.queue = self.queue_info[2]
self.queue_pos = self.queue_info[0]
self.movecb = self.queue_info[3]
self.flushing = self.queue_info[4]
self.resuming = False
self.queue_info = None
self.paused = False
self._globals_update()
# }}}
def _queue_add(self, filename, name): # {{{
name = os.path.splitext(os.path.split(name)[1])[0]
origname = name
i = 0
while name == '' or name in self.jobqueue:
name = '%s-%d' % (origname, i)
i += 1
infilename = filename.encode('utf-8', 'replace')
outfiledir = fhs.write_spool(os.path.join(self.uuid, 'gcode'), dir = True)
if not os.path.isdir(outfiledir):
os.makedirs(outfiledir)
outfilename = os.path.join(outfiledir, name + os.path.extsep + 'bin').encode('utf-8', 'replace')
self._broadcast(None, 'blocked', 'Parsing g-code')
self._send_packet(struct.pack('=BH', protocol.command['PARSE_GCODE'], len(infilename)) + infilename + outfilename)
self._get_reply()
self._refresh_queue()
self._broadcast(None, 'blocked', None)
# }}}
def _audio_add(self, f, name): # {{{
name = os.path.splitext(os.path.split(name)[1])[0]
origname = name
i = 0
while name == '' or name in self.audioqueue:
name = '%s-%d' % (origname, i)
i += 1
try:
wav = wave.open(f)
except:
return 'Unable to open audio file'
rate = wav.getframerate()
channels = wav.getnchannels()
self._broadcast(None, 'blocked', 'Parsing audio')
data = wav.readframes(wav.getnframes())
# Data is 16 bit signed ints per channel, but it is read as bytes. First convert it to 16 bit numbers.
data = [(h << 8) + l if h < 128 else(h << 8) + l -(1 << 16) for l, h in zip(data[::2 * channels], data[1::2 * channels])]
bit = 0
byte = 0
with fhs.write_spool(os.path.join(self.uuid, 'audio', name + os.path.extsep + 'bin'), text = False) as dst:
dst.write(struct.pack('@d', rate))
for t, sample in enumerate(data):
if sample > 0:
byte |= 1 << bit
bit += 1
if bit >= 8:
dst.write(bytes((byte,)))
byte = 0
bit = 0
self.audioqueue[os.path.splitext(name)[0]] = wav.getnframes()
self._broadcast(None, 'blocked', '')
self._broadcast(None, 'audioqueue', list(self.audioqueue.keys()))
return ''
# }}}
def _do_queue(self): # {{{
#log('queue %s' % repr((self.queue_pos, len(self.queue), self.resuming, self.wait)))
if self.paused and not self.resuming and len(self.queue) == 0:
#log('queue is empty')
return
while not self.wait and (self.queue_pos < len(self.queue) or self.resuming):
#log('queue not empty %s' % repr((self.queue_pos, len(self.queue), self.resuming, self.wait)))
if self.queue_pos >= len(self.queue):
self._unpause()
#log('unpaused, %d %d' % (self.queue_pos, len(self.queue)))
if self.queue_pos >= len(self.queue):
break
axes, f0, f1, v0, v1, probe, single, rel = self.queue[self.queue_pos]
#log('queueing %s' % repr((axes, f0, f1, probe)))
self.queue_pos += 1
# Turn sequences into a dict.
if isinstance(axes, (list, tuple)):
adict = {}
for s, data in enumerate(axes):
adict[s] = data
axes = adict
# Make sure the keys are ints.
adict = {}
#log('axes: %s' % repr(axes))
for k in axes:
adict[int(k)] = axes[k]
axes = adict
a = {}
a0 = 0
for i, sp in enumerate(self.spaces):
# Only handle spaces that are specified.
if i not in axes or axes[i] is None:
a0 += len(sp.axis)
continue
# Handle sequences.
if isinstance(axes[i], (list, tuple)):
for ij, axis in enumerate(axes[i]):
if ij >= len(sp.axis):
log('ignoring nonexistent axis %d %d' % (i, ij))
continue
if axis is not None and not math.isnan(axis):
if i == 1 and ij != self.current_extruder:
#log('setting current extruder to %d' % ij)
self.current_extruder = ij
self._write_globals()
if rel:
axis += sp.get_current_pos(ij)
# Limit values for axis.
if axis > sp.axis[ij]['max'] - (0 if i != 0 or ij != 2 else self.zoffset):
log('limiting %d %d to %f because it exceeds max' % (i, ij, axis))
axis = sp.axis[ij]['max'] - (0 if i != 0 or ij != 2 else self.zoffset)
if axis < sp.axis[ij]['min'] - (0 if i != 0 or ij != 2 else self.zoffset):
log('limiting %d %d to %f because it exceeds min' % (i, ij, axis))
axis = sp.axis[ij]['min'] - (0 if i != 0 or ij != 2 else self.zoffset)
a[a0 + ij] = axis
else:
for j, axis in tuple(axes[i].items()):
ij = int(j)
if ij >= len(sp.axis):
log('ignoring nonexistent axis %d %d' % (i, ij))
continue
if axis is not None and not math.isnan(axis):
if i == 1 and ij != self.current_extruder:
log('Setting current extruder to %d' % ij)
self.current_extruder = ij
self._write_globals(len(self.temps), len(self.gpios))
if rel:
axis += sp.get_current_pos(ij)
# Limit values for axis.
if axis > sp.axis[ij]['max'] - (0 if i != 0 or ij != 2 else self.zoffset):
log('limiting %d %d to %f because it exceeds max' % (i, ij, axis))
axis = sp.axis[ij]['max'] - (0 if i != 0 or ij != 2 else self.zoffset)
if axis < sp.axis[ij]['min'] - (0 if i != 0 or ij != 2 else self.zoffset):
log('limiting %d %d to %f because it exceeds min' % (i, ij, axis))
axis = sp.axis[ij]['min'] - (0 if i != 0 or ij != 2 else self.zoffset)
log('new value: %f' % axis)
a[a0 + ij] = axis
a0 += len(sp.axis)
targets = [0] * (((2 + a0 - 1) >> 3) + 1)
axes = a
args = b''
# Set defaults for feedrates.
if v0 is not None:
assert f0 is None
f0 = -v0
elif f0 is None:
f0 = float('inf')
if v1 is not None:
assert f1 is None
f1 = -v1
elif f1 is None:
f1 = f0
assert f0 != 0 or f1 != 0
# If feedrates are equal to firmware defaults, don't send them.
if f0 != float('inf'):
targets[0] |= 1 << 0
args += struct.pack('=d', f0)
if f1 != f0:
targets[0] |= 1 << 1
args += struct.pack('=d', f1)
a = list(axes.keys())
a.sort()
#log('f0: %f f1: %f' %(f0, f1))
for axis in a:
if math.isnan(axes[axis]):
continue
targets[(axis + 2) >> 3] |= 1 << ((axis + 2) & 0x7)
args += struct.pack('=d', axes[axis])
#log('axis %d: %f' %(axis, axes[axis]))
if probe:
p = bytes((protocol.command['PROBE'],))
elif single:
p = bytes((protocol.command['SINGLE'],))
else:
p = bytes((protocol.command['LINE'],))
self.movewait += 1
#log('movewait +1 -> %d' % self.movewait)
#log('queueing %s' % repr((axes, f0, f1, self.flushing)))
self._send_packet(p + bytes(targets) + args, move = True)
if self.flushing is None:
self.flushing = False
#log('queue done %s' % repr((self.queue_pos, len(self.queue), self.resuming, self.wait)))
# }}}
def _do_home(self, done = None): # {{{
#log('do_home: %s %s' % (self.home_phase, done))
# 0: Prepare for next order.
# 1: Move to limits. (enter from loop after 2).
# 2: Finish moving to limits; loop home_order; move slowly away from switch.
# 3: Set current position; move delta and followers.
# 4: Move within limits.
# 5: Return.
#log('home %s %s' % (self.home_phase, repr(self.home_target)))
#traceback.print_stack()
home_v = 50 / self.feedrate
def mktarget():
ret = {}
for s, m in self.home_target:
if s not in ret:
ret[s] = {}
ret[s][m] = self.home_target[(s, m)]
return ret
if self.home_phase is None:
#log('_do_home ignored because home_phase is None')
return
if self.home_phase == 0:
if done is not None:
# Continuing call received after homing was aborted; ignore.
return
# Initial call; start homing.
self.home_phase = 1
# If it is currently moving, doing the things below without pausing causes stall responses.
self.pause(True, False)
self.sleep(False)
self.home_limits = [(a['min'], a['max']) for a in self.spaces[0].axis]
for a, ax in enumerate(self.spaces[0].axis):
self.expert_set_axis((0, a), min = float('-inf'), max = float('inf'))
self.home_orig_type = self.spaces[0].type
self.expert_set_space(0, type = TYPE_CARTESIAN)
n = set()
for s in self.spaces:
for m in s.motor:
if self._pin_valid(m['limit_min_pin']) or self._pin_valid(m['limit_max_pin']):
n.add(m['home_order'])
if len(n) == 0:
self.home_phase = 4
else:
self.home_order = min(n)
# Fall through.
if self.home_phase == 1:
# Move to limit.
self.home_phase = 2
self.home_motors = []
for s, sp in enumerate(self.spaces):
for i, m in enumerate(sp.motor):
if (self._pin_valid(m['limit_min_pin']) or self._pin_valid(m['limit_max_pin'])) and m['home_order'] == self.home_order:
self.home_motors.append((s, i, sp.axis[i], m))
self.limits[s].clear()
self.home_target = {}
dist = 1000 #TODO: use better value.
for s, i, a, m in self.home_motors:
self.spaces[s].set_current_pos(i, 0)
if self._pin_valid(m['limit_max_pin']):
self.home_target[(s, i)] = dist - (0 if s != 0 or i != 2 else self.zoffset)
else:
self.home_target[(s, i)] = -dist - (0 if s != 0 or i != 2 else self.zoffset)
if len(self.home_target) > 0:
self.home_cb[0] = [(s, k) for s, k in self.home_target.keys()]
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("home phase %d target %s" % (self.home_phase, self.home_target))
self.line(mktarget(), f0 = home_v / dist, force = True, single = True)
return
# Fall through.
if self.home_phase == 2:
# Continue moving to find limit switch.
found_limits = False
for s, sp in enumerate(self.spaces):
for a in self.limits[s].keys():
if (s, a) in self.home_target:
#log('found limit %d %d' % (s, a))
self.home_target.pop((s, a))
found_limits = True
# Make sure no attempt is made to move through the limit switch (not even by rounding errors).
sp.set_current_pos(a, sp.get_current_pos(a))
# Repeat until move is done, or all limits are hit.
if (not done or found_limits) and len(self.home_target) > 0:
self.home_cb[0] = list(self.home_target.keys())
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("0 t %s" % (self.home_target))
k = tuple(self.home_target.keys())[0]
dist = abs(self.home_target[k] - self.spaces[k[0]].get_current_pos(k[1]))
if dist > 0:
#log("home phase %d target %s" % (self.home_phase, self.home_target))
self.line(mktarget(), f0 = home_v / dist, force = True, single = True)
return
# Fall through.
if len(self.home_target) > 0:
log('Warning: not all limits were found during homing')
n = set()
for s in self.spaces:
for m in s.motor:
if (self._pin_valid(m['limit_min_pin']) or self._pin_valid(m['limit_max_pin'])) and m['home_order'] > self.home_order:
n.add(m['home_order'])
if len(n) > 0:
self.home_phase = 1
self.home_order = min(n)
return self._do_home()
# Move away slowly.
data = b''
num = 0
for s, spc in enumerate(self.spaces):
for m in spc.motor:
if self._pin_valid(m['limit_max_pin']):
data += b'\xff'
num += 1
elif self._pin_valid(m['limit_min_pin']):
data += b'\x01'
num += 1
else:
data += b'\x00'
self.home_phase = 3
if num > 0:
dprint('homing', data)
self._send_packet(bytes((protocol.command['HOME'],)) + data)
return
# Fall through.
if self.home_phase == 3:
# Move followers and delta into alignment.
self.home_return = []
for s, sp in enumerate(self.spaces):
self.home_return.append([])
for i, m in enumerate(sp.motor):
if i in self.limits[s]:
if not math.isnan(m['home_pos']):
#log('set %d %d %f' % (s, i, m['home_pos']))
self.home_return[-1].append(m['home_pos'] - sp.get_current_pos(i))
sp.set_current_pos(i, m['home_pos'])
else:
#log('limited zeroset %d %d' % (s, i))
self.home_return[-1].append(-sp.get_current_pos(i))
sp.set_current_pos(i, 0)
else:
if (self._pin_valid(m['limit_min_pin']) or self._pin_valid(m['limit_max_pin'])) and not math.isnan(m['home_pos']):
#log('defset %d %d %f' % (s, i, m['home_pos']))
self.home_return[-1].append(m['home_pos'] - sp.get_current_pos(i))
sp.set_current_pos(i, m['home_pos'])
else:
#log('unlimited zeroset %d %d' % (s, i))
self.home_return[-1].append(-sp.get_current_pos(i))
sp.set_current_pos(i, 0)
# Pre-insert delta axes as followers to align.
groups = ([], [], []) # min limits; max limits; just move.
if self.home_orig_type == TYPE_DELTA:
groups[1].append([])
for i, m in enumerate(self.spaces[0].motor):
groups[1][-1].append((0, i))
# Align followers.
for i, m in enumerate(self.spaces[2].motor):
fs = self.spaces[2].follower[i]['space']
fm = self.spaces[2].follower[i]['motor']
# Use 2, not len(self.spaces), because following followers is not supported.
if not 0 <= fs < 2 or not 0 <= fm < len(self.spaces[fs].motor):
continue
if self._pin_valid(m['limit_max_pin']):
if not self._pin_valid(self.spaces[fs].motor[fm]['limit_max_pin']) and self._pin_valid(self.spaces[fs].motor[fm]['limit_min_pin']):
# Opposite limit pin: don't compare values.
groups[2].append((2, i))
continue
for g in groups[1]:
if (fs, fm) in g:
g.append((2, i))
break
else:
groups[1].append([(2, i), (fs, fm)])
elif self._pin_valid(m['limit_min_pin']):
if self._pin_valid(self.spaces[fs].motor[fm]['limit_max_pin']):
# Opposite limit pin: don't compare values.
groups[2].append((2, i))
continue
for g in groups[0]:
if (fs, fm) in g:
g.append((2, i))
break
else:
groups[0].append([(2, i), (fs, fm)])
self.home_target = {}
for g in groups[0]:
target = max(g, key = lambda x: self.spaces[x[0]].motor[x[1]]['home_pos'])
target = self.spaces[target[0]].motor[target[1]]['home_pos']
for s, m in g:
if target != self.spaces[s].motor[m]['home_pos']:
offset = (0 if s != 0 or m != 2 else self.zoffset)
self.home_target[(s, m)] = target - offset
for g in groups[1]:
target = min(g, key = lambda x: self.spaces[x[0]].motor[x[1]]['home_pos'])
target = self.spaces[target[0]].motor[target[1]]['home_pos']
for s, m in g:
if target != self.spaces[s].motor[m]['home_pos']:
offset = (0 if s != 0 or m != 2 else self.zoffset)
self.home_target[(s, m)] = target - offset
for s, m in groups[2]:
fs = self.spaces[s].follower[m]['space']
fm = self.spaces[s].follower[m]['motor']
if self.spaces[fs].motor[fm]['home_pos'] != self.spaces[s].motor[m]['home_pos']:
offset = (0 if s != 0 or m != 2 else self.zoffset)
self.home_target[(s, m)] = self.spaces[fs].motor[fm]['home_pos'] - offset
self.home_phase = 4
if len(self.home_target) > 0:
self.home_cb[0] = False
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("home phase %d target %s" % (self.home_phase, self.home_target))
self.line(mktarget(), force = True, single = True)
return
# Fall through.
if self.home_phase == 4:
# Reset space type and move to pos2.
self.expert_set_space(0, type = self.home_orig_type)
for a, ax in enumerate(self.spaces[0].axis):
self.expert_set_axis((0, a), min = self.home_limits[a][0], max = self.home_limits[a][1])
target = {}
for s, sp in enumerate(self.spaces[:2]):
for i, a in enumerate(sp.axis):
if not math.isnan(a['home_pos2']):
offset = (0 if s != 0 or i != 2 else self.zoffset)
if s not in target:
target[s] = {}
target[s][i] = a['home_pos2'] - offset
self.home_phase = 5
if len(target) > 0:
self.home_cb[0] = False
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("home phase %d target %s" % (self.home_phase, target))
self.line(target, force = True)
return
# Fall through.
if self.home_phase == 5:
# Move within bounds.
target = {}
for s, sp in enumerate(self.spaces[:2]):
for i, a in enumerate(sp.axis):
current = sp.get_current_pos(i)
offset = (0 if s != 0 or i != 2 else self.zoffset)
if current > a['max'] - offset:
if s not in target:
target[s] = {}
target[s][i] = a['max'] - offset
elif current < a['min'] - offset:
if s not in target:
target[s] = {}
target[s][i] = a['min'] - offset
self.home_phase = 6
if len(target) > 0:
self.home_cb[0] = False
if self.home_cb not in self.movecb:
self.movecb.append(self.home_cb)
#log("home phase %d target %s" % (self.home_phase, target))
self.line(target, force = True)
#log('movecb: ' + repr(self.movecb))
return
# Fall through.
if self.home_phase == 6:
self.home_phase = None
self.position_valid = True
if self.home_id is not None:
self._send(self.home_id, 'return', self.home_return)
self.home_return = None
if self.home_done_cb is not None:
call_queue.append((self.home_done_cb, []))
self.home_done_cb = None
return
log('Internal error: invalid home phase')
# }}}
def _handle_one_probe(self, good): # {{{
if good is None:
return
pos = self.get_axis_pos(0)
self._send_packet(struct.pack('=Bddd', protocol.command['ADJUSTPROBE'], pos[0], pos[1], pos[2] + self.zoffset))
self.probe_cb[1] = lambda good: self.request_confirmation("Continue?")[1](False) if good is not None else None
self.movecb.append(self.probe_cb)
self.line([{2: self.probe_safe_dist}], relative = True)
# }}}
def _one_probe(self): # {{{
self.probe_cb[1] = self._handle_one_probe
self.movecb.append(self.probe_cb)
z = self.get_axis_pos(0, 2)
z_low = self.spaces[0].axis[2]['min']
self.line([{2: z_low}], f0 = float(self.probe_speed) / (z - z_low) if z > z_low else float('inf'), probe = True)
# }}}
def _do_probe(self, id, x, y, z, phase = 0, good = True): # {{{
#log('probe %d %s' % (phase, good))
# Map = [[x0, y0, x1, y1], [nx, ny, angle], [[...], [...], ...]]
if good is None:
# This means the probe has been aborted.
#log('abort probe')
self.probing = False
if id is not None:
self._send(id, 'error', 'aborted')
#self._job_done(False, 'Probe aborted')
return
self.probing = True
if not self.position_valid:
self.home(cb = lambda: self._do_probe(id, x, y, z, phase, True), abort = False)[1](None)
return
p = self.probemap
if phase == 0:
if y > p[1][1]:
# Done.
self.probing = False
self._check_probemap()
if id is not None:
self._send(id, 'return', p)
for y, c in enumerate(p[2]):
for x, o in enumerate(c):
log('map %f %f %f' % (p[0][0] + p[0][2] * x / p[1][0], p[0][1] + p[0][3] * y / p[1][1], o))
sys.stderr.write('\n')
return
# Goto x,y
self.probe_cb[1] = lambda good: self._do_probe(id, x, y, z, 1, good)
self.movecb.append(self.probe_cb)
px = p[0][2] + p[0][4] * x / p[1][0]
py = p[0][3] + p[0][5] * y / p[1][1]
log(repr((p, px, py, x, y, self.gcode_angle)))
self.line([[p[0][0] + px * self.gcode_angle[1] - py * self.gcode_angle[0], p[0][1] + py * self.gcode_angle[1] + px * self.gcode_angle[0]]])
elif phase == 1:
# Probe
self.probe_cb[1] = lambda good: self._do_probe(id, x, y, z, 2, good)
if self._pin_valid(self.probe_pin):
self.movecb.append(self.probe_cb)
z_low = self.spaces[0].axis[2]['min']
self.line([{2: z_low}], f0 = float(self.probe_speed) / (z - z_low) if z > z_low else float('inf'), probe = True)
else:
#log('confirm probe')
self.request_confirmation('Please move the tool to the surface')[1](False)
else:
# Record result
if good:
log('Warning: probe did not hit anything')
z = self.spaces[0].get_current_pos(2)
p[2][y][x].append(z + self.zoffset)
if len(p[2][y][x]) >= self.num_probes:
p[2][y][x].sort()
trash = self.num_probes // 3
if trash == 0:
p[2][y][x] = sum(p[2][y][x]) / len(p[2][y][x])
else:
p[2][y][x] = sum(p[2][y][x][trash:-trash]) / (len(p[2][y][x]) - 2 * trash)
if y & 1:
x -= 1
if x < 0:
x = 0
y += 1
else:
x += 1
if x > p[1][0]:
x = p[1][0]
y += 1
z += self.probe_safe_dist
self.probe_cb[1] = lambda good: self._do_probe(id, x, y, z, 0, good)
self.movecb.append(self.probe_cb)
# Retract
self.line([{2: z}])
# }}}
def _check_probemap(self): # {{{
'''Check the probemap, and save it if it is valid; discard it otherwise.
@returns: True if the probemap is valid, False otherwise.'''
if not isinstance(self.probemap, (list, tuple)) or len(self.probemap) != 3:
log('probemap check failed: not a list of length 3')
self.probemap = None
self._globals_update()
return False
limits, nums, probes = self.probemap
if not isinstance(limits, (list, tuple)) or not isinstance(nums, (list, tuple)) or len(limits) != 6 or len(nums) != 3:
log('probemap check failed: first lists are not length 6 and 3')
self.probemap = None
self._globals_update()
return False
if not all(isinstance(e, (float, int)) and not math.isnan(e) for e in limits):
log('probemap check failed: limits must be numbers')
self.probemap = None
self._globals_update()
return False
if not all(isinstance(e, t) and not math.isnan(e) for e, t in zip(nums, (int, int, (float, int)))):
log('probemap check failed: nums and angle must be numbers')
self.probemap = None
self._globals_update()
return False
nx, ny, angle = nums
if len(probes) != ny + 1 or not all(isinstance(e, (list, tuple)) and len(e) == nx + 1 for e in probes):
log('probemap check failed: probe map is incorrect size')
self.probemap = None
self._globals_update()
return False
if not all(all(isinstance(e, (float, int)) and not math.isnan(e) for e in f) for f in probes):
log('probemap check failed: probe points must all be numbers')
self.probemap = None
self._globals_update()
return False
with fhs.write_spool(os.path.join(self.uuid, 'probe' + os.extsep + 'bin'), text = False) as probemap_file:
# Map = [[x, y, w, h], [nx, ny], [[...], [...], ...]]
sina, cosa = self.gcode_angle
targetx, targety, x0, y0, w, h = self.probemap[0]
probemap_file.write(struct.pack('@ddddddddLLd', targetx, targety, x0, y0, w, h, sina, cosa, *self.probemap[1]))
for y in range(self.probemap[1][1] + 1):
for x in range(self.probemap[1][0] + 1):
probemap_file.write(struct.pack('@d', self.probemap[2][y][x]))
self._globals_update()
return True
# }}}
def _start_job(self, paused): # {{{
# Set all extruders to 0.
for i, e in enumerate(self.spaces[1].axis):
self.set_axis_pos(1, i, 0)
def cb():
#log('start job %s' % self.job_current)
self._gcode_run(self.job_current, abort = False, paused = paused)
if not self.position_valid:
self.park(cb = cb, abort = False)[1](None)
else:
cb()
self.gcode_id = None
# }}}
def _gcode_run(self, src, abort = True, paused = False): # {{{
if self.parking:
return
self.gcode_angle = math.sin(self.targetangle), math.cos(self.targetangle)
if 0 <= self.bed_id < len(self.temps):
self.btemp = self.temps[self.bed_id].value
else:
self.btemp = float('nan')
if abort:
self._unpause()
self._job_done(False, 'aborted by starting new job')
self.queue_info = None
# Disable all alarms.
for i in range(len(self.temps)):
self.waittemp(i, None, None)
self.paused = paused
self._globals_update()
self.sleep(False)
if len(self.spaces) > 1:
for e in range(len(self.spaces[1].axis)):
self.set_axis_pos(1, e, 0)
filename = fhs.read_spool(os.path.join(self.uuid, 'gcode', src + os.extsep + 'bin'), text = False, opened = False)
self.total_time = self.jobqueue[src][-2:]
self.gcode_fd = os.open(filename, os.O_RDONLY)
self.gcode_map = mmap.mmap(self.gcode_fd, 0, prot = mmap.PROT_READ)
filesize = os.fstat(self.gcode_fd).st_size
bboxsize = 8 * struct.calcsize('=d')
def unpack(format, pos):
return struct.unpack(format, self.gcode_map[pos:pos + struct.calcsize(format)])
num_strings = unpack('=I', filesize - bboxsize - struct.calcsize('=I'))[0]
self.gcode_strings = []
sizes = [unpack('=I', filesize - bboxsize - struct.calcsize('=I') * (num_strings + 1 - x))[0] for x in range(num_strings)]
first_string = filesize - bboxsize - struct.calcsize('=I') * (num_strings + 1) - sum(sizes)
pos = 0
for x in range(num_strings):
self.gcode_strings.append(self.gcode_map[first_string + pos:first_string + pos + sizes[x]].decode('utf-8', 'replace'))
pos += sizes[x]
self.gcode_num_records = first_string / struct.calcsize(record_format)
if self.probemap is None:
encoded_probemap_filename = b''
else:
encoded_probemap_filename = fhs.read_spool(os.path.join(self.uuid, 'probe' + os.extsep + 'bin'), text = False, opened = False).encode('utf-8')
self.gcode_file = True
self._globals_update()
self._send_packet(struct.pack('=BBddBB', protocol.command['RUN_FILE'], 1 if not paused and self.confirmer is None else 0, self.gcode_angle[0], self.gcode_angle[1], 0xff, len(encoded_probemap_filename)) + filename.encode('utf-8') + encoded_probemap_filename)
# }}}
def _gcode_parse(self, src, name): # {{{
assert len(self.spaces) > 0
self._broadcast(None, 'blocked', 'Parsing g-code')
errors = []
mode = None
message = None
bbox = [None] * 6
bbox_last = [None] * 6
strings = ['']
unit = 1.
arc_normal = (0, 0, 1)
rel = False
erel = False
pos = [[float('nan') for a in range(6)], [0., 0.], float('inf')]
time_dist = [0., 0.]
pending = []
arc = [] # center, r, diff, angle_start, angle_diff
tool_changed = False
def add_timedist(type, nums):
if type == protocol.parsed['LINE']:
if nums[-2] == float('inf'):
extra = sum((nums[2 * i + 1] - nums[2 * i + 2]) ** 2 for i in range(3)) ** .5
if not math.isnan(extra):
time_dist[1] += extra
else:
extra = 2 / (nums[-2] + nums[-1])
if not math.isnan(extra):
time_dist[0] += extra
elif type == protocol.parsed['ARC']:
pass # TODO: add time+dist.
elif type == protocol.parsed['WAIT']:
time_dist[0] += nums[1]
return nums + time_dist
with fhs.write_spool(os.path.join(self.uuid, 'gcode', os.path.splitext(name)[0] + os.path.extsep + 'bin'), text = False) as dst:
epsilon = .5 # TODO: check if this should be configurable
aepsilon = math.radians(36) # TODO: check if this should be configurable
rlimit = 500 # TODO: check if this should be configurable
def center(a, b, c):
'''Given 3 points, determine center, radius, angles of points on circle, deviation of polygon from circle.'''
try:
x0, y0, z0 = a
x1, y1, z1 = b
x2, y2, z2 = c
xc = ((y0 - y1) * (y0 ** 2 - y2 ** 2 + x0 ** 2 - x2 ** 2) - (y0 - y2) * (x0 ** 2 - x1 ** 2 + y0 ** 2 - y1 ** 2)) / (2 * (-x0 * y1 - x2 * y0 + x2 * y1 + x1 * y0 + x0 * y2 - x1 * y2))
yc = ((x0 - x1) * (x0 ** 2 - x2 ** 2 + y0 ** 2 - y2 ** 2) - (x0 - x2) * (y0 ** 2 - y1 ** 2 + x0 ** 2 - x1 ** 2)) / (2 * (-y0 * x1 - y2 * x0 + y2 * x1 + y1 * x0 + y0 * x2 - y1 * x2))
r = ((xc - x0) ** 2 + (yc - y0) ** 2) ** .5
except ZeroDivisionError:
#log('div by 0: %s' % repr((a, b, c)))
return (None, None, None, float('inf'))
angles = []
ref = math.atan2(b[1] - yc, b[0] - xc)
for p in a, b, c:
angle = math.atan2(p[1] - yc, p[0] - xc)
angles.append((angle - ref + math.pi) % (2 * math.pi) + ref - math.pi)
mid = [(p2 + p1) / 2 for p1, p2 in zip(a, c)]
amid = (angles[0] + angles[2]) / 2
cmid = [math.cos(amid) * r + xc, math.sin(amid) * r + yc]
#log('for diff center (%f %f) mids %s %s amid %f angles %s' % (xc, yc, mid, cmid, amid, angles))
diff = sum([(p2 - p1) ** 2 for p1, p2 in zip(mid, cmid)])
#log('center returns %s' % repr(((xc, yc, z0), r, angles, diff)))
return ((xc, yc, z0), r, angles, diff)
def add_record(type, nums = None, force = False):
if nums is None:
nums = []
if isinstance(nums, dict):
nums = [nums['T'], nums['X'], nums['Y'], nums['Z'], nums['E'], nums['f'], nums['F']]
nums += [0] * (7 - len(nums))
if not force and type == protocol.parsed['LINE']:
# Update bounding box.
for i in range(3):
value = nums[i + 1]
if math.isnan(value):
continue
if bbox[2 * i] is None or value < bbox[2 * i]:
#log('new min bbox %f: %f from %f' % (i, value / 25.4, float('nan' if bbox[2 * i] is None else bbox[2 * i] / 25.4)))
bbox[2 * i] = value
if bbox[2 * i + 1] is None or value > bbox[2 * i + 1]:
#log('new max bbox %f: %f from %f' % (i, value / 25.4, float('nan' if bbox[2 * i + 1] is None else bbox[2 * i + 1] / 25.4)))
bbox[2 * i + 1] = value
# Analyze this move in combination with pending moves.
if len(pending) == 0:
pending.append([0, pos[0][0], pos[0][1], pos[0][2], pos[1][nums[0]], pos[2], pos[2]])
pending.append(nums)
if len(pending) == 2:
if not config['arc'] or pending[0][3] != pending[1][3]:
#log('non equal z')
flush_pending()
return
return
if len(pending) == 3:
# If the points are not on a circle with equal angles, or the angle is too large, or the radius is too large, push pending[1] through to output.
# Otherwise, record settings.
#log('center for arc start')
arc_ctr, arc_r, angles, arc_diff = center(pending[0][1:4], pending[1][1:4], pending[2][1:4])
if arc_diff > epsilon or abs(angles[1] - angles[0] - angles[2] + angles[1]) > aepsilon or arc_r > rlimit:
#log('not arc: %s' % repr((arc_ctr, arc_r, angles, arc_diff)))
dst.write(struct.pack('=Bl' + 'd' * 8, protocol.parsed['LINE'], *add_timedist(type, pending[1])))
pending.pop(0)
return
arc[:] = [arc_ctr, arc_r, arc_diff, angles[0], (angles[2] - angles[0]) / 2]
return
current_angle = arc[4] * (len(pending) - 1)
a = arc[3] + current_angle
p = [arc[0][0] + math.cos(a) * arc[1], arc[0][1] + math.sin(a) * arc[1]]
# If new point doesn't fit on circle, push pending as circle to output.
# It should allow up to 360, but be safe and break those in two; also makes generating svgs easier.
if current_angle >= math.radians(180):
#log('flush: more than 180 degrees')
flush_pending()
elif (p[0] - pending[-1][1]) ** 2 + (p[1] - pending[-1][2]) ** 2 > epsilon ** 2:
#log('flush: point too far from prediction (%s %s)' % (p, pending[-1][1:3]))
flush_pending()
elif pending[0][3] != pending[-1][3]:
#log('flush: non equal z')
flush_pending()
return
#if not force:
#log('non-line %s' % type)
flush_pending()
#log('force or other ' + repr((type, nums, add_timedist(type, nums))))
dst.write(struct.pack('=Bl' + 'd' * 8, type, *add_timedist(type, nums)))
def flush_pending():
if len(pending) >= 6:
#log('arc')
flush_arc()
#else:
#log('no arc %d' % len(pending))
tmp = pending[1:]
pending[:] = []
for p in tmp:
add_record(protocol.parsed['LINE'], p, True)
def flush_arc():
start = pending[0]
end = pending[-2]
tmp = pending[-1]
#log('center for flush')
arc_ctr, arc_r, angles, arc_diff = center(start[1:4], pending[len(pending) // 2][1:4], end[1:4])
if arc_diff < 2 * epsilon or arc_ctr is None:
#log('refuse arc: %s' % repr((arc_ctr, arc_diff, epsilon, arc_r, angles)))
# This is really a line, or it is not detected as an arc; don't turn it into an arc.
return
pending[:] = []
add_record(protocol.parsed['PRE_ARC'], {'X': arc_ctr[0], 'Y': arc_ctr[1], 'Z': start[3], 'E': 0, 'f': 0, 'F': 1 if arc[4] > 0 else -1, 'T': 0}, True)
add_record(protocol.parsed['ARC'], {'X': end[1], 'Y': end[2], 'Z': end[3], 'E': pos[1][current_extruder], 'f': -pos[2], 'F': -pos[2], 'T': current_extruder}, True)
pending.append(end)
pending.append(tmp)
def add_string(string):
if string is None:
return 0
if string not in strings:
strings.append(string)
return strings.index(string)
current_extruder = 0
for lineno, origline in enumerate(src):
line = origline.strip()
origline = line
#log('parsing %s' % line)
# Get rid of line numbers and checksums.
if line.startswith('N'):
r = re.match(r'N(\d+)\s+(.*?)\*\d+\s*$', line)
if not r:
r = re.match(r'N(\d+)\s+(.*?)\s*$', line)
if not r:
# Invalid line; ignore it.
errors.append('%d:ignoring invalid gcode: %s' % (lineno, origline))
continue
lineno = int(r.group(1))
line = r.group(2)
else:
lineno += 1
comment = ''
while '(' in line:
b = line.index('(')
e = line.find(')', b)
if e < 0:
errors.append('%d:ignoring line with unterminated comment: %s' % (lineno, origline))
continue
comment = line[b + 1:e].strip()
line = line[:b] + ' ' + line[e + 1:].strip()
if ';' in line:
p = line.index(';')
comment = line[p + 1:].strip()
line = line[:p].strip()
if comment.upper().startswith('MSG,'):
message = comment[4:].strip()
elif comment.startswith('SYSTEM:'):
if not re.match(self.allow_system, comment[7:]):
errors.append('%d:Warning: system command %s is forbidden and will not be run' % (lineno, comment[7:]))
add_record(protocol.parsed['SYSTEM'], [add_string(comment[7:])])
continue
if line == '':
continue
line = line.split()
while len(line) > 0:
if mode is None or line[0][0] in 'GMTDS':
if len(line[0]) < 2:
errors.append('%d:ignoring unparsable line: %s' % (lineno, origline))
break
try:
cmd = line[0][0], int(line[0][1:])
except:
errors.append('%d:parse error in line: %s' % (lineno, origline))
traceback.print_exc()
break
line = line[1:]
else:
cmd = mode
args = {}
success = True
for i, a in enumerate(line):
if a[0] in 'GMD':
line = line[i:]
break
try:
args[a[0]] = float(a[1:])
except:
errors.append('%d:ignoring invalid gcode: %s' % (lineno, origline))
success = False
break
else:
line = []
if not success:
break
if cmd == ('M', 2):
# Program end.
break
elif cmd[0] == 'T':
target = cmd[1]
if target >= len(pos[1]):
pos[1].extend([0.] * (target - len(pos[1]) + 1))
current_extruder = target
# Force update of extruder.
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': pos[0][2], 'E': pos[1][current_extruder], 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
continue
elif cmd == ('G', 17):
arc_normal = (0, 0, 1)
continue
elif cmd == ('G', 18):
arc_normal = (0, 1, 0)
continue
elif cmd == ('G', 19):
arc_normal = (1, 0, 0)
continue
elif cmd == ('G', 20):
unit = 25.4
continue
elif cmd == ('G', 21):
unit = 1.
continue
elif cmd == ('G', 90):
rel = False
erel = False
continue
elif cmd == ('G', 91):
rel = True
erel = True
continue
elif cmd == ('M', 82):
erel = False
continue
elif cmd == ('M', 83):
erel = True
continue
elif cmd == ('M', 84):
for e in range(len(pos[1])):
pos[1][e] = 0.
elif cmd == ('G', 92):
if 'E' not in args:
continue
args['E'] *= unit
pos[1][current_extruder] = args['E']
elif cmd[0] == 'M' and cmd[1] in (104, 109, 116):
args['E'] = int(args['T']) if 'T' in args else current_extruder
if cmd == ('M', 140):
cmd = ('M', 104)
args['E'] = -1
elif cmd == ('M', 190):
cmd = ('M', 109)
args['E'] = -1
elif cmd == ('M', 6):
# Tool change: park and remember to probe.
cmd = ('G', 28)
tool_changed = True
if cmd == ('G', 28):
nums = [current_extruder]
if len(self.spaces) > 1 and len(self.spaces[1].axis) > current_extruder:
pos[1][current_extruder] = 0.
add_record(protocol.parsed['PARK'])
for a in range(len(pos[0])):
if len(self.spaces[0].axis) > a and not math.isnan(self.spaces[0].axis[a]['park']):
pos[0][a] = float('nan')
elif cmd[0] == 'G' and cmd[1] in (0, 1, 81):
if cmd[1] != 0:
mode = cmd
components = {'X': None, 'Y': None, 'Z': None, 'A': None, 'B': None, 'C': None, 'E': None, 'F': None, 'R': None}
for c in args:
if c not in components:
errors.append('%d:invalid component %s' % (lineno, c))
continue
assert components[c] is None
components[c] = args[c]
f0 = pos[2]
if components['F'] is not None:
pos[2] = components['F'] * unit / 60
oldpos = pos[0][:], pos[1][:]
if cmd[1] != 81:
if components['E'] is not None:
if erel:
estep = components['E'] * unit
else:
estep = components['E'] * unit - pos[1][current_extruder]
pos[1][current_extruder] += estep
else:
estep = 0
else:
estep = 0
if components['R'] is not None:
if rel:
r = pos[0][2] + components['R'] * unit
else:
r = components['R'] * unit
for axis in range(6):
value = components['XYZABC'[axis]]
if value is not None:
if rel:
pos[0][axis] += value * unit
else:
pos[0][axis] = value * unit
if axis == 2:
z = pos[0][2]
if cmd[1] != 81:
dist = sum([0] + [(pos[0][x] - oldpos[0][x]) ** 2 for x in range(3) if not math.isnan(pos[0][x] - oldpos[0][x])]) ** .5
if dist > 0:
#if f0 is None:
# f0 = pos[1][current_extruder]
f0 = pos[2] # Always use new value.
if f0 == 0:
f0 = float('inf')
if math.isnan(dist):
dist = 0
if all((math.isnan(pos[0][i]) and math.isnan(oldpos[0][i])) or pos[0][i] == oldpos[0][i] for i in range(3, 6)):
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': pos[0][2], 'E': pos[1][current_extruder], 'f': f0 / dist if dist > 0 and cmd[1] == 1 else float('inf'), 'F': pos[2] / dist if dist > 0 and cmd[1] == 1 else float('inf'), 'T': current_extruder})
else:
add_record(protocol.parsed['PRE_LINE'], {'X': pos[0][3], 'Y': pos[0][4], 'Z': pos[0][5], 'E': float('NaN'), 'f': float('NaN'), 'F': float('NaN'), 'T': current_extruder})
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': pos[0][2], 'E': pos[1][current_extruder], 'f': f0 / dist if dist > 0 and cmd[1] == 1 else float('inf'), 'F': pos[2] / dist if dist > 0 and cmd[1] == 1 else float('inf'), 'T': current_extruder})
else:
# If old pos is unknown, use safe distance.
if math.isnan(oldpos[0][2]):
oldpos[0][2] = r
# Drill cycle.
# Only support OLD_Z (G90) retract mode; don't support repeats(L).
# goto x,y
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': oldpos[0][2], 'E': 0, 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
# goto r
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': r, 'E': 0, 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
# goto z; this is always straight down, because the move before and after it are also vertical.
if z != r:
f0 = pos[2] / abs(z - r)
if math.isnan(f0):
f0 = float('inf')
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': z, 'E': 0, 'f': f0, 'F': f0, 'T': current_extruder})
# retract; this is always straight up, because the move before and after it are also non-horizontal.
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': oldpos[0][2], 'E': 0, 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
# empty move; this makes sure the previous move is entirely vertical.
add_record(protocol.parsed['LINE'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': oldpos[0][2], 'E': 0, 'f': float('inf'), 'F': float('inf'), 'T': current_extruder})
# Set up current z position so next G81 will work.
pos[0][2] = oldpos[0][2]
elif cmd[0] == 'G' and cmd[1] in (2, 3):
# Arc.
mode = cmd
components = {'X': None, 'Y': None, 'Z': None, 'E': None, 'F': None, 'I': None, 'J': None, 'K': None}
for c in args:
if c not in components:
errors.append('%d:invalid arc component %s' % (lineno, c))
continue
assert components[c] is None
components[c] = args[c]
f0 = pos[2]
if components['F'] is not None:
pos[2] = components['F'] * unit / 60
oldpos = pos[0][:], pos[1][:]
if components['E'] is not None:
if erel:
estep = components['E'] * unit - pos[1][current_extruder]
else:
estep = components['E'] * unit
pos[1][current_extruder] += estep
else:
estep = 0
center = [None] * 3
for axis in range(3):
value = components[chr(b'X'[0] + axis)]
if value is not None:
if rel:
pos[0][axis] += value * unit
else:
pos[0][axis] = value * unit
if axis == 2:
z = pos[0][2]
value = components[chr(b'I'[0] + axis)]
if value is not None:
center[axis] = oldpos[0][axis] + value
else:
center[axis] = oldpos[0][axis]
s = -1 if cmd[1] == 2 else 1
add_record(protocol.parsed['PRE_ARC'], {'X': center[0], 'Y': center[1], 'Z': center[2], 'E': s * arc_normal[0], 'f': s * arc_normal[1], 'F': s * arc_normal[2], 'T': 0})
add_record(protocol.parsed['ARC'], {'X': pos[0][0], 'Y': pos[0][1], 'Z': pos[0][2], 'E': pos[1][current_extruder], 'f': -f0, 'F': -pos[2], 'T': current_extruder})
elif cmd == ('G', 4):
add_record(protocol.parsed['WAIT'], [0, float(args['S']) if 'S' in args else float(args['P']) / 1000 if 'P' in args else 0])
elif cmd == ('G', 92):
add_record(protocol.parsed['SETPOS'], [current_extruder, args['E']])
elif cmd == ('G', 94):
# Set feedrate to units per minute; this is always used, and it shouldn't raise an error.
pass
elif cmd == ('M', 0):
add_record(protocol.parsed['CONFIRM'], [add_string(message), 1 if tool_changed else 0])
tool_changed = False
elif cmd == ('M', 3):
# Spindle on, clockwise.
add_record(protocol.parsed['GPIO'], [-3, 1])
elif cmd == ('M', 4):
# Spindle on, counterclockwise.
add_record(protocol.parsed['GPIO'], [-3, 1])
elif cmd == ('M', 5):
add_record(protocol.parsed['GPIO'], [-3, 0])
elif cmd == ('M', 9):
# Coolant off: ignore.
pass
elif cmd == ('M', 42):
if 'P' in args and 'S' in args:
add_record(protocol.parsed['GPIO'], [int(args['P']), args.get('S')])
else:
errors.append('%d:invalid M42 request (needs P and S)' % lineno)
elif cmd == ('M', 84):
# Don't sleep, but set all extruder positions to 0.
for e in range(len(pos[1])):
add_record(protocol.parsed['SETPOS'], [e, 0])
elif cmd == ('M', 104):
if args['E'] >= len(self.temps):
errors.append('%d:ignoring M104 for invalid temp %d' % (lineno, args['E']))
elif 'S' not in args:
errors.append('%d:ignoring M104 without S' % lineno)
else:
add_record(protocol.parsed['SETTEMP'], [int(args['E']), args['S'] + C0])
elif cmd == ('M', 106):
add_record(protocol.parsed['GPIO'], [-2, 1])
elif cmd == ('M', 107):
add_record(protocol.parsed['GPIO'], [-2, 0])
elif cmd == ('M', 109):
if 'S' in args:
add_record(protocol.parsed['SETTEMP'], [int(args['E']), args['S'] + C0])
add_record(protocol.parsed['WAITTEMP'], [int(args['E'])])
elif cmd == ('M', 116):
add_record(protocol.parsed['WAITTEMP'], [-2])
elif cmd[0] == 'S':
# Spindle speed; not supported, but shouldn't error.
pass
else:
errors.append('%d:invalid gcode command %s' % (lineno, repr((cmd, args))))
message = None
flush_pending()
stringmap = []
size = 0
for s in strings:
us = s.encode('utf-8')
stringmap.append(len(us))
dst.write(us)
size += len(us)
for s in stringmap:
dst.write(struct.pack('=L', s))
ret = bbox
if any(x is None for x in bbox[:4]):
bbox = bbox_last
ret = bbox
if any(x is None for x in bbox[:4]):
bbox = [0] * 6
ret = None
if any(x is None for x in bbox):
for t, b in enumerate(bbox):
if b is None:
bbox[t] = 0;
dst.write(struct.pack('=L' + 'd' * 8, len(strings), *(bbox + time_dist)))
self._broadcast(None, 'blocked', None)
return ret and ret + time_dist, '\n'.join(errors)
# }}}
def _reset_extruders(self, axes): # {{{
for i, sp in enumerate(axes):
for a, pos in enumerate(sp):
# Assume motor[a] corresponds to axis[a] if it exists.
if len(self.spaces[i].motor) > a and not self._pin_valid(self.spaces[i].motor[a]['limit_max_pin']) and not self._pin_valid(self.spaces[i].motor[a]['limit_min_pin']):
self.set_axis_pos(i, a, pos)
# }}}
def _pin_valid(self, pin): # {{{
return (pin & 0x100) != 0
# }}}
def _spi_send(self, data): # {{{
for bits, p in data:
shift = (8 - bits % 8) % 8
if shift > 0:
p = [(p[b] << shift | p[b + 1] >> (8 - shift)) & 0xff for b in range(len(p) - 1)] + [(p[-1] << shift) & 0xff]
self._send_packet(struct.pack('=BB', protocol.command['SPI'], bits) + b''.join(struct.pack('=B', b) for b in p))
# }}}
def admin_connect(self, port, run_id): # {{{
self._send_packet(struct.pack('=B', protocol.command['CONNECT']) + bytes([ord(x) for x in run_id]) + port.encode('utf-8') + b'\0')
# The rest happens in response to the CONNECTED reply.
# }}}
def admin_reconnect(self, port): # {{{
pass
# }}}
# Subclasses. {{{
class Space: # {{{
def __init__(self, machine, id):
self.name = ['position', 'extruders', 'followers'][id]
self.type = [TYPE_CARTESIAN, TYPE_EXTRUDER, TYPE_FOLLOWER][id]
self.machine = machine
self.id = id
self.axis = []
self.motor = []
self.delta = [{'axis_min': 0., 'axis_max': 0., 'rodlength': 0., 'radius': 0.} for t in range(3)]
self.delta_angle = 0
self.polar_max_r = float('inf')
self.extruder = []
self.follower = []
def read(self, data):
axes, motors = data
if self.id == 1:
self.machine.multipliers = (self.machine.multipliers + [1.] * len(axes))[:len(axes)]
if len(axes) > len(self.axis):
def nm(i):
if self.id == 0:
if i < 3:
return chr(ord('x') + i)
elif i < 6:
return chr(ord('a') + i - 3)
else:
return 'Axis %d' % i
elif self.id == 1:
return 'extruder %d' % i
else:
return 'follower %d' % i
self.axis += [{'name': nm(i), 'home_pos2': float('nan')} for i in range(len(self.axis), len(axes))]
else:
self.axis[len(axes):] = []
for a in range(len(axes)):
self.axis[a]['park'], self.axis[a]['park_order'], self.axis[a]['min'], self.axis[a]['max'] = struct.unpack('=dBdd', axes[a])
if len(motors) > len(self.motor):
self.motor += [{} for i in range(len(self.motor), len(motors))]
else:
self.motor[len(motors):] = []
for m in range(len(motors)):
self.motor[m]['step_pin'], self.motor[m]['dir_pin'], self.motor[m]['enable_pin'], self.motor[m]['limit_min_pin'], self.motor[m]['limit_max_pin'], self.motor[m]['steps_per_unit'], self.motor[m]['home_pos'], self.motor[m]['limit_v'], self.motor[m]['limit_a'], self.motor[m]['home_order'] = struct.unpack('=HHHHHddddB', motors[m])
if self.id == 1 and m < len(self.machine.multipliers):
self.motor[m]['steps_per_unit'] /= self.machine.multipliers[m]
def write_info(self, num_axes = None):
data = struct.pack('=B', self.type)
if self.type == TYPE_CARTESIAN:
data += struct.pack('=B', num_axes if num_axes is not None else len(self.axis))
elif self.type == TYPE_DELTA:
for a in range(3):
data += struct.pack('=dddd', self.delta[a]['axis_min'], self.delta[a]['axis_max'], self.delta[a]['rodlength'], self.delta[a]['radius'])
data += struct.pack('=d', self.delta_angle)
elif self.type == TYPE_POLAR:
data += struct.pack('=d', self.polar_max_r)
elif self.type == TYPE_EXTRUDER:
num = num_axes if num_axes is not None else len(self.axis)
data += struct.pack('=B', num)
for a in range(num):
if a < len(self.extruder):
data += struct.pack('=ddd', self.extruder[a]['dx'], self.extruder[a]['dy'], self.extruder[a]['dz'])
else:
data += struct.pack('=ddd', 0, 0, 0)
elif self.type == TYPE_FOLLOWER:
num = num_axes if num_axes is not None else len(self.axis)
data += struct.pack('=B', num)
for a in range(num):
if a < len(self.follower):
data += struct.pack('=BB', self.follower[a]['space'], self.follower[a]['motor'])
else:
data += struct.pack('=BB', 0xff, 0xff)
else:
log('invalid type')
raise AssertionError('invalid space type')
return data
def write_axis(self, axis):
if self.id == 0:
return struct.pack('=dBdd', self.axis[axis]['park'], int(self.axis[axis]['park_order']), self.axis[axis]['min'], self.axis[axis]['max'])
else:
return struct.pack('=dBdd', float('nan'), 0, float('-inf'), float('inf'))
def write_motor(self, motor):
if self.id == 2:
if self.follower[motor]['space'] >= len(self.machine.spaces) or self.follower[motor]['motor'] >= len(self.machine.spaces[self.follower[motor]['space']].motor):
#log('write motor for follower %d with fake base' % motor)
base = {'steps_per_unit': 1, 'limit_v': float('inf'), 'limit_a': float('inf')}
else:
#log('write motor for follower %d with base %s' % (motor, self.machine.spaces[0].motor))
base = self.machine.spaces[self.follower[motor]['space']].motor[self.follower[motor]['motor']]
else:
base = self.motor[motor]
return struct.pack('=HHHHHddddB', self.motor[motor]['step_pin'], self.motor[motor]['dir_pin'], self.motor[motor]['enable_pin'], self.motor[motor]['limit_min_pin'], self.motor[motor]['limit_max_pin'], base['steps_per_unit'] * (1. if self.id != 1 or motor >= len(self.machine.multipliers) else self.machine.multipliers[motor]), self.motor[motor]['home_pos'], base['limit_v'], base['limit_a'], int(self.motor[motor]['home_order']))
def set_current_pos(self, axis, pos):
#log('setting pos of %d %d to %f' % (self.id, axis, pos))
self.machine._send_packet(struct.pack('=BBBd', protocol.command['SETPOS'], self.id, axis, pos))
def get_current_pos(self, axis):
#log('getting current pos %d %d' % (self.id, axis))
self.machine._send_packet(struct.pack('=BBB', protocol.command['GETPOS'], self.id, axis))
cmd, s, m, f, e, data = self.machine._get_reply()
assert cmd == protocol.rcommand['POS']
#log('get current pos %d %d: %f' % (self.id, axis, f))
return f
def motor_name(self, i):
if self.type in (TYPE_CARTESIAN, TYPE_EXTRUDER, TYPE_FOLLOWER):
return self.axis[i]['name']
elif self.type == TYPE_DELTA:
return chr(ord('u') + i)
elif self.type == TYPE_POLAR:
return ['r', 'θ', 'z'][i]
else:
log('invalid type')
raise AssertionError('invalid space type')
def export(self):
std = [self.name, self.type, [[a['name'], a['park'], a['park_order'], a['min'], a['max'], a['home_pos2']] for a in self.axis], [[self.motor_name(i), m['step_pin'], m['dir_pin'], m['enable_pin'], m['limit_min_pin'], m['limit_max_pin'], m['steps_per_unit'], m['home_pos'], m['limit_v'], m['limit_a'], m['home_order']] for i, m in enumerate(self.motor)], None if self.id != 1 else self.machine.multipliers]
if self.type == TYPE_CARTESIAN:
return std
elif self.type == TYPE_DELTA:
return std + [[[a['axis_min'], a['axis_max'], a['rodlength'], a['radius']] for a in self.delta] + [self.delta_angle]]
elif self.type == TYPE_POLAR:
return std + [self.polar_max_r]
elif self.type == TYPE_EXTRUDER:
return std + [[[a['dx'], a['dy'], a['dz']] for a in self.extruder]]
elif self.type == TYPE_FOLLOWER:
return std + [[[a['space'], a['motor']] for a in self.follower]]
else:
log('invalid type')
raise AssertionError('invalid space type')
def export_settings(self):
# Things to handle specially while homing:
# * self.home_limits = [(a['min'], a['max']) for a in self.spaces[0].axis]
# * self.home_orig_type = self.spaces[0].type
ret = '[space %d]\r\n' % self.id
type = self.type if self.id != 0 or self.machine.home_phase is None else self.machine.home_orig_type
if self.id == 0:
ret += 'type = %d\r\n' % type
if type == TYPE_CARTESIAN:
ret += 'num_axes = %d\r\n' % len(self.axis)
elif type == TYPE_DELTA:
ret += 'delta_angle = %f\r\n' % self.delta_angle
for i in range(3):
ret += '[delta %d %d]\r\n' % (self.id, i)
ret += ''.join(['%s = %f\r\n' % (x, self.delta[i][x]) for x in ('rodlength', 'radius', 'axis_min', 'axis_max')])
elif type == TYPE_POLAR:
ret += 'polar_max_r = %f\r\n' % self.polar_max_r
elif type == TYPE_EXTRUDER:
ret += 'num_axes = %d\r\n' % len(self.axis)
for i in range(len(self.extruder)):
ret += '[extruder %d %d]\r\n' % (self.id, i)
ret += ''.join(['%s = %f\r\n' % (x, self.extruder[i][x]) for x in ('dx', 'dy', 'dz')])
elif type == TYPE_FOLLOWER:
ret += 'num_axes = %d\r\n' % len(self.axis)
for i in range(len(self.follower)):
ret += '[follower %d %d]\r\n' % (self.id, i)
ret += ''.join(['%s = %d\r\n' % (x, self.follower[i][x]) for x in ('space', 'motor')])
else:
log('invalid type')
raise AssertionError('invalid space type')
for i, a in enumerate(self.axis):
ret += '[axis %d %d]\r\n' % (self.id, i)
ret += 'name = %s\r\n' % a['name']
if self.id == 0:
ret += ''.join(['%s = %f\r\n' % (x, a[x]) for x in ('park', 'park_order', 'home_pos2')])
if self.machine.home_phase is None:
ret += ''.join(['%s = %f\r\n' % (x, a[x]) for x in ('min', 'max')])
else:
ret += ''.join(['%s = %f\r\n' % (x, y) for x, y in zip(('min', 'max'), self.machine.home_limits[self.id])])
for i, m in enumerate(self.motor):
ret += '[motor %d %d]\r\n' % (self.id, i)
ret += ''.join(['%s = %s\r\n' % (x, write_pin(m[x])) for x in ('step_pin', 'dir_pin', 'enable_pin')])
if self.id != 1:
ret += ''.join(['%s = %s\r\n' % (x, write_pin(m[x])) for x in ('limit_min_pin', 'limit_max_pin')])
ret += ''.join(['%s = %f\r\n' % (x, m[x]) for x in ('home_pos',)])
ret += ''.join(['%s = %d\r\n' % (x, m[x]) for x in ('home_order',)])
if self.id != 2:
ret += ''.join(['%s = %f\r\n' % (x, m[x]) for x in ('steps_per_unit', 'limit_v', 'limit_a')])
return ret
# }}}
class Temp: # {{{
def __init__(self, id):
self.name = 'temp %d' % id
self.id = id
self.value = float('nan')
def read(self, data):
self.R0, self.R1, logRc, Tc, self.beta, self.heater_pin, self.fan_pin, self.thermistor_pin, fan_temp, self.fan_duty, heater_limit_l, heater_limit_h, fan_limit_l, fan_limit_h, self.hold_time = struct.unpack('=dddddHHHddddddd', data)
try:
self.Rc = math.exp(logRc)
except:
self.Rc = float('nan')
self.Tc = Tc - C0
self.heater_limit_l = heater_limit_l - C0
self.heater_limit_h = heater_limit_h - C0
self.fan_limit_l = fan_limit_l - C0
self.fan_limit_h = fan_limit_h - C0
self.fan_temp = fan_temp - C0
self.fan_pin ^= 0x200
def write(self):
try:
logRc = math.log(self.Rc)
except:
logRc = float('nan')
return struct.pack('=dddddHHHddddddd', self.R0, self.R1, logRc, self.Tc + C0, self.beta, self.heater_pin, self.fan_pin ^ 0x200, self.thermistor_pin, self.fan_temp + C0, self.fan_duty, self.heater_limit_l + C0, self.heater_limit_h + C0, self.fan_limit_l + C0, self.fan_limit_h + C0, self.hold_time)
def export(self):
return [self.name, self.R0, self.R1, self.Rc, self.Tc, self.beta, self.heater_pin, self.fan_pin, self.thermistor_pin, self.fan_temp, self.fan_duty, self.heater_limit_l, self.heater_limit_h, self.fan_limit_l, self.fan_limit_h, self.hold_time, self.value]
def export_settings(self):
ret = '[temp %d]\r\n' % self.id
ret += 'name = %s\r\n' % self.name
ret += ''.join(['%s = %s\r\n' % (x, write_pin(getattr(self, x))) for x in ('heater_pin', 'fan_pin', 'thermistor_pin')])
ret += ''.join(['%s = %f\r\n' % (x, getattr(self, x)) for x in ('fan_temp', 'R0', 'R1', 'Rc', 'Tc', 'beta', 'fan_duty', 'heater_limit_l', 'heater_limit_h', 'fan_limit_l', 'fan_limit_h', 'hold_time')])
return ret
# }}}
class Gpio: # {{{
def __init__(self, id):
self.name = 'gpio %d' % id
self.id = id
self.state = 3
self.reset = 3
self.value = False
self.duty = 1.
def read(self, data):
self.pin, state, self.duty = struct.unpack('=HBd', data)
self.state = state & 0x3
self.reset = (state >> 2) & 0x3
def write(self):
return struct.pack('=HBd', self.pin, self.state | (self.reset << 2), self.duty)
def export(self):
return [self.name, self.pin, self.state, self.reset, self.duty, self.value if self.state >= 2 else self.state == 1]
def export_settings(self):
ret = '[gpio %d]\r\n' % self.id
ret += 'name = %s\r\n' % self.name
ret += 'pin = %s\r\n' % write_pin(self.pin)
ret += 'reset = %d\r\n' % self.reset
ret += 'duty = %f\r\n' % self.duty
return ret
# }}}
# }}}
# }}}
# Useful commands. {{{
def admin_reset_uuid(self): # {{{
uuid = protocol.new_uuid(string = False)
self._send_packet(struct.pack('=B', protocol.command['SET_UUID']) + bytes(uuid))
self.uuid = protocol.new_uuid(uuid = uuid, string = True)
if not self.name:
self.name = self.uuid
return self.uuid
# }}}
def expert_die(self, reason): # {{{
'''Kill this machine, including all files on disk.
'''
log('%s dying as requested by host (%s).' % (self.uuid, reason))
# Clean up spool.
dirname = fhs.write_spool(self.uuid, dir = True, opened = False)
if os.path.isdir(dirname):
try:
shutil.rmtree(dirname, ignore_errors = False)
except:
log('Could not remove %d' % dirname)
# Clean up profiles.
for dirname in fhs.read_data(self.uuid, dir = True, multiple = True, opened = False):
try:
shutil.rmtree(dirname, ignore_errors = False)
except:
log('Could not remove %d' % dirname)
return (WAIT, WAIT)
# }}}
@delayed
def flush(self, id): # {{{
'''Wait for currently scheduled moves to finish.
'''
#log('flush start')
def cb(w):
#log('flush done')
if id is not None:
self._send(id, 'return', w)
self.movecb.append((False, cb))
if self.flushing is not True:
self.line()
#log('end flush preparation')
# }}}
@delayed
def probe(self, id, area, speed = 3.): # {{{
'''Run a probing routine.
This moves over the given area and probes a grid of points less
than max_probe_distance apart.
If the probe pin is valid, it will be used for the probe.
If it is invalid, a confirmation is required for every point.
'''
if area is None:
try:
fhs.remove_spool(os.path.join(self.uuid, 'probe' + os.extsep + 'bin'))
except:
log('Failed to remove probe file.')
traceback.print_exc()
self.probemap = None
self._globals_update()
if id is not None:
self._send(id, 'return', None)
return
if len(self.spaces[0].axis) < 3 or not self.probe_safe_dist > 0:
if id is not None:
self._send(id, 'return', None)
return
log(repr(area))
density = [int(area[t + 4] / self.probe_dist) + 1 for t in range(2)] + [self.targetangle]
self.probemap = [area, density, [[[] for x in range(density[0] + 1)] for y in range(density[1] + 1)]]
self.gcode_angle = math.sin(self.targetangle), math.cos(self.targetangle)
self.probe_speed = speed
self._do_probe(id, 0, 0, self.get_axis_pos(0, 2))
# }}}
def line(self, moves = (), f0 = None, f1 = None, v0 = None, v1 = None, relative = False, probe = False, single = False, force = False): # {{{
'''Move the tool in a straight line.
'''
#log('line %s %s %s %d %d' % (repr(moves), f0, f1, probe))
#log('speed %s' % f0)
#traceback.print_stack()
if not force and self.home_phase is not None and not self.paused:
log('ignoring line during home')
return
self.queue.append((moves, f0, f1, v0, v1, probe, single, relative))
if not self.wait:
self._do_queue()
# }}}
@delayed
def line_cb(self, id, moves = (), f0 = None, f1 = None, v0 = None, v1 = None, relative = False, probe = False, single = False): # {{{
'''Move the tool in a straight line; return when done.
'''
if self.home_phase is not None and not self.paused:
log('ignoring linecb during home')
if id is not None:
self._send(id, 'return', None)
return
self.line(moves, f0, f1, v0, v1, relative, probe, single)
self.wait_for_cb()[1](id)
# }}}
def move_target(self, dx, dy): # {{{
'''Move the target position.
Using this function avoids a round trip to the driver.
'''
self.set_globals(targetx = self.targetx + dx, targety = self.targety + dy)
# }}}
def sleep(self, sleeping = True, update = True, force = False): # {{{
'''Put motors to sleep, or wake them up.
'''
if sleeping:
if self.home_phase is not None or (not force and not self.paused and (self.gcode_map is not None or self.gcode_file)):
return
self.position_valid = False
if update:
self._globals_update()
self._send_packet(struct.pack('=BB', protocol.command['SLEEP'], sleeping))
# }}}
def settemp(self, channel, temp, update = True): # {{{
'''Set target temperature.
'''
channel = int(channel)
self.temps[channel].value = temp
if update:
self._temp_update(channel)
self._send_packet(struct.pack('=BBd', protocol.command['SETTEMP'], channel, temp + C0 if not math.isnan(self.temps[channel].beta) else temp))
if self.gcode_waiting > 0 and any(channel == x[0] for x in self.tempcb):
self.waittemp(channel, temp)
# }}}
def waittemp(self, channel, min, max = None): # {{{
'''Set temperature alarm values.
Note that this function returns immediately; it does not wait
for the temperature to be reached.
'''
channel = int(channel)
if min is None:
min = float('nan')
if max is None:
max = float('nan')
self._send_packet(struct.pack('=BBdd', protocol.command['WAITTEMP'], channel, min + C0 if not math.isnan(self.temps[channel].beta) else min, max + C0 if not math.isnan(self.temps[channel].beta) else max))
# }}}
def readtemp(self, channel): # {{{
'''Read current temperature.
'''
channel = int(channel)
if channel >= len(self.temps):
log('Trying to read invalid temp %d' % channel)
return float('nan')
self._send_packet(struct.pack('=BB', protocol.command['READTEMP'], channel))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['TEMP']
return f - (C0 if not math.isnan(self.temps[channel].beta) else 0)
# }}}
def readpower(self, channel): # {{{
'''Read power recordings.
The return value is a tuple of the time it has been on since
this function was last called, and the current time, both in
milliseconds.
To use, this function must be called at least twice; the first
call only the time is recorded. The second call the new time
is recorded and the elapsed time is computed and used in
combination with the time it was on.
'''
channel = int(channel)
if channel >= len(self.temps):
log('Trying to read invalid power %d' % channel)
return float('nan')
self._send_packet(struct.pack('=BB', protocol.command['READPOWER'], channel))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['POWER']
return s, m
# }}}
def readpin(self, pin): # {{{
'''Read current value of a gpio pin.
'''
self._send_packet(struct.pack('=BB', protocol.command['READPIN'], pin))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['PIN']
return bool(s)
# }}}
def load(self, profile = None, update = True): # {{{
'''Load a profile.
'''
filenames = fhs.read_data(os.path.join(self.uuid, 'profiles', ((profile and profile.strip()) or self.profile) + os.extsep + 'ini'), opened = False, multiple = True)
if profile and self.profile != profile.strip():
#log('setting profile to %s' % profile.strip())
self.profile = profile.strip()
if update:
self._globals_update()
if len(filenames) > 0:
with open(filenames[0]) as f:
log('loading profile {}'.format(filenames[0]))
self.expert_import_settings(f.read(), update = update)
else:
log('not loading nonexistent profile')
# }}}
def admin_save(self, profile = None): # {{{
'''Save a profile.
If the profile name is not given, it saves the current profile.
'''
if profile and self.profile != profile.strip():
log('setting profile to %s' % profile.strip())
self.profile = profile.strip()
self._globals_update()
with fhs.write_data(os.path.join(self.uuid, 'profiles', (profile.strip() or self.profile) + os.extsep + 'ini')) as f:
f.write(self.export_settings())
# }}}
def list_profiles(self): # {{{
'''Get a list of all available profiles.
'''
dirnames = fhs.read_data(os.path.join(self.uuid, 'profiles'), dir = True, multiple = True, opened = False)
ret = []
for d in dirnames:
for f in os.listdir(d):
name = os.path.splitext(f)[0].strip()
if name not in ret:
ret.append(name)
ret.sort()
return ret
# }}}
def admin_remove_profile(self, profile): # {{{
'''Remove a profile.
'''
filename = fhs.write_data(os.path.join(self.uuid, 'profiles', (profile.strip() or self.profile) + os.extsep + 'ini'), opened = False)
if os.path.exists(filename):
os.unlink(filename)
return True
return False
# }}}
def admin_set_default_profile(self, profile): # {{{
'''Set a profile as default.
'''
self.default_profile = profile
with fhs.write_data(os.path.join(self.uuid, 'info' + os.extsep + 'txt')) as f:
f.write(self.name + '\n')
f.write(profile + '\n')
# }}}
def abort(self): # {{{
'''Abort the current job.
'''
for t, temp in enumerate(self.temps):
self.settemp(t, float('nan'))
self.pause(store = False)
for g, gpio in enumerate(self.gpios):
self.set_gpio(g, state = gpio.reset)
self._job_done(False, 'aborted by user')
# Sleep doesn't work as long as home_phase is non-None, so do it after _job_done.
self.sleep(force = True);
# }}}
def pause(self, pausing = True, store = True, update = True): # {{{
'''Pause or resume the machine.
'''
was_paused = self.paused
if pausing:
self._send_packet(struct.pack('=BB', protocol.command['QUEUED'], True))
cmd, s, m, f, e, data = self._get_reply()
if cmd != protocol.rcommand['QUEUE']:
log('invalid reply to queued command')
return
self.movewait = 0
self.wait = False
self.paused = pausing
if not self.paused:
if was_paused:
# Go back to pausing position.
# First reset all axes that don't have a limit switch.
if self.queue_info is not None:
self._reset_extruders(self.queue_info[1])
self.line(self.queue_info[1])
# TODO: adjust extrusion of current segment to shorter path length.
#log('resuming')
self.resuming = True
#log('sending resume')
self._send_packet(bytes((protocol.command['RESUME'],)))
self._do_queue()
else:
#log('pausing')
if not was_paused:
#log('pausing %d %d %d %d %d' % (store, self.queue_info is None, len(self.queue), self.queue_pos, s))
if store and self.queue_info is None and ((len(self.queue) > 0 and self.queue_pos - s >= 0) or self.gcode_file):
if self.home_phase is not None:
#log('killing homer')
self.home_phase = None
self.expert_set_space(0, type = self.home_orig_type)
for a, ax in enumerate(self.spaces[0].axis):
self.expert_set_axis((0, a), min = self.home_limits[a][0], max = self.home_limits[a][1])
if self.home_cb in self.movecb:
self.movecb.remove(self.home_cb)
if self.home_id is not None:
self._send(self.home_id, 'return', None)
store = False
if self.probe_cb in self.movecb:
self.movecb.remove(self.probe_cb)
self.probe_cb[1](None)
store = False
#log('pausing gcode %d/%d/%d' % (self.queue_pos, s, len(self.queue)))
if self.flushing is None:
self.flushing = False
if store:
self.queue_info = [len(self.queue) if self.gcode_file else self.queue_pos - s, [[s.get_current_pos(a) for a in range(len(s.axis))] for s in self.spaces], self.queue, self.movecb, self.flushing]
else:
#log('stopping')
self.paused = False
if self.probe_cb in self.movecb:
self.movecb.remove(self.probe_cb)
self.probe_cb[1](None)
if len(self.movecb) > 0:
call_queue.extend([(x[1], [False]) for x in self.movecb])
self.queue = []
self.movecb = []
self.flushing = False
self.queue_pos = 0
if update:
self._globals_update()
# }}}
def queued(self): # {{{
'''Get the number of currently queued segments.
'''
self._send_packet(struct.pack('=BB', protocol.command['QUEUED'], False))
cmd, s, m, f, e, data = self._get_reply()
if cmd != protocol.rcommand['QUEUE']:
log('invalid reply to queued command')
return None
return s
# }}}
@delayed
def home(self, id, speed = 5, cb = None, abort = True): # {{{
'''Recalibrate the position with its limit switches.
'''
if self.home_phase is not None and not self.paused:
log("ignoring request to home because we're already homing")
if id is not None:
self._send(id, 'return', None)
return
# Abort only if it is requested, and the job is not paused.
if abort and self.queue_info is None:
self._job_done(False, 'aborted by homing')
self.home_phase = 0
self.home_id = id
self.home_return = None
self.home_speed = speed
self.home_done_cb = cb
for i, e in enumerate(self.spaces[1].axis):
self.set_axis_pos(1, i, 0)
self._do_home()
# }}}
@delayed
def park(self, id, cb = None, abort = True, order = 0, aborted = False): # {{{
'''Go to the park position.
Home first if the position is unknown.
'''
if aborted:
if id is not None:
self._send(id, 'error', 'aborted')
return
#log('parking with cb %s' % repr(cb))
if abort and self.queue_info is None:
self._job_done(False, 'aborted by parking')
self.parking = True
if not self.position_valid:
#log('homing')
self.home(cb = lambda: self.park(cb, abort = False)[1](id), abort = False)[1](None)
return
next_order = None
for s in self.spaces:
topark = [a['park_order'] for a in s.axis if not math.isnan(a['park']) and a['park_order'] >= order]
if len(topark) > 0 and (next_order is None or min(topark) > next_order):
next_order = min(topark)
if next_order is None:
#log('done parking; cb = %s' % repr(cb))
self.parking = False
if cb:
def wrap_cb(done):
call_queue.append((cb, []))
if id is not None:
self._send(id, 'return', None)
self.movecb.append((False, wrap_cb))
self.line()
else:
if id is not None:
self._send(id, 'return', None)
return
self.movecb.append((False, lambda done: self.park(cb, False, next_order + 1, not done)[1](id)))
self.line([[a['park'] - (0 if si != 0 or ai != 2 else self.zoffset) if a['park_order'] == next_order else float('nan') for ai, a in enumerate(s.axis)] for si, s in enumerate(self.spaces)])
# }}}
@delayed
def benjamin_audio_play(self, id, name, motor = 2): # {{{
self.audio_id = id
self.sleep(False)
filename = fhs.read_spool(os.path.join(self.uuid, 'audio', name + os.extsep + 'bin'), opened = False)
self._send_packet(struct.pack('=BBddBB', protocol.command['RUN_FILE'], 1, 0, 0, motor, 0) + filename.encode('utf8'))
# }}}
def benjamin_audio_add_POST(self, filename, name): # {{{
with open(filename, 'rb') as f:
self._audio_add(f, name)
# }}}
def benjamin_audio_del(self, name): # {{{
assert name in self.audioqueue
filename = fhs.read_spool(os.path.join(self.uuid, 'audio', name + os.extsep + 'bin'), opened = False)
os.unlink(filename)
del self.audioqueue[name]
self._broadcast(None, 'audioqueue', tuple(self.audioqueue.keys()))
# }}}
def audio_list(self): # {{{
return self.audioqueue
# }}}
@delayed
def wait_for_cb(self, id): # {{{
'''Block until the move queue is empty.
'''
ret = lambda w: id is None or self._send(id, 'return', w)
if self.movewait == 0:
#log('not delaying with wait_for_cb, because there is no cb waiting')
ret(self.movewait == 0)
else:
#log('waiting for cb')
self.movecb.append((True, ret))
# }}}
def waiting_for_cb(self): # {{{
'''Check if any process is waiting for the move queue to be empty.
'''
return self.movewait > 0
# }}}
@delayed
def wait_for_temp(self, id, which = None): # {{{
'''Wait for a temp to trigger its alarm.
'''
def cb():
if id is not None:
self._send(id, 'return', None)
return
self.gcode_waiting -= 1
if(which is None and len(self.alarms) > 0) or which in self.alarms:
cb()
else:
self.tempcb.append((which, cb))
# }}}
def clear_alarm(self, which = None): # {{{
'''Disable a temp alarm.
If which is None, disable all temp alarms.
'''
if which is None:
self.alarms.clear()
else:
self.alarms.discard(which)
# }}}
def get_limits(self, space, motor = None): # {{{
'''Return all limits that were hit since they were cleared.
'''
if motor is None:
return self.limits[space]
if motor in self.limits[space]:
return self.limits[space][motor]
return None
# }}}
def clear_limits(self): # {{{
'''Clear all recorded limits.
'''
for s in range(len(self.spaces)):
self.limits[s].clear()
# }}}
def valid(self): # {{{
'''Return whether the position of the motors is known.
'''
return self.position_valid
# }}}
def export_settings(self): # {{{
'''Export the current settings.
The resulting string can be imported back.
'''
message = '[general]\r\n'
for t in ('temps', 'gpios'):
message += 'num_%s = %d\r\n' % (t, len(getattr(self, t)))
message += 'pin_names = %s\r\n' % ','.join(('%d' % p[0]) + p[1] for p in self.pin_names)
message += 'unit_name = %s\r\n' % self.unit_name
message += 'spi_setup = %s\r\n' % self._mangle_spi()
message += ''.join(['%s = %s\r\n' % (x, write_pin(getattr(self, x))) for x in ('led_pin', 'stop_pin', 'probe_pin', 'spiss_pin')])
message += ''.join(['%s = %d\r\n' % (x, getattr(self, x)) for x in ('bed_id', 'fan_id', 'spindle_id', 'park_after_job', 'sleep_after_job', 'cool_after_job', 'timeout')])
message += ''.join(['%s = %f\r\n' % (x, getattr(self, x)) for x in ('probe_dist', 'probe_offset', 'probe_safe_dist', 'temp_scale_min', 'temp_scale_max', 'max_deviation', 'max_v')])
message += 'user_interface = %s\r\n' % self.user_interface
for i, s in enumerate(self.spaces):
message += s.export_settings()
for i, t in enumerate(self.temps):
message += t.export_settings()
for i, g in enumerate(self.gpios):
message += g.export_settings()
return message
# }}}
def expert_import_settings(self, settings, filename = None, update = True): # {{{
'''Import new settings.
settings is a string as created by export_settings.
The filename is ignored.
'''
self._broadcast(None, 'blocked', 'importing settings')
self.sleep(update = update)
section = 'general'
index = None
obj = None
regexp = re.compile('\s*\[(general|(space|temp|gpio|(extruder|axis|motor|delta|follower)\s+(\d+))\s+(\d+))\]\s*$|\s*(\w+)\s*=\s*(.*?)\s*$|\s*(?:#.*)?$')
#1: (general|(space|temp|gpio|(axis|motor|delta)\s+(\d+))\s+(\d+)) 1 section
#2: (space|temp|gpio|(extruder|axis|motor|delta)\s+(\d+)) 2 section with index
#3: (extruder|axis|motor|delta) 3 sectionname with two indices
#4: (\d+) 4 index of space
#5: (\d+) 5 only or component index
#6: (\w+) 6 identifier
#7: (.*?) 7 value
errors = []
globals_changed = True
changed = {'space': set(), 'temp': set(), 'gpio': set(), 'axis': set(), 'motor': set(), 'extruder': set(), 'delta': set(), 'follower': set()}
keys = {
'general': {'num_temps', 'num_gpios', 'user_interface', 'pin_names', 'led_pin', 'stop_pin', 'probe_pin', 'spiss_pin', 'probe_dist', 'probe_offset', 'probe_safe_dist', 'bed_id', 'fan_id', 'spindle_id', 'unit_name', 'timeout', 'temp_scale_min', 'temp_scale_max', 'park_after_job', 'sleep_after_job', 'cool_after_job', 'spi_setup', 'max_deviation', 'max_v'},
'space': {'type', 'num_axes', 'delta_angle', 'polar_max_r'},
'temp': {'name', 'R0', 'R1', 'Rc', 'Tc', 'beta', 'heater_pin', 'fan_pin', 'thermistor_pin', 'fan_temp', 'fan_duty', 'heater_limit_l', 'heater_limit_h', 'fan_limit_l', 'fan_limit_h', 'hold_time'},
'gpio': {'name', 'pin', 'state', 'reset', 'duty'},
'axis': {'name', 'park', 'park_order', 'min', 'max', 'home_pos2'},
'motor': {'step_pin', 'dir_pin', 'enable_pin', 'limit_min_pin', 'limit_max_pin', 'steps_per_unit', 'home_pos', 'limit_v', 'limit_a', 'home_order'},
'extruder': {'dx', 'dy', 'dz'},
'delta': {'axis_min', 'axis_max', 'rodlength', 'radius'},
'follower': {'space', 'motor'}
}
for l in settings.split('\n'):
r = regexp.match(l)
if not r:
errors.append((l, 'syntax error'))
continue
if r.group(1) is not None:
# New section.
if r.group(2) is not None:
# At least one index.
#log("At least one index")
if r.group(3) is not None:
# Two indices: axis, motor, extruder, delta, follower.
#log("Two indices")
index = (int(r.group(4)), int(r.group(5)))
section = r.group(3)
if index[0] >= len(self.spaces) or index[1] >= len(getattr(self.spaces[index[0]], section)):
log('index out of range for %s; %s %s' % (index, len(self.spaces), len(getattr(self.spaces[index[0]], section)) if index[0] < len(self.spaces) else 'x'))
errors.append((l, 'index out of range'))
obj = None
continue
obj = getattr(self.spaces[index[0]], section)[index[1]]
else:
#log("One index")
# One index: space, temp, gpio.
index = int(r.group(5))
section = r.group(2)
if index >= len(getattr(self, section + 's')):
errors.append((l, 'index out of range'))
obj = None
continue
obj = getattr(self, section + 's')[index]
changed[section].add(index)
else:
#log("No index")
# No indices: general.
section = r.group(1)
index = None
obj = self
globals_changed = True
continue
elif obj is None:
# Ignore settings for incorrect section.
continue
if not r.group(6):
# Comment or empty line.
continue
key = r.group(6)
value = r.group(7)
try:
if key == 'pin_names':
if len(self.pin_names) > 0:
# Don't override hardware-provided names.
continue
value = [[int(x[0]), x[1:]] for x in value.split(',')]
elif 'name' in key or key == 'user_interface':
pass # Keep strings as they are.
elif key == 'spi_setup':
value = self._unmangle_spi(value)
elif key.endswith('pin'):
value = read_pin(self, value)
#log('pin imported as {} for {}'.format(value, key))
elif key.startswith('num') or section == 'follower' or key.endswith('_id'):
value = int(value)
else:
value = float(value)
except ValueError:
errors.append((l, 'invalid value for %s' % key))
continue
if key not in keys[section] or (section == 'motor' and ((key in ('home_pos', 'home_order') and index[0] == 1) or (key in ('steps_per_unit', 'limit_v', 'limit_a') and index[0] == 2))):
errors.append((l, 'invalid key for section %s' % section))
continue
# If something critical is changed, update instantly.
if key.startswith('num') or key == 'type':
#log('setting now for %s:%s=%s' % (section, key, value))
if index is None:
self.expert_set_globals(**{key: value})
else:
if section == 'space':
for i in changed['motor']:
if i[0] == index:
self.expert_set_motor(i, readback = False)
for i in changed['axis']:
if i[0] == index:
self.expert_set_axis(i, readback = False)
for i in changed['delta']:
if i[0] == index:
self.expert_set_axis(i, readback = False)
getattr(self, 'expert_set_' + section)(index, **{key: value})
else:
if isinstance(index, tuple):
#log('setting later %s' % repr((section, key, value)))
obj[key] = value
else:
#log('setting later other %s' % repr((section, key, value)))
if section == 'extruder':
obj[ord[key[1]] - ord['x']] = value
else:
setattr(obj, key, value)
# Update values in the machine by calling the expert_set_* functions with no new settings.
if globals_changed:
#log('setting globals')
self.expert_set_globals()
for index in changed['extruder']:
changed['space'].add(index[0])
for index in changed['follower']:
changed['space'].add(index[0])
for index in changed['delta']:
changed['space'].add(index[0])
for section in changed:
for index in changed[section]:
if not isinstance(index, tuple):
continue
if section not in ('follower', 'delta', 'extruder'):
#log('setting non-{delta,follower} %s %s' % (section, index))
getattr(self, 'expert_set_' + section)(index, readback = False)
changed['space'].add(index[0])
for section in changed:
for index in changed[section]:
if isinstance(index, tuple):
continue
#log('setting %s' % repr((section, index)))
getattr(self, 'expert_set_' + section)(index)
self._broadcast(None, 'blocked', None)
return errors
# }}}
def expert_import_POST(self, filename, name): # {{{
'''Import settings using a POST request.
Note that this function can only be called using POST; not with the regular websockets system.
'''
return ', '.join('%s (%s)' % (msg, ln) for ln, msg in self.expert_import_settings(open(filename).read(), name))
# }}}
@delayed
def gcode_run(self, id, code, paused = False): # {{{
'''Run a string of g-code.
'''
with fhs.write_temp(text = False) as f:
f.write(code)
f.seek(0)
self.gcode_id = id
# Break this in two, otherwise tail recursion may destroy f before call is done?
ret = self._gcode_run(f.filename, paused = paused)
return ret
# }}}
@delayed
def request_confirmation(self, id, message): # {{{
'''Wait for confirmation.
The return value is True if confirmation is given, False if
not.
'''
# Abort pending confirmation, if any.
if self.confirmer not in (False, None):
self._send(self.confirmer, 'return', False)
self.confirmer = id
self.confirm_id += 1
self.confirm_axes = [[s.get_current_pos(a) for a in range(len(s.axis))] for s in self.spaces]
self.confirm_message = message
self._broadcast(None, 'confirm', self.confirm_id, self.confirm_message)
for c in self.confirm_waits:
self._send(c, 'return', (self.confirm_id, self.confirm_message))
self.confirm_waits.clear()
# }}}
def get_confirm_id(self): # {{{
'''Return id of current confirmation request, if any.
'''
return self.confirm_id, self.confirm_message
# }}}
@delayed
def wait_confirm(self, id, pending = True): # {{{
'''Block until confirmation is requested.
If pending is False, ignore the current request, if any.
'''
if pending and self.confirmer is not None:
self._send(id, 'return', (self.confirm_id, self.confirm_message))
return
self.confirm_waits.add(id)
# }}}
def confirm(self, confirm_id, success = True): # {{{
'''Respond to a confirmation request.
If confirm_id is not None, it must be equal to the current id
or the confirmation is ignored.
Success is passed to the requester. If it is requested by
g-code, passing False will abort the job.
'''
if confirm_id not in (self.confirm_id, None) or self.confirm_axes is None:
# Confirmation was already sent, or never reguested.
#log('no confirm %s' % repr((confirm_id, self.confirm_id)))
return False
id = self.confirmer
self.confirmer = None
self.confirm_message = None
self._broadcast(None, 'confirm', None)
self._reset_extruders(self.confirm_axes)
self.confirm_axes = None
if id not in (False, None):
self._send(id, 'return', success)
else:
if self.probing:
call_queue.append((self.probe_cb[1], [False if success else None]))
else:
if not success:
self.probe_pending = False
self._job_done(False, 'aborted by failed confirmation')
else:
if self.probe_pending and self._pin_valid(self.probe_pin):
self.probe_pending = False
call_queue.append((self._one_probe, []))
else:
self.probe_pending = False
self._send_packet(bytes((protocol.command['RESUME'],)))
return True
# }}}
def queue_add(self, data, name): # {{{
'''Add code to the queue as a string.
'''
with fhs.write_temp() as f:
f.write(data)
f.seek(0)
return self._queue_add(f.filename, name)
# }}}
def queue_add_POST(self, filename, name): # {{{
'''Add g-code to queue using a POST request.
Note that this function can only be called using POST; not with the regular websockets system.
'''
return self._queue_add(filename, name)
# }}}
def probe_add_POST(self, filename, name): # {{{
'''Set probe map using a POST request.
Note that this function can only be called using POST; not with the regular websockets system.
'''
with open(filename) as f:
self.probemap = json.loads(f.read().strip())
return '' if self._check_probemap() else 'Invalid probemap'
# }}}
def queue_remove(self, name, audio = False): # {{{
'''Remove an entry from the queue.
'''
assert name in self.jobqueue
#log('removing %s' % name)
if audio:
filename = fhs.read_spool(os.path.join(self.uuid, 'audio', name + os.extsep + 'bin'), opened = False)
del self.audioqueue[name]
self._broadcast(None, 'audioqueue', tuple(self.audioqueue.keys()))
else:
filename = fhs.read_spool(os.path.join(self.uuid, 'gcode', name + os.extsep + 'bin'), opened = False)
del self.jobqueue[name]
self._broadcast(None, 'queue', [(q, self.jobqueue[q]) for q in self.jobqueue])
try:
os.unlink(filename)
except:
log('unable to unlink %s' % filename)
# }}}
@delayed
def queue_run(self, id, name, paused = False): # {{{
'''Start a new job.
'''
if self.probing:
log('ignoring run request while probe is in progress')
if id is not None:
self._send(id, 'return', None)
return
if self.job_current is not None and not self.paused:
log('ignoring run request while job is in progress: %s ' % repr(self.job_current) + str(self.paused))
if id is not None:
self._send(id, 'return', None)
return
#log('set active jobs to %s' % names)
self.job_current = name
self.job_id = id
self._start_job(paused)
# }}}
def get_machine_state(self): # {{{
'''Return current machine state.
Return value is a tuple of a human readable string describing
the state, NaN or the elapsed time, NaN or the total time for
the current job.
Note that the times are computed from the requested speeds.
These are generally too low, because they don't account for
acceleration and velocity limits.
'''
pos = self.tp_get_position()
context = self.tp_get_context(position = pos[0])
if self.paused:
state = 'Paused'
elif self.gcode_map is not None or self.gcode_file:
state = 'Running'
else:
return 'Idle', float('nan'), float('nan'), pos[0], pos[1], context
self._send_packet(struct.pack('=B', protocol.command['GETTIME']))
cmd, s, m, f, e, data = self._get_reply()
if cmd != protocol.rcommand['TIME']:
log('invalid reply to gettime command')
return 'Error', float('nan'), float('nan'), pos[0], pos[1], context
return state, f, (self.total_time[0] + (0 if len(self.spaces) < 1 else self.total_time[1] / self.max_v)) / self.feedrate, pos[0], pos[1], context
# }}}
def send_machine(self, target): # {{{
'''Return all settings about a machine.
'''
self.initialized = True
self._broadcast(target, 'new_machine', self.uuid, [self.queue_length])
self._globals_update(target)
for i, s in enumerate(self.spaces):
self._space_update(i, target)
for i, t in enumerate(self.temps):
self._temp_update(i, target)
for i, g in enumerate(self.gpios):
self._gpio_update(i, target)
self._broadcast(target, 'queue', [(q, self.jobqueue[q]) for q in self.jobqueue])
self._broadcast(target, 'audioqueue', tuple(self.audioqueue.keys()))
if self.confirmer is not None:
self._broadcast(target, 'confirm', self.confirm_id, self.confirm_message)
# }}}
def admin_disconnect(self, reason = None): # {{{
self._send_packet(struct.pack('=B', protocol.command['FORCE_DISCONNECT']))
self._close(False)
# }}}
# Commands for handling the toolpath.
def tp_get_position(self): # {{{
'''Get current toolpath position.
@return position, total toolpath length.'''
if self.gcode_map is None:
return 0, 0
self._send_packet(struct.pack('=B', protocol.command['TP_GETPOS']))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['TP_POS']
return f, self.gcode_num_records
# }}}
def tp_set_position(self, position): # {{{
'''Set current toolpath position.
It is an error to call this function while not paused.
@param position: new toolpath position.
@return None.'''
assert self.gcode_map is not None
assert 0 <= position < self.gcode_num_records
assert self.paused
if self.queue_info is not None:
self.queue_info[1] = [] # Don't restore extruder position on resume.
self._send_packet(struct.pack('=Bd', protocol.command['TP_SETPOS'], position))
# }}}
def tp_get_context(self, num = None, position = None): # {{{
'''Get context around a position.
@param num: number of lines context on each side.
@param position: center of the returned region, or None for current position.
@return first position of returned region (normally position - num), list of lines+arcs+specials'''
if self.gcode_map is None:
return 0, []
if num is None:
num = 100; # TODO: make configurable.
if position is None:
position = self.tp_get_position()[0]
position = int(position)
def parse_record(num):
s = struct.calcsize(record_format)
type, tool, X, Y, Z, E, f, F, time, dist = struct.unpack(record_format, self.gcode_map[num * s:(num + 1) * s])
return tuple(protocol.parsed.keys())[tuple(protocol.parsed.values()).index(type)], tool, X, Y, Z, E, f, F, time, dist
return max(0, position - num), [parse_record(x) for x in range(position - num, position + num + 1) if 0 <= x < self.gcode_num_records]
# }}}
def tp_get_string(self, num): # {{{
'''Get string from toolpath.
@param num: index of the string.
@return the string.'''
return self.gcode_strings[num]
# }}}
def tp_find_position(self, x = None, y = None, z = None): # {{{
'''Find toolpath position closest to coordinate.
Inputs may be None, in that case that coordinate is ignored.
@param x: X coordinate of target or None.
@param y: Y coordinate of target or None.
@param z: Z coordinate of target or None.
@return toolpath position.'''
assert self.gcode_map is not None
self._send_packet(struct.pack('=Bddd', protocol.command['TP_FINDPOS'], *(a if a is not None else float('nan') for a in (x, y, z))))
cmd, s, m, f, e, data = self._get_reply()
assert cmd == protocol.rcommand['TP_POS']
return f
# }}}
# }}}
# Accessor functions. {{{
# Globals. {{{
def get_globals(self): # {{{
#log('getting globals')
ret = {'num_temps': len(self.temps), 'num_gpios': len(self.gpios)}
for key in ('name', 'user_interface', 'pin_names', 'uuid', 'queue_length', 'num_pins', 'led_pin', 'stop_pin', 'probe_pin', 'spiss_pin', 'probe_dist', 'probe_offset', 'probe_safe_dist', 'bed_id', 'fan_id', 'spindle_id', 'unit_name', 'timeout', 'feedrate', 'targetx', 'targety', 'targetangle', 'zoffset', 'store_adc', 'temp_scale_min', 'temp_scale_max', 'probemap', 'paused', 'park_after_job', 'sleep_after_job', 'cool_after_job', 'spi_setup', 'max_deviation', 'max_v'):
ret[key] = getattr(self, key)
return ret
# }}}
def expert_set_globals(self, update = True, **ka): # {{{
#log('setting variables with %s' % repr(ka))
nt = ka.pop('num_temps') if 'num_temps' in ka else None
ng = ka.pop('num_gpios') if 'num_gpios' in ka else None
if 'store_adc' in ka:
self.store_adc = bool(ka.pop('store_adc'))
if 'name' in ka:
name = ka.pop('name')
if name != self.name:
self.name = name
self.admin_set_default_profile(self.default_profile)
if 'probemap' in ka:
self.probemap = ka.pop('probemap')
self._check_probemap()
for key in ('unit_name', 'user_interface', 'pin_names'):
if key in ka:
setattr(self, key, ka.pop(key))
if 'spi_setup' in ka:
self.spi_setup = self._unmangle_spi(ka.pop('spi_setup'))
if self.spi_setup:
self._spi_send(self.spi_setup)
for key in ('led_pin', 'stop_pin', 'probe_pin', 'spiss_pin', 'bed_id', 'fan_id', 'spindle_id', 'park_after_job', 'sleep_after_job', 'cool_after_job', 'timeout'):
if key in ka:
setattr(self, key, int(ka.pop(key)))
for key in ('probe_dist', 'probe_offset', 'probe_safe_dist', 'feedrate', 'targetx', 'targety', 'targetangle', 'zoffset', 'temp_scale_min', 'temp_scale_max', 'max_deviation', 'max_v'):
if key in ka:
setattr(self, key, float(ka.pop(key)))
self._write_globals(nt, ng, update = update)
assert len(ka) == 0
# }}}
def set_globals(self, update = True, **ka): # {{{
real_ka = {}
for key in ('feedrate', 'targetx', 'targety', 'targetangle', 'zoffset'):
if key in ka:
real_ka[key] = ka.pop(key)
assert len(ka) == 0
return self.expert_set_globals(update = update, **real_ka)
# }}}
# }}}
# Space {{{
def get_axis_pos(self, space, axis = None): # {{{
if space >= len(self.spaces) or (axis is not None and axis >= len(self.spaces[space].axis)):
log('request for invalid axis position %d %d' % (space, axis))
return float('nan')
if axis is None:
return [self.spaces[space].get_current_pos(a) for a in range(len(self.spaces[space].axis))]
else:
return self.spaces[space].get_current_pos(axis)
# }}}
def set_axis_pos(self, space, axis, pos): # {{{
if space >= len(self.spaces) or (axis is not None and axis >= len(self.spaces[space].axis)):
log('request to set invalid axis position %d %d' % (space, axis))
return False
return self.spaces[space].set_current_pos(axis, pos)
# }}}
def get_space(self, space): # {{{
ret = {'name': self.spaces[space].name, 'num_axes': len(self.spaces[space].axis), 'num_motors': len(self.spaces[space].motor)}
if self.spaces[space].type == TYPE_CARTESIAN:
pass
elif self.spaces[space].type == TYPE_DELTA:
delta = []
for i in range(3):
d = {}
for key in ('axis_min', 'axis_max', 'rodlength', 'radius'):
d[key] = self.spaces[space].delta[i][key]
delta.append(d)
delta.append(self.spaces[space].delta_angle)
ret['delta'] = delta
elif self.spaces[space].type == TYPE_POLAR:
ret['polar_max_r'] = self.spaces[space].polar_max_r
elif self.spaces[space].type == TYPE_EXTRUDER:
ret['extruder'] = []
for a in range(len(self.spaces[space].axis)):
ret['extruder'].append({})
for key in ('dx', 'dy', 'dz'):
ret['extruder'][-1][key] = self.spaces[space].extruder[a][key]
elif self.spaces[space].type == TYPE_FOLLOWER:
ret['follower'] = []
for a in range(len(self.spaces[space].axis)):
ret['follower'].append({})
for key in ('space', 'motor'):
ret['follower'][-1][key] = self.spaces[space].follower[a][key]
else:
log('invalid type')
return ret
# }}}
def get_axis(self, space, axis): # {{{
ret = {'name': self.spaces[space].axis[axis]['name']}
if space == 1:
ret['multiplier'] = self.multipliers[axis]
if space == 0:
for key in ('park', 'park_order', 'min', 'max', 'home_pos2'):
ret[key] = self.spaces[space].axis[axis][key]
return ret
# }}}
def get_motor(self, space, motor): # {{{
ret = {'name': self.spaces[space].motor_name(motor)}
for key in ('step_pin', 'dir_pin', 'enable_pin'):
ret[key] = self.spaces[space].motor[motor][key]
if space != 1:
for key in ('limit_min_pin', 'limit_max_pin', 'home_pos', 'home_order'):
ret[key] = self.spaces[space].motor[motor][key]
if space != 2:
for key in ('steps_per_unit', 'limit_v', 'limit_a'):
ret[key] = self.spaces[space].motor[motor][key]
return ret
# }}}
def expert_set_space(self, space, readback = True, update = True, **ka): # {{{
old_type = self.spaces[space].type
if space == 0 and 'type' in ka:
self.spaces[space].type = int(ka.pop('type'))
current_pos = None if self.spaces[space].type != old_type else self.get_axis_pos(space)
if self.spaces[space].type == TYPE_EXTRUDER:
if 'extruder' in ka:
e = ka.pop('extruder')
for ei, ee in e.items():
i = int(ei)
for key in ('dx', 'dy', 'dz'):
if key in ee:
self.spaces[space].extruder[i][key] = ee.pop(key)
assert len(ee) == 0
if self.spaces[space].type == TYPE_FOLLOWER:
if 'follower' in ka:
f = ka.pop('follower')
for fi, ff in f.items():
fi = int(fi)
for key in ('space', 'motor'):
if key in ff:
self.spaces[space].follower[fi][key] = int(ff.pop(key))
assert len(ff) == 0
if self.spaces[space].type in (TYPE_CARTESIAN, TYPE_EXTRUDER, TYPE_FOLLOWER):
if 'num_axes' in ka:
num_axes = int(ka.pop('num_axes'))
else:
num_axes = len(self.spaces[space].axis)
num_motors = num_axes
elif self.spaces[space].type == TYPE_DELTA:
num_axes = 3;
num_motors = 3;
if 'delta' in ka:
d = ka.pop('delta')
for di, dd in d.items():
i = int(di)
assert 0 <= i < 3
for key in ('axis_min', 'axis_max', 'rodlength', 'radius'):
if key in dd:
self.spaces[space].delta[i][key] = dd.pop(key)
assert len(dd) == 0
if 'delta_angle' in ka:
self.spaces[space].delta_angle = ka.pop('delta_angle')
elif self.spaces[space].type == TYPE_POLAR:
num_axes = 3;
num_motors = 3;
if 'polar_max_r' in ka:
self.spaces[space].polar_max_r = ka.pop('polar_max_r')
self._send_packet(struct.pack('=BB', protocol.command['WRITE_SPACE_INFO'], space) + self.spaces[space].write_info(num_axes))
if readback:
self.spaces[space].read(self._read('SPACE', space))
if update:
self._space_update(space)
if len(ka) != 0:
log('invalid input ignored: %s' % repr(ka))
if current_pos is not None and not all(math.isnan(x) for x in current_pos) and (self.paused or (self.home_phase is None and not self.gcode_file and self.gcode_map is None)):
self.line({space: current_pos})
#else:
# log(repr(('not going to pos:', current_pos, self.paused, self.home_phase, self.gcode_file, self.gcode_map)))
# }}}
def expert_set_axis(self, spaceaxis, readback = True, update = True, **ka): # {{{
space, axis = spaceaxis
if 'name' in ka:
self.spaces[space].axis[axis]['name'] = ka.pop('name')
if space == 0:
for key in ('park', 'park_order', 'min', 'max', 'home_pos2'):
if key in ka:
self.spaces[space].axis[axis][key] = ka.pop(key)
if space == 1 and 'multiplier' in ka and axis < len(self.spaces[space].motor):
assert(ka['multiplier'] > 0)
self.multipliers[axis] = ka.pop('multiplier')
self.expert_set_motor((space, axis), readback, update)
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_AXIS'], space, axis) + self.spaces[space].write_axis(axis))
if readback:
self.spaces[space].read(self._read('SPACE', space))
if update:
self._space_update(space)
assert len(ka) == 0
# }}}
def expert_set_motor(self, spacemotor, readback = True, update = True, **ka): # {{{
space, motor = spacemotor
current_pos = self.get_axis_pos(space)
for key in ('step_pin', 'dir_pin', 'enable_pin'):
if key in ka:
self.spaces[space].motor[motor][key] = ka.pop(key)
for key in ('home_pos', 'limit_min_pin', 'limit_max_pin'):
if space != 1 and key in ka:
self.spaces[space].motor[motor][key] = ka.pop(key)
if space != 1 and 'home_order' in ka:
self.spaces[space].motor[motor]['home_order'] = ka.pop('home_order')
for key in ('steps_per_unit', 'limit_v', 'limit_a'):
if space != 2 and key in ka:
self.spaces[space].motor[motor][key] = ka.pop(key)
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_MOTOR'], space, motor) + self.spaces[space].write_motor(motor))
followers = False
for m, mt in enumerate(self.spaces[2].motor):
fs = self.spaces[2].follower[m]['space']
fm = self.spaces[2].follower[m]['motor']
if fs == space and fm == motor:
followers = True
self._send_packet(struct.pack('=BBB', protocol.command['WRITE_SPACE_MOTOR'], 2, m) + self.spaces[2].write_motor(m))
if readback:
self.spaces[space].read(self._read('SPACE', space))
if update:
self._space_update(space)
if followers:
self.spaces[2].read(self._read('SPACE', 2))
if update:
self._space_update(2)
assert len(ka) == 0
if not all(math.isnan(x) for x in current_pos) and (self.paused or (self.home_phase is None and not self.gcode_file and self.gcode_map is None)):
self.line({space: current_pos})
# }}}
# }}}
# Temp {{{
def get_temp(self, temp): # {{{
ret = {}
for key in ('name', 'R0', 'R1', 'Rc', 'Tc', 'beta', 'heater_pin', 'fan_pin', 'thermistor_pin', 'fan_temp', 'fan_duty', 'heater_limit_l', 'heater_limit_h', 'fan_limit_l', 'fan_limit_h', 'hold_time'):
ret[key] = getattr(self.temps[temp], key)
return ret
# }}}
def expert_set_temp(self, temp, update = True, **ka): # {{{
ret = {}
for key in ('name', 'R0', 'R1', 'Rc', 'Tc', 'beta', 'heater_pin', 'fan_pin', 'thermistor_pin', 'fan_temp', 'fan_duty', 'heater_limit_l', 'heater_limit_h', 'fan_limit_l', 'fan_limit_h', 'hold_time'):
if key in ka:
setattr(self.temps[temp], key, ka.pop(key))
self._send_packet(struct.pack('=BB', protocol.command['WRITE_TEMP'], temp) + self.temps[temp].write())
self.temps[temp].read(self._read('TEMP', temp))
if update:
self._temp_update(temp)
if len(ka) != 0:
log('problem: %s' % repr(ka))
assert len(ka) == 0
# }}}
def set_temp(self, temp, update = True, **ka): # {{{
real_ka = {}
if 'fan_duty' in ka:
real_ka['fan_duty'] = ka.pop('fan_duty')
assert len(ka) == 0
return self.expert_set_temp(temp, update = update, **real_ka)
# }}}
# }}}
# Gpio {{{
@delayed
def wait_gpio(self, id, gpio, value = 1): # {{{
assert gpio < len(self.gpios)
if int(value) == int(self.gpios[gpio].value):
self._send(id, 'return', None)
return
if gpio not in self.gpio_waits:
self.gpio_waits[gpio] = []
self.gpio_waits[gpio].append(id)
# }}}
def get_gpio(self, gpio): # {{{
ret = {}
for key in ('name', 'pin', 'state', 'reset', 'duty', 'value'):
ret[key] = getattr(self.gpios[gpio], key)
return ret
# }}}
def expert_set_gpio(self, gpio, update = True, **ka): # {{{
for key in ('name', 'pin', 'state', 'reset', 'duty'):
if key in ka:
setattr(self.gpios[gpio], key, ka.pop(key))
self.gpios[gpio].state = int(self.gpios[gpio].state)
self.gpios[gpio].reset = int(self.gpios[gpio].reset)
if self.gpios[gpio].reset >= 2 or (self.gpios[gpio].reset < 2 and self.gpios[gpio].state >= 2):
self.gpios[gpio].state = self.gpios[gpio].reset
#log('gpio %d reset %d' % (gpio, self.gpios[gpio].reset))
self._send_packet(struct.pack('=BB', protocol.command['WRITE_GPIO'], gpio) + self.gpios[gpio].write())
self.gpios[gpio].read(self._read('GPIO', gpio))
if update:
self._gpio_update(gpio)
assert len(ka) == 0
# }}}
def set_gpio(self, gpio, update = True, **ka): # {{{
real_ka = {}
if 'state' in ka:
real_ka['state'] = ka.pop('state')
assert len(ka) == 0
return self.expert_set_gpio(gpio, update = update, **real_ka)
# }}}
# }}}
# }}}
# }}}
call_queue = []
machine = Machine(config['allow-system'])
if machine.machine is None:
sys.exit(0)
while True: # {{{
while len(call_queue) > 0:
f, a = call_queue.pop(0)
#log('calling %s' % repr((f, a)))
f(*a)
while machine.machine.available():
machine._machine_input()
if len(call_queue) > 0:
continue # Handle this first.
fds = [sys.stdin, machine.machine]
#log('waiting; movewait = %d' % machine.movewait)
found = select.select(fds, [], fds, None)
if sys.stdin in found[0] or sys.stdin in found[2]:
#log('command')
machine._command_input()
if machine.machine in found[0] or machine.machine in found[2]:
#log('machine')
machine._machine_input()
# }}}
| agpl-3.0 | 8,010,818,517,369,724,000 | 37.630791 | 701 | 0.603842 | false | 2.758322 | false | false | false |
maas/maas | src/maasserver/forms/iprange.py | 1 | 1559 | # Copyright 2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""IPRange form."""
from django import forms
from django.contrib.auth.models import User
from maasserver.forms import MAASModelForm
from maasserver.models import Subnet
from maasserver.models.iprange import IPRange
class IPRangeForm(MAASModelForm):
"""IPRange creation/edition form."""
user = forms.ModelChoiceField(
required=False, queryset=User.objects, to_field_name="username"
)
class Meta:
model = IPRange
fields = ("subnet", "type", "start_ip", "end_ip", "user", "comment")
def __init__(
self, data=None, instance=None, request=None, *args, **kwargs
):
if data is None:
data = {}
else:
data = data.copy()
# If this is a new IPRange, fill in the 'user' and 'subnet' fields
# automatically, if necessary.
if instance is None:
start_ip = data.get("start_ip")
subnet = data.get("subnet")
if subnet is None and start_ip is not None:
subnet = Subnet.objects.get_best_subnet_for_ip(start_ip)
if subnet is not None:
data["subnet"] = subnet.id
if request is not None:
data["user"] = request.user.username
elif instance.user and "user" not in data:
data["user"] = instance.user.username
super().__init__(data=data, instance=instance, *args, **kwargs)
| agpl-3.0 | 2,087,308,162,693,567,500 | 32.891304 | 76 | 0.609365 | false | 3.977041 | false | false | false |
Wuguanping/Server_Manage_Plugin | Openstack_Plugin/ironic-plugin-pike/ironic/tests/unit/drivers/modules/ilo/test_boot.py | 4 | 52035 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for boot methods used by iLO modules."""
import tempfile
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
import six
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common import image_service
from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import boot as ilo_boot
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules import pxe
from ironic.drivers import utils as driver_utils
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
if six.PY3:
import io
file = io.BytesIO
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
class IloBootCommonMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloBootCommonMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)
def test_parse_driver_info(self):
self.node.driver_info['ilo_deploy_iso'] = 'deploy-iso'
expected_driver_info = {'ilo_deploy_iso': 'deploy-iso'}
actual_driver_info = ilo_boot.parse_driver_info(self.node)
self.assertEqual(expected_driver_info, actual_driver_info)
def test_parse_driver_info_exc(self):
self.assertRaises(exception.MissingParameterValue,
ilo_boot.parse_driver_info, self.node)
class IloBootPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloBootPrivateMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)
def test__get_boot_iso_object_name(self):
boot_iso_actual = ilo_boot._get_boot_iso_object_name(self.node)
boot_iso_expected = "boot-%s" % self.node.uuid
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
def test__get_boot_iso_http_url(self, service_mock):
url = 'http://abc.org/image/qcow2'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
service_mock.assert_called_once_with(mock.ANY, url)
self.assertEqual(url, boot_iso_actual)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
def test__get_boot_iso_unsupported_url(self, validate_href_mock):
validate_href_mock.side_effect = exception.ImageRefValidationFailed(
image_href='file://img.qcow2', reason='fail')
url = 'file://img.qcow2'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.ImageRefValidationFailed,
ilo_boot._get_boot_iso, task, 'root-uuid')
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_glance_image(self, deploy_info_mock,
image_props_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': u'glance://uui\u0111',
'kernel_id': None,
'ramdisk_id': None}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = False
task.node.driver_internal_info = driver_internal_info
task.node.save()
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_iso_expected = u'glance://uui\u0111'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy',
spec_set=True, autospec=True)
@mock.patch.object(ilo_boot.LOG, 'error', spec_set=True, autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_uefi_no_glance_image(self,
deploy_info_mock,
image_props_mock,
log_mock,
boot_mode_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': None,
'ramdisk_id': None}
properties = {'capabilities': 'boot_mode:uefi'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = properties
boot_iso_result = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
self.assertTrue(log_mock.called)
self.assertFalse(boot_mode_mock.called)
self.assertIsNone(boot_iso_result)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_create(self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
swift_api_mock,
create_boot_iso_mock, tempfile_mock):
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.pxe.pxe_append_params = 'kernel-params'
swift_obj_mock = swift_api_mock.return_value
fileobj_mock = mock.MagicMock(spec=file)
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': 'kernel_uuid',
'ramdisk_id': 'ramdisk_uuid'}
boot_object_name_mock.return_value = 'abcdef'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
'kernel_uuid',
'ramdisk_uuid',
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
swift_obj_mock.create_object.assert_called_once_with('ilo-cont',
'abcdef',
'tmpfile')
boot_iso_expected = 'swift:abcdef'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(ilo_common, 'copy_image_to_web_server', spec_set=True,
autospec=True)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_recreate_boot_iso_use_webserver(
self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
create_boot_iso_mock, tempfile_mock,
copy_file_mock):
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.ilo.use_web_server_for_images = True
CONF.deploy.http_url = "http://10.10.1.30/httpboot"
CONF.deploy.http_root = "/httpboot"
CONF.pxe.pxe_append_params = 'kernel-params'
fileobj_mock = mock.MagicMock(spec=file)
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
ramdisk_href = "http://10.10.1.30/httpboot/ramdisk"
kernel_href = "http://10.10.1.30/httpboot/kernel"
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': kernel_href,
'ramdisk_id': ramdisk_href}
boot_object_name_mock.return_value = 'new_boot_iso'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
copy_file_mock.return_value = "http://10.10.1.30/httpboot/new_boot_iso"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = True
instance_info = task.node.instance_info
old_boot_iso = 'http://10.10.1.30/httpboot/old_boot_iso'
instance_info['ilo_boot_iso'] = old_boot_iso
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
kernel_href,
ramdisk_href,
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
boot_iso_expected = 'http://10.10.1.30/httpboot/new_boot_iso'
self.assertEqual(boot_iso_expected, boot_iso_actual)
copy_file_mock.assert_called_once_with(fileobj_mock.name,
'new_boot_iso')
@mock.patch.object(ilo_common, 'copy_image_to_web_server', spec_set=True,
autospec=True)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_create_use_webserver_true_ramdisk_webserver(
self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
create_boot_iso_mock, tempfile_mock,
copy_file_mock):
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.ilo.use_web_server_for_images = True
CONF.deploy.http_url = "http://10.10.1.30/httpboot"
CONF.deploy.http_root = "/httpboot"
CONF.pxe.pxe_append_params = 'kernel-params'
fileobj_mock = mock.MagicMock(spec=file)
fileobj_mock.name = 'tmpfile'
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
ramdisk_href = "http://10.10.1.30/httpboot/ramdisk"
kernel_href = "http://10.10.1.30/httpboot/kernel"
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': kernel_href,
'ramdisk_id': ramdisk_href}
boot_object_name_mock.return_value = 'abcdef'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
copy_file_mock.return_value = "http://10.10.1.30/httpboot/abcdef"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_boot._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
kernel_href,
ramdisk_href,
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
boot_iso_expected = 'http://10.10.1.30/httpboot/abcdef'
self.assertEqual(boot_iso_expected, boot_iso_actual)
copy_file_mock.assert_called_once_with(fileobj_mock.name,
'abcdef')
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
def test__clean_up_boot_iso_for_instance(self, swift_mock,
boot_object_name_mock):
swift_obj_mock = swift_mock.return_value
CONF.ilo.swift_ilo_container = 'ilo-cont'
boot_object_name_mock.return_value = 'boot-object'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'swift:bootiso'
self.node.instance_info = i_info
self.node.save()
ilo_boot._clean_up_boot_iso_for_instance(self.node)
swift_obj_mock.delete_object.assert_called_once_with('ilo-cont',
'boot-object')
@mock.patch.object(ilo_boot.LOG, 'exception', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
def test__clean_up_boot_iso_for_instance_exc(self, swift_mock,
boot_object_name_mock,
log_mock):
swift_obj_mock = swift_mock.return_value
exc = exception.SwiftObjectNotFoundError('error')
swift_obj_mock.delete_object.side_effect = exc
CONF.ilo.swift_ilo_container = 'ilo-cont'
boot_object_name_mock.return_value = 'boot-object'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'swift:bootiso'
self.node.instance_info = i_info
self.node.save()
ilo_boot._clean_up_boot_iso_for_instance(self.node)
swift_obj_mock.delete_object.assert_called_once_with('ilo-cont',
'boot-object')
self.assertTrue(log_mock.called)
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
def test__clean_up_boot_iso_for_instance_on_webserver(self, unlink_mock):
CONF.ilo.use_web_server_for_images = True
CONF.deploy.http_root = "/webserver"
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'http://x.y.z.a/webserver/boot-object'
self.node.instance_info = i_info
self.node.save()
boot_iso_path = "/webserver/boot-object"
ilo_boot._clean_up_boot_iso_for_instance(self.node)
unlink_mock.assert_called_once_with(boot_iso_path)
@mock.patch.object(ilo_boot, '_get_boot_iso_object_name', spec_set=True,
autospec=True)
def test__clean_up_boot_iso_for_instance_no_boot_iso(
self, boot_object_name_mock):
ilo_boot._clean_up_boot_iso_for_instance(self.node)
self.assertFalse(boot_object_name_mock.called)
@mock.patch.object(ilo_boot, 'parse_driver_info', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'get_image_instance_info',
spec_set=True, autospec=True)
def test__parse_deploy_info(self, instance_info_mock, driver_info_mock):
instance_info_mock.return_value = {'a': 'b'}
driver_info_mock.return_value = {'c': 'd'}
expected_info = {'a': 'b', 'c': 'd'}
actual_info = ilo_boot._parse_deploy_info(self.node)
self.assertEqual(expected_info, actual_info)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test__validate_driver_info_MissingParam(self, mock_parse_driver_info):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaisesRegex(exception.MissingParameterValue,
"Missing 'ilo_deploy_iso'",
ilo_boot._validate_driver_info, task)
mock_parse_driver_info.assert_called_once_with(task.node)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test__validate_driver_info_valid_uuid(self, mock_parse_driver_info,
mock_is_glance_image):
mock_is_glance_image.return_value = True
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
deploy_iso = '8a81759a-f29b-454b-8ab3-161c6ca1882c'
task.node.driver_info['ilo_deploy_iso'] = deploy_iso
ilo_boot._validate_driver_info(task)
mock_parse_driver_info.assert_called_once_with(task.node)
mock_is_glance_image.assert_called_once_with(deploy_iso)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test__validate_driver_info_InvalidParam(self, mock_parse_driver_info,
mock_is_glance_image,
mock_validate_href):
deploy_iso = 'http://abc.org/image/qcow2'
mock_validate_href.side_effect = exception.ImageRefValidationFailed(
image_href='http://abc.org/image/qcow2', reason='fail')
mock_is_glance_image.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = deploy_iso
self.assertRaisesRegex(exception.InvalidParameterValue,
"Virtual media boot accepts",
ilo_boot._validate_driver_info, task)
mock_parse_driver_info.assert_called_once_with(task.node)
mock_validate_href.assert_called_once_with(mock.ANY, deploy_iso)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test__validate_driver_info_valid_url(self, mock_parse_driver_info,
mock_is_glance_image,
mock_validate_href):
deploy_iso = 'http://abc.org/image/deploy.iso'
mock_is_glance_image.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = deploy_iso
ilo_boot._validate_driver_info(task)
mock_parse_driver_info.assert_called_once_with(task.node)
mock_validate_href.assert_called_once_with(mock.ANY, deploy_iso)
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def _test__validate_instance_image_info(self,
deploy_info_mock,
validate_prop_mock,
props_expected):
d_info = {'image_source': 'uuid'}
deploy_info_mock.return_value = d_info
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_boot._validate_instance_image_info(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(
task.context, d_info, props_expected)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
def test__validate_glance_partition_image(self,
is_glance_image_mock):
is_glance_image_mock.return_value = True
self._test__validate_instance_image_info(props_expected=['kernel_id',
'ramdisk_id'])
def test__validate_whole_disk_image(self):
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
self._test__validate_instance_image_info(props_expected=[])
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
def test__validate_non_glance_partition_image(self, is_glance_image_mock):
is_glance_image_mock.return_value = False
self._test__validate_instance_image_info(props_expected=['kernel',
'ramdisk'])
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_false(self,
func_get_secure_boot_mode,
func_set_secure_boot_mode):
func_get_secure_boot_mode.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = ilo_boot._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
self.assertFalse(func_set_secure_boot_mode.called)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_true(self,
func_get_secure_boot_mode,
func_set_secure_boot_mode):
func_get_secure_boot_mode.return_value = True
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = ilo_boot._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
func_set_secure_boot_mode.assert_called_once_with(task, False)
self.assertTrue(returned_state)
@mock.patch.object(ilo_boot, 'exception', spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_secure_boot_mode', spec_set=True,
autospec=True)
def test__disable_secure_boot_exception(self,
func_get_secure_boot_mode,
exception_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
exception_mock.IloOperationNotSupported = Exception
func_get_secure_boot_mode.side_effect = Exception
returned_state = ilo_boot._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_prepare_node_for_deploy(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
ilo_boot.prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
func_update_boot_mode.assert_called_once_with(task)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_prepare_node_for_deploy_sec_boot_on(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
ilo_boot.prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
ret_boot_mode = task.node.instance_info['deploy_boot_mode']
self.assertEqual('uefi', ret_boot_mode)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_prepare_node_for_deploy_inst_info(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
task.node.instance_info = instance_info
ilo_boot.prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
func_update_boot_mode.assert_called_once_with(task)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
self.assertNotIn('deploy_boot_mode', task.node.instance_info)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_disable_secure_boot', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test_prepare_node_for_deploy_sec_boot_on_inst_info(
self, func_node_power_action, func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
task.node.instance_info = instance_info
ilo_boot.prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
self.assertNotIn('deploy_boot_mode', task.node.instance_info)
class IloVirtualMediaBootTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaBootTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)
@mock.patch.object(ilo_boot, '_validate_driver_info',
spec_set=True, autospec=True)
@mock.patch.object(ilo_boot, '_validate_instance_image_info',
spec_set=True, autospec=True)
def test_validate(self, mock_val_instance_image_info,
mock_val_driver_info):
instance_info = self.node.instance_info
instance_info['ilo_boot_iso'] = 'deploy-iso'
instance_info['image_source'] = '6b2f0c0c-79e8-4db6-842e-43c9764204af'
self.node.instance_info = instance_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = 'deploy-iso'
task.driver.boot.validate(task)
mock_val_instance_image_info.assert_called_once_with(task)
mock_val_driver_info.assert_called_once_with(task)
@mock.patch.object(ilo_boot, 'prepare_node_for_deploy',
spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_power_action',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'eject_vmedia_devices',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia', spec_set=True,
autospec=True)
@mock.patch.object(deploy_utils, 'get_single_nic_with_vif_port_id',
spec_set=True, autospec=True)
def _test_prepare_ramdisk(self, get_nic_mock, setup_vmedia_mock,
eject_mock, node_power_mock,
prepare_node_for_deploy_mock,
ilo_boot_iso, image_source,
ramdisk_params={'a': 'b'}):
instance_info = self.node.instance_info
instance_info['ilo_boot_iso'] = ilo_boot_iso
instance_info['image_source'] = image_source
self.node.instance_info = instance_info
self.node.save()
get_nic_mock.return_value = '12:34:56:78:90:ab'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_info = task.node.driver_info
driver_info['ilo_deploy_iso'] = 'deploy-iso'
task.node.driver_info = driver_info
task.driver.boot.prepare_ramdisk(task, ramdisk_params)
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
if task.node.provision_state == states.DEPLOYING:
prepare_node_for_deploy_mock.assert_called_once_with(task)
eject_mock.assert_called_once_with(task)
expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab'}
get_nic_mock.assert_called_once_with(task)
setup_vmedia_mock.assert_called_once_with(task, 'deploy-iso',
expected_ramdisk_opts)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
def test_prepare_ramdisk_not_deploying_not_cleaning(self, mock_is_image):
"""Ensure deploy ops are blocked when not deploying and not cleaning"""
for state in states.STABLE_STATES:
mock_is_image.reset_mock()
self.node.provision_state = state
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(
task.driver.boot.prepare_ramdisk(task, None))
self.assertFalse(mock_is_image.called)
def test_prepare_ramdisk_glance_image(self):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk(
ilo_boot_iso='swift:abcdef',
image_source='6b2f0c0c-79e8-4db6-842e-43c9764204af')
self.node.refresh()
self.assertNotIn('ilo_boot_iso', self.node.instance_info)
def test_prepare_ramdisk_not_a_glance_image(self):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk(
ilo_boot_iso='http://mybootiso',
image_source='http://myimage')
self.node.refresh()
self.assertEqual('http://mybootiso',
self.node.instance_info['ilo_boot_iso'])
def test_prepare_ramdisk_glance_image_cleaning(self):
self.node.provision_state = states.CLEANING
self.node.save()
self._test_prepare_ramdisk(
ilo_boot_iso='swift:abcdef',
image_source='6b2f0c0c-79e8-4db6-842e-43c9764204af')
self.node.refresh()
self.assertNotIn('ilo_boot_iso', self.node.instance_info)
def test_prepare_ramdisk_not_a_glance_image_cleaning(self):
self.node.provision_state = states.CLEANING
self.node.save()
self._test_prepare_ramdisk(
ilo_boot_iso='http://mybootiso',
image_source='http://myimage')
self.node.refresh()
self.assertEqual('http://mybootiso',
self.node.instance_info['ilo_boot_iso'])
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso', spec_set=True,
autospec=True)
def test__configure_vmedia_boot_with_boot_iso(
self, get_boot_iso_mock, setup_vmedia_mock, set_boot_device_mock):
root_uuid = {'root uuid': 'root_uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_boot_iso_mock.return_value = 'boot.iso'
task.driver.boot._configure_vmedia_boot(
task, root_uuid)
get_boot_iso_mock.assert_called_once_with(
task, root_uuid)
setup_vmedia_mock.assert_called_once_with(
task, 'boot.iso')
set_boot_device_mock.assert_called_once_with(
task, boot_devices.CDROM, persistent=True)
self.assertEqual('boot.iso',
task.node.instance_info['ilo_boot_iso'])
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_get_boot_iso', spec_set=True,
autospec=True)
def test__configure_vmedia_boot_without_boot_iso(
self, get_boot_iso_mock, setup_vmedia_mock, set_boot_device_mock):
root_uuid = {'root uuid': 'root_uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_boot_iso_mock.return_value = None
task.driver.boot._configure_vmedia_boot(
task, root_uuid)
get_boot_iso_mock.assert_called_once_with(
task, root_uuid)
self.assertFalse(setup_vmedia_mock.called)
self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot, '_clean_up_boot_iso_for_instance',
spec_set=True, autospec=True)
def test_clean_up_instance(self, cleanup_iso_mock,
cleanup_vmedia_mock, node_power_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = False
driver_internal_info['root_uuid_or_disk_id'] = (
"12312642-09d3-467f-8e09-12385826a123")
task.node.driver_internal_info = driver_internal_info
task.node.save()
task.driver.boot.clean_up_instance(task)
cleanup_iso_mock.assert_called_once_with(task.node)
cleanup_vmedia_mock.assert_called_once_with(task)
driver_internal_info = task.node.driver_internal_info
self.assertNotIn('boot_iso_created_in_web_server',
driver_internal_info)
self.assertNotIn('root_uuid_or_disk_id', driver_internal_info)
node_power_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_clean_up_ramdisk(self, cleanup_vmedia_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.clean_up_ramdisk(task)
cleanup_vmedia_mock.assert_called_once_with(task)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def _test_prepare_instance_whole_disk_image(
self, cleanup_vmedia_boot_mock, set_boot_device_mock,
update_boot_mode_mock, update_secure_boot_mode_mock):
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task, True)
def test_prepare_instance_whole_disk_image_local(self):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
self.node.save()
self._test_prepare_instance_whole_disk_image()
def test_prepare_instance_whole_disk_image(self):
self._test_prepare_instance_whole_disk_image()
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_boot.IloVirtualMediaBoot,
'_configure_vmedia_boot', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_prepare_instance_partition_image(
self, cleanup_vmedia_boot_mock, configure_vmedia_mock,
update_boot_mode_mock, update_secure_boot_mode_mock):
self.node.driver_internal_info = {'root_uuid_or_disk_id': (
"12312642-09d3-467f-8e09-12385826a123")}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
configure_vmedia_mock.assert_called_once_with(
mock.ANY, task, "12312642-09d3-467f-8e09-12385826a123")
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task, True)
class IloPXEBootTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPXEBootTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='pxe_ilo', driver_info=INFO_DICT)
@mock.patch.object(ilo_boot, 'prepare_node_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True,
autospec=True)
def test_prepare_ramdisk_not_deploying_not_cleaning(
self, pxe_prepare_instance_mock, prepare_node_mock):
self.node.provision_state = states.CLEANING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(
task.driver.boot.prepare_ramdisk(task, None))
self.assertFalse(prepare_node_mock.called)
pxe_prepare_instance_mock.assert_called_once_with(mock.ANY,
task, None)
@mock.patch.object(ilo_boot, 'prepare_node_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True,
autospec=True)
def test_prepare_ramdisk_in_deploying(self, pxe_prepare_instance_mock,
prepare_node_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(
task.driver.boot.prepare_ramdisk(task, None))
prepare_node_mock.assert_called_once_with(task)
pxe_prepare_instance_mock.assert_called_once_with(mock.ANY,
task, None)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_instance', spec_set=True,
autospec=True)
def test_clean_up_instance(self, pxe_cleanup_mock, node_power_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.clean_up_instance(task)
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
pxe_cleanup_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', spec_set=True,
autospec=True)
def test_prepare_instance(self, pxe_prepare_instance_mock,
update_boot_mode_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task, True)
pxe_prepare_instance_mock.assert_called_once_with(mock.ANY, task)
| apache-2.0 | 5,620,285,987,895,974,000 | 50.724652 | 79 | 0.562371 | false | 3.87194 | true | false | false |
dostavro/dotfiles | sublime2/Packages/LaTeXTools/viewPDF.py | 1 | 2572 | # ST2/ST3 compat
from __future__ import print_function
import sublime
if sublime.version() < '3000':
# we are on ST2 and Python 2.X
_ST3 = False
import getTeXRoot
else:
_ST3 = True
from . import getTeXRoot
import sublime_plugin, os, os.path, platform
from subprocess import Popen
# View PDF file corresonding to TEX file in current buffer
# Assumes that the SumatraPDF viewer is used (great for inverse search!)
# and its executable is on the %PATH%
# Warning: we do not do "deep" safety checks (e.g. see if PDF file is old)
class View_pdfCommand(sublime_plugin.WindowCommand):
def run(self):
s = sublime.load_settings("LaTeXTools Preferences.sublime-settings")
prefs_keep_focus = s.get("keep_focus", True)
prefs_lin = s.get("linux")
view = self.window.active_view()
texFile, texExt = os.path.splitext(view.file_name())
if texExt.upper() != ".TEX":
sublime.error_message("%s is not a TeX source file: cannot view." % (os.path.basename(view.file_name()),))
return
quotes = ""# \"" MUST CHECK WHETHER WE NEED QUOTES ON WINDOWS!!!
root = getTeXRoot.get_tex_root(view)
rootFile, rootExt = os.path.splitext(root)
pdfFile = quotes + rootFile + '.pdf' + quotes
s = platform.system()
script_path = None
if s == "Darwin":
# for inverse search, set up a "Custom" sync profile, using
# "subl" as command and "%file:%line" as argument
# you also have to put a symlink to subl somewhere on your path
# Also check the box "check for file changes"
viewercmd = ["open", "-a", "Skim"]
elif s == "Windows":
# with new version of SumatraPDF, can set up Inverse
# Search in the GUI: under Settings|Options...
# Under "Set inverse search command-line", set:
# sublime_text "%f":%l
viewercmd = ["SumatraPDF", "-reuse-instance"]
elif s == "Linux":
# the required scripts are in the 'evince' subdir
script_path = os.path.join(sublime.packages_path(), 'LaTeXTools', 'evince')
ev_sync_exec = os.path.join(script_path, 'evince_sync') # so we get inverse search
# Get python binary if set in preferences:
py_binary = prefs_lin["python2"] or 'python'
sb_binary = prefs_lin["sublime"] or 'sublime-text'
viewercmd = ['sh', ev_sync_exec, py_binary, sb_binary]
else:
sublime.error_message("Platform as yet unsupported. Sorry!")
return
print (viewercmd + [pdfFile])
try:
Popen(viewercmd + [pdfFile], cwd=script_path)
except OSError:
sublime.error_message("Cannot launch Viewer. Make sure it is on your PATH.")
| mit | -4,437,854,508,914,317,300 | 35.823529 | 109 | 0.667574 | false | 3.125152 | false | false | false |
dhylands/teensy-mon | teensy-mon.py | 1 | 8986 | #!/usr/bin/python -u
"""Program for monitoring serial messages from the Teensy.
This program waits for the device to be connected and when the teensy is
disconnected, then it will go back to waiting for the teensy to once again
be connected.
This program also looks for lines that start with a single letter followed
by a colon, and will colorize the lines based on the letter.
"""
import select
import pyudev
import serial
import sys
import tty
import termios
import traceback
import syslog
import argparse
(LT_BLACK, LT_RED, LT_GREEN, LT_YELLOW,
LT_BLUE, LT_MAGENTA, LT_CYAN, LT_WHITE) = [
("\033[1;%dm" % (30 + i)) for i in range(8)]
(DK_BLACK, DK_RED, DK_GREEN, DK_YELLOW,
DK_BLUE, DK_MAGENTA, DK_CYAN, DK_WHITE) = [
("\033[2;%dm" % (30 + i)) for i in range(8)]
NO_COLOR = "\033[0m"
COLORS = {
'W': LT_YELLOW,
'I': "",
'D': LT_BLUE,
'C': LT_RED,
'E': LT_RED
}
class OutputWriter(object):
"""Class for dealing with the output from the teensy."""
def __init__(self):
self.buffered_output = ""
self.column = 0
self.colored = False
def write(self, string):
"""Writes characters to output. Lines will be delimited by
newline characters.
This routine breaks the output into lines and writes each line
individually, colorizing as appropriate.
"""
if len(self.buffered_output) > 0:
string = self.buffered_output + string
self.buffered_output = ""
while True:
nl_index = string.find('\n')
if self.column == 0 and nl_index < 0 and len(string) < 2:
self.buffered_output = string
return
if nl_index < 0:
line_string = string
else:
line_string = string[0:nl_index + 1]
prefix = ""
suffix = ""
if (self.column == 0 and len(string) >= 2 and
string[1] == ':' and string[0] in COLORS):
prefix = COLORS[string[0]]
self.colored = True
if nl_index >= 0 and self.colored:
suffix = NO_COLOR
sys.stdout.write(prefix + line_string + suffix)
sys.stdout.flush()
self.column += len(line_string)
if nl_index < 0:
return
string = string[nl_index + 1:]
self.column = 0
def is_teensy(device, serial_num=None):
"""Checks device to see if its a teensy device.
If serial is provided, then it will further check to see if the
serial number of the teensy device also matches.
"""
if 'ID_VENDOR' not in device:
return False
if not device['ID_VENDOR'].startswith('Teensy'):
return False
if serial_num is None:
return True
return device['ID_SERIAL_SHORT'] == serial_num
def teensy_mon(monitor, device):
"""Monitors the serial port from a given teensy device.
This function open the USDB serial port associated with device, and
will read characters from it and send to stdout. It will also read
characters from stdin and send them to the device.
This function returns when the teensy deivce disconnects (or is
disconnected).
"""
port_name = device.device_node
serial_num = device['ID_SERIAL_SHORT']
print 'Teensy device connected @%s (serial %s)\r' % (port_name, serial_num)
epoll = select.epoll()
epoll.register(monitor.fileno(), select.POLLIN)
output = OutputWriter()
try:
serial_port = serial.Serial(port=port_name,
timeout=0.001,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
xonxoff=False,
rtscts=False,
dsrdtr=False)
except serial.serialutil.SerialException:
print "Unable to open port '%s'" % port_name
return
serial_fd = serial_port.fileno()
tty.setraw(serial_fd)
new_settings = termios.tcgetattr(serial_fd)
new_settings[6][termios.VTIME] = 0
new_settings[6][termios.VMIN] = 1
termios.tcsetattr(serial_fd, termios.TCSANOW, new_settings)
epoll.register(serial_port.fileno(), select.POLLIN)
epoll.register(sys.stdin.fileno(), select.POLLIN)
while True:
events = epoll.poll()
for fileno, _ in events:
if fileno == monitor.fileno():
dev = monitor.poll()
if (dev.device_node != port_name or
dev.action != 'remove'):
continue
print 'Teensy device @', port_name, ' disconnected.\r'
print
serial_port.close()
return
if fileno == serial_port.fileno():
try:
data = serial_port.read(256)
except serial.serialutil.SerialException:
print 'Teensy device @', port_name, ' disconnected.\r'
print
serial_port.close()
return
#for x in data:
# print "Serial.Read '%c' 0x%02x" % (x, ord(x))
# For now, we'll not support color, and let the target do it.
# That also means that we work better if the target is doing
# something like readline
#output.write(data)
sys.stdout.write(data)
sys.stdout.flush()
if fileno == sys.stdin.fileno():
data = sys.stdin.read(1)
#for x in data:
# print "stdin.Read '%c' 0x%02x" % (x, ord(x))
if data[0] == chr(3):
raise KeyboardInterrupt
if data[0] == '\n':
serial_port.write('\r')
else:
serial_port.write(data)
def main():
"""The main program."""
parser = argparse.ArgumentParser(
prog="teensy_mon",
usage="%(prog)s [options] [command]",
description="Monitor serial output from teensy devices",
epilog="Press Control-C to quit"
)
parser.add_argument(
"-l", "--list",
dest="list",
action="store_true",
help="List Teensy devices currently connected"
)
parser.add_argument(
"-s", "--serial",
dest="serial",
help="Connect to Teeny device with a given serial number"
)
parser.add_argument(
"-v", "--verbose",
dest="verbose",
action="store_true",
help="Turn on verbose messages",
default=False
)
args = parser.parse_args(sys.argv[1:])
if args.verbose:
print 'pyudev version =', pyudev.__version__
context = pyudev.Context()
context.log_priority = syslog.LOG_NOTICE
if args.list:
detected = False
for device in context.list_devices(subsystem='tty'):
if is_teensy(device):
print 'Teensy device serial %-5s found @%s' % (
device['ID_SERIAL_SHORT'], device.device_node)
detected = True
if not detected:
print 'No Teensy devices detected.'
return
stdin_fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(stdin_fd)
try:
# Make some changes to stdin. We want to turn off canonical
# processing (so that ^H gets sent to the teensy), turn off echo,
# and make it unbuffered.
tty.setraw(stdin_fd)
new_settings = termios.tcgetattr(stdin_fd)
new_settings[3] &= ~(termios.ICANON | termios.ECHO)
new_settings[6][termios.VTIME] = 0
new_settings[6][termios.VMIN] = 1
termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
monitor = pyudev.Monitor.from_netlink(context)
monitor.start()
monitor.filter_by('tty')
# Check to see if the teensy device is already present.
for device in context.list_devices(subsystem='tty'):
if is_teensy(device, args.serial):
teensy_mon(monitor, device)
# Otherwise wait for the teensy device to connect
while True:
if args.serial:
print 'Waiting for Teensy with serial %s ...' % args.serial
else:
print 'Waiting for Teensy...'
for device in iter(monitor.poll, None):
if device.action != 'add':
continue
if is_teensy(device, args.serial):
teensy_mon(monitor, device)
except KeyboardInterrupt:
print '\r\n'
except Exception:
traceback.print_exc()
# Restore stdin back to its old settings
termios.tcsetattr(stdin_fd, termios.TCSANOW, old_settings)
main()
| mit | -45,455,361,719,860,664 | 32.281481 | 79 | 0.551858 | false | 3.977866 | false | false | false |
mikewiebe-ansible/ansible | test/units/modules/network/fortios/test_fortios_spamfilter_options.py | 21 | 5380 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_spamfilter_options
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_spamfilter_options.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_spamfilter_options_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'spamfilter_options': {
'dns_timeout': '3'
},
'vdom': 'root'}
is_error, changed, response = fortios_spamfilter_options.fortios_spamfilter(input_data, fos_instance)
expected_data = {
'dns-timeout': '3'
}
set_method_mock.assert_called_with('spamfilter', 'options', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_spamfilter_options_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'spamfilter_options': {
'dns_timeout': '3'
},
'vdom': 'root'}
is_error, changed, response = fortios_spamfilter_options.fortios_spamfilter(input_data, fos_instance)
expected_data = {
'dns-timeout': '3'
}
set_method_mock.assert_called_with('spamfilter', 'options', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_spamfilter_options_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'spamfilter_options': {
'dns_timeout': '3'
},
'vdom': 'root'}
is_error, changed, response = fortios_spamfilter_options.fortios_spamfilter(input_data, fos_instance)
expected_data = {
'dns-timeout': '3'
}
set_method_mock.assert_called_with('spamfilter', 'options', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_spamfilter_options_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'spamfilter_options': {
'random_attribute_not_valid': 'tag',
'dns_timeout': '3'
},
'vdom': 'root'}
is_error, changed, response = fortios_spamfilter_options.fortios_spamfilter(input_data, fos_instance)
expected_data = {
'dns-timeout': '3'
}
set_method_mock.assert_called_with('spamfilter', 'options', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 | -8,573,058,264,405,497,000 | 34.629139 | 133 | 0.684387 | false | 3.754361 | true | false | false |
wri/gfw-api | gfw/models/subscription.py | 1 | 4363 | # Global Forest Watch API
# Copyright (C) 2013 World Resource Institute
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""This module supports pubsub."""
import logging
import copy
import json
from appengine_config import runtime_config
from google.appengine.ext import ndb
from google.appengine.api import users
from google.appengine.api import taskqueue
from gfw.user.gfw_user import GFWUser
from gfw.models.topic import Topic
from gfw.mailers.subscription_confirmation import SubscriptionConfirmationMailer
class Subscription(ndb.Model):
name = ndb.StringProperty()
topic = ndb.StringProperty()
email = ndb.StringProperty()
url = ndb.StringProperty()
user_id = ndb.KeyProperty()
pa = ndb.StringProperty()
use = ndb.StringProperty()
useid = ndb.IntegerProperty()
iso = ndb.StringProperty()
id1 = ndb.StringProperty()
ifl = ndb.StringProperty()
fl_id1 = ndb.StringProperty()
wdpaid = ndb.IntegerProperty()
has_geom = ndb.BooleanProperty(default=False)
confirmed = ndb.BooleanProperty(default=False)
geom = ndb.JsonProperty()
params = ndb.JsonProperty()
updates = ndb.JsonProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
new = ndb.BooleanProperty(default=True)
geostore = ndb.StringProperty()
language = ndb.StringProperty(choices=['EN', 'ES', 'FR', 'ID', 'PT', 'ZH'], default='EN')
overview_image = ndb.BlobProperty()
kind = 'Subscription'
@classmethod
def create(cls, params, user=None):
"""Create subscription if email and, iso or geom is present"""
subscription = Subscription()
subscription.populate(**params)
subscription.params = params
subscription.has_geom = bool(params.get('geom'))
user_id = user.key if user is not None else ndb.Key('User', None)
subscription.user_id = user_id
subscription.put()
return subscription
@classmethod
def subscribe(cls, params, user):
subscription = Subscription.create(params, user)
if subscription:
subscription.send_confirmation_email()
return subscription
else:
return False
@classmethod
def confirm_by_id(cls, id):
subscription = cls.get_by_id(int(id))
if subscription:
return subscription.confirm()
else:
return False
def send_confirmation_email(self):
taskqueue.add(url='/v2/subscriptions/tasks/confirmation',
queue_name='pubsub-confirmation',
params=dict(subscription=self.key.urlsafe()))
def to_dict(self):
result = super(Subscription,self).to_dict()
result['key'] = self.key.id()
result.pop('overview_image', None)
return result
def formatted_name(self):
if (not self.name) or (len(self.name) == 0):
return "Unnamed Subscription"
else:
return self.name
def confirm(self):
self.confirmed = True
return self.put()
def unconfirm(self):
self.confirmed = False
self.send_confirmation_email()
return self.put()
def unsubscribe(self):
return self.key.delete()
def run_analysis(self, begin, end):
params = copy.copy(self.params)
params['begin'] = begin
params['end'] = end
if 'geom' in params:
geom = params['geom']
if 'geometry' in geom:
geom = geom['geometry']
params['geojson'] = json.dumps(geom)
topic = Topic.get_by_id(self.topic)
return topic.execute(params)
| gpl-2.0 | 8,102,869,510,840,577,000 | 31.804511 | 94 | 0.646115 | false | 4.171128 | false | false | false |
rubenlalinde/quacher | quacher.py | 1 | 3508 | #!/usr/bin/python
# Package imports
import requests
import csv
# Stock information
symbol = input('Enter a stock symbol: ').upper()
stock = dict( [
( 'name' , 'Excellent Co.' ),
( 'symbol' , symbol ),
( 'phone' , '800-YOU-GAIN' ),
( 'currentPrice' , '150' ),
( 'yearHigh' , '12' ),
( 'yearLow' , '5' ),
( 'marketCap' , '$100 Million' ),
( 'dayDollarVolume' , '$1 Million' ),
( 'sales' , '$50 Million' ),
( 'netProfitMargin' , '25%' ),
( 'cash' , '$10 Million' ),
( 'totalDebt' , '$2.5 Million' ),
( 'salesPerShare' , '$15' ),
( 'cashFloatingPerShare' , '$0.45' ),
( 'earningsPerShare' , '$4' ),
( 'dividendYield' , 'N/A' ),
( 'returnOnEquity' , '20%' ),
( 'insiderBuy' , '10' ),
( 'insiderOwn' , '30%' ),
( 'stockBuyback' , 'Yes' ),
( 'epsRank' , '95' ),
( 'rpsRank' , '95' ),
( '5yrSales' , '20%' ),
( '5yrPrice' , '900%' ),
( 'projSales' , '25%' ),
( 'projHi' , '100' ),
( 'projLow' , '60' ),
( 'time' , '1' ),
( 'safety' , '2' ),
( 'stars' , '5' ),
( 'fairValue' , '5' ),
( 'currentPE' , '2.5' ),
( 'averagePE' , '5' ),
( 'ps' , '0.67' ),
( 'pb' , '0.5' ),
( 'currentRatio' , '10' ),
( 'quickRatio' , '2' ),
( 'smaMacdRsi' , 'strongUp' )
] )
'''
# Search the website for the quote and grab the information
# This gives me of the statistics that we need for the
# table but not everything. May need to explore multiple sites.
r = requests.get( 'http://www.google.com/finance?q=' + symbol )
print( r.text )
'''
# Output the statistics to a CSV formatted file
with open('worksheet.csv','w',newline='') as csvfile:
writer = csv.writer( csvfile, delimiter=' ', quoting=csv.QUOTE_MINIMAL )
writer.writerow( [
'Company Name, Symbol, and Phone',
'Current Price',
'52 wk Hi/Lo',
'Market Cap',
'Day Dollar Volume',
'Sales',
'Net Profit Margin',
'Cash',
'Total Debt',
'Sales / Share',
'Cash Flow / Share',
'Earnings / Share',
'Dividend Yield',
'ROE',
'Insider Buy/Own',
'Stock Buyback',
'EPS Rank',
'RPS Rank',
'5 yr Sales',
'5 yr Price',
'Proj Sales',
'Proj Hi/Lo',
'Time Safe',
'STARS Fair Val',
'Current P/E',
'Average P/E',
'P/S',
'P/B',
'Current Ratio',
'Quick Ratio',
'SMA MACD RSI' ] )
writer.writerow( [
str( stock['name'] + ', ' + stock['symbol'] + ', ' + stock['phone'] ),
str( stock['currentPrice'] ),
str( stock['yearHigh'] + '/' + stock['yearLow'] ),
str( stock['marketCap'] ),
str( stock['dayDollarVolume'] ),
str( stock['sales'] ),
str( stock['netProfitMargin'] ),
str( stock['cash'] ),
str( stock['totalDebt'] ),
str( stock['salesPerShare'] ),
str( stock['cashFloatingPerShare'] ),
str( stock['earningsPerShare'] ),
str( stock['dividendYield'] ),
str( stock['returnOnEquity'] ),
str( stock['insiderBuy'] + '/' + stock['insiderOwn'] ),
str( stock['stockBuyback'] ),
str( stock['epsRank'] ),
str( stock['rpsRank'] ),
str( stock['5yrSales'] ),
str( stock['5yrPrice'] ),
str( stock['projSales'] ),
str( stock['projHi'] + '/' + stock['projLow'] ),
str( stock['time'] + '/' + stock['safety'] ),
str( stock['stars'] + '/' + stock['fairValue'] ),
str( stock['currentPE'] ),
str( stock['averagePE'] ),
str( stock['ps'] ),
str( stock['pb'] ),
str( stock['currentRatio'] ),
str( stock['quickRatio'] ),
str( stock['smaMacdRsi'] ) ] )
| mit | 1,302,003,198,169,908,700 | 25.984615 | 75 | 0.526796 | false | 2.70888 | false | false | false |
qiyeboy/SpiderBook | ch11/APISpider/SpiderDataOutput.py | 1 | 1240 | #coding:utf-8
import codecs
class SpiderDataOutput(object):
def __init__(self):
self.filepath='kuwo.html'
self.output_head(self.filepath)
def output_head(self,path):
'''
将HTML头写进去
:return:
'''
fout=codecs.open(path,'w',encoding='utf-8')
fout.write("<html>")
fout.write("<body>")
fout.write("<table>")
fout.close()
def output_html(self,path,datas):
'''
将数据写入HTML文件中
:param path: 文件路径
:return:
'''
if datas==None:
return
fout=codecs.open(path,'a',encoding='utf-8')
for data in datas:
fout.write("<tr>")
fout.write("<td>%s</td>"%data['file_id'])
fout.write("<td>%s</td>"%data['name'])
fout.write("<td>%s</td>"%data['file_path'])
fout.write("</tr>")
fout.close()
def ouput_end(self,path):
'''
输出HTML结束
:param path: 文件存储路径
:return:
'''
fout=codecs.open(path,'a',encoding='utf-8')
fout.write("</table>")
fout.write("</body>")
fout.write("</html>")
fout.close() | mit | 4,054,300,558,502,535,700 | 24.255319 | 55 | 0.482293 | false | 3.350282 | false | false | false |
akretion/odoo | addons/test_mass_mailing/tests/test_blacklist_mixin.py | 12 | 3425 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from odoo.tests.common import users
from odoo.addons.test_mass_mailing.tests import common
from odoo.addons.test_mass_mailing.models.mass_mail_test import MassMailTestBlacklist
from odoo.exceptions import AccessError, UserError
class TestBLMixin(common.MassMailingCase):
@classmethod
def setUpClass(cls):
super(TestBLMixin, cls).setUpClass()
cls.env['mail.blacklist'].create([{
'email': 'Arya.Stark@example.com',
'active': True,
}, {
'email': 'Sansa.Stark@example.com',
'active': False,
}])
@users('emp')
def test_bl_mixin_primary_field_consistency(self):
MassMailTestBlacklist._primary_email = ['not_a_field']
with self.assertRaises(UserError):
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
MassMailTestBlacklist._primary_email = 'not_a_list'
with self.assertRaises(UserError):
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
MassMailTestBlacklist._primary_email = 'email_from'
with self.assertRaises(UserError):
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
MassMailTestBlacklist._primary_email = ['email_from', 'name']
with self.assertRaises(UserError):
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
MassMailTestBlacklist._primary_email = ['email_from']
self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
@users('emp')
def test_bl_mixin_is_blacklisted(self):
""" Test is_blacklisted field computation """
record = self.env['mass.mail.test.bl'].create({'email_from': 'arya.stark@example.com'})
self.assertTrue(record.is_blacklisted)
record = self.env['mass.mail.test.bl'].create({'email_from': 'not.arya.stark@example.com'})
self.assertFalse(record.is_blacklisted)
@users('emp')
def test_bl_mixin_search_blacklisted(self):
""" Test is_blacklisted field search implementation """
record1 = self.env['mass.mail.test.bl'].create({'email_from': 'arya.stark@example.com'})
record2 = self.env['mass.mail.test.bl'].create({'email_from': 'not.arya.stark@example.com'})
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)])
self.assertEqual(search_res, record2)
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '!=', True)])
self.assertEqual(search_res, record2)
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', True)])
self.assertEqual(search_res, record1)
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '!=', False)])
self.assertEqual(search_res, record1)
@users('emp')
def test_bl_mixin_search_blacklisted_format(self):
""" Test is_blacklisted field search using email parsing """
record1 = self.env['mass.mail.test.bl'].create({'email_from': 'Arya Stark <arya.stark@example.com>'})
self.assertTrue(record1.is_blacklisted)
search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', True)])
self.assertEqual(search_res, record1)
| agpl-3.0 | 5,994,033,542,301,576,000 | 41.283951 | 109 | 0.635912 | false | 3.567708 | true | false | false |
osrf/osrf_hw | kicad_scripts/freecad_gen_BGA.py | 1 | 5331 | from __future__ import print_function
import sys
sys.path.append('/usr/lib/freecad/lib')
print(sys.path)
import FreeCAD# as App
import ImportGui
import FreeCADGui# as Gui
import os
import Draft#,Sketch,Part
# lets assume for now that we have all the information in a filename
# lets also assume that they are only full ball arrays no missing ball in the center)
# all distances in mm
# FIXME doesnt handle different x and y pitch
# FIXME size of balls
# NOTE incomplete bsall matrices not handled
# one should remove them by hand because impossible to handle all the fishy cases automatically
MMTOMIL = 0.3937
directory = sys.argv[2]; name = sys.argv[3]; pitch = float(sys.argv[4])
nBallx = int(sys.argv[5]); nBally = int(sys.argv[6])
length = float(sys.argv[7]); width = float(sys.argv[8])
height = float(sys.argv[9]); ballradius = pitch/4.
# go in sketch mode
Gui.activateWorkbench("SketcherWorkbench")
# create doc
App.newDocument()
App.setActiveDocument("Unnamed")
App.ActiveDocument=App.getDocument("Unnamed")
Gui.ActiveDocument=Gui.getDocument("Unnamed")
print("document created")
# create sketch
App.activeDocument().addObject('Sketcher::SketchObject','Sketch')
print("sketch added")
App.activeDocument().Sketch.Placement = App.Placement(App.Vector(0.0,0.0,0.0),App.Rotation(0.0,0.0,0.0,1.0))
Gui.activeDocument().setEdit('Sketch')
print("edit sketch")
# trace rectangle
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(width/2.0,-length/2.0,0),App.Vector(-width/2.0,-length/2.0,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(-width/2.0,-length/2.0,0),App.Vector(-width/2.0,length/2.0,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(-width/2.0,length/2.0,0),App.Vector(width/2.0,length/2.0,0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(App.Vector(width/2.0,length/2.0,0),App.Vector(width/2.0,-length/2.0,0)))
print("place lines")
# add circular cutout
App.ActiveDocument.Sketch.addGeometry(Part.Circle(App.Vector(-width/2.0+1,length/2.0-1,0),App.Vector(0,0,1),0.5))
App.ActiveDocument.recompute()
Gui.getDocument('Unnamed').resetEdit()
App.getDocument('Unnamed').recompute()
# create pad from sketch
Gui.activateWorkbench("PartDesignWorkbench")
App.activeDocument().addObject("PartDesign::Pad","Pad")
App.activeDocument().Pad.Sketch = App.activeDocument().Sketch
App.activeDocument().Pad.Length = height
App.ActiveDocument.recompute()
Gui.activeDocument().hide("Sketch")
# change pad color to black
Gui.getDocument("Unnamed").getObject("Pad").ShapeColor = (0.00,0.00,0.00)
Gui.getDocument("Unnamed").getObject("Pad").Visibility=False #Hide pad
# Add Cylinder
Gui.activateWorkbench("PartWorkbench")
App.ActiveDocument.addObject("Part::Cylinder","Cylinder")
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Radius = 0.5
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Height = height
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Placement = App.Placement(App.Vector(-width/2.0+1,length/2.0-1,ballradius),App.Rotation(0,0,0,1))
App.ActiveDocument.recompute()
# Ball creation
App.ActiveDocument.addObject("Part::Sphere","Sphere")
App.ActiveDocument.recompute()
FreeCAD.getDocument("Unnamed").getObject("Sphere").Radius = ballradius
App.ActiveDocument.recompute()
# Ball Array creation
Gui.activateWorkbench("ArchWorkbench")
Draft.array(App.getDocument("Unnamed").getObject("Sphere"),App.Vector(pitch,0,0),App.Vector(0,pitch,0),nBallx,nBally)
## Merge all the spheres into a single object
Gui.activateWorkbench("PartWorkbench")
shapesToFuse=[]
for obj in FreeCAD.ActiveDocument.Objects:
if obj.Name.find("Sphere") != -1:
Gui.Selection.addSelection(obj)
shapesToFuse.append(obj)
App.activeDocument().addObject("Part::MultiFuse","Fusion")
App.activeDocument().Fusion.Shapes = shapesToFuse
App.ActiveDocument.recompute()
fuse = FreeCAD.ActiveDocument.getObject("Fusion")
fuse.Placement = App.Placement(App.Vector(-(nBallx-1)*pitch/2.0,-(nBally-1)*pitch/2.0,ballradius),App.Rotation(0,0,0,1))
App.ActiveDocument.getObject("Pad").Placement = App.Placement(App.Vector(0,0,ballradius),App.Rotation(0,0,0,1))
Gui.ActiveDocument.getObject("Pad").Visibility=True
Gui.SendMsgToActiveView("ViewFit")
Gui.activeDocument().activeView().viewBottom()
## Export as a step model
exp_objects = []
for obj in FreeCAD.ActiveDocument.Objects:
# select all but indivudial Spheres and Sketch
if (obj.Name.find("Sphere") == -1) and (obj.Name.find("Sketch") == -1):
Gui.Selection.addSelection(obj)
exp_objects.append(obj)
else:
FreeCAD.ActiveDocument.removeObject(obj.Name)
App.activeDocument().addObject("Part::MultiFuse","Fusion2")
App.activeDocument().Fusion2.Shapes = exp_objects
App.ActiveDocument.recompute()
for obj in exp_objects:
FreeCAD.ActiveDocument.removeObject(obj.Name)
exp_objects= []
exp_objects.append(FreeCAD.ActiveDocument.getObject("Fusion2"))
ImportGui.export(exp_objects,os.path.join(directory, name + '.step'))
del exp_objects
# Scale to mil before export to VRML for KiCAD use
Draft.scale(FreeCAD.ActiveDocument.ActiveObject, FreeCAD.Vector(MMTOMIL,MMTOMIL,MMTOMIL))
FreeCAD.ActiveDocument.removeObject("Fusion2")
### Export as a VRML model
exp_objects = []
exp_objects.append(FreeCAD.ActiveDocument.getObject("Scale"))
FreeCADGui.export(exp_objects,os.path.join(directory, name + '.wrl'))
del exp_objects
exit(1)
| apache-2.0 | -542,362,453,800,167,000 | 40.325581 | 150 | 0.763271 | false | 3.001689 | false | false | false |
ayarshabeer/django-rest-framework-jwt | rest_framework_jwt/utils.py | 13 | 2538 | import jwt
import warnings
from calendar import timegm
from datetime import datetime
from rest_framework_jwt.compat import get_username, get_username_field
from rest_framework_jwt.settings import api_settings
def jwt_payload_handler(user):
username_field = get_username_field()
username = get_username(user)
warnings.warn(
'The following fields will be removed in the future: '
'`email` and `user_id`. ',
DeprecationWarning
)
payload = {
'user_id': user.pk,
'email': user.email,
'username': username,
'exp': datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA
}
payload[username_field] = username
# Include original issued at time for a brand new token,
# to allow token refresh
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.utcnow().utctimetuple()
)
return payload
def jwt_get_user_id_from_payload_handler(payload):
"""
Override this function if user_id is formatted differently in payload
"""
warnings.warn(
'The following will be removed in the future. '
'Use `JWT_PAYLOAD_GET_USERNAME_HANDLER` instead.',
DeprecationWarning
)
return payload.get('user_id')
def jwt_get_username_from_payload_handler(payload):
"""
Override this function if username is formatted differently in payload
"""
return payload.get('username')
def jwt_encode_handler(payload):
return jwt.encode(
payload,
api_settings.JWT_SECRET_KEY,
api_settings.JWT_ALGORITHM
).decode('utf-8')
def jwt_decode_handler(token):
options = {
'verify_exp': api_settings.JWT_VERIFY_EXPIRATION,
}
return jwt.decode(
token,
api_settings.JWT_SECRET_KEY,
api_settings.JWT_VERIFY,
options=options,
leeway=api_settings.JWT_LEEWAY,
audience=api_settings.JWT_AUDIENCE,
issuer=api_settings.JWT_ISSUER,
algorithms=[api_settings.JWT_ALGORITHM]
)
def jwt_response_payload_handler(token, user=None, request=None):
"""
Returns the response data for both the login and refresh views.
Override to return a custom response such as including the
serialized representation of the User.
Example:
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user).data
}
"""
return {
'token': token
}
| mit | 8,271,559,828,750,785,000 | 24.128713 | 74 | 0.64342 | false | 3.828054 | false | false | false |
quantumlib/Cirq | cirq-core/cirq/contrib/acquaintance/optimizers_test.py | 1 | 2786 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
import cirq.testing as ct
import cirq.contrib.acquaintance as cca
def test_remove_redundant_acquaintance_opportunities():
device = cca.UnconstrainedAcquaintanceDevice
a, b, c, d, e = cirq.LineQubit.range(5)
swap = cca.SwapPermutationGate()
with pytest.raises(TypeError):
ops = [cca.acquaint(a, b)]
strategy = cirq.Circuit(ops)
cca.remove_redundant_acquaintance_opportunities(strategy)
ops = [cca.acquaint(a, b), cca.acquaint(a, b)]
strategy = cirq.Circuit(ops, device=device)
diagram_before = """
0: ───█───█───
│ │
1: ───█───█───
"""
ct.assert_has_diagram(strategy, diagram_before)
cca.remove_redundant_acquaintance_opportunities(strategy)
cca.remove_redundant_acquaintance_opportunities(strategy)
diagram_after = """
0: ───█───────
│
1: ───█───────
"""
ct.assert_has_diagram(strategy, diagram_after)
ops = [cca.acquaint(a, b), cca.acquaint(c, d), swap(d, e), swap(c, d), cca.acquaint(d, e)]
strategy = cirq.Circuit(ops, device=device)
diagram_before = """
0: ───█───────────────────
│
1: ───█───────────────────
2: ───█─────────0↦1───────
│ │
3: ───█───0↦1───1↦0───█───
│ │
4: ───────1↦0─────────█───
"""
ct.assert_has_diagram(strategy, diagram_before)
cca.remove_redundant_acquaintance_opportunities(strategy)
diagram_after = """
0: ───█───────────────────
│
1: ───█───────────────────
2: ───█─────────0↦1───────
│ │
3: ───█───0↦1───1↦0───────
│
4: ───────1↦0─────────────
"""
ct.assert_has_diagram(strategy, diagram_after)
| apache-2.0 | -2,892,330,833,436,625,000 | 28.946667 | 94 | 0.584595 | false | 2.642353 | false | false | false |
chrison999/mycroft-skill-bitcoin-enhanced | __init__.py | 1 | 4818 | # mycroft-skill-obitcoin-enhanced
#
# A skill for MycroftAI that querys various bitcoin statistics.
#
# Adapted from a MycroftAI skill by Red5d
#
# Licensed under the GNU General Public License v3
# (see LICENSE for more details
from os.path import dirname, join
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
import requests
__author__ = 'Red5d', 'chrison999'
class BitcoinSkill(MycroftSkill):
def __init__(self):
super(BitcoinSkill, self).__init__(name="BitcoinSkill")
def initialize(self):
intent = IntentBuilder("BitcoinAvgIntent").require("BitcoinAvgKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_avg)
intent = IntentBuilder("BitcoinHighIntent").require("BitcoinHighKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_high)
intent = IntentBuilder("BitcoinLowIntent").require("BitcoinLowKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_low)
intent = IntentBuilder("BitcoinLastIntent").require("BitcoinLastKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_last)
intent = IntentBuilder("BitcoinVolIntent").require("BitcoinVolKeyword") \
.optionally("Currency").build()
self.register_intent(intent, self.handle_volume)
def handle_avg(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['24h_avg']
self.speak("The 24 hour average bitcoin price is "+str(price)+" "+currency+".")
def handle_high(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['ask']
self.speak("The current asking price for bitcoin is "+str(price)+" "+currency+".")
def handle_low(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['bid']
self.speak("The current bid price for bitcoin is "+str(price)+" "+currency+".")
def handle_last(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['last']
self.speak("The current price for bitcoin is "+str(price)+" "+currency+".")
def handle_volume(self, message):
currency = str(message.data.get("Currency")) # optional parameter
if currency == 'None':
currency = 'u s dollars'
result = self.fiat_get(currency)
price = requests.get("https://api.bitcoinaverage.com/all").json()[str(result)]['averages']['total_vol']
self.speak("The 24 hour volume for "+currency+" bitcoin is "+str(price)+" btc.")
def fiat_get(self, currency):
if currency == 'None':
currency = 'U S dollars'
result = 'USD'
return result
else:
choices = {
'reals': 'BRL',
'canadian dollars': 'CAD',
'euros': 'EUR',
'yuans': 'CNY',
'koruna': 'CZK',
'rupiahs': 'IDR',
'shekels': 'ILS',
'rupees': 'INR',
'yens': 'JPY',
'won': 'KRW',
'pesos': 'MXN',
'ringgit': 'MYR',
'nairas': 'NGN',
'zlotys': 'PLN',
'roubles': 'RUB',
'kronas': 'SEK',
'singapore dollars': 'SGD',
'lira': 'TRY',
'u s a dollars': 'USD',
'american dollars': 'USD',
'rands': 'ZAR',
'pounds': "GBP"}
result = choices.get(str(currency), 'USD')
return result
def stop(self):
pass
def create_skill():
return BitcoinSkill()
| gpl-3.0 | 5,888,199,872,106,512,000 | 39.15 | 111 | 0.557285 | false | 3.851319 | false | false | false |
fujicoin/electrum-fjc | electrum/gui/qt/__init__.py | 1 | 14139 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import signal
import sys
import traceback
import threading
from typing import Optional
try:
import PyQt5
except Exception:
sys.exit("Error: Could not import PyQt5 on Linux systems, you may try 'sudo apt-get install python3-pyqt5'")
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QWidget, QMenu,
QMessageBox)
from PyQt5.QtCore import QObject, pyqtSignal, QTimer
import PyQt5.QtCore as QtCore
from electrum.i18n import _, set_language
from electrum.plugin import run_hook
from electrum.base_wizard import GoBack
from electrum.util import (UserCancelled, profiler,
WalletFileException, BitcoinException, get_new_wallet_name)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.logging import Logger
from .installwizard import InstallWizard, WalletAlreadyOpenInMemory
from .util import get_default_language, read_QIcon, ColorScheme, custom_message_box
from .main_window import ElectrumWindow
from .network_dialog import NetworkDialog
from .stylesheet_patcher import patch_qt_stylesheet
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class QElectrumApplication(QApplication):
new_window_signal = pyqtSignal(str, object)
class QNetworkUpdatedSignalObject(QObject):
network_updated_signal = pyqtSignal(str, object)
class ElectrumGui(Logger):
@profiler
def __init__(self, config, daemon, plugins):
set_language(config.get('language', get_default_language()))
Logger.__init__(self)
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum.desktop')
self.gui_thread = threading.current_thread()
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QElectrumApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.app.setWindowIcon(read_QIcon("electrum.png"))
# timer
self.timer = QTimer(self.app)
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.nd = None
self.network_updated_signal_obj = QNetworkUpdatedSignalObject()
self._num_wizards_in_progress = 0
self._num_wizards_lock = threading.Lock()
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Electrum')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.new_window_signal.connect(self.start_new_window)
self.set_dark_theme_if_needed()
run_hook('init_qt', self)
def set_dark_theme_if_needed(self):
use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark'
if use_dark_theme:
try:
import qdarkstyle
self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
except BaseException as e:
use_dark_theme = False
self.logger.warning(f'Error setting dark theme: {repr(e)}')
# Apply any necessary stylesheet patches
patch_qt_stylesheet(use_dark_theme=use_dark_theme)
# Even if we ourselves don't set the dark theme,
# the OS/window manager/etc might set *a dark theme*.
# Hence, try to choose colors accordingly:
ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
for window in self.windows:
submenu = m.addMenu(window.wallet.basename())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Electrum"), self.close)
def tray_icon(self):
if self.dark_icon:
return read_QIcon('electrum_dark_icon.png')
else:
return read_QIcon('electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.new_window_signal.emit(path, uri)
def show_network_dialog(self, parent):
if not self.daemon.network:
parent.show_warning(_('You are using Electrum in offline mode; restart Electrum if you want to get connected'), title=_('Offline'))
return
if self.nd:
self.nd.on_update()
self.nd.show()
self.nd.raise_()
return
self.nd = NetworkDialog(self.daemon.network, self.config,
self.network_updated_signal_obj)
self.nd.show()
def _create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
# FIXME: Remove in favour of the load_wallet hook
run_hook('on_new_window', w)
w.warn_if_testnet()
w.warn_if_watching_only()
return w
def count_wizards_in_progress(func):
def wrapper(self: 'ElectrumGui', *args, **kwargs):
with self._num_wizards_lock:
self._num_wizards_in_progress += 1
try:
return func(self, *args, **kwargs)
finally:
with self._num_wizards_lock:
self._num_wizards_in_progress -= 1
return wrapper
@count_wizards_in_progress
def start_new_window(self, path, uri, *, app_is_starting=False):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it'''
wallet = None
try:
wallet = self.daemon.load_wallet(path, None)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (1):\n' + str(e))
# if app is starting, still let wizard to appear
if not app_is_starting:
return
if not wallet:
try:
wallet = self._start_wizard_to_select_or_create_wallet(path)
except (WalletFileException, BitcoinException) as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (2):\n' + str(e))
if not wallet:
return
# create or raise window
try:
for window in self.windows:
if window.wallet.storage.path == wallet.storage.path:
break
else:
window = self._create_window_for_wallet(wallet)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot create window for wallet') + ':\n' + str(e))
if app_is_starting:
wallet_dir = os.path.dirname(path)
path = os.path.join(wallet_dir, get_new_wallet_name(wallet_dir))
self.start_new_window(path, uri)
return
if uri:
window.pay_to_URI(uri)
window.bring_to_top()
window.setWindowState(window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
window.activateWindow()
return window
def _start_wizard_to_select_or_create_wallet(self, path) -> Optional[Abstract_Wallet]:
wizard = InstallWizard(self.config, self.app, self.plugins)
try:
path, storage = wizard.select_storage(path, self.daemon.get_wallet)
# storage is None if file does not exist
if storage is None:
wizard.path = path # needed by trustedcoin plugin
wizard.run('new')
storage = wizard.create_storage(path)
else:
wizard.run_upgrades(storage)
except (UserCancelled, GoBack):
return
except WalletAlreadyOpenInMemory as e:
return e.wallet
finally:
wizard.terminate()
# return if wallet creation is not complete
if storage is None or storage.get_action():
return
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
return wallet
def close_window(self, window: ElectrumWindow):
if window in self.windows:
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if not self.windows:
self.config.save_last_wallet(window.wallet)
run_hook('on_close_window', window)
self.daemon.stop_wallet(window.wallet.storage.path)
def init_network(self):
# Show network dialog if config does not exist
if self.daemon.network:
if self.config.get('auto_connect') is None:
wizard = InstallWizard(self.config, self.app, self.plugins)
wizard.init_network(self.daemon.network)
wizard.terminate()
def main(self):
try:
self.init_network()
except UserCancelled:
return
except GoBack:
return
except BaseException as e:
self.logger.exception('')
return
self.timer.start()
self.config.open_last_wallet()
path = self.config.get_wallet_path()
if not self.start_new_window(path, self.config.get('url'), app_is_starting=True):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
def quit_after_last_window():
# keep daemon running after close
if self.config.get('daemon'):
return
# check if a wizard is in progress
with self._num_wizards_lock:
if self._num_wizards_in_progress > 0 or len(self.windows) > 0:
return
self.app.quit()
self.app.setQuitOnLastWindowClosed(False) # so _we_ can decide whether to quit
self.app.lastWindowClosed.connect(quit_after_last_window)
def clean_up():
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence. see http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
self.app.aboutToQuit.connect(clean_up)
# main loop
self.app.exec_()
# on some platforms the exec_ call may not return, so use clean_up()
def stop(self):
self.logger.info('closing GUI')
self.app.quit()
| mit | -6,204,389,142,058,746,000 | 38.166205 | 143 | 0.606903 | false | 4.075814 | true | false | false |
wimoverwater/Sick-Beard | sickbeard/metadata/xbmc.py | 1 | 10797 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sickbeard
import generic
from sickbeard.common import *
from sickbeard import logger, exceptions, helpers
from sickbeard import encodingKludge as ek
from lib.tvdb_api import tvdb_api, tvdb_exceptions
import xml.etree.cElementTree as etree
class XBMCMetadata(generic.GenericMetadata):
def __init__(self):
generic.GenericMetadata.__init__(self)
self.name = 'XBMC'
def _show_data(self, show_obj):
"""
Creates an elementTree XML structure for an XBMC-style tvshow.nfo and
returns the resulting data object.
show_obj: a TVShow instance to create the NFO for
"""
show_ID = show_obj.tvdbid
t = tvdb_api.Tvdb(actors=True, **sickbeard.TVDB_API_PARMS)
tv_node = etree.Element("tvshow")
for ns in XML_NSMAP.keys():
tv_node.set(ns, XML_NSMAP[ns])
try:
myShow = t[int(show_ID)]
except tvdb_exceptions.tvdb_shownotfound:
logger.log(u"Unable to find show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
raise
except tvdb_exceptions.tvdb_error:
logger.log(u"TVDB is down, can't use its data to add this show", logger.ERROR)
raise
# check for title and id
try:
if myShow["seriesname"] == None or myShow["seriesname"] == "" or myShow["id"] == None or myShow["id"] == "":
logger.log(u"Incomplete info for show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
return False
except tvdb_exceptions.tvdb_attributenotfound:
logger.log(u"Incomplete info for show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR)
return False
title = etree.SubElement(tv_node, "title")
if myShow["seriesname"] != None:
title.text = myShow["seriesname"]
rating = etree.SubElement(tv_node, "rating")
if myShow["rating"] != None:
rating.text = myShow["rating"]
plot = etree.SubElement(tv_node, "plot")
if myShow["overview"] != None:
plot.text = myShow["overview"]
episodeguide = etree.SubElement(tv_node, "episodeguide")
episodeguideurl = etree.SubElement( episodeguide, "url")
episodeguideurl2 = etree.SubElement(tv_node, "episodeguideurl")
if myShow["id"] != None:
showurl = sickbeard.TVDB_BASE_URL + '/series/' + myShow["id"] + '/all/en.zip'
episodeguideurl.text = showurl
episodeguideurl2.text = showurl
mpaa = etree.SubElement(tv_node, "mpaa")
if myShow["contentrating"] != None:
mpaa.text = myShow["contentrating"]
tvdbid = etree.SubElement(tv_node, "tvdbid")
if myShow["id"] != None:
tvdbid.text = myShow["id"]
genre = etree.SubElement(tv_node, "genre")
if myShow["genre"] != None:
genre.text = " / ".join([x for x in myShow["genre"].split('|') if x])
premiered = etree.SubElement(tv_node, "premiered")
if myShow["firstaired"] != None:
premiered.text = myShow["firstaired"]
studio = etree.SubElement(tv_node, "studio")
if myShow["network"] != None:
studio.text = myShow["network"]
for actor in myShow['_actors']:
cur_actor = etree.SubElement(tv_node, "actor")
cur_actor_name = etree.SubElement( cur_actor, "name")
cur_actor_name.text = actor['name']
cur_actor_role = etree.SubElement( cur_actor, "role")
cur_actor_role_text = actor['role']
if cur_actor_role_text != None:
cur_actor_role.text = cur_actor_role_text
cur_actor_thumb = etree.SubElement( cur_actor, "thumb")
cur_actor_thumb_text = actor['image']
if cur_actor_thumb_text != None:
cur_actor_thumb.text = cur_actor_thumb_text
# Make it purdy
helpers.indentXML(tv_node)
data = etree.ElementTree(tv_node)
return data
def _ep_data(self, ep_obj):
"""
Creates an elementTree XML structure for an XBMC-style episode.nfo and
returns the resulting data object.
show_obj: a TVEpisode instance to create the NFO for
"""
eps_to_write = [ep_obj] + ep_obj.relatedEps
try:
t = tvdb_api.Tvdb(actors=True, **sickbeard.TVDB_API_PARMS)
myShow = t[ep_obj.show.tvdbid]
except tvdb_exceptions.tvdb_shownotfound, e:
raise exceptions.ShowNotFoundException(str(e))
except tvdb_exceptions.tvdb_error, e:
logger.log(u"Unable to connect to TVDB while creating meta files - skipping - "+str(e).decode('utf-8'), logger.ERROR)
return
if len(eps_to_write) > 1:
rootNode = etree.Element( "xbmcmultiepisode" )
else:
rootNode = etree.Element( "episodedetails" )
# Set our namespace correctly
for ns in XML_NSMAP.keys():
rootNode.set(ns, XML_NSMAP[ns])
# write an NFO containing info for all matching episodes
for curEpToWrite in eps_to_write:
try:
myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
return None
if myEp["firstaired"] == None and ep_obj.season == 0:
myEp["firstaired"] = str(datetime.date.fromordinal(1))
if myEp["episodename"] == None or myEp["firstaired"] == None:
return None
logger.log(u"Creating metadata for episode "+str(ep_obj.season)+"x"+str(ep_obj.episode), logger.DEBUG)
if len(eps_to_write) > 1:
episode = etree.SubElement( rootNode, "episodedetails" )
else:
episode = rootNode
title = etree.SubElement( episode, "title" )
if curEpToWrite.name != None:
title.text = curEpToWrite.name
season = etree.SubElement( episode, "season" )
season.text = str(curEpToWrite.season)
episodenum = etree.SubElement( episode, "episode" )
episodenum.text = str(curEpToWrite.episode)
aired = etree.SubElement( episode, "aired" )
if curEpToWrite.airdate != datetime.date.fromordinal(1):
aired.text = str(curEpToWrite.airdate)
else:
aired.text = ''
plot = etree.SubElement( episode, "plot" )
if curEpToWrite.description != None:
plot.text = curEpToWrite.description
displayseason = etree.SubElement( episode, "displayseason" )
if myEp.has_key('airsbefore_season'):
displayseason_text = myEp['airsbefore_season']
if displayseason_text != None:
displayseason.text = displayseason_text
displayepisode = etree.SubElement( episode, "displayepisode" )
if myEp.has_key('airsbefore_episode'):
displayepisode_text = myEp['airsbefore_episode']
if displayepisode_text != None:
displayepisode.text = displayepisode_text
thumb = etree.SubElement( episode, "thumb" )
thumb_text = myEp['filename']
if thumb_text != None:
thumb.text = thumb_text
watched = etree.SubElement( episode, "watched" )
watched.text = 'false'
credits = etree.SubElement( episode, "credits" )
credits_text = myEp['writer']
if credits_text != None:
credits.text = credits_text
director = etree.SubElement( episode, "director" )
director_text = myEp['director']
if director_text != None:
director.text = director_text
rating = etree.SubElement( episode, "rating" )
rating_text = myEp['rating']
if rating_text != None:
rating.text = rating_text
gueststar_text = myEp['gueststars']
if gueststar_text != None:
for actor in gueststar_text.split('|'):
cur_actor = etree.SubElement( episode, "actor" )
cur_actor_name = etree.SubElement(
cur_actor, "name"
)
cur_actor_name.text = actor
for actor in myShow['_actors']:
cur_actor = etree.SubElement( episode, "actor" )
cur_actor_name = etree.SubElement( cur_actor, "name" )
cur_actor_name.text = actor['name']
cur_actor_role = etree.SubElement( cur_actor, "role" )
cur_actor_role_text = actor['role']
if cur_actor_role_text != None:
cur_actor_role.text = cur_actor_role_text
cur_actor_thumb = etree.SubElement( cur_actor, "thumb" )
cur_actor_thumb_text = actor['image']
if cur_actor_thumb_text != None:
cur_actor_thumb.text = cur_actor_thumb_text
#
# Make it purdy
helpers.indentXML( rootNode )
data = etree.ElementTree( rootNode )
return data
# present a standard "interface" from the module
metadata_class = XBMCMetadata
| gpl-3.0 | -8,118,284,363,383,384,000 | 36.560714 | 177 | 0.560248 | false | 4.028731 | false | false | false |
fxia22/ASM_xf | PythonD/site_python/numarray/codegenerator/bytescode.py | 2 | 18281 | """This module generates the "bytes" module which contains various
byte munging C functions: copying, alignment, byteswapping, choosing,
putting, taking.
WARNING: This module exists solely as a mechanism to generate a
portion of numarray and is not intended to provide any
post-installation functionality.
"""
from basecode import CodeGenerator, template, _HEADER
BYTES_HEADER = _HEADER + \
'''
#include <assert.h>
#define NA_ACOPYN(i, o) memcpy(o, i, N)
/* The following is used to copy nbytes of data for each element. **
** As such it can be used to align any sort of data provided the **
** output pointers used are aligned */
static int copyNbytes(long dim, long nbytes, maybelong *niters,
void *input, long inboffset, maybelong *inbstrides,
void *output, long outboffset, maybelong *outbstrides) {
long i, j;
char *tin = (char *) input + inboffset;
char *tout = (char *) output + outboffset;
if (dim == 0) {
for (i=0; i<niters[dim]; i++) {
for (j=0; j<nbytes; j++) {
*tout++ = *tin++;
}
tin = tin + inbstrides[dim] - nbytes;
tout = tout + outbstrides[dim]- nbytes;
}
}
else {
for (i=0; i<niters[dim]; i++) {
copyNbytes(dim-1, nbytes, niters,
input, inboffset + i*inbstrides[dim], inbstrides,
output, outboffset + i*outbstrides[dim], outbstrides);
}
}
return 0;
}
STRIDING_DESCR2(copyNbytes, !CHECK_ALIGN, -1, -1);
/* Copy a data buffer to a new string
**
** Arguments:
**
** Tuple of iteration values for each dimension of input array.
** Input buffer object.
** Input byte offset.
** Tuple of input byte strides.
** Size of input data item in bytes.
**
** Returns Python string.
*/
static PyObject *copyToString(PyObject *self, PyObject *args) {
PyObject *inbuffObj;
PyObject *nitersObj, *inbstridesObj;
PyObject *otemp, *outstring;
long ltemp;
int nniters, ninbstrides, nargs;
long nbytes;
maybelong niters[MAXDIM], inbstrides[MAXDIM], outbstrides[MAXDIM];
void *inbuffer, *outbuffer;
long i, inbsize, outbsize, nelements=1, inboffset;
nargs = PyObject_Length(args);
if (!PyArg_ParseTuple(args, "OOlOl",
&nitersObj, &inbuffObj, &inboffset, &inbstridesObj, &nbytes))
return NULL;
if (!PySequence_Check(nitersObj))
return PyErr_Format(PyExc_TypeError,
"copyToString: invalid shape object");
if (!PySequence_Check(inbstridesObj))
return PyErr_Format(PyExc_TypeError,
"copyToString: invalid strides object");
nniters = PyObject_Length(nitersObj);
ninbstrides = PyObject_Length(inbstridesObj);
if (nniters != ninbstrides)
return PyErr_Format(PyExc_ValueError,
"copyToString: shape & strides don't match");
for (i=nniters-1; i>=0; i--) {
otemp = PySequence_GetItem(nitersObj, i);
if (PyInt_Check(otemp))
ltemp = PyInt_AsLong(otemp);
else if (PyLong_Check(otemp))
ltemp = PyLong_AsLong(otemp);
else
return PyErr_Format(PyExc_TypeError,
"copyToString: non-integer shape element");
nelements *= ltemp;
niters[nniters-i-1] = ltemp;
Py_DECREF(otemp);
otemp = PySequence_GetItem(inbstridesObj, i);
if (PyInt_Check(otemp))
inbstrides[nniters-i-1] = PyInt_AsLong(otemp);
else if (PyLong_Check(otemp))
inbstrides[nniters-i-1] = PyLong_AsLong(otemp);
else
return PyErr_Format(PyExc_TypeError,
"copyToString: non-integer stride element");
Py_DECREF(otemp);
}
if (!nelements)
return PyString_FromStringAndSize("", 0);
outbstrides[0] = nbytes;
for (i=1; i<nniters; i++) {
outbstrides[i] = outbstrides[i-1]*niters[i-1];
}
outbsize = outbstrides[nniters-1]*niters[nniters-1];
outstring = PyString_FromStringAndSize(NULL, outbsize);
if (!outstring)
return NULL;
outbuffer = (void *) PyString_AsString(outstring);
if ((inbsize = NA_getBufferPtrAndSize(inbuffObj, 1, &inbuffer)) < 0)
return PyErr_Format(PyExc_TypeError,
"copyToString: Problem with array buffer");
if (NA_checkOneStriding("copyToString", nniters, niters,
inboffset, inbstrides, inbsize, nbytes, 0) ||
NA_checkOneStriding("copyToString", nniters, niters,
0, outbstrides, outbsize, nbytes, 0))
return NULL;
BEGIN_THREADS
copyNbytes(nniters-1, nbytes, niters,
inbuffer, inboffset, inbstrides, outbuffer, 0, outbstrides);
END_THREADS
return outstring;
}
/* chooseXbytes functions are called as uFuncs... */
enum CLIP_MODE {
CLIPPED,
WRAPPED,
RAISE
};
#define wrap(i, max) \
while(i < 0) \
i += max; \
while(i >= max) \
i -= max;
static int takeNbytes(long niter, long ninargs, long noutargs,
void **buffers, long *bsizes)
{
maybelong i, cMode, N;
maybelong *scatteredstrides, *scatteredshape, **indices;
char *gathered, *scattered;
maybelong nindices = ninargs-4, outi = ninargs+noutargs-1;
if (NA_checkIo("takeNbytes", 4, 1, MIN(ninargs, 4), noutargs))
return -1;
if (nindices == 0)
return 0;
if (NA_checkOneCBuffer("takeNbytes", 2, buffers[0], bsizes[0], sizeof(maybelong)))
return -1;
else {
cMode = ((maybelong *) buffers[0])[0];
N = ((maybelong *) buffers[0])[1];
}
if (NA_checkOneCBuffer("takeNbytes", nindices, buffers[2], bsizes[2], sizeof(maybelong)))
return -1;
else {
scatteredstrides = (maybelong *) buffers[2];
}
if (NA_checkOneCBuffer("takeNbytes", nindices, buffers[3], bsizes[3], sizeof(maybelong)))
return -1;
else {
scatteredshape = (maybelong *) buffers[3];
}
if (NA_checkOneStriding("takeNBytes", nindices, scatteredshape, 0, scatteredstrides, bsizes[1], N, 0))
return -1;
else
scattered = (char *) buffers[1];
for(i=4; i<nindices; i++)
if (NA_checkOneCBuffer("takeNbytes", niter, buffers[i], bsizes[i], sizeof(maybelong)))
return -1;
indices = (maybelong **) &buffers[4];
if (NA_checkOneCBuffer("takeNbytes", niter*N, buffers[outi], bsizes[outi], 1))
return -1;
else
gathered = (char *) buffers[ninargs+noutargs-1];
switch( cMode )
{
case WRAPPED:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
wrap(k, scatteredshape[j]);
index += scatteredstrides[j]*k;
}
memcpy( &gathered[i*N], scattered+index, N);
}
break;
case CLIPPED:
default:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
if (k < 0)
k = 0;
else if (k >= scatteredshape[j])
k = scatteredshape[j]-1;
index += scatteredstrides[j]*k;
}
memcpy( &gathered[i*N], scattered+index, N);
}
break;
case RAISE:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
if (k < 0)
k += scatteredshape[j];
if (k >= scatteredshape[j]) {
PyErr_Format(PyExc_IndexError, "Index out of range");
return -1;
}
index += scatteredstrides[j]*k;
}
memcpy( &gathered[i*N], scattered+index, N);
}
break;
}
return 0;
}
SELF_CHECKED_CFUNC_DESCR(takeNbytes, CFUNC_UFUNC);
static int putNbytes(long niter, long ninargs, long noutargs,
void **buffers, long *bsizes)
{
maybelong i, cMode, N;
maybelong *scatteredstrides, *scatteredshape, **indices;
char *gathered, *scattered;
long nindices = ninargs-4, outi = ninargs+noutargs-1;
if (nindices == 0)
return 0;
if (NA_checkIo("putNbytes", 4, 1, MIN(ninargs, 4), noutargs))
return -1;
if (NA_checkOneCBuffer("putNbytes", 2, buffers[0], bsizes[0], sizeof(maybelong)))
return -1;
else {
cMode = ((maybelong *) buffers[0])[0];
N = ((maybelong *) buffers[0])[1];
}
if (NA_checkOneCBuffer("putNbytes", niter*N, buffers[1], bsizes[1], 1))
return -1;
else
gathered = (char *) buffers[1];
if (NA_checkOneCBuffer("putNbytes", nindices, buffers[2], bsizes[2], sizeof(maybelong)))
return -1;
else {
scatteredstrides = (maybelong *) buffers[2];
}
if (NA_checkOneCBuffer("putNbytes", nindices, buffers[3], bsizes[3], sizeof(maybelong)))
return -1;
else {
scatteredshape = (maybelong *) buffers[3];
}
for(i=4; i<nindices; i++)
if (NA_checkOneCBuffer("putNbytes", niter, buffers[i], bsizes[i], sizeof(maybelong)))
return -1;
indices = (maybelong **) &buffers[4];
if (NA_checkOneStriding("putNBytes", nindices, scatteredshape, 0, scatteredstrides, bsizes[outi], N, 0))
return -1;
else
scattered = (char *) buffers[outi];
switch( cMode )
{
case WRAPPED:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
wrap(k, scatteredshape[j]);
index += scatteredstrides[j]*k;
}
memcpy( scattered+index, &gathered[i*N], N);
}
break;
case CLIPPED:
default:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
if (k < 0)
k = 0;
else if (k >= scatteredshape[j])
k = scatteredshape[j]-1;
index += scatteredstrides[j]*k;
}
memcpy( scattered+index, &gathered[i*N], N);
}
break;
case RAISE:
for(i=0; i<niter; i++)
{
maybelong j, index;
for(j=index=0; j<nindices; j++)
{
maybelong k = indices[j][i];
if (k < 0)
k += scatteredshape[j];
if (k >= scatteredshape[j]) {
PyErr_Format(PyExc_IndexError, "Index out of range");
return -1;
}
index += scatteredstrides[j]*k;
}
memcpy( scattered+index, &gathered[i*N], N);
}
break;
}
return 0;
}
SELF_CHECKED_CFUNC_DESCR(putNbytes, CFUNC_UFUNC);
'''
COPY_TEMPLATE = \
'''
/*******************************************
* *
* These copy data to a contiguous buffer. *
* They do not handle non-aligned data. *
* Offsets and Strides are in byte units *
* *
*******************************************/
static int copy<size>bytes(long dim, long dummy, maybelong *niters,
void *input, long inboffset, maybelong *inbstrides,
void *output, long outboffset, maybelong *outbstrides) {
long i;
char *tin = (char *) input + inboffset;
char *tout = (char *) output + outboffset;
if (dim == 0) {
for (i=0; i<niters[dim]; i++) {
NA_ACOPY<size>(tin, tout);
tin += inbstrides[dim];
tout += outbstrides[dim];
}
}
else {
for (i=0; i<niters[dim]; i++) {
copy<size>bytes(dim-1, dummy, niters,
input, inboffset + i*inbstrides[dim], inbstrides,
output, outboffset + i*outbstrides[dim], outbstrides);
}
}
return 0;
}
STRIDING_DESCR2(copy<size>bytes, CHECK_ALIGN, <size>, <size>);
'''
ALIGN_TEMPLATE = \
'''
static int align<size>bytes(long dim, long dummy, maybelong *niters,
void *input, long inboffset, maybelong *inbstrides,
void *output, long outboffset, maybelong *outbstrides) {
return copyNbytes(dim, <size>, niters, input, inboffset, inbstrides,
output, outboffset, outbstrides);
}
STRIDING_DESCR2(align<size>bytes, !CHECK_ALIGN, <size>, <size>);
'''
BYTESWAP_TEMPLATE = \
'''
/******* byteswap *****/
static int byteswap<sizename>(long dim, long dummy, maybelong *niters,
void *input, long inboffset, maybelong *inbstrides,
void *output, long outboffset, maybelong *outbstrides) {
long i;
char *tin = (char *) input + inboffset;
char *tout = (char *) output + outboffset;
if (dim == 0) {
for (i=0; i<niters[dim]; i++) {
char t[<size>];
NA_COPY<size>(tin, t);
<swapkind><size>(t, tout);
tin += inbstrides[dim];
tout += outbstrides[dim];
}
}
else {
for (i=0; i<niters[dim]; i++) {
byteswap<sizename>(dim-1, dummy, niters,
input, inboffset + i*inbstrides[dim], inbstrides,
output, outboffset + i*outbstrides[dim], outbstrides);
}
}
return 0;
}
STRIDING_DESCR2(byteswap<sizename>, !CHECK_ALIGN, <size>, <size>);
'''
CHOOSE_TEMPLATE = \
'''
static int choose<size>bytes(long niter, long ninargs, long noutargs,
void **buffers, long *bsizes)
{
maybelong i, cMode, maxP, N, *selector;
char **population, *output;
int outi = ninargs + noutargs - 1;
if (NA_checkIo("choose<size>bytes", 2, 1, MIN(ninargs,2), noutargs))
return -1;
if (NA_checkOneCBuffer("choose<size>bytes", 2, buffers[0], bsizes[0], sizeof(maybelong)))
return -1;
else {
cMode = ((maybelong *) buffers[0])[0];
N = ((maybelong *) buffers[0])[1];
}
if (NA_checkOneCBuffer("choose<size>bytes", niter, buffers[1], bsizes[1],
sizeof(maybelong)))
return -1;
else
selector = (maybelong *) buffers[1];
if (ninargs-2 == 0)
return 0;
else
maxP = ninargs-2;
for(i=2; i<ninargs; i++)
if (NA_checkOneCBuffer("choose<size>bytes", niter,
buffers[i], bsizes[i], <size>))
return -1;
population = (char **) &buffers[2];
if (NA_checkOneCBuffer("choose<size>bytes", niter,
buffers[outi], bsizes[outi], <size>))
return -1;
else
output = (char *) buffers[outi];
if (maxP == 0)
return 0;
switch(cMode)
{
case WRAPPED:
for(i=0; i<niter; i++)
{
maybelong j = selector[i];
wrap(j, maxP);
NA_ACOPY<size>(&population[j][i*<size>], &output[i*<size>]);
}
break;
default:
case CLIPPED:
for(i=0; i<niter; i++)
{
maybelong j = selector[i];
if (j < 0)
j = 0;
else if (j >= maxP)
j = maxP-1;
NA_ACOPY<size>(&population[j][i*<size>], &output[i*<size>]);
}
break;
case RAISE:
for(i=0; i<niter; i++)
{
maybelong j = selector[i];
if ((j < 0) || (j >= maxP)) {
PyErr_Format(PyExc_IndexError, "Index out of range");
return -1;
}
NA_ACOPY<size>(&population[j][i*<size>], &output[i*<size>]);
}
break;
}
return 0;
}
SELF_CHECKED_CFUNC_DESCR(choose<size>bytes, CFUNC_UFUNC);
'''
BYTES_TEMPLATE = ( COPY_TEMPLATE +
ALIGN_TEMPLATE +
BYTESWAP_TEMPLATE +
CHOOSE_TEMPLATE )
# ============================================================================
# IMPORTANT: no <>-sugared strings below this point
# translate <var> --> %(var)s in templates seen *so far*
template.sugar_dict(globals())
# ============================================================================
bytesconfig = [
["1", "Int8"],
["2", "Int16"],
["4", "Int32"],
["8", "Float64"],
["16", "Complex64"],
];
class BytesParams:
def __init__(self, size, type):
self.size = size
self.sizename = str(size) + "bytes"
self.typename = type
self.swapkind = "NA_SWAP"
NBytesParams = BytesParams("N","AnyType")
class ComplexBytesParams:
def __init__(self, size, type):
self.size = size
self.sizename = type
self.typename = type
self.swapkind = "NA_COMPLEX_SWAP"
Complex32BytesCfg = ComplexBytesParams(8, "Complex32")
Complex64BytesCfg = ComplexBytesParams(16, "Complex64")
class BytesCodeGenerator(CodeGenerator):
def __init__(self, *components):
CodeGenerator.__init__(self, *components)
self.module = "_bytes"
self.qualified_module = "numarray._bytes"
def gen_body(self):
for cfg in bytesconfig:
t = apply(BytesParams, cfg)
self.codelist.append((self.separator + BYTES_TEMPLATE) %
t.__dict__)
self.addcfunc("copy"+ t.sizename)
self.addcfunc("byteswap"+t.sizename)
self.addcfunc("align"+t.sizename)
self.addcfunc("choose"+t.sizename)
self.codelist.append((self.separator + CHOOSE_TEMPLATE) %
NBytesParams.__dict__)
self.addcfunc("chooseNbytes")
self.addcfunc("copyNbytes")
self.addcfunc("putNbytes")
self.addcfunc("takeNbytes")
# Hack in the type based (not size based) methods for complex
self.codelist.append((self.separator + BYTESWAP_TEMPLATE) %
Complex32BytesCfg.__dict__)
self.addcfunc("byteswapComplex32" )
self.codelist.append((self.separator + BYTESWAP_TEMPLATE) %
Complex64BytesCfg.__dict__)
self.addcfunc("byteswapComplex64" )
self.addmethod("copyToString")
generate_bytes_code = BytesCodeGenerator(BYTES_HEADER)
| gpl-2.0 | -333,324,760,290,137,660 | 28.919804 | 106 | 0.542312 | false | 3.231 | false | false | false |
NorbertAgoston3pg/PythonLearning | DemoProject/src/input_output_menu.py | 1 | 1858 | import input_output
def launch_input_output_menu():
print("I/O Section - Enter the exercise number you want to run")
selection = 0
while selection != 7:
print("exercise #1")
print("exercise #2")
print("exercise #3")
print("exercise #4")
print("exercise #5")
print("exercise #6")
print("exit #7")
selection = int(input("Insert Selection = "))
if selection == 1:
print("Extract users form a file")
for key, value in sorted(input_output.extracted_users.items()):
print('{0} {1}'.format(key, value))
elif selection == 2:
print("Apply word count on a file")
file_info = input_output.wc('passwd')
print("{0} characters {1} words {2} lines and {3} "
"unique words in file".format(file_info[0], file_info[1],
file_info[2], file_info[3]))
elif selection == 3:
print("Output users to a file")
file_users = input_output.extract_users('passwd')
input_output.output_users_to_file(file_users, 'output.csv')
elif selection == 4:
print("Read text file")
print(input_output.read_text("text.txt"))
elif selection == 5:
some_words = input_output.read_text("text.txt")
words_statistics = input_output.word_count(some_words)
print(words_statistics)
elif selection == 6:
some_words = input_output.read_text("text.txt")
words_statistics = input_output.word_count(some_words)
word = input_output.word_with_max_occurence(words_statistics)
print("Word with most occurences = " + word)
elif selection == 7:
print("exit")
launch_input_output_menu()
| mit | 9,058,435,035,290,472,000 | 37.708333 | 76 | 0.548977 | false | 3.995699 | false | false | false |
onebeartoe/3D-Modeling | openscad/models/src/main/openscad/external-resources/stl-to-scad/stl2scad-python-2.py | 1 | 4179 | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" STL to SCAD converter.
This code is based on Riham javascript code.
See http://www.thingiverse.com/thing:62666
Ascii STL file:
solid _40x10
facet normal 0.000000e+000 0.000000e+000 1.000000e+000
outer loop
vertex 1.286803e+001 2.957990e+001 1.200000e+001
vertex 1.173648e+001 2.984808e+001 1.200000e+001
vertex 1.115715e+001 2.953001e+001 1.200000e+001
endloop
endfacet
facet normal 0.000000e+000 0.000000e+000 1.000000e+000
outer loop
vertex 1.115715e+001 2.953001e+001 1.200000e+001
vertex 1.173648e+001 2.984808e+001 1.200000e+001
vertex 1.058145e+001 2.998308e+001 1.200000e+001
endloop
endfacet
...
ensolid
Binary STL file:
"""
import re
import sys
import struct
import os.path
USE_FACES = True # set to False for OpenSCAD version < 2014.03
def parseAscii(inputFile):
"""
"""
inputFile.seek(0)
inputStr = inputFile.read()
modules = []
solidName = None
vertices = None
faces = None
face = None
for solidStr in re.findall(r"solid\s(.*?)endsolid", inputStr, re.S):
solidName = re.match(r"^(.*)$", solidStr, re.M).group(0)
print "Processing object %s..." % solidName
vertices = []
faces = []
for facetStr in re.findall(r"facet\s(.*?)endfacet", solidStr, re.S):
for outerLoopStr in re.findall(r"outer\sloop(.*?)endloop", facetStr, re.S):
face = []
for vertexStr in re.findall(r"vertex\s(.*)$", outerLoopStr, re.M):
vertex = [float(coord) for coord in vertexStr.split()]
try:
face.append(vertices.index(vertex))
except ValueError:
vertices.append(str(vertex))
face.append(len(vertices) - 1)
faces.append(str(face))
modules.append((solidName, vertices, faces))
return modules
def parseBinary(inputFile, solidName="stl2scad"):
"""
"""
# Skip header
inputFile.seek(80)
nbTriangles = struct.unpack("<I", inputFile.read(4))[0]
print "found %d faces" % nbTriangles
modules = []
vertices = []
faces = []
face = None
# Iterate over faces
for i in range(nbTriangles):
face = []
# Skip normal vector (3x uint32)
inputFile.seek(3*4, 1)
# Iterate over vertices
for j in range(3):
vertex = struct.unpack("<fff", inputFile.read(3*4))
#print repr(s), repr(vertex)
try:
face.append(vertices.index(vertex))
except ValueError:
vertices.append(str(list(vertex)))
face.append(len(vertices) - 1)
faces.append(str(face))
# Skip byte count
inputFile.seek(2, 1)
modules.append((solidName, vertices, faces))
return modules
def convert(outputFile, modules):
"""
"""
for solidName, vertices, faces in modules:
points_ = ",\n\t\t\t".join(vertices)
faces_ = ",\n\t\t\t".join(faces)
if USE_FACES:
module = "module %s() {\n\tpolyhedron(\n\t\tpoints=[\n\t\t\t%s\n\t\t],\n\t\tfaces=[\n\t\t\t%s\n\t\t]\n\t);\n}\n\n\n%s();\n" % (solidName, points_, faces_, solidName)
else:
module = "module %s() {\n\tpolyhedron(\n\t\tpoints=[\n\t\t\t%s\n\t\t],\n\t\ttriangles=[\n\t\t\t%s\n\t\t]\n\t);\n}\n\n\n%s();\n" % (solidName, points_, faces_, solidName)
outputFile.write(module)
outputFile.close()
def main():
inputFileName = sys.argv[1]
inputFile = file(inputFileName)
# Check if ascii or binary
if inputFile.read(5) == "solid":
print "ascii file"
modules = parseAscii(inputFile)
else:
print "binary file"
modules = parseBinary(inputFile)
outputFileName = "%s%s%s" % (os.path.splitext(inputFileName)[0], os.path.extsep, "scad")
outputFile = file(outputFileName, "w")
convert(outputFile, modules)
print "%s saved" % outputFileName
if __name__ == "__main__":
main()
| lgpl-2.1 | 4,506,072,384,435,413,000 | 26.313725 | 181 | 0.572146 | false | 3.272514 | false | false | false |
unrza72/qplotutils | qplotutils/player.py | 1 | 4353 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
User controls for interactivity.
"""
import logging
import numpy as np
from qtpy.QtCore import *
from qtpy.QtGui import *
from qtpy.QtWidgets import *
from qplotutils import CONFIG
from .ui.playback import Ui_PlaybackControl
__author__ = "Philipp Baust"
__copyright__ = "Copyright 2019, Philipp Baust"
__credits__ = []
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Philipp Baust"
__email__ = "philipp.baust@gmail.com"
__status__ = "Development"
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)
class PlaybackWidget(QWidget):
""" Playback control widget with the following button
* toggle playback / pause
* advance one step forward
* advance one step back
The current timestamp is inidcated by slider and a line edit.
Models / Visualization that choose to be controlled through the playback widget should
connect to :meth:`qplotutils.player.PlaybackWidget.timestamp_changed`.
"""
#: emited whenever the timestamp is changed.
timestamp_changed = Signal(int, float)
def __init__(self, parent=None):
super(PlaybackWidget, self).__init__(parent)
self.ui = Ui_PlaybackControl()
self.ui.setupUi(self)
self.__is_playing = False
self.__timestamps = None
self.__last_index = None
self.ui.button_play_pause.clicked.connect(self.play_pause)
self.ui.button_back.clicked.connect(self.step_back)
self.ui.button_next.clicked.connect(self.step_forward)
self.ui.slider_index.valueChanged.connect(self._slider_value_changed)
self.ui.slider_index.sliderPressed.connect(self._slider_pressed)
self.ui.edit_timestamp.textEdited.connect(self.jump_to_timestamp)
if CONFIG.debug:
self.timestamp_changed.connect(self.debug_slider)
def jump_to_timestamp(self, text):
try:
_log.debug(text)
ts = float(text)
idx, = np.where(self.timestamps == ts)
self.ui.slider_index.setValue(idx[0])
except Exception as ex:
_log.info(
"Could not set timestamp. Format no recognized or out of interval."
)
_log.debug("Exception %s", ex)
def debug_slider(self, index, timestamp):
_log.debug("{}: {}".format(index, timestamp))
@property
def timestamps(self):
return self.__timestamps
@timestamps.setter
def timestamps(self, value):
self.__timestamps = value
self.__last_index = len(value)
self.ui.slider_index.setMinimum(0)
self.ui.slider_index.setMaximum(self.__last_index)
self.ui.slider_index.setValue(0)
def _slider_pressed(self):
self.pause()
def _slider_value_changed(self, value):
ts = self.timestamps[value]
self.ui.edit_timestamp.setText("{}".format(ts))
self.timestamp_changed.emit(value, ts)
def play_pause(self):
if self.__is_playing:
self.pause()
else:
self.play()
def pause(self):
if not self.__is_playing:
return
self.ui.button_play_pause.setIcon(
QIcon(":/player/icons/media-playback-start.svg")
)
self.__is_playing = False
def play(self):
if self.__is_playing:
return
self.ui.button_play_pause.setIcon(
QIcon(":/player/icons/media-playback-pause.svg")
)
self.__is_playing = True
self.advance()
def step_back(self):
self.pause()
self.advance(reverse=True)
def step_forward(self):
self.pause()
self.advance()
def advance(self, reverse=False):
if reverse:
next_index = self.ui.slider_index.value() - 1
else:
next_index = self.ui.slider_index.value() + 1
if not 0 < next_index < self.__last_index:
self.pause()
return
self.ui.slider_index.setValue(next_index)
if self.__is_playing:
QTimer.singleShot(10, self.advance)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
qapp = QApplication([])
CONFIG.debug = True
p = PlaybackWidget()
p.show()
p.timestamps = np.arange(0, 1000, 12) * 141000
qapp.exec_()
| mit | 3,355,007,466,227,761,700 | 26.726115 | 90 | 0.606938 | false | 3.710997 | false | false | false |
gatsinski/kindergarten-management-system | kindergarten_management_system/kms/contrib/kindergartens/migrations/0002_auto_20170327_1844.py | 1 | 1460 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kindergartens', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='Name', max_length=254)),
('description', models.CharField(verbose_name='Description', max_length=1000)),
('kindergarten', models.ForeignKey(to='kindergartens.Kindergarten')),
],
options={
'verbose_name': 'Group',
'verbose_name_plural': 'Groups',
},
),
migrations.CreateModel(
name='GroupType',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='Name', max_length=254)),
],
options={
'verbose_name': 'Group type',
'verbose_name_plural': 'Group types',
},
),
migrations.AddField(
model_name='group',
name='type',
field=models.ForeignKey(to='kindergartens.GroupType'),
),
]
| gpl-3.0 | 5,348,770,137,331,171,000 | 32.953488 | 114 | 0.526712 | false | 4.464832 | false | false | false |
kawamon/hue | desktop/core/ext-py/jaeger-client-4.0.0/jaeger_client/ioloop_util.py | 2 | 2199 | # Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from tornado import gen
from tornado.concurrent import Future
def submit(fn, io_loop, *args, **kwargs):
"""Submit Tornado Coroutine to IOLoop.current().
:param fn: Tornado Coroutine to execute
:param io_loop: Tornado IOLoop where to schedule the coroutine
:param args: Args to pass to coroutine
:param kwargs: Kwargs to pass to coroutine
:returns tornado.concurrent.Future: future result of coroutine
"""
future = Future()
def execute():
"""Execute fn on the IOLoop."""
try:
result = gen.maybe_future(fn(*args, **kwargs))
except Exception:
# The function we ran didn't return a future and instead raised
# an exception. Let's pretend that it returned this dummy
# future with our stack trace.
f = gen.Future()
f.set_exc_info(sys.exc_info())
on_done(f)
else:
result.add_done_callback(on_done)
def on_done(tornado_future):
"""
Set tornado.Future results to the concurrent.Future.
:param tornado_future:
"""
exception = tornado_future.exception()
if not exception:
future.set_result(tornado_future.result())
else:
future.set_exception(exception)
io_loop.add_callback(execute)
return future
def future_result(result):
future = Future()
future.set_result(result)
return future
def future_exception(exception):
future = Future()
future.set_exception(exception)
return future
| apache-2.0 | 4,988,481,130,676,824,000 | 29.541667 | 75 | 0.663029 | false | 4.204589 | false | false | false |
jphnoel/udata | udata/core/discussions/models.py | 1 | 1281 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from datetime import datetime
from udata.models import db
log = logging.getLogger(__name__)
class Message(db.EmbeddedDocument):
content = db.StringField(required=True)
posted_on = db.DateTimeField(default=datetime.now, required=True)
posted_by = db.ReferenceField('User')
class Discussion(db.Document):
user = db.ReferenceField('User')
subject = db.GenericReferenceField()
title = db.StringField(required=True)
discussion = db.ListField(db.EmbeddedDocumentField(Message))
created = db.DateTimeField(default=datetime.now, required=True)
closed = db.DateTimeField()
closed_by = db.ReferenceField('User')
meta = {
'indexes': [
'user',
'subject',
'created'
],
'ordering': ['created'],
}
def person_involved(self, person):
"""Return True if the given person has been involved in the
discussion, False otherwise.
"""
return any(message.posted_by == person for message in self.discussion)
@property
def external_url(self):
return self.subject.url_for(
_anchor='discussion-{id}'.format(id=self.id),
_external=True)
| agpl-3.0 | 290,421,496,397,661,800 | 26.255319 | 78 | 0.640125 | false | 4.053797 | false | false | false |
Jaapp-/cloudomate | cloudomate/test/test_cmdline.py | 1 | 5478 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
from argparse import Namespace
from future import standard_library
from mock.mock import MagicMock
import cloudomate.cmdline as cmdline
from cloudomate.hoster.vpn.azirevpn import AzireVpn
from cloudomate.hoster.vps.linevast import LineVast
from cloudomate.hoster.vps.vps_hoster import VpsOption
standard_library.install_aliases()
class TestCmdLine(unittest.TestCase):
def setUp(self):
self.settings_file = os.path.join(os.path.dirname(__file__), 'resources/test_settings.cfg')
self.vps_options_real = LineVast.get_options
self.vps_purchase_real = LineVast.purchase
def tearDown(self):
LineVast.get_options = self.vps_options_real
LineVast.purchase = self.vps_purchase_real
def test_execute_vps_list(self):
command = ["vps", "list"]
cmdline.execute(command)
def test_execute_vpn_list(self):
command = ["vpn", "list"]
cmdline.execute(command)
def test_execute_vps_options(self):
mock_method = self._mock_vps_options()
command = ["vps", "options", "linevast"]
cmdline.providers["vps"]["linevast"].configurations = []
cmdline.execute(command)
mock_method.assert_called_once()
self._restore_vps_options()
def test_execute_vpn_options(self):
mock_method = self._mock_vpn_options()
command = ["vpn", "options", "azirevpn"]
cmdline.providers["vpn"]["azirevpn"].configurations = []
cmdline.execute(command)
mock_method.assert_called_once()
self._restore_vpn_options()
def test_execute_vps_purchase(self):
self._mock_vps_options([self._create_option()])
purchase = LineVast.purchase
LineVast.purchase = MagicMock()
command = ["vps", "purchase", "linevast", "-f", "-c", self.settings_file, "-rp", "asdf", "0"]
cmdline.execute(command)
LineVast.purchase.assert_called_once()
LineVast.purchase = purchase
self._restore_vps_options()
@staticmethod
def _create_option():
return VpsOption(
name="Option name",
memory="Option ram",
cores="Option cpu",
storage="Option storage",
bandwidth="Option bandwidth",
price=12,
connection="Option connection",
purchase_url="Option url"
)
def test_execute_vps_purchase_verify_options_failure(self):
self._mock_vps_options()
command = ["vps", "purchase", "linevast", "-f", "-c", self.settings_file, "1"]
self._check_exit_code(1, cmdline.execute, command)
self._restore_vps_options()
def test_execute_vps_purchase_unknown_provider(self):
command = ["vps", "purchase", "nonode", "-f", "-rp", "asdf", "1"]
self._check_exit_code(2, cmdline.execute, command)
def test_execute_vps_options_unknown_provider(self):
command = ["vps", "options", "nonode"]
self._check_exit_code(2, cmdline.execute, command)
def _check_exit_code(self, exit_code, method, args):
try:
method(args)
except SystemExit as e:
self.assertEqual(exit_code, e.code)
def test_execute_vps_options_no_provider(self):
command = ["vps", "options"]
self._check_exit_code(2, cmdline.execute, command)
def test_purchase_vps_unknown_provider(self):
args = Namespace()
args.provider = "sd"
args.type = "vps"
self._check_exit_code(2, cmdline.purchase, args)
def test_purchase_no_provider(self):
args = Namespace()
self._check_exit_code(2, cmdline.purchase, args)
def test_purchase_vps_bad_provider(self):
args = Namespace()
args.provider = False
args.type = "vps"
self._check_exit_code(2, cmdline.purchase, args)
def test_purchase_bad_type(self):
args = Namespace()
args.provider = "azirevpn"
args.type = False
self._check_exit_code(2, cmdline.purchase, args)
def test_execute_vps_purchase_high_id(self):
self._mock_vps_options()
command = ["vps", "purchase", "linevast", "-c", self.settings_file, "-rp", "asdf", "1000"]
self._check_exit_code(1, cmdline.execute, command)
self._restore_vps_options()
def test_execute_vps_purchase_low_id(self):
mock = self._mock_vps_options()
command = ["vps", "purchase", "linevast", "-c", self.settings_file, "-rp", "asdf", "-1"]
self._check_exit_code(1, cmdline.execute, command)
mock.assert_called_once()
self._restore_vps_options()
def _mock_vps_options(self, items=None):
if items is None:
items = []
self.vps_options = LineVast.get_options
LineVast.get_options = MagicMock(return_value=items)
return LineVast.get_options
def _restore_vps_options(self):
LineVast.get_options = self.vps_options
def _mock_vpn_options(self, items=None):
if items is None:
items = []
self.vpn_options = AzireVpn.get_options
AzireVpn.get_options = MagicMock(return_value=items)
return AzireVpn.get_options
def _restore_vpn_options(self):
AzireVpn.get_options = self.vpn_options
if __name__ == '__main__':
unittest.main(exit=False)
| lgpl-3.0 | -5,257,675,227,843,671,000 | 33.45283 | 101 | 0.62322 | false | 3.573386 | true | false | false |
scragg0x/realms-wiki | realms/lib/flask_csrf_test_client.py | 2 | 2839 | # Source: https://gist.github.com/singingwolfboy/2fca1de64950d5dfed72
# Want to run your Flask tests with CSRF protections turned on, to make sure
# that CSRF works properly in production as well? Here's an excellent way
# to do it!
# First some imports. I'm assuming you're using Flask-WTF for CSRF protection.
import flask
from flask.testing import FlaskClient as BaseFlaskClient
from flask_wtf.csrf import generate_csrf
# Flask's assumptions about an incoming request don't quite match up with
# what the test client provides in terms of manipulating cookies, and the
# CSRF system depends on cookies working correctly. This little class is a
# fake request that forwards along requests to the test client for setting
# cookies.
class RequestShim(object):
"""
A fake request that proxies cookie-related methods to a Flask test client.
"""
def __init__(self, client):
self.client = client
def set_cookie(self, key, value='', *args, **kwargs):
"Set the cookie on the Flask test client."
server_name = flask.current_app.config["SERVER_NAME"] or "localhost"
return self.client.set_cookie(
server_name, key=key, value=value, *args, **kwargs
)
def delete_cookie(self, key, *args, **kwargs):
"Delete the cookie on the Flask test client."
server_name = flask.current_app.config["SERVER_NAME"] or "localhost"
return self.client.delete_cookie(
server_name, key=key, *args, **kwargs
)
# We're going to extend Flask's built-in test client class, so that it knows
# how to look up CSRF tokens for you!
class FlaskClient(BaseFlaskClient):
@property
def csrf_token(self):
# First, we'll wrap our request shim around the test client, so that
# it will work correctly when Flask asks it to set a cookie.
request = RequestShim(self)
# Next, we need to look up any cookies that might already exist on
# this test client, such as the secure cookie that powers `flask.session`,
# and make a test request context that has those cookies in it.
environ_overrides = {}
self.cookie_jar.inject_wsgi(environ_overrides)
with flask.current_app.test_request_context(
"/login", environ_overrides=environ_overrides,
):
# Now, we call Flask-WTF's method of generating a CSRF token...
csrf_token = generate_csrf()
# ...which also sets a value in `flask.session`, so we need to
# ask Flask to save that value to the cookie jar in the test
# client. This is where we actually use that request shim we made!
flask.current_app.save_session(flask.session, request)
# And finally, return that CSRF token we got from Flask-WTF.
return csrf_token
| gpl-2.0 | 8,793,346,743,163,914,000 | 45.540984 | 82 | 0.673124 | false | 4.090778 | true | false | false |