org_text
stringlengths 830
329k
| texts
sequence | scores
sequence | num_lines
int64 1
8.05k
| avg_score
float64 0
0.27
| check
bool 1
class |
---|---|---|---|---|---|
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
import csv, codecs, cStringIO
from feedback2013.models import Subject, Student, Score, Feedback
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
def write(self, data):
self.stream.write(data)
def simple_export_fb2013(request):
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=feedback2013.csv'
csvwriter = UnicodeWriter(response)
csvwriter.write(codecs.BOM_UTF8)
csvwriter.writerow([u'中文姓名',
u'英文姓名',
u'就读高中',
u'Email',
u'来澳前(中国)所读学校',
u'来澳前(中国)所学最高年级',
u'来澳年份',
u'最终ATAR成绩',
u'录取大学(校区)与专业',
u'意见建议',
u'反馈发布时间',
u'Unit3/4科目',
u'原始分',
u'加减分后',
u'是否为2013年所学,或为2012提前已考?',
u'备注',])
for item in Feedback.objects.all():
csvwriter.writerow([unicode(item.student.chinese_name),
unicode(item.student.english_name),
unicode(item.student.high_school),
unicode(item.student.email),
unicode(item.student.school_in_china),
unicode(item.student.education_in_china),
unicode(item.student.year_study_in_au),
unicode(item.student.final_atar_score),
unicode(item.student.uni_and_major),
unicode(item.comment),
unicode(item.created_date)])
for item2 in Score.objects.filter(student=item.student):
csvwriter.writerow([unicode(),
unicode(),
unicode(),
unicode(),
unicode(),
unicode(),
unicode(),
unicode(),
unicode(),
unicode(),
unicode(),
unicode(item2.subject.name),
unicode(item2.study_score),
unicode(item2.scaled_score),
unicode('Yes' if item2.for_2012_2011 else 'No'),
unicode(item2.remark)])
return response | [
"from django.http import HttpResponse\n",
"from django.core.servers.basehttp import FileWrapper\n",
"import csv, codecs, cStringIO\n",
"\n",
"from feedback2013.models import Subject, Student, Score, Feedback\n",
"\n",
"\n",
"class UnicodeWriter:\n",
"\t\"\"\"\n",
"\tA CSV writer which will write rows to CSV file \"f\",\n",
"\twhich is encoded in the given encoding.\n",
"\t\"\"\"\n",
"\n",
"\tdef __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n",
"\t\t# Redirect output to a queue\n",
"\t\tself.queue = cStringIO.StringIO()\n",
"\t\tself.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n",
"\t\tself.stream = f\n",
"\t\tself.encoder = codecs.getincrementalencoder(encoding)()\n",
"\n",
"\tdef writerow(self, row):\n",
"\t\tself.writer.writerow([s.encode(\"utf-8\") for s in row])\n",
"\t\t# Fetch UTF-8 output from the queue ...\n",
"\t\tdata = self.queue.getvalue()\n",
"\t\tdata = data.decode(\"utf-8\")\n",
"\t\t# ... and reencode it into the target encoding\n",
"\t\tdata = self.encoder.encode(data)\n",
"\t\t# write to the target stream\n",
"\t\tself.stream.write(data)\n",
"\t\t# empty queue\n",
"\t\tself.queue.truncate(0)\n",
"\n",
"\tdef writerows(self, rows):\n",
"\t\tfor row in rows:\n",
"\t\t\tself.writerow(row)\n",
"\t\n",
"\tdef write(self, data):\n",
"\t\tself.stream.write(data)\n",
"\n",
"def simple_export_fb2013(request):\n",
"\tresponse = HttpResponse(mimetype='text/csv')\n",
"\tresponse['Content-Disposition'] = 'attachment; filename=feedback2013.csv'\n",
"\tcsvwriter = UnicodeWriter(response)\n",
"\tcsvwriter.write(codecs.BOM_UTF8)\n",
"\tcsvwriter.writerow([u'中文姓名', \n",
"\t\t\t\t\t\t\t\tu'英文姓名', \n",
"\t\t\t\t\t\t\t\tu'就读高中', \n",
"\t\t\t\t\t\t\t\tu'Email', \n",
"\t\t\t\t\t\t\t\tu'来澳前(中国)所读学校', \n",
"\t\t\t\t\t\t\t\tu'来澳前(中国)所学最高年级', \n",
"\t\t\t\t\t\t\t\tu'来澳年份', \n",
"\t\t\t\t\t\t\t\tu'最终ATAR成绩', \n",
"\t\t\t\t\t\t\t\tu'录取大学(校区)与专业', \n",
"\t\t\t\t\t\t\t\tu'意见建议',\n",
"\t\t\t\t\t\t\t\tu'反馈发布时间',\n",
"\t\t\t\t\t\t\t\tu'Unit3/4科目', \n",
"\t\t\t\t\t\t\t\tu'原始分', \n",
"\t\t\t\t\t\t\t\tu'加减分后', \n",
"\t\t\t\t\t\t\t\tu'是否为2013年所学,或为2012提前已考?', \n",
"\t\t\t\t\t\t\t\tu'备注',])\n",
"\tfor item in Feedback.objects.all():\n",
"\t\tcsvwriter.writerow([unicode(item.student.chinese_name), \n",
"\t\t\t\t\t\t\t\t\tunicode(item.student.english_name),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.student.high_school),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.student.email),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.student.school_in_china),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.student.education_in_china),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.student.year_study_in_au),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.student.final_atar_score),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.student.uni_and_major),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.comment),\n",
"\t\t\t\t\t\t\t\t\tunicode(item.created_date)])\n",
"\t\tfor item2 in Score.objects.filter(student=item.student):\n",
"\t\t\tcsvwriter.writerow([unicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(item2.subject.name),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(item2.study_score),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(item2.scaled_score),\n",
"\t\t\t\t\t\t\t\t\t\tunicode('Yes' if item2.for_2012_2011 else 'No'),\n",
"\t\t\t\t\t\t\t\t\t\tunicode(item2.remark)])\n",
"\treturn response\t\t"
] | [
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0.2,
0.018867924528301886,
0.024390243902439025,
0.2,
0,
0.014492753623188406,
0.03225806451612903,
0.027777777777777776,
0.015625,
0.05555555555555555,
0.017241379310344827,
0,
0.038461538461538464,
0.017543859649122806,
0.023809523809523808,
0.03225806451612903,
0.03333333333333333,
0.02040816326530612,
0.02857142857142857,
0.03225806451612903,
0.038461538461538464,
0.0625,
0.04,
0,
0.03571428571428571,
0.05263157894736842,
0.045454545454545456,
1,
0.041666666666666664,
0.038461538461538464,
0,
0.02857142857142857,
0.021739130434782608,
0.013333333333333334,
0.02702702702702703,
0.029411764705882353,
0.06451612903225806,
0.16666666666666666,
0.16666666666666666,
0.15789473684210525,
0.12,
0.1111111111111111,
0.16666666666666666,
0.13636363636363635,
0.12,
0.11764705882352941,
0.10526315789473684,
0.13043478260869565,
0.17647058823529413,
0.16666666666666666,
0.08333333333333333,
0.17647058823529413,
0.02702702702702703,
0.03389830508474576,
0.044444444444444446,
0.022727272727272728,
0.02631578947368421,
0.020833333333333332,
0.0196078431372549,
0.02040816326530612,
0.02040816326530612,
0.021739130434782608,
0.03125,
0.02631578947368421,
0.01694915254237288,
0.029411764705882353,
0.09523809523809523,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.02564102564102564,
0.02631578947368421,
0.02564102564102564,
0.01694915254237288,
0.029411764705882353,
0.16666666666666666
] | 90 | 0.063146 | false |
#FLM: Component Dump
# Version 2.0
#
# Will look through a font and write out a text file that lists any glyph with a
# component(s), one glyph per line of the file. On each line, the script writes
# the glyph name, the width of the glyph, and then each component name and x, y
# offset for that compnent. These values are all semicolon seperated.
#
# Examples:
# Agrave;587.0;A;0;0;grave;70;0
# Aringacute;587.0;A;0;0;ring;155;139;acute;155;312
#
# This script was originally written in 2006 for John Hudson at Tiro Typeworks
#
# Version 2.0: Tested to work in RoboFont, license changed from GPL to MIT, and
# put on Github.
#
# ---------------------
# The MIT License (MIT)
#
# Copyright (c) 2015 Typefounding
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#Imports
from robofab.world import CurrentFont
from robofab.interface.all.dialogs import PutFile, Message, ProgressBar
#Script
font = CurrentFont()
filePath = PutFile()
file = open(filePath, 'w')
tickCount = len(font)
bar = ProgressBar('Writing dump file', tickCount)
tick = 0
outList = []
for glyph in font:
bar.tick(tick)
tick = tick+1
if len(glyph.components) != 0:
output = glyph.name + ';' + str(glyph.width)
componentNumber = 0
while componentNumber < len(glyph.components):
x, y = glyph.components[componentNumber].offset
output = output + ';' + glyph.components[componentNumber].baseGlyph + ';' + str(x) + ';' + str(y)
componentNumber = componentNumber + 1
output = output + '\n'
outList.append((glyph.index, output))
outDictionary = dict(outList)
outKeys = outDictionary.keys()
outKeys.sort()
keyCount = 0
while keyCount < len(outKeys):
file.write(outDictionary[outKeys[keyCount]])
keyCount = keyCount + 1
bar.close()
file.close()
Message('Dump file written')
| [
"#FLM: Component Dump\n",
"# Version 2.0\n",
"#\n",
"# Will look through a font and write out a text file that lists any glyph with a\n",
"# component(s), one glyph per line of the file. On each line, the script writes\n",
"# the glyph name, the width of the glyph, and then each component name and x, y\n",
"# offset for that compnent. These values are all semicolon seperated.\n",
"#\n",
"# Examples:\n",
"# Agrave;587.0;A;0;0;grave;70;0\n",
"# Aringacute;587.0;A;0;0;ring;155;139;acute;155;312\n",
"#\n",
"# This script was originally written in 2006 for John Hudson at Tiro Typeworks\n",
"#\n",
"# Version 2.0: Tested to work in RoboFont, license changed from GPL to MIT, and\n",
"# put on Github.\n",
"#\n",
"# ---------------------\n",
"# The MIT License (MIT)\n",
"# \n",
"# Copyright (c) 2015 Typefounding\n",
"# \n",
"# Permission is hereby granted, free of charge, to any person obtaining a copy\n",
"# of this software and associated documentation files (the \"Software\"), to deal\n",
"# in the Software without restriction, including without limitation the rights\n",
"# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n",
"# copies of the Software, and to permit persons to whom the Software is\n",
"# furnished to do so, subject to the following conditions:\n",
"# \n",
"# The above copyright notice and this permission notice shall be included in all\n",
"# copies or substantial portions of the Software.\n",
"# \n",
"# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n",
"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n",
"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n",
"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n",
"# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n",
"# SOFTWARE.\n",
"\n",
"\n",
"#Imports\n",
"from robofab.world import CurrentFont\n",
"from robofab.interface.all.dialogs import PutFile, Message, ProgressBar\n",
"\n",
"#Script\n",
"font = CurrentFont()\n",
"filePath = PutFile()\n",
"file = open(filePath, 'w')\n",
"tickCount = len(font)\n",
"bar = ProgressBar('Writing dump file', tickCount)\n",
"tick = 0\n",
"outList = []\n",
"for glyph in font:\n",
"\tbar.tick(tick)\n",
"\ttick = tick+1\n",
"\tif len(glyph.components) != 0:\n",
"\t\toutput = glyph.name + ';' + str(glyph.width)\n",
"\t\tcomponentNumber = 0\n",
"\t\twhile componentNumber < len(glyph.components):\n",
"\t\t\tx, y = glyph.components[componentNumber].offset\n",
"\t\t\toutput = output + ';' + glyph.components[componentNumber].baseGlyph + ';' + str(x) + ';' + str(y)\n",
"\t\t\tcomponentNumber = componentNumber + 1\n",
"\t\toutput = output + '\\n'\n",
"\t\toutList.append((glyph.index, output))\n",
"outDictionary = dict(outList)\n",
"outKeys = outDictionary.keys()\n",
"outKeys.sort()\n",
"keyCount = 0\n",
"while keyCount < len(outKeys):\n",
"\tfile.write(outDictionary[outKeys[keyCount]])\n",
"\tkeyCount = keyCount + 1\n",
"\n",
"bar.close()\n",
"file.close()\n",
"Message('Dump file written')\n"
] | [
0.047619047619047616,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.3333333333333333,
0,
0.3333333333333333,
0,
0,
0,
0,
0,
0,
0.3333333333333333,
0.012345679012345678,
0,
0.3333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.125,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0.06666666666666667,
0.03125,
0.02127659574468085,
0.045454545454545456,
0.02040816326530612,
0.0196078431372549,
0.019801980198019802,
0.024390243902439025,
0.04,
0.025,
0,
0,
0,
0,
0,
0.021739130434782608,
0.04,
0,
0,
0,
0
] | 76 | 0.027366 | false |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem109.py
#
# Darts
# =====
# Published on Friday, 18th November 2005, 06:00 pm
#
# In the game of darts a player throws three darts at a target board which is
# split into twenty equal sized sections numbered one to twenty. The score
# of a dart is determined by the number of the region that the dart lands in. A
# dart landing outside the red/green outer ring scores zero. The black and
# cream regions inside this ring represent single scores. However, the
# red/green outer ring and middle ring score double and treble scores
# respectively. At the centre of the board are two concentric circles called
# the bull region, or bulls-eye. The outer bull is worth 25 points and the
# inner bull is a double, worth 50 points. There are many variations of rules
# but in the most popular game the players will begin with a score 301 or 501
# and the first player to reduce their running total to zero is a winner.
# However, it is normal to play a "doubles out" system, which means that the
# player must land a double (including the double bulls-eye at the centre of
# the board) on their final dart to win; any other dart that would reduce their
# running total to one or lower means the score for that set of three darts is
# "bust". When a player is able to finish on their current score it is called
# a "checkout" and the highest checkout is 170: T20 T20 D25 (two treble 20s and
# double bull). There are exactly eleven distinct ways to checkout on a score
# of 6: D3 D1 D2 S2 D2 D2 D1 S4 D1 S1 S1 D2 S1 T1 D1
# S1 S3 D1 D1 D1 D1 D1 S2 D1 S2 S2 D1 Note that D1 D2 is considered
# different to D2 D1 as they finish on different doubles. However, the
# combination S1 T1 D1 is considered the same as T1 S1 D1. In addition we shall
# not include misses in considering combinations; for example, D3 is the same
# as 0 D3 and 0 0 D3. Incredibly there are 42336 distinct ways of checking out
# in total. How many distinct ways can a player checkout with a score less than
# 100?
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
| [
"# -*- coding: utf-8 -*-\n",
"# ProjectEuler/src/python/problem109.py\n",
"#\n",
"# Darts\n",
"# =====\n",
"# Published on Friday, 18th November 2005, 06:00 pm\n",
"#\n",
"# In the game of darts a player throws three darts at a target board which is\n",
"# split into twenty equal sized sections numbered one to twenty. The score\n",
"# of a dart is determined by the number of the region that the dart lands in. A\n",
"# dart landing outside the red/green outer ring scores zero. The black and\n",
"# cream regions inside this ring represent single scores. However, the\n",
"# red/green outer ring and middle ring score double and treble scores\n",
"# respectively. At the centre of the board are two concentric circles called\n",
"# the bull region, or bulls-eye. The outer bull is worth 25 points and the\n",
"# inner bull is a double, worth 50 points. There are many variations of rules\n",
"# but in the most popular game the players will begin with a score 301 or 501\n",
"# and the first player to reduce their running total to zero is a winner.\n",
"# However, it is normal to play a \"doubles out\" system, which means that the\n",
"# player must land a double (including the double bulls-eye at the centre of\n",
"# the board) on their final dart to win; any other dart that would reduce their\n",
"# running total to one or lower means the score for that set of three darts is\n",
"# \"bust\". When a player is able to finish on their current score it is called\n",
"# a \"checkout\" and the highest checkout is 170: T20 T20 D25 (two treble 20s and\n",
"# double bull). There are exactly eleven distinct ways to checkout on a score\n",
"# of 6: D3 D1 D2 S2 D2 D2 D1 S4 D1 S1 S1 D2 S1 T1 D1\n",
"# S1 S3 D1 D1 D1 D1 D1 S2 D1 S2 S2 D1 Note that D1 D2 is considered\n",
"# different to D2 D1 as they finish on different doubles. However, the\n",
"# combination S1 T1 D1 is considered the same as T1 S1 D1. In addition we shall\n",
"# not include misses in considering combinations; for example, D3 is the same\n",
"# as 0 D3 and 0 0 D3. Incredibly there are 42336 distinct ways of checking out\n",
"# in total. How many distinct ways can a player checkout with a score less than\n",
"# 100?\n",
"\n",
"import projecteuler as pe\n",
"\n",
"def main():\n",
" pass\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0.037037037037037035,
0
] | 41 | 0.002936 | false |
import cv2
from PyQt5.QtWidgets import QErrorMessage
#from socket import socket
#from pickle import loads
from Socket import Socket
class CannotReadFrame(BaseException) :
pass
class StreamReader :
def __init__(self) :
self._server = None
self._capturedDevice = None
pass
def connect(self, addr = '127.0.0.1', port = 4242) :
try:
self._server = Socket()
statusCode = self._server.connect_ex((addr, port))
if statusCode != 0 :
raise statusCode
except :
self.connectLocalCamera()
def connectLocalCamera(self) :
self.close()
qem = QErrorMessage()
qem.showMessage('Не удаётся подключиться к Raspberry Pi: Будет подключена локальная камера')
qem.exec()
self._capturedDevice = cv2.VideoCapture(0)
def releseLocalCamera(self) :
self._capturedDevice.relese()
self._capturedDevice = None
def __del__(self) :
self.close()
def getFrame(self) :
if self._server is not None :
try:
return self._getFrameFromRemoteCamera()
except:
self.connectLocalCamera()
if self._capturedDevice is not None :
try:
return self._getFrameFromLocalCamera()
except:
raise CannotReadFrame
raise CannotReadFrame
def _getFrameFromRemoteCamera(self) :
self._server.sendObject('get_frame')
frame = self._server.recvObject()
return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
def _getFrameFromLocalCamera(self) :
retVal, frame = self._capturedDevice.read()
if retVal == False :
raise CannotReadFrame
return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
def readable(self) :
#заглушка
return True
def recvall(self, sock, size) :
binary = sock.recv(size)
diff = size - len(binary)
while diff :
buf = sock.recv(diff)
diff = diff - len(buf)
binary = binary + buf
return binary
def close(self) :
if self._capturedDevice is not None :
self._capturedDevice.release()
self._capturedDevice = None
if self._server is not None :
try:
#self._server.sendObject('close_conection')
self._server.sendObject('exit')
self._server.close()
except:
pass
finally:
self._server = None
| [
"import cv2\n",
"from PyQt5.QtWidgets import QErrorMessage\n",
"#from socket import socket\n",
"#from pickle import loads\n",
"from Socket import Socket\n",
"\n",
"class CannotReadFrame(BaseException) :\n",
" pass\n",
"\n",
"class StreamReader :\n",
" def __init__(self) :\n",
" self._server = None\n",
" self._capturedDevice = None\n",
" pass\n",
"\n",
" def connect(self, addr = '127.0.0.1', port = 4242) :\n",
" try:\n",
" self._server = Socket()\n",
" statusCode = self._server.connect_ex((addr, port))\n",
" if statusCode != 0 :\n",
" raise statusCode\n",
" except :\n",
" self.connectLocalCamera()\n",
"\n",
" def connectLocalCamera(self) :\n",
" self.close()\n",
" qem = QErrorMessage()\n",
" qem.showMessage('Не удаётся подключиться к Raspberry Pi: Будет подключена локальная камера')\n",
" qem.exec()\n",
" self._capturedDevice = cv2.VideoCapture(0) \n",
"\n",
" def releseLocalCamera(self) :\n",
" self._capturedDevice.relese()\n",
" self._capturedDevice = None\n",
"\n",
" def __del__(self) :\n",
" self.close()\n",
"\n",
" def getFrame(self) :\n",
" if self._server is not None :\n",
" try: \n",
" return self._getFrameFromRemoteCamera() \n",
" except: \n",
" self.connectLocalCamera()\n",
"\n",
" if self._capturedDevice is not None :\n",
" try: \n",
" return self._getFrameFromLocalCamera() \n",
" except: \n",
" raise CannotReadFrame\n",
" raise CannotReadFrame\n",
"\n",
"\n",
" def _getFrameFromRemoteCamera(self) :\n",
" self._server.sendObject('get_frame')\n",
" frame = self._server.recvObject()\n",
" return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"\n",
" def _getFrameFromLocalCamera(self) :\n",
" retVal, frame = self._capturedDevice.read()\n",
" if retVal == False :\n",
" raise CannotReadFrame\n",
" return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"\n",
" def readable(self) :\n",
" #заглушка\n",
" return True\n",
"\n",
" def recvall(self, sock, size) :\n",
" binary = sock.recv(size)\n",
" diff = size - len(binary)\n",
" while diff :\n",
" buf = sock.recv(diff)\n",
" diff = diff - len(buf)\n",
" binary = binary + buf\n",
" return binary\n",
"\n",
" def close(self) :\n",
" if self._capturedDevice is not None :\n",
" self._capturedDevice.release()\n",
" self._capturedDevice = None\n",
" if self._server is not None :\n",
" try:\n",
" #self._server.sendObject('close_conection')\n",
" self._server.sendObject('exit')\n",
" self._server.close()\n",
" except:\n",
" pass\n",
" finally:\n",
" self._server = None\n",
"\n",
"\n",
"\n"
] | [
0,
0,
0.037037037037037035,
0.038461538461538464,
0,
0,
0.05128205128205128,
0,
0,
0.09523809523809523,
0.04,
0,
0,
0,
0,
0.08771929824561403,
0,
0,
0,
0.030303030303030304,
0,
0.11764705882352941,
0,
0,
0.02857142857142857,
0,
0,
0.009900990099009901,
0,
0.01818181818181818,
0,
0.029411764705882353,
0,
0,
0,
0.041666666666666664,
0,
0,
0.04,
0.02631578947368421,
0.05555555555555555,
0.017543859649122806,
0.09523809523809523,
0,
0,
0.021739130434782608,
0.05555555555555555,
0.017857142857142856,
0.09523809523809523,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0.024390243902439025,
0,
0.06896551724137931,
0,
0,
0,
0.04,
0.05555555555555555,
0,
0,
0.027777777777777776,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0.045454545454545456,
0.021739130434782608,
0,
0,
0.02631578947368421,
0,
0.016666666666666666,
0,
0,
0.05,
0,
0,
0,
0,
0,
1
] | 93 | 0.027124 | false |
#!/usr/bin/env python
# coding: UTF-8
print('Hello World!')
#如果try中有异常发生时,将执行异常的归属,执行except。
#异常层层比较,看是否是exception1, exception2...,直到找到其归属,执行相应的except中的语句。
#如果except后面没有任何参数,那么表示所有的exception都交给这段程序处理。
re1 = iter(range(5))
try:
#当循环进行到第6次的时候,re.next()不会再返回元素,而是抛出(raise)StopIteration的异常。
for i in range(100):
print( re1.__next__())
except StopIteration:
print ('here is end ',i)
except TypeError:
print("TypeError")
except:
print("UnHandled Error")
print( '=======with exception handle==')
def func():
try:
re = iter(range(5))
for i in range(20):
print(re.__next__())
except ZeroDivisionError:
print("ZeroDivisionError")
#如果无法将异常交给合适的对象,异常将继续向上层抛出,直到被捕捉或者造成主程序报错。
#如果try中没有异常,那么except部分将跳过,执行else中的语句。
#finally是无论是否有异常,最后都要做的一些事情。
try:
print ('----------------------Call func')
func()
except StopIteration:
print ('func StopIteration')
except NameError:
print ('func NameError')
except:
print("func UnHandled Error")
else:
print ("func No exception")
finally:
print( 'func running out')
#raise exception
def func_ex():
print( '----------------------Begin func_ex' )
raise StopIteration()
print( 'Finished func_ex' )
try:
func_ex()
except StopIteration:
print ('func_ex StopIteration')
try:
1/0
except ZeroDivisionError as reason:
print( '========', reason)
#自定义异常
class ShortInputException(Exception):
'''A user -defined exception class'''
def __init__(self, length, atleast):
Exception.__init__(self)
self.length=length
self.atleast=atleast
#import traceback
try:
s=input('Enter soemthing --> ')
if len(s) < 3:
raise ShortInputException(len(s),3)
except ShortInputException as ex:
#print(traceback.format_exc())
print("==ShortInputException : the input was of length %d, was excepting at least %d ." % (ex.length, ex.atleast))
else:
print( 'Done')
# return 是用于函数返回的,不能打断程序执行,所以try中不能用return。
def func_break(x):
try:
print('-----------Begin func_try_return')
return x+3
#return x
finally:
print('-----------finally func_try_return')
return ++x #在Python中++x不会改变x的值x++根本就是错的。。。
#无论如何都会执行finally, try的return没有用。
print (func_break(11))
#在中断点并不会退出,而是继续执行finally后,才退出。
import sys
def func_break1(x):
try:
print('-----------Begin func_try_return')
sys.exit()
finally:
print('-----------finally func_try_return')
return ++x #11
print (func_break1(11))
input('Please enter a code to quit:')
| [
"#!/usr/bin/env python\n",
"# coding: UTF-8\n",
"\n",
"print('Hello World!')\n",
"\n",
"#如果try中有异常发生时,将执行异常的归属,执行except。\n",
"#异常层层比较,看是否是exception1, exception2...,直到找到其归属,执行相应的except中的语句。\n",
"#如果except后面没有任何参数,那么表示所有的exception都交给这段程序处理。\n",
"re1 = iter(range(5))\n",
"try:\n",
" #当循环进行到第6次的时候,re.next()不会再返回元素,而是抛出(raise)StopIteration的异常。\n",
" for i in range(100):\n",
" print( re1.__next__())\n",
"except StopIteration:\n",
" print ('here is end ',i)\n",
"except TypeError:\n",
" print(\"TypeError\")\n",
"except:\n",
" print(\"UnHandled Error\") \n",
"print( '=======with exception handle==')\n",
"\n",
"\n",
"def func():\n",
" try:\n",
" re = iter(range(5))\n",
" for i in range(20):\n",
" print(re.__next__()) \n",
" except ZeroDivisionError:\n",
" print(\"ZeroDivisionError\")\n",
"#如果无法将异常交给合适的对象,异常将继续向上层抛出,直到被捕捉或者造成主程序报错。\n",
"#如果try中没有异常,那么except部分将跳过,执行else中的语句。\n",
"#finally是无论是否有异常,最后都要做的一些事情。\n",
"try:\n",
" print ('----------------------Call func')\n",
" func()\n",
"except StopIteration:\n",
" print ('func StopIteration')\n",
"except NameError:\n",
" print ('func NameError')\n",
"except:\n",
" print(\"func UnHandled Error\") \n",
"else:\n",
" print (\"func No exception\")\n",
"finally:\n",
" print( 'func running out')\n",
"\n",
" \n",
"#raise exception\n",
"def func_ex():\n",
" print( '----------------------Begin func_ex' )\n",
" raise StopIteration()\n",
" print( 'Finished func_ex' )\n",
" \n",
"try:\n",
" func_ex()\n",
"except StopIteration:\n",
" print ('func_ex StopIteration')\n",
"\n",
" \n",
"try:\n",
" 1/0 \n",
"except ZeroDivisionError as reason:\n",
" print( '========', reason)\n",
" \n",
"#自定义异常\n",
"class ShortInputException(Exception):\n",
" '''A user -defined exception class'''\n",
" def __init__(self, length, atleast):\n",
" Exception.__init__(self)\n",
" self.length=length\n",
" self.atleast=atleast\n",
"\n",
"#import traceback\n",
"try:\n",
" s=input('Enter soemthing --> ')\n",
" if len(s) < 3:\n",
" raise ShortInputException(len(s),3)\n",
"except ShortInputException as ex:\n",
" #print(traceback.format_exc())\n",
" print(\"==ShortInputException : the input was of length %d, was excepting at least %d .\" % (ex.length, ex.atleast))\n",
"else:\n",
" print( 'Done')\n",
" \n",
" \n",
"# return 是用于函数返回的,不能打断程序执行,所以try中不能用return。\n",
"def func_break(x):\n",
" try:\n",
" print('-----------Begin func_try_return')\n",
" return x+3\n",
" #return x\n",
" finally:\n",
" print('-----------finally func_try_return')\n",
" return ++x #在Python中++x不会改变x的值x++根本就是错的。。。\n",
" \n",
"#无论如何都会执行finally, try的return没有用。\n",
"print (func_break(11))\n",
"\n",
"#在中断点并不会退出,而是继续执行finally后,才退出。\n",
"import sys\n",
"def func_break1(x):\n",
" try:\n",
" print('-----------Begin func_try_return')\n",
" sys.exit()\n",
" finally:\n",
" print('-----------finally func_try_return')\n",
" return ++x #11\n",
"print (func_break1(11))\n",
"\n",
"input('Please enter a code to quit:')\n",
"\n",
"\n",
"\n",
" \n",
" "
] | [
0,
0,
0,
0,
0,
0.030303030303030304,
0.015873015873015872,
0.022222222222222223,
0,
0,
0.015625,
0,
0.03225806451612903,
0,
0.06896551724137931,
0,
0,
0.125,
0.03125,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0.05555555555555555,
0.023255813953488372,
0.02631578947368421,
0.034482758620689655,
0.2,
0.021739130434782608,
0,
0.043478260869565216,
0.02857142857142857,
0.05263157894736842,
0.03225806451612903,
0.125,
0.02564102564102564,
0,
0.029411764705882353,
0,
0.030303030303030304,
0,
0.2,
0.058823529411764705,
0,
0.0392156862745098,
0,
0.0625,
0.2,
0.2,
0,
0.043478260869565216,
0.027777777777777776,
0,
0.2,
0,
0.1111111111111111,
0,
0.03225806451612903,
0.2,
0.14285714285714285,
0.02631578947368421,
0,
0,
0,
0.037037037037037035,
0.034482758620689655,
0,
0.05555555555555555,
0.2,
0.027777777777777776,
0,
0.022727272727272728,
0,
0.02857142857142857,
0.008333333333333333,
0,
0.05263157894736842,
0.2,
0.2,
0,
0,
0,
0,
0.05,
0.05555555555555555,
0,
0,
0.0392156862745098,
0.25,
0.030303030303030304,
0.08695652173913043,
0,
0.03225806451612903,
0.09090909090909091,
0.05,
0,
0,
0,
0,
0,
0.041666666666666664,
0.08333333333333333,
0,
0,
0,
0,
0,
0.5,
3
] | 114 | 0.068821 | false |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OrgFileExporter, a python module exporter for exporting WikidPad files to
# orgfiles.
# Copyright (c) 2012 Josep Mones Teixidor
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################
"""
OrgFileExporter.py
https://github.com/jmones/wikidpad_orgfile_exporter
DESCRIPTION
WikidPad exporter to emacs org-mode files (http://orgmode.org).
FEATURES
This exporter lacks a lot of features. It's just a quick hack to export some data
from WikidPad. Feel free to improve it. Currently supported features:
* Exporting data to a unique file, each word in a node.
* It uses WikidPad parser classes to get WikidPad data.
* It uses PyOrgMode library to generate org files.
* It sets up links from wiki words in pages to actual nodes (inserting CUSTOM_ID properties).
* It processes bold and italics.
* It processes tables (only simple ones).
* It processes horizontal rules.
* It processes unordered and ordered lists.
However, these features are known to be missing:
* Does not support footnotes.
* Does not support insertion.
* Does not support roman lists.
* Does not support alpha lists.
* Does not support wikidpad anchors in text.
* Only strategy available to layout nodes is "one word, one node".
* Doesn't have a clever way to solve presence of headings in words.
REQUIREMENTS
* WikidPad version >= 2.2.
* PyOrgMode (included).
INSTALLATION
1. If user_extensions/ folder in WikidPad installation doesn't exist, create it as a sibling of extensions/
2. Copy OrgFileExporter.py to user_extensions/
3. Copy PyOrgMode.py to user_extensions/
USAGE
1. Select Extra/Export
2. Select "Org mode file" in "Export to:" dropdown.
3. Select destination file (it will create a single file).
4. Adjust all other settings as desired.
5. Press OK.
AUTHOR
Josep Mones Teixidor < jmones at gmail dot com >
"""
from pwiki.StringOps import *
from pwiki.Exporters import AbstractExporter
from pwiki.WikiPyparsing import SyntaxNode
import PyOrgMode
import string
import re
import traceback
from copy import copy
WIKIDPAD_PLUGIN = (("Exporters", 1),)
LOG = False
def describeExportersV01(mainControl):
    """
    WikidPad plugin entry point (Exporters API version 01): return the
    exporter classes provided by this module.
    """
    return (OrgFileExporter,)
class OrgFileExporter(AbstractExporter):
    """
    Exports org mode files from WikidPad.

    Layout strategy: every wiki word becomes one top-level org node.  A
    first pass over the word list collects "nice titles" (the text of each
    page's first heading); a second pass converts each page's AST and
    appends it to the org document, tagging every node with a :CUSTOM_ID:
    property so wiki links can target it from anywhere in the file.
    """

    def __init__(self, mainControl):
        AbstractExporter.__init__(self, mainControl)
        self.wordList = None        # sequence of wiki words to export
        self.exportDest = None      # path of the target .org file
        self.currentContent = []    # org content collected for the current word
        self.currentLine = ""       # line currently being assembled
        self.currentIndent = 2      # leading spaces applied by flushLine()
        self.currentWord = ""       # wiki word currently being converted
        self.niceTitles = {}        # wiki word -> friendly node title
        self.listItems = []         # item-counter stack for nested lists

    @staticmethod
    def getExportTypes(mainControl, continuousExport=False):
        """
        Return sequence of tuples with the description of export types provided
        by this object. A tuple has the form (<exp. type>,
        <human readable description>)
        All exporters must provide this as a static method (which can be called
        without constructing an object first.

        mainControl -- PersonalWikiFrame object
        continuousExport -- If True, only types with support for continuous export
        are listed.
        """
        if continuousExport:
            # Continuous export not supported
            return ()
        return (
            (u"org_mode", _(u'Org mode file')),
        )

    def getAddOptPanelsForTypes(self, guiparent, exportTypes):
        """
        Construct all necessary GUI panels for additional options
        for the types contained in exportTypes.
        Returns sequence of tuples (<exp. type>, <panel for add. options or None>)

        The panels should use guiparent as parent.
        If the same panel is used for multiple export types the function can
        and should include all export types for this panel even if some of
        them weren't requested. Panel objects must not be shared by different
        exporter classes.
        """
        if not u"org_mode" in exportTypes:
            return ()
        # This exporter has no additional options, hence no panel.
        return (
            (u"org_mode", None),
        )

    def getExportDestinationWildcards(self, exportType):
        """
        If an export type is intended to go to a file, this function
        returns a (possibly empty) sequence of tuples
        (wildcard description, wildcard filepattern).

        If an export type goes to a directory, None is returned
        """
        if exportType == u"org_mode":
            return ((_(u"Org mode file (*.org)"), "*.org"),)
        return None

    def getAddOptVersion(self):
        """
        Returns the version of the additional options information returned
        by getAddOpt(). If the return value is -1, the version info can't
        be stored between application sessions.

        Otherwise, the addopt information can be stored between sessions
        and can later handled back to the export method of the object
        without previously showing the export dialog.
        """
        return -1

    def getAddOpt(self, addoptpanel):
        """
        Reads additional options from panel addoptpanel.
        If getAddOptVersion() > -1, the return value must be a sequence
        of simple string and/or numeric objects. Otherwise, any object
        can be returned (normally the addoptpanel itself)
        """
        return (1,)

    def setAddOpt(self, addOpt, addoptpanel):
        """
        Shows content of addOpt in the addoptpanel (must not be None).
        This function is only called if getAddOptVersion() != -1.
        """
        pass

    def flushLine(self, force=False):
        """
        Append self.currentLine (indented by self.currentIndent spaces,
        newline-terminated, UTF-8 encoded) to self.currentContent and
        reset it.  With force=True an empty line is emitted as well.
        """
        if force or len(self.currentLine) > 0:
            line = (" " * self.currentIndent) + self.currentLine + "\n"
            self.currentContent.append(line.encode("utf-8"))
            self.currentLine = ""

    def shouldExport(self, wikiWord, wikiPage=None):
        """Return True if wikiWord's "export" attribute allows exporting it."""
        if not wikiPage:
            try:
                wikiPage = self.wikiDocument.getWikiPage(wikiWord)
            except WikiWordNotFoundException:
                return False
        # Last value of the "export" attribute wins; defaults to True.
        return strToBool(wikiPage.getAttributes().get("export", ("True",))[-1])

    def getLinkForWikiWord(self, word, default=None):
        """
        Return the resolved page name for word, or default when the word
        is unknown or excluded from export.
        """
        relUnAlias = self.wikiDocument.getWikiPageNameForLinkTerm(word)
        if relUnAlias is None:
            return default
        if not self.shouldExport(word):
            return default
        return relUnAlias

    def processWikiWord(self, astNodeOrWord, fullContent=None):
        """
        Emit a wiki word as an org [[#CUSTOM_ID]] link when the target is
        exported, or as plain text otherwise.  astNodeOrWord may be a
        SyntaxNode or a bare word string.
        """
        if isinstance(astNodeOrWord, SyntaxNode):
            wikiWord = astNodeOrWord.wikiWord
            titleNode = astNodeOrWord.titleNode
        else:
            wikiWord = astNodeOrWord
            titleNode = None

        title = None
        if titleNode is None:
            # No explicit link title: fall back to the nice title captured
            # during the first export pass.
            title = self.niceTitles.get(wikiWord, None)

        link = self.getLinkForWikiWord(wikiWord)
        if link:
            if titleNode is not None:
                self.currentLine += u"[[#%s][" % link
                self.processAst(fullContent, titleNode)
                self.currentLine += u"]]"
            else:
                if title is None:
                    self.currentLine += u"[[#%s]]" % (link)
                else:
                    self.currentLine += u"[[#%s][%s]]" % (link, title)
        else:
            # Target is not exported: render only the visible text.
            if titleNode is not None:
                self.processAst(fullContent, titleNode)
            else:
                if isinstance(astNodeOrWord, SyntaxNode):
                    self.currentLine += astNodeOrWord.getString()
                else:
                    self.currentLine += astNodeOrWord

    def processUrlLink(self, fullContent, astNode):
        """Emit an URL link as [[url][title]] (title falls back to the core text)."""
        link = astNode.url
        self.currentLine += u"[[%s][" % link
        if astNode.titleNode is not None:
            self.processAst(fullContent, astNode.titleNode)
        else:
            self.currentLine += astNode.coreNode.getString()
        self.currentLine += "]]"

    def processTable(self, content, astNode):
        """
        Convert a table AST node into a PyOrgMode table element appended
        to the current content.

        astNode -- node of type "table"
        """
        self.flushLine()
        table = PyOrgMode.OrgTable.Element()
        table.content = []
        for row in astNode.iterFlatByName("tableRow"):
            orgRow = []
            for cell in row.iterFlatByName("tableCell"):
                orgRow.append(cell.getString().encode("utf-8"))
            table.content.append(orgRow)
        self.currentContent.append(table)

    def processAst(self, content, pageAst):
        """
        Actual token to org-mode converter. May be called recursively.
        """
        for node in pageAst.iterFlatNamed():
            tname = node.name
            if tname is None:
                continue
            elif tname == "plainText":
                if self.removePlainText:
                    # This is the text of the page's first heading; it is
                    # used as the nice title, so don't emit it here.
                    pass
                else:
                    self.currentLine += node.getString()
            elif tname == "lineBreak":
                self.flushLine(True)
            elif tname == "newParagraph":
                self.flushLine()
                self.flushLine(True)
            elif tname == "whitespace":
                self.currentLine += " "
            elif tname == "indentedText":
                self.flushLine()
                self.currentIndent += 2
                self.processAst(content, node)
                # Restore the previous indent once the indented block is
                # done; it used to leak and indent everything that followed.
                self.currentIndent -= 2
            elif tname == "orderedList":
                self.flushLine()
                # Each ordered list gets its own item counter; the "number"
                # branch below increments self.listItems[-1].  Without this
                # push a top-level ordered list raised IndexError.
                self.listItems.append(0)
                self.processAst(content, node)
                self.listItems.pop()
                self.flushLine()
            elif tname == "unorderedList":
                self.flushLine()
                self.listItems.append(0)
                self.processAst(content, node)
                self.listItems.pop()
                self.flushLine()
            elif tname == "romanList":
                self.flushLine()
                print("[ERROR: romanList is not implemented]")
                self.processAst(content, node)
                self.flushLine()
            elif tname == "alphaList":
                self.flushLine()
                print("[ERROR: alphaList is not implemented]")
                self.processAst(content, node)
                self.flushLine()
            elif tname == "bullet":
                self.currentLine += "- "
            elif tname == "number":
                self.listItems[-1] += 1
                self.currentLine += "%d. " % self.listItems[-1]
            elif tname == "roman":
                print("[ERROR: roman is not implemented]")
            elif tname == "alpha":
                print("[ERROR: alpha is not implemented]")
            elif tname == "italics":
                self.currentLine += "/"
                self.processAst(content, node)
                self.currentLine += "/"
            elif tname == "bold":
                self.currentLine += "*"
                self.processAst(content, node)
                self.currentLine += "*"
            elif tname == "htmlTag" or tname == "htmlEntity":
                self.currentLine += node.getString()
            elif tname == "heading":
                # we ignore the heading, it doesn't fit very well in the
                # exporting model we are using (every wikiword is a node)
                self.flushLine()
                # we use the first heading as a friendly title for the node
                if self.itemsProcessed == 0:
                    self.removePlainText = True
                    self.processAst(content, node.contentNode)
                    self.removePlainText = False
                else:
                    self.processAst(content, node.contentNode)
            elif tname == "horizontalLine":
                self.flushLine()
                self.currentLine += "-----"
                self.flushLine()
            elif tname == "preBlock":
                self.flushLine()
                self.currentLine += "#+BEGIN_EXAMPLE"
                self.flushLine()
                for line in node.findFlatByName("preText").getString().split("\n"):
                    self.currentLine += line
                    self.flushLine()
                self.currentLine += "#+END_EXAMPLE"
            elif tname == "todoEntry":
                # we should create nodes but it's difficult to fit in
                # current "each wiki word is a node" scheme
                self.flushLine()
                self.currentLine += "TODO: %s%s" % (node.key, node.delimiter)
                self.processAst(content, node.valueNode)
                self.flushLine()
            elif tname == "script":
                pass  # Hide scripts
            elif tname == "noExport":
                pass  # Hide no export areas
            elif tname == "anchorDef":
                self.currentLine += u"[ERROR: We can't process anchors]"
            elif tname == "wikiWord":
                self.processWikiWord(node, content)
            elif tname == "table":
                self.processTable(content, node)
            elif tname == "footnote":
                self.flushLine()
                self.currentLine += u"[ERROR: We can't process footnotes]"
                self.flushLine()
            elif tname == "urlLink":
                self.processUrlLink(content, node)
            elif tname == "stringEnd":
                pass
            else:
                self.flushLine()
                self.currentLine += u'[Unknown parser node with name "%s" found]' % tname
                self.flushLine()
            self.itemsProcessed += 1

        # if we have a line to flush do it now
        self.flushLine()

    def updateNiceTitle(self, content, word, pageAst):
        """
        If the page starts with a heading whose content is plain text,
        record that text as the friendly title for word.
        """
        # next(..., None) instead of .next(): an empty AST no longer
        # raises StopIteration and aborts the whole export.
        item = next(pageAst.iterFlatNamed(), None)
        if item is not None and item.name == 'heading':
            item = next(item.contentNode.iterFlatNamed(), None)
            if item is not None and item.name == 'plainText':
                self.niceTitles[word] = item.getString()

    def export(self, wikiDocument, wordList, exportType, exportDest,
            compatFilenames, addopt, progressHandler):
        """
        Run export operation.

        wikiDocument -- WikiDocument object
        wordList -- Sequence of wiki words to export
        exportType -- string tag to identify how to export
        exportDest -- Path to destination directory or file to export to
        compatFilenames -- Should the filenames be encoded to be lowest
                level compatible
        addopt -- additional options returned by getAddOpt()
        """
        self.wikiDocument = wikiDocument
        self.wordList = wordList
        self.exportDest = exportDest
        try:
            org = PyOrgMode.OrgDataStructure()

            # First pass: capture nice titles so that links can use them
            # even for words processed later in the second pass.
            for word in self.wordList:
                wikiPage = self.wikiDocument.getWikiPage(word)
                word = wikiPage.getWikiWord()
                content = wikiPage.getLiveText()
                basePageAst = wikiPage.getLivePageAst()
                # default setting: the word itself
                self.niceTitles[word] = word
                self.updateNiceTitle(content, word, basePageAst)

            # Second pass: convert each page and append it as a level-1
            # node carrying a :CUSTOM_ID: property for intra-file links.
            for word in self.wordList:
                wikiPage = self.wikiDocument.getWikiPage(word)
                word = wikiPage.getWikiWord()
                formatDetails = wikiPage.getFormatDetails()
                content = wikiPage.getLiveText()
                basePageAst = wikiPage.getLivePageAst()

                # Reset per-word conversion state.
                self.currentContent = []
                self.currentWord = word
                self.currentLine = ""
                self.itemsProcessed = 0
                self.removePlainText = False
                self.currentIndent = 2
                self.listItems = []
                self.processAst(content, basePageAst)

                node = PyOrgMode.OrgNode.Element()
                node.level = 1
                node.heading = self.niceTitles[word].encode("utf-8")
                drawer = PyOrgMode.OrgDrawer.Element("PROPERTIES")
                customId = ":CUSTOM_ID: " + word
                drawer.content.append(customId.encode("utf-8"))
                node.content.append(drawer)
                node.content.extend(self.currentContent)
                org.root.append_clean(node)

            # Save once, after all words are appended (it used to be
            # rewritten inside the loop, once per word).
            org.save_to_file(self.exportDest)
        except Exception:
            # Best effort: report the problem instead of crashing WikidPad.
            traceback.print_exc()
| [
"# -*- encoding: utf-8 -*-\n",
"##############################################################################\n",
"#\n",
"# OrgFileExporter, a python module exporter for exporting WikidPad files to\n",
"# orgfiles.\n",
"# Copyright (c) 2012 Josep Mones Teixidor\n",
"# All rights reserved.\n",
"# \n",
"# \n",
"# Redistribution and use in source and binary forms, with or without modification,\n",
"# are permitted provided that the following conditions are met:\n",
"# \n",
"# * Redistributions of source code must retain the above copyright notice,\n",
"# this list of conditions and the following disclaimer.\n",
"# * Redistributions in binary form must reproduce the above copyright notice,\n",
"# this list of conditions and the following disclaimer in the documentation\n",
"# and/or other materials provided with the distribution.\n",
"# * Neither the name of the <ORGANIZATION> nor the names of its contributors\n",
"# may be used to endorse or promote products derived from this software\n",
"# without specific prior written permission.\n",
"# \n",
"# \n",
"# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n",
"# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n",
"# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n",
"# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n",
"# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n",
"# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n",
"# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n",
"# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n",
"# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n",
"# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n",
"# POSSIBILITY OF SUCH DAMAGE.\n",
"# \n",
"##############################################################################\n",
"\n",
"\"\"\"\n",
"\tOrgFileExporter.py\n",
" https://github.com/jmones/wikidpad_orgfile_exporter\n",
"\n",
" DESCRIPTION\n",
" WikidPad exporter to emacs org-mode files (http://orgmode.org).\n",
" \n",
" FEATURES\n",
" This exporter lacks a lot of features. It's just a quick hack to export some data\n",
" from WikidPad. Feel free to improved. Current supported features:\n",
" * Exporting data to a unique file, each word in a node.\n",
" * It uses WikidPad parser classes to get WikidPad data.\n",
" * It uses PyOrgMode library to generate org files.\n",
" * It set ups links from wiki words in pages to actual nodes (inserting CUSTOM_ID properties).\n",
" * It processes bold and italics.\n",
" * It processes tables (only simple ones).\n",
" * It processes horizontal rules.\n",
" * It processes unordered and ordered lists.\n",
" \n",
" However this features known to be missing:\n",
" * Does not support footnotes.\n",
" * Does not support insertion.\n",
" * Does not support roman lists.\n",
" * Does not support alpha lists.\n",
" * Does not support wikidpad anchors in text.\n",
" * Only strategy available to layout nodes is \"one word, one node\".\n",
" * Doesn't have a clever way to solve presence of headings in words.\n",
" \n",
" \n",
" REQUIREMENTS\n",
" * WikidPad version >= 2.2.\n",
" * PyOrgMode (included).\n",
" \n",
"\n",
" INSTALLATION\n",
" 1. If user_extensions/ folder in WikidPad installation doesn't exist, create it as a sibling of extensions/\n",
" 2. Copy OrgFileExporter.py to user_extensions/\n",
" 3. Copy PyOrgMode.py to user_extensions/\n",
" \n",
" USAGE\n",
" 1. Select Extra/Export\n",
" 2. Select \"Org mode file\" in \"Export to:\" dropdown.\n",
" 3. Select destination file (it will create a single file).\n",
" 4. Adjust all other settings as desired.\n",
" 5. Press OK.\n",
" \n",
" AUTHOR\n",
" Josep Mones Teixidor < jmones at gmail dot com >\n",
"\"\"\"\n",
"\n",
"from pwiki.StringOps import *\n",
"from pwiki.Exporters import AbstractExporter\n",
"from pwiki.WikiPyparsing import SyntaxNode\n",
"import PyOrgMode\n",
"import string\n",
"import re\n",
"from copy import copy\n",
"\n",
"WIKIDPAD_PLUGIN = ((\"Exporters\", 1),)\n",
"LOG = False\n",
"\n",
"\n",
"def describeExportersV01(mainControl):\n",
" return (OrgFileExporter,)\n",
"\n",
"class OrgFileExporter(AbstractExporter):\n",
" \"\"\"\n",
" Exports org mode files from WikidPad.\n",
" \"\"\"\n",
" def __init__(self, mainControl):\n",
" AbstractExporter.__init__(self, mainControl)\n",
" self.wordList = None\n",
" self.exportDest = None\n",
" self.currentContent = []\n",
" self.currentLine = \"\"\n",
" self.currentIndent = 2\n",
" self.currentWord = \"\"\n",
" self.niceTitles = {}\n",
" self.listItems = []\n",
"\n",
" @staticmethod\n",
" def getExportTypes(mainControl, continuousExport=False):\n",
" \"\"\"\n",
" Return sequence of tuples with the description of export types provided\n",
" by this object. A tuple has the form (<exp. type>,\n",
" <human readable description>)\n",
" All exporters must provide this as a static method (which can be called\n",
" without constructing an object first.\n",
"\n",
" mainControl -- PersonalWikiFrame object\n",
" continuousExport -- If True, only types with support for continuous export\n",
" are listed.\n",
" \"\"\"\n",
" if continuousExport:\n",
" # Continuous export not supported\n",
" return ()\n",
" return (\n",
" (u\"org_mode\", _(u'Org mode file')),\n",
" )\n",
"\n",
" def getAddOptPanelsForTypes(self, guiparent, exportTypes):\n",
" \"\"\"\n",
" Construct all necessary GUI panels for additional options\n",
" for the types contained in exportTypes.\n",
" Returns sequence of tuples (<exp. type>, <panel for add. options or None>)\n",
"\n",
" The panels should use guiparent as parent.\n",
" If the same panel is used for multiple export types the function can\n",
" and should include all export types for this panel even if some of\n",
" them weren't requested. Panel objects must not be shared by different\n",
" exporter classes.\n",
" \"\"\"\n",
" if not u\"org_mode\" in exportTypes:\n",
" return ()\n",
"\n",
" return (\n",
" (u\"org_mode\", None),\n",
" )\n",
"\n",
"\n",
"\n",
" def getExportDestinationWildcards(self, exportType):\n",
" \"\"\"\n",
" If an export type is intended to go to a file, this function\n",
" returns a (possibly empty) sequence of tuples\n",
" (wildcard description, wildcard filepattern).\n",
" \n",
" If an export type goes to a directory, None is returned\n",
" \"\"\"\n",
" if exportType == u\"org_mode\":\n",
" return ((_(u\"Org mode file (*.org)\"), \"*.org\"),) \n",
" return None\n",
"\n",
" def getAddOptVersion(self):\n",
" \"\"\"\n",
" Returns the version of the additional options information returned\n",
" by getAddOpt(). If the return value is -1, the version info can't\n",
" be stored between application sessions.\n",
" \n",
" Otherwise, the addopt information can be stored between sessions\n",
" and can later handled back to the export method of the object\n",
" without previously showing the export dialog.\n",
" \"\"\"\n",
" return -1\n",
"\n",
"\n",
" def getAddOpt(self, addoptpanel):\n",
" \"\"\"\n",
" Reads additional options from panel addoptpanel.\n",
" If getAddOptVersion() > -1, the return value must be a sequence\n",
" of simple string and/or numeric objects. Otherwise, any object\n",
" can be returned (normally the addoptpanel itself)\n",
" \"\"\"\n",
" return (1,)\n",
"\n",
"\n",
" def setAddOpt(self, addOpt, addoptpanel):\n",
" \"\"\"\n",
" Shows content of addOpt in the addoptpanel (must not be None).\n",
" This function is only called if getAddOptVersion() != -1.\n",
" \"\"\"\n",
" pass\n",
" \n",
" def flushLine(self, force=False):\n",
" if force or len(self.currentLine) > 0:\n",
" line = (\" \"*self.currentIndent) + self.currentLine + \"\\n\"\n",
" self.currentContent.append(line.encode(\"utf-8\"))\n",
" self.currentLine = \"\"\n",
" \n",
"\n",
" def shouldExport(self, wikiWord, wikiPage=None):\n",
" if not wikiPage:\n",
" try:\n",
" wikiPage = self.wikiDocument.getWikiPage(wikiWord)\n",
" except WikiWordNotFoundException:\n",
" return False\n",
"\n",
" return strToBool(wikiPage.getAttributes().get(\"export\", (\"True\",))[-1])\n",
"\n",
" def getLinkForWikiWord(self, word, default = None):\n",
" relUnAlias = self.wikiDocument.getWikiPageNameForLinkTerm(word)\n",
" if relUnAlias is None:\n",
" return default\n",
" if not self.shouldExport(word):\n",
" return default\n",
" \n",
" return relUnAlias\n",
"\n",
" def processWikiWord(self, astNodeOrWord, fullContent=None):\n",
" if isinstance(astNodeOrWord, SyntaxNode):\n",
" wikiWord = astNodeOrWord.wikiWord\n",
" titleNode = astNodeOrWord.titleNode\n",
" else:\n",
" wikiWord = astNodeOrWord\n",
" titleNode = None\n",
" \n",
" if titleNode == None:\n",
" title = self.niceTitles.get(wikiWord, None)\n",
" \n",
"\n",
" link = self.getLinkForWikiWord(wikiWord)\n",
"\n",
" if link:\n",
" if titleNode is not None:\n",
" self.currentLine += u\"[[#%s][\" % link\n",
" self.processAst(fullContent, titleNode)\n",
" self.currentLine += u\"]]\"\n",
" else:\n",
" if title is None: \n",
" self.currentLine += u\"[[#%s]]\" % (link)\n",
" else:\n",
" self.currentLine += u\"[[#%s][%s]]\" % (link, title)\n",
" else:\n",
" if titleNode is not None:\n",
" self.processAst(fullContent, titleNode)\n",
" else:\n",
" if isinstance(astNodeOrWord, SyntaxNode):\n",
" self.currentLine += astNodeOrWord.getString()\n",
" else:\n",
" self.currentLine += astNodeOrWord\n",
"\n",
" def processUrlLink(self, fullContent, astNode):\n",
" link = astNode.url\n",
" self.currentLine += u\"[[%s][\" % link\n",
" if astNode.titleNode is not None:\n",
" self.processAst(fullContent, astNode.titleNode)\n",
" else:\n",
" self.currentLine += astNode.coreNode.getString()\n",
" self.currentLine += \"]]\"\n",
"\n",
"\n",
" def processTable(self, content, astNode):\n",
" \"\"\"\n",
" Write out content of a table as HTML code.\n",
" \n",
" astNode -- node of type \"table\"\n",
" \"\"\"\n",
" self.flushLine()\n",
" table = PyOrgMode.OrgTable.Element()\n",
" table.content = []\n",
" \n",
" for row in astNode.iterFlatByName(\"tableRow\"):\n",
" orgRow = []\n",
" for cell in row.iterFlatByName(\"tableCell\"):\n",
" orgRow.append(cell.getString().encode(\"utf-8\"))\n",
" table.content.append(orgRow)\n",
" self.currentContent.append(table)\n",
" \n",
"\n",
" def processAst(self, content, pageAst):\n",
" \"\"\"\n",
" Actual token to org-mode converter. May be called recursively.\n",
" \"\"\"\n",
" for node in pageAst.iterFlatNamed():\n",
" tname = node.name\n",
"\n",
" # self.currentLine += \"{\" + tname + \"}\"\n",
" \n",
" if tname is None:\n",
" continue \n",
" elif tname == \"plainText\":\n",
" if self.removePlainText:\n",
" # This it the text for the first title in a wikiword,\n",
" # we use it as a nice title\n",
" pass\n",
" else:\n",
" self.currentLine += node.getString()\n",
" elif tname == \"lineBreak\":\n",
" self.flushLine(True)\n",
" elif tname == \"newParagraph\":\n",
" self.flushLine()\n",
" self.flushLine(True)\n",
" elif tname == \"whitespace\":\n",
" self.currentLine += \" \"\n",
" elif tname == \"indentedText\":\n",
" self.flushLine()\n",
" self.currentIndent += 2\n",
" self.processAst(content, node)\n",
" elif tname == \"orderedList\":\n",
" self.flushLine()\n",
" self.processAst(content, node)\n",
" self.flushLine()\n",
" elif tname == \"unorderedList\":\n",
" self.flushLine()\n",
" self.listItems.append(0)\n",
" self.processAst(content, node)\n",
" self.listItems.pop()\n",
" self.flushLine()\n",
" elif tname == \"romanList\":\n",
" self.flushLine()\n",
" print \"[ERROR: romanList is not implemented]\"\n",
" self.processAst(content, node)\n",
" self.flushLine()\n",
" elif tname == \"alphaList\":\n",
" self.flushLine()\n",
" print \"[ERROR: alphaList is not implemented]\"\n",
" self.processAst(content, node)\n",
" self.flushLine()\n",
" elif tname == \"bullet\":\n",
" self.currentLine += \"- \";\n",
" elif tname == \"number\":\n",
" self.listItems[-1] += 1\n",
" self.currentLine += \"%d. \" % self.listItems[-1];\n",
" elif tname == \"roman\":\n",
" print \"[ERROR: roman is not implemented]\"\n",
" elif tname == \"alpha\":\n",
" print \"[ERROR: alpha is not implemented]\"\n",
" elif tname == \"italics\":\n",
" self.currentLine += \"/\"\n",
" self.processAst(content, node)\n",
" self.currentLine += \"/\"\n",
" elif tname == \"bold\":\n",
" self.currentLine += \"*\"\n",
" self.processAst(content, node)\n",
" self.currentLine += \"*\"\n",
" \n",
" elif tname == \"htmlTag\" or tname == \"htmlEntity\":\n",
" self.currentLine += node.getString()\n",
"\n",
" elif tname == \"heading\":\n",
" # we ignore the heading, it doesn't fit very well in the\n",
" # exporting model we are using (every wikiword is a node)\n",
" self.flushLine()\n",
" \n",
" # we use the first heading as a friendly title for the node\n",
" if self.itemsProcessed == 0:\n",
" self.removePlainText = True\n",
" self.processAst(content, node.contentNode)\n",
" self.removePlainText = False\n",
" else:\n",
" self.processAst(content, node.contentNode)\n",
"\n",
" elif tname == \"horizontalLine\":\n",
" self.flushLine()\n",
" self.currentLine += \"-----\"\n",
" self.flushLine()\n",
"\n",
" elif tname == \"preBlock\":\n",
" self.flushLine()\n",
" self.currentLine += \"#+BEGIN_EXAMPLE\"\n",
" self.flushLine()\n",
" for line in string.split(node.findFlatByName(\"preText\").getString(), \"\\n\"):\n",
" self.currentLine += line\n",
" self.flushLine()\n",
" self.currentLine += \"#+END_EXAMPLE\"\n",
"\n",
" elif tname == \"todoEntry\":\n",
" # we should create nodes but it's difficult to fit in current \"each wiki word is a node scheme\"\n",
" self.flushLine()\n",
" self.currentLine += \"TODO: %s%s\" % (node.key, node.delimiter)\n",
" self.processAst(content, node.valueNode)\n",
" self.flushLine()\n",
" elif tname == \"script\":\n",
" pass # Hide scripts\n",
" elif tname == \"noExport\":\n",
" pass # Hide no export areas\n",
" elif tname == \"anchorDef\":\n",
" self.currentLine += u\"[ERROR: We can't process anchors]\"\n",
" elif tname == \"wikiWord\":\n",
" self.processWikiWord(node, content)\n",
" elif tname == \"table\":\n",
" self.processTable(content, node)\n",
" elif tname == \"footnote\":\n",
" self.flushLine()\n",
" self.currentLine += u\"[ERROR: We can't process footnotes]\"\n",
" self.flushLine()\n",
" elif tname == \"urlLink\":\n",
" self.processUrlLink(content, node)\n",
" elif tname == \"stringEnd\":\n",
" pass\n",
" else:\n",
" self.flushLine()\n",
" self.currentLine += u'[Unknown parser node with name \"%s\" found]' % tname\n",
" self.flushLine()\n",
" \n",
" self.itemsProcessed += 1\n",
" \n",
"\n",
" # if we have a line to flush do it now\n",
" self.flushLine()\n",
" \n",
" def updateNiceTitle(self, content, word, pageAst):\n",
" \"\"\"\n",
" This gets Nice title\n",
" \"\"\"\n",
" item = pageAst.iterFlatNamed().next()\n",
" if item.name == 'heading': \n",
" item = item.contentNode.iterFlatNamed().next()\n",
" if item.name == 'plainText':\n",
" self.niceTitles[word] = item.getString()\n",
" \n",
" \n",
" def export(self, wikiDocument, wordList, exportType, exportDest,\n",
" compatFilenames, addopt, progressHandler):\n",
" \"\"\"\n",
" Run export operation.\n",
" \n",
" wikiDocument -- WikiDocument object\n",
" wordList -- Sequence of wiki words to export\n",
" exportType -- string tag to identify how to export\n",
" exportDest -- Path to destination directory or file to export to\n",
" compatFilenames -- Should the filenames be encoded to be lowest\n",
" level compatible\n",
" addopt -- additional options returned by getAddOpt()\n",
" \"\"\"\n",
" self.wikiDocument = wikiDocument\n",
" self.wordList = wordList\n",
" self.exportDest = exportDest\n",
" \n",
" try:\n",
" org = PyOrgMode.OrgDataStructure()\n",
"\n",
" # capture nice titles\n",
" for word in self.wordList:\n",
" wikiPage = self.wikiDocument.getWikiPage(word)\n",
"\n",
" word = wikiPage.getWikiWord()\n",
" content = wikiPage.getLiveText()\n",
" basePageAst = wikiPage.getLivePageAst()\n",
" # set default setting\n",
" self.niceTitles[word] = word\n",
" self.updateNiceTitle(content, word, basePageAst)\n",
"\n",
" for word in self.wordList:\n",
" wikiPage = self.wikiDocument.getWikiPage(word)\n",
"\n",
" word = wikiPage.getWikiWord()\n",
" formatDetails = wikiPage.getFormatDetails()\n",
" content = wikiPage.getLiveText()\n",
" basePageAst = wikiPage.getLivePageAst()\n",
" \n",
" self.currentContent = []\n",
" self.currentWord = word\n",
" self.currentLine = \"\"\n",
" self.itemsProcessed = 0\n",
" self.removePlainText = False\n",
" self.currentIndent = 2\n",
" self.listItems = []\n",
" self.processAst(content, basePageAst)\n",
" \n",
"\n",
" node = PyOrgMode.OrgNode.Element()\n",
" node.level = 1\n",
" node.heading = self.niceTitles[word].encode(\"utf-8\")\n",
" \n",
" drawer = PyOrgMode.OrgDrawer.Element(\"PROPERTIES\")\n",
" customId = \":CUSTOM_ID: \" + word\n",
" drawer.content.append(customId.encode(\"utf-8\"))\n",
" node.content.append(drawer)\n",
" node.content.extend(self.currentContent)\n",
"\n",
" org.root.append_clean(node)\n",
" org.save_to_file(self.exportDest) \n",
" except:\n",
" traceback.print_exc()\n",
" \n",
"\n",
"\n",
"\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.16666666666666666,
0.16666666666666666,
0.011627906976744186,
0,
0.16666666666666666,
0.012195121951219513,
0,
0.011764705882352941,
0.011764705882352941,
0,
0.011904761904761904,
0.012345679012345678,
0,
0.16666666666666666,
0.16666666666666666,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.16666666666666666,
0,
0,
0,
0.05,
0.017857142857142856,
0,
0,
0,
0.2,
0,
0.011627906976744186,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0.2,
0,
0,
0,
0.2,
0,
0,
0.008928571428571428,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.018867924528301886,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0.03333333333333333,
0,
0.07692307692307693,
0,
0.02040816326530612,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.5,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0.015384615384615385,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.008928571428571428,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0.058823529411764705,
0,
0.07692307692307693,
0,
0.02127659574468085,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0,
0,
0.058823529411764705,
0.1111111111111111,
0.014492753623188406,
0.01818181818181818,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0.0196078431372549,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0.018518518518518517,
0.0625,
0,
0.07692307692307693,
0,
0,
0,
1
] | 496 | 0.013093 | false |
# Copyright (C) 2009 Tim Gaggstatter <Tim.Gaggstatter AT gmx DOT net>
# Copyright (C) 2010 Eduardo Robles Elvira <edulix AT gmail DOT com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from djangoratings.fields import RatingField
from django.db.models import signals
from user.models import Profile
from tbmessages.utils import new_transfer_email
class Area(models.Model):
name = models.CharField(_("Area"), max_length=40)
def __unicode__(self):
return self.name
class Meta:
ordering = ["name"]
class Category(models.Model):
name = models.CharField(_(u"Category"), max_length=45)
def __unicode__(self):
return self.name
class Meta:
ordering = ["name"]
verbose_name = _(u"Category")
verbose_name_plural = _(u"Categories")
OFFER_CHOICES = (
(True, _('offer')),
(False, _('demand'))
)
class Service(models.Model):
creator = models.ForeignKey(Profile, related_name="services",
verbose_name=_("Creator"))
is_offer = models.BooleanField(_("Service type"), choices=OFFER_CHOICES, default=True)
pub_date = models.DateTimeField(_(u"Publish date"),
auto_now=True, auto_now_add=True)
is_active = models.BooleanField(default=True)
description = models.TextField(_(u"Description"), max_length=400)
category = models.ForeignKey(Category, verbose_name=_('Category'))
area = models.ForeignKey(Area, null=True, blank=True,
verbose_name=_("Area"))
def __unicode__(self):
if self.is_offer:
msj = _("offered")
else:
msj = _("demanded")
msj = unicode(msj)
return "%d: '%s' %s from %s" % (self.id, self.short_name(), msj, self.creator)
def short_name(self):
if len(self.description) < 53:
return self.description
return "%s..." % self.description[:50]
def transfers_count(self):
return self.transfers.count()
def sorted_transfers(self):
return self.transfers.order_by('-request_date')
def messages_count(self):
from tbmessages.models import Message
return Message.objects.filter(service=self).count()
def messages(self):
from tbmessages.models import Message
return Message.objects.filter(service=self)
def credits_transfered(self):
ret = self.transfers.filter(status='d').aggregate(models.Sum('credits'))
return ret['credits__sum'] and ret['credits__sum'] or 0
def credit_hours_transfered(self):
credits = self.credits_transfered()
if credits % 60 == 0:
return credits/60
return credits/60.0
def ongoing_transfers(self, user):
if self.is_offer:
return Transfer.objects.filter(credits_debtor=user, service=self,
status__in=["q", "a"])
else:
return Transfer.objects.filter(credits_payee=user, service=self,
status__in=["q", "a"])
class Meta:
ordering = ('-pub_date', )
TRANSFER_STATUS = (
('q', _('requested')), # q for reQuest
('a', _('accepted')), # a for Accepted
('r', _('cancelled')), # r for Rejected TODO: (but it actually should be c for cancelled)
('d', _('done')), # d for Done
)
class Transfer(models.Model):
rating = RatingField(range=5, allow_anonymous=False, can_change_vote=True)
def int_rating(self):
return int(self.rating.score / self.rating.votes)
# will only be set and used when transfer is not associated with a service
direct_transfer_creator = models.ForeignKey(Profile,
related_name='direct_transfers_created', null=True, blank=True,
verbose_name=_("Direct transfer creator"))
# Person receiving the credits (and giving the service)
credits_payee = models.ForeignKey(Profile, related_name='transfers_received',
verbose_name=_("Credits payee"))
# Person giving the credits (and receiving the service)
credits_debtor = models.ForeignKey(Profile, related_name='transfers_given',
verbose_name=_("Credits debtor"))
service = models.ForeignKey(Service, related_name='transfers', null=True,
blank=True, verbose_name=_("Service"))
# Small description for the received service
description = models.TextField(_(u"Description"), max_length=300)
request_date = models.DateTimeField(_("Transfer request date"),
auto_now=True, auto_now_add=True)
confirmation_date = models.DateTimeField(_(u"Transfer confirmation date"),
null=True)
status = models.CharField(_(u"Status"), max_length=1, choices=TRANSFER_STATUS)
is_public = models.BooleanField(_(u"Is public"), default=False)
# credits in minutes
credits = models.PositiveIntegerField(_(u"Credits"))
def credit_hours(self):
return self.credits/60.0
class meta:
ordering = ['-request_date']
def creator(self):
'''
Transfer creator
'''
if self.service:
return self.service.creator == self.credits_debtor and\
self.credits_payee or self.credits_debtor
else:
return self.direct_transfer_creator
def recipient(self):
'''
the user which is not the creator
'''
if self.service:
return self.service.creator != self.credits_debtor and\
self.credits_payee or self.credits_debtor
else:
return self.direct_transfer_creator == self.credits_debtor and\
self.credits_payee or self.credits_debtor
def is_direct(self):
return not self.service
def status_readable(self):
return TRANSFER_STATUS[self.status]
def __unicode__(self):
return self.description[0:53] + '...'
signals.post_save.connect(new_transfer_email, sender=Transfer)
| [
"# Copyright (C) 2009 Tim Gaggstatter <Tim.Gaggstatter AT gmx DOT net>\n",
"# Copyright (C) 2010 Eduardo Robles Elvira <edulix AT gmail DOT com>\n",
"#\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU Affero General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU Affero General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU Affero General Public License\n",
"# along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\n",
"from django.db import models\n",
"from django.contrib.auth.models import User\n",
"from django.utils.translation import ugettext_lazy as _\n",
"from djangoratings.fields import RatingField\n",
"from django.db.models import signals\n",
"\n",
"from user.models import Profile\n",
"from tbmessages.utils import new_transfer_email\n",
"\n",
"class Area(models.Model):\n",
"\n",
" name = models.CharField(_(\"Area\"), max_length=40)\n",
"\n",
" def __unicode__(self):\n",
" return self.name\n",
"\n",
" class Meta:\n",
" ordering = [\"name\"]\n",
"\n",
"\n",
"class Category(models.Model):\n",
"\n",
" name = models.CharField(_(u\"Category\"), max_length=45)\n",
"\n",
" def __unicode__(self):\n",
" return self.name\n",
"\n",
" class Meta:\n",
" ordering = [\"name\"]\n",
" verbose_name = _(u\"Category\")\n",
" verbose_name_plural = _(u\"Categories\")\n",
"\n",
"\n",
"OFFER_CHOICES = (\n",
" (True, _('offer')),\n",
" (False, _('demand'))\n",
")\n",
"\n",
"class Service(models.Model):\n",
" creator = models.ForeignKey(Profile, related_name=\"services\",\n",
" verbose_name=_(\"Creator\"))\n",
" is_offer = models.BooleanField(_(\"Service type\"), choices=OFFER_CHOICES, default=True)\n",
" pub_date = models.DateTimeField(_(u\"Publish date\"),\n",
" auto_now=True, auto_now_add=True)\n",
" is_active = models.BooleanField(default=True)\n",
" description = models.TextField(_(u\"Description\"), max_length=400)\n",
" category = models.ForeignKey(Category, verbose_name=_('Category'))\n",
" area = models.ForeignKey(Area, null=True, blank=True,\n",
" verbose_name=_(\"Area\"))\n",
"\n",
" def __unicode__(self):\n",
" if self.is_offer:\n",
" msj = _(\"offered\")\n",
" else:\n",
" msj = _(\"demanded\")\n",
" msj = unicode(msj)\n",
" return \"%d: '%s' %s from %s\" % (self.id, self.short_name(), msj, self.creator)\n",
"\n",
" def short_name(self):\n",
" if len(self.description) < 53:\n",
" return self.description\n",
"\n",
" return \"%s...\" % self.description[:50]\n",
"\n",
" def transfers_count(self):\n",
" return self.transfers.count()\n",
"\n",
" def sorted_transfers(self):\n",
" return self.transfers.order_by('-request_date')\n",
"\n",
" def messages_count(self):\n",
" from tbmessages.models import Message\n",
" return Message.objects.filter(service=self).count()\n",
"\n",
" def messages(self):\n",
" from tbmessages.models import Message\n",
" return Message.objects.filter(service=self)\n",
"\n",
" def credits_transfered(self):\n",
" ret = self.transfers.filter(status='d').aggregate(models.Sum('credits'))\n",
" return ret['credits__sum'] and ret['credits__sum'] or 0\n",
"\n",
" def credit_hours_transfered(self):\n",
" credits = self.credits_transfered()\n",
" if credits % 60 == 0:\n",
" return credits/60\n",
"\n",
" return credits/60.0\n",
"\n",
" def ongoing_transfers(self, user):\n",
" if self.is_offer:\n",
" return Transfer.objects.filter(credits_debtor=user, service=self,\n",
" status__in=[\"q\", \"a\"])\n",
" else:\n",
" return Transfer.objects.filter(credits_payee=user, service=self,\n",
" status__in=[\"q\", \"a\"])\n",
"\n",
" class Meta:\n",
" ordering = ('-pub_date', )\n",
"\n",
"\n",
"TRANSFER_STATUS = (\n",
" ('q', _('requested')), # q for reQuest\n",
" ('a', _('accepted')), # a for Accepted\n",
" ('r', _('cancelled')), # r for Rejected TODO: (but it actually should be c for cancelled)\n",
" ('d', _('done')), # d for Done\n",
")\n",
"\n",
"class Transfer(models.Model):\n",
" rating = RatingField(range=5, allow_anonymous=False, can_change_vote=True)\n",
"\n",
" def int_rating(self):\n",
" return int(self.rating.score / self.rating.votes)\n",
"\n",
" # will only be set and used when transfer is not associated with a service\n",
" direct_transfer_creator = models.ForeignKey(Profile,\n",
" related_name='direct_transfers_created', null=True, blank=True,\n",
" verbose_name=_(\"Direct transfer creator\"))\n",
"\n",
" # Person receiving the credits (and giving the service)\n",
" credits_payee = models.ForeignKey(Profile, related_name='transfers_received',\n",
" verbose_name=_(\"Credits payee\"))\n",
"\n",
" # Person giving the credits (and receiving the service)\n",
" credits_debtor = models.ForeignKey(Profile, related_name='transfers_given',\n",
" verbose_name=_(\"Credits debtor\"))\n",
"\n",
" service = models.ForeignKey(Service, related_name='transfers', null=True,\n",
" blank=True, verbose_name=_(\"Service\"))\n",
"\n",
" # Small description for the received service\n",
" description = models.TextField(_(u\"Description\"), max_length=300)\n",
"\n",
" request_date = models.DateTimeField(_(\"Transfer request date\"),\n",
" auto_now=True, auto_now_add=True)\n",
"\n",
" confirmation_date = models.DateTimeField(_(u\"Transfer confirmation date\"),\n",
" null=True)\n",
"\n",
" status = models.CharField(_(u\"Status\"), max_length=1, choices=TRANSFER_STATUS)\n",
"\n",
" is_public = models.BooleanField(_(u\"Is public\"), default=False)\n",
"\n",
" # credits in minutes\n",
" credits = models.PositiveIntegerField(_(u\"Credits\"))\n",
"\n",
" def credit_hours(self):\n",
" return self.credits/60.0\n",
"\n",
" class meta:\n",
" ordering = ['-request_date']\n",
"\n",
" def creator(self):\n",
" '''\n",
" Transfer creator\n",
" '''\n",
" if self.service:\n",
" return self.service.creator == self.credits_debtor and\\\n",
" self.credits_payee or self.credits_debtor\n",
" else:\n",
" return self.direct_transfer_creator\n",
"\n",
" def recipient(self):\n",
" '''\n",
" the user which is not the creator\n",
" '''\n",
" if self.service:\n",
" return self.service.creator != self.credits_debtor and\\\n",
" self.credits_payee or self.credits_debtor\n",
" else:\n",
" return self.direct_transfer_creator == self.credits_debtor and\\\n",
" self.credits_payee or self.credits_debtor\n",
"\n",
" def is_direct(self):\n",
" return not self.service\n",
"\n",
" def status_readable(self):\n",
" return TRANSFER_STATUS[self.status]\n",
"\n",
" def __unicode__(self):\n",
" return self.description[0:53] + '...'\n",
"\n",
"signals.post_save.connect(new_transfer_email, sender=Transfer)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0.02857142857142857,
0.01098901098901099,
0,
0.023809523809523808,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0.023255813953488372,
0.02127659574468085,
0.02857142857142857,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.013888888888888888,
0.0196078431372549,
0,
0,
0.012195121951219513,
0.024390243902439025,
0,
0,
0,
0.023809523809523808,
0,
0,
0.02127659574468085,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0.05263157894736842,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872
] | 200 | 0.00296 | false |
RowNum = int(input("Row: "))
ColumnNum = int(input("Column: "))
MatrixRow = []
MatrixColumn = []
for i in range(0, RowNum):
for j in range(0, ColumnNum):
MatrixColumn.append(j)
MatrixRow.append(MatrixColumn)
MatrixColumn = []
LineIndex = 0
j = 1
Max = ColumnNum * RowNum
while 1:
for i in range(LineIndex, ColumnNum-1-LineIndex):
MatrixRow[LineIndex][i] = j
j = j + 1
if j > Max:
break
for i in range(LineIndex, RowNum-1-LineIndex):
MatrixRow[i][ColumnNum-1-LineIndex] = j
j = j + 1
if j > Max:
break
for i in range(LineIndex+1, ColumnNum-LineIndex):
MatrixRow[RowNum-1-LineIndex][ColumnNum-i] = j
j = j + 1
if j > Max:
break
for i in range(LineIndex+1, RowNum-LineIndex):
MatrixRow[RowNum-i][LineIndex] = j
j = j + 1
if j > Max:
break
LineIndex = LineIndex + 1
if j > Max:
break
for i in range(0, RowNum):
print(MatrixRow[i])
| [
"RowNum = int(input(\"Row: \"))\n",
"ColumnNum = int(input(\"Column: \"))\n",
"MatrixRow = []\n",
"MatrixColumn = []\n",
"for i in range(0, RowNum):\n",
" for j in range(0, ColumnNum):\n",
" MatrixColumn.append(j)\n",
" MatrixRow.append(MatrixColumn)\n",
" MatrixColumn = []\n",
"LineIndex = 0\n",
"j = 1\n",
"Max = ColumnNum * RowNum\n",
"while 1:\n",
" for i in range(LineIndex, ColumnNum-1-LineIndex):\n",
" MatrixRow[LineIndex][i] = j\n",
" j = j + 1\n",
" if j > Max:\n",
" break\n",
" for i in range(LineIndex, RowNum-1-LineIndex):\n",
" MatrixRow[i][ColumnNum-1-LineIndex] = j\n",
" j = j + 1\n",
" if j > Max:\n",
" break\n",
" for i in range(LineIndex+1, ColumnNum-LineIndex):\n",
" MatrixRow[RowNum-1-LineIndex][ColumnNum-i] = j\n",
" j = j + 1\n",
" if j > Max:\n",
" break\n",
" for i in range(LineIndex+1, RowNum-LineIndex):\n",
" MatrixRow[RowNum-i][LineIndex] = j\n",
" j = j + 1\n",
" if j > Max:\n",
" break\n",
" LineIndex = LineIndex + 1\n",
" if j > Max:\n",
" break\n",
"for i in range(0, RowNum):\n",
" print(MatrixRow[i])\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 38 | 0.001012 | false |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem428.py
#
# Necklace of circles
# ===================
# Published on Sunday, 19th May 2013, 01:00 am
#
# Let a, b and c be positive numbers. Let W, X, Y, Z be four collinear points
# where |WX| = a, |XY| = b, |YZ| = c and |WZ| = a + b + c. Let Cin be the
# circle having the diameter XY. Let Cout be the circle having the diameter
# WZ. The triplet (a, b, c) is called a necklace triplet if you can place k
# 3 distinct circles C1, C2, ..., Ck such that: Ci has no common interior
# points with any Cj for 1 i, j k and i j, Ci is tangent to both Cin and
# Cout for 1 i k, Ci is tangent to Ci+1 for 1 i < k, and Ck is tangent to
# C1. For example, (5, 5, 5) and (4, 3, 21) are necklace triplets, while it
# can be shown that (2, 2, 5) is not. Let T(n) be the number of necklace
# triplets (a, b, c) such that a, b and c are positive integers, and b n. For
# example, T(1) = 9, T(20) = 732 and T(3000) = 438106. Find T(1 000 000
# 000).
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
| [
"# -*- coding: utf-8 -*-\n",
"# ProjectEuler/src/python/problem428.py\n",
"#\n",
"# Necklace of circles\n",
"# ===================\n",
"# Published on Sunday, 19th May 2013, 01:00 am\n",
"#\n",
"# Let a, b and c be positive numbers. Let W, X, Y, Z be four collinear points\n",
"# where |WX| = a, |XY| = b, |YZ| = c and |WZ| = a + b + c. Let Cin be the\n",
"# circle having the diameter XY. Let Cout be the circle having the diameter\n",
"# WZ. The triplet (a, b, c) is called a necklace triplet if you can place k\n",
"# 3 distinct circles C1, C2, ..., Ck such that: Ci has no common interior\n",
"# points with any Cj for 1 i, j k and i j, Ci is tangent to both Cin and\n",
"# Cout for 1 i k, Ci is tangent to Ci+1 for 1 i < k, and Ck is tangent to\n",
"# C1. For example, (5, 5, 5) and (4, 3, 21) are necklace triplets, while it\n",
"# can be shown that (2, 2, 5) is not. Let T(n) be the number of necklace\n",
"# triplets (a, b, c) such that a, b and c are positive integers, and b n. For\n",
"# example, T(1) = 9, T(20) = 732 and T(3000) = 438106. Find T(1 000 000\n",
"# 000).\n",
"\n",
"import projecteuler as pe\n",
"\n",
"def main():\n",
" pass\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0.037037037037037035,
0
] | 27 | 0.004458 | false |
# This file is part of Plex:CS.
#
# Plex:CS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plex:CS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plex:CS. If not, see <http://www.gnu.org/licenses/>.
from operator import itemgetter
from xml.dom import minidom
import unicodedata
import plexcs
import datetime
import fnmatch
import shutil
import time
import sys
import re
import os
import json
import xmltodict
import math
def multikeysort(items, columns):
comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
if result:
return mult * result
else:
return 0
return sorted(items, cmp=comparer)
def checked(variable):
if variable:
return 'Checked'
else:
return ''
def radio(variable, pos):
if variable == pos:
return 'Checked'
else:
return ''
def latinToAscii(unicrap):
"""
From couch potato
"""
xlate = {
0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',
0xc6: 'Ae', 0xc7: 'C',
0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e',
0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',
0xd0: 'Th', 0xd1: 'N',
0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',
0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',
0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',
0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',
0xe6: 'ae', 0xe7: 'c',
0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',
0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',
0xf0: 'th', 0xf1: 'n',
0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',
0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',
0xfd: 'y', 0xfe: 'th', 0xff: 'y',
0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'",
0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
0xd7: '*', 0xf7: '/'
}
r = ''
if unicrap:
for i in unicrap:
if ord(i) in xlate:
r += xlate[ord(i)]
elif ord(i) >= 0x80:
pass
else:
r += str(i)
return r
def convert_milliseconds(ms):
seconds = ms / 1000
gmtime = time.gmtime(seconds)
if seconds > 3600:
minutes = time.strftime("%H:%M:%S", gmtime)
else:
minutes = time.strftime("%M:%S", gmtime)
return minutes
def convert_milliseconds_to_minutes(ms):
if str(ms).isdigit():
seconds = float(ms) / 1000
minutes = round(seconds / 60, 0)
return math.trunc(minutes)
return 0
def convert_seconds(s):
gmtime = time.gmtime(s)
if s > 3600:
minutes = time.strftime("%H:%M:%S", gmtime)
else:
minutes = time.strftime("%M:%S", gmtime)
return minutes
def today():
today = datetime.date.today()
yyyymmdd = datetime.date.isoformat(today)
return yyyymmdd
def now():
now = datetime.datetime.now()
return now.strftime("%Y-%m-%d %H:%M:%S")
def human_duration(s):
hd = ''
if str(s).isdigit():
d = int(s / 84600)
h = int((s % 84600) / 3600)
m = int(((s % 84600) % 3600) / 60)
s = int(((s % 84600) % 3600) % 60)
hd_list = []
if d > 0:
hd_list.append(str(d) + ' days')
if h > 0:
hd_list.append(str(h) + ' hrs')
if m > 0:
hd_list.append(str(m) + ' mins')
if s > 0:
hd_list.append(str(s) + ' secs')
hd = ' '.join(hd_list)
return hd
else:
return hd
def get_age(date):
try:
split_date = date.split('-')
except:
return False
try:
days_old = int(split_date[0]) * 365 + int(split_date[1]) * 30 + int(split_date[2])
except IndexError:
days_old = False
return days_old
def bytes_to_mb(bytes):
mb = int(bytes) / 1048576
size = '%.1f MB' % mb
return size
def mb_to_bytes(mb_str):
result = re.search('^(\d+(?:\.\d+)?)\s?(?:mb)?', mb_str, flags=re.I)
if result:
return int(float(result.group(1)) * 1048576)
def piratesize(size):
split = size.split(" ")
factor = float(split[0])
unit = split[1].upper()
if unit == 'MiB':
size = factor * 1048576
elif unit == 'MB':
size = factor * 1000000
elif unit == 'GiB':
size = factor * 1073741824
elif unit == 'GB':
size = factor * 1000000000
elif unit == 'KiB':
size = factor * 1024
elif unit == 'KB':
size = factor * 1000
elif unit == "B":
size = factor
else:
size = 0
return size
def replace_all(text, dic, normalize=False):
if not text:
return ''
for i, j in dic.iteritems():
if normalize:
try:
if sys.platform == 'darwin':
j = unicodedata.normalize('NFD', j)
else:
j = unicodedata.normalize('NFC', j)
except TypeError:
j = unicodedata.normalize('NFC', j.decode(plexcs.SYS_ENCODING, 'replace'))
text = text.replace(i, j)
return text
def replace_illegal_chars(string, type="file"):
if type == "file":
string = re.sub('[\?"*:|<>/]', '_', string)
if type == "folder":
string = re.sub('[:\?<>"|]', '_', string)
return string
def cleanName(string):
pass1 = latinToAscii(string).lower()
out_string = re.sub('[\.\-\/\!\@\#\$\%\^\&\*\(\)\+\-\"\'\,\;\:\[\]\{\}\<\>\=\_]', '', pass1).encode('utf-8')
return out_string
def cleanTitle(title):
title = re.sub('[\.\-\/\_]', ' ', title).lower()
# Strip out extra whitespace
title = ' '.join(title.split())
title = title.title()
return title
def split_path(f):
"""
Split a path into components, starting with the drive letter (if any). Given
a path, os.path.join(*split_path(f)) should be path equal to f.
"""
components = []
drive, path = os.path.splitdrive(f)
# Strip the folder from the path, iterate until nothing is left
while True:
path, folder = os.path.split(path)
if folder:
components.append(folder)
else:
if path:
components.append(path)
break
# Append the drive (if any)
if drive:
components.append(drive)
# Reverse components
components.reverse()
# Done
return components
def extract_logline(s):
# Default log format
pattern = re.compile(r'(?P<timestamp>.*?)\s\-\s(?P<level>.*?)\s*\:\:\s(?P<thread>.*?)\s\:\s(?P<message>.*)', re.VERBOSE)
match = pattern.match(s)
if match:
timestamp = match.group("timestamp")
level = match.group("level")
thread = match.group("thread")
message = match.group("message")
return (timestamp, level, thread, message)
else:
return None
def split_string(mystring, splitvar=','):
mylist = []
for each_word in mystring.split(splitvar):
mylist.append(each_word.strip())
return mylist
def create_https_certificates(ssl_cert, ssl_key):
"""
Create a pair of self-signed HTTPS certificares and store in them in
'ssl_cert' and 'ssl_key'. Method assumes pyOpenSSL is installed.
This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard).
"""
from plexcs import logger
from OpenSSL import crypto
from certgen import createKeyPair, createCertRequest, createCertificate, \
TYPE_RSA, serial
# Create the CA Certificate
cakey = createKeyPair(TYPE_RSA, 2048)
careq = createCertRequest(cakey, CN="Certificate Authority")
cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
pkey = createKeyPair(TYPE_RSA, 2048)
req = createCertRequest(pkey, CN="Plex:CS")
cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
# Save the key and certificate to disk
try:
with open(ssl_key, "w") as fp:
fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
with open(ssl_cert, "w") as fp:
fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
except IOError as e:
logger.error("Error creating SSL key and certificate: %s", e)
return False
return True
def cast_to_float(s):
try:
return float(s)
except ValueError:
return -1
def convert_xml_to_json(xml):
o = xmltodict.parse(xml)
return json.dumps(o)
def convert_xml_to_dict(xml):
o = xmltodict.parse(xml)
return o
def get_percent(value1, value2):
if str(value1).isdigit() and str(value2).isdigit():
value1 = cast_to_float(value1)
value2 = cast_to_float(value2)
else:
return 0
if value1 != 0 and value2 != 0:
percent = (value1 / value2) * 100
else:
percent = 0
return math.trunc(percent)
def parse_xml(unparsed=None):
from plexcs import logger
if unparsed:
try:
xml_parse = minidom.parseString(unparsed)
return xml_parse
except Exception as e:
logger.warn("Error parsing XML. %s" % e)
return []
except:
logger.warn("Error parsing XML.")
return []
else:
logger.warn("XML parse request made but no data received.")
return []
"""
Validate xml keys to make sure they exist and return their attribute value, return blank value is none found
"""
def get_xml_attr(xml_key, attribute, return_bool=False, default_return=''):
if xml_key.getAttribute(attribute):
if return_bool:
return True
else:
return xml_key.getAttribute(attribute)
else:
if return_bool:
return False
else:
return default_return
def process_json_kwargs(json_kwargs):
params = {}
if json_kwargs:
params = json.loads(json_kwargs)
return params
def sanitize(string):
if string:
return unicode(string).replace('<','<').replace('>','>')
else:
return ''
| [
"# This file is part of Plex:CS.\n",
"#\n",
"# Plex:CS is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# Plex:CS is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with Plex:CS. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"from operator import itemgetter\n",
"from xml.dom import minidom\n",
"\n",
"import unicodedata\n",
"import plexcs\n",
"import datetime\n",
"import fnmatch\n",
"import shutil\n",
"import time\n",
"import sys\n",
"import re\n",
"import os\n",
"import json\n",
"import xmltodict\n",
"import math\n",
"\n",
"\n",
"def multikeysort(items, columns):\n",
" comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]\n",
"\n",
" def comparer(left, right):\n",
" for fn, mult in comparers:\n",
" result = cmp(fn(left), fn(right))\n",
" if result:\n",
" return mult * result\n",
" else:\n",
" return 0\n",
"\n",
" return sorted(items, cmp=comparer)\n",
"\n",
"\n",
"def checked(variable):\n",
" if variable:\n",
" return 'Checked'\n",
" else:\n",
" return ''\n",
"\n",
"\n",
"def radio(variable, pos):\n",
"\n",
" if variable == pos:\n",
" return 'Checked'\n",
" else:\n",
" return ''\n",
"\n",
"\n",
"def latinToAscii(unicrap):\n",
" \"\"\"\n",
" From couch potato\n",
" \"\"\"\n",
" xlate = {\n",
" 0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',\n",
" 0xc6: 'Ae', 0xc7: 'C',\n",
" 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e',\n",
" 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',\n",
" 0xd0: 'Th', 0xd1: 'N',\n",
" 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',\n",
" 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',\n",
" 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',\n",
" 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',\n",
" 0xe6: 'ae', 0xe7: 'c',\n",
" 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',\n",
" 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',\n",
" 0xf0: 'th', 0xf1: 'n',\n",
" 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',\n",
" 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',\n",
" 0xfd: 'y', 0xfe: 'th', 0xff: 'y',\n",
" 0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',\n",
" 0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',\n",
" 0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',\n",
" 0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',\n",
" 0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: \"'\",\n",
" 0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',\n",
" 0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',\n",
" 0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',\n",
" 0xd7: '*', 0xf7: '/'\n",
" }\n",
"\n",
" r = ''\n",
" if unicrap:\n",
" for i in unicrap:\n",
" if ord(i) in xlate:\n",
" r += xlate[ord(i)]\n",
" elif ord(i) >= 0x80:\n",
" pass\n",
" else:\n",
" r += str(i)\n",
"\n",
" return r\n",
"\n",
"\n",
"def convert_milliseconds(ms):\n",
"\n",
" seconds = ms / 1000\n",
" gmtime = time.gmtime(seconds)\n",
" if seconds > 3600:\n",
" minutes = time.strftime(\"%H:%M:%S\", gmtime)\n",
" else:\n",
" minutes = time.strftime(\"%M:%S\", gmtime)\n",
"\n",
" return minutes\n",
"\n",
"def convert_milliseconds_to_minutes(ms):\n",
"\n",
" if str(ms).isdigit():\n",
" seconds = float(ms) / 1000\n",
" minutes = round(seconds / 60, 0)\n",
"\n",
" return math.trunc(minutes)\n",
"\n",
" return 0\n",
"\n",
"def convert_seconds(s):\n",
"\n",
" gmtime = time.gmtime(s)\n",
" if s > 3600:\n",
" minutes = time.strftime(\"%H:%M:%S\", gmtime)\n",
" else:\n",
" minutes = time.strftime(\"%M:%S\", gmtime)\n",
"\n",
" return minutes\n",
"\n",
"\n",
"def today():\n",
" today = datetime.date.today()\n",
" yyyymmdd = datetime.date.isoformat(today)\n",
" return yyyymmdd\n",
"\n",
"\n",
"def now():\n",
" now = datetime.datetime.now()\n",
" return now.strftime(\"%Y-%m-%d %H:%M:%S\")\n",
"\n",
"def human_duration(s):\n",
"\n",
" hd = ''\n",
"\n",
" if str(s).isdigit():\n",
" d = int(s / 84600)\n",
" h = int((s % 84600) / 3600)\n",
" m = int(((s % 84600) % 3600) / 60)\n",
" s = int(((s % 84600) % 3600) % 60)\n",
"\n",
" hd_list = []\n",
" if d > 0:\n",
" hd_list.append(str(d) + ' days')\n",
" if h > 0:\n",
" hd_list.append(str(h) + ' hrs')\n",
" if m > 0:\n",
" hd_list.append(str(m) + ' mins')\n",
" if s > 0:\n",
" hd_list.append(str(s) + ' secs')\n",
"\n",
" hd = ' '.join(hd_list)\n",
"\n",
" return hd\n",
" else:\n",
" return hd\n",
"\n",
"def get_age(date):\n",
"\n",
" try:\n",
" split_date = date.split('-')\n",
" except:\n",
" return False\n",
"\n",
" try:\n",
" days_old = int(split_date[0]) * 365 + int(split_date[1]) * 30 + int(split_date[2])\n",
" except IndexError:\n",
" days_old = False\n",
"\n",
" return days_old\n",
"\n",
"\n",
"def bytes_to_mb(bytes):\n",
"\n",
" mb = int(bytes) / 1048576\n",
" size = '%.1f MB' % mb\n",
" return size\n",
"\n",
"\n",
"def mb_to_bytes(mb_str):\n",
" result = re.search('^(\\d+(?:\\.\\d+)?)\\s?(?:mb)?', mb_str, flags=re.I)\n",
" if result:\n",
" return int(float(result.group(1)) * 1048576)\n",
"\n",
"\n",
"def piratesize(size):\n",
" split = size.split(\" \")\n",
" factor = float(split[0])\n",
" unit = split[1].upper()\n",
"\n",
" if unit == 'MiB':\n",
" size = factor * 1048576\n",
" elif unit == 'MB':\n",
" size = factor * 1000000\n",
" elif unit == 'GiB':\n",
" size = factor * 1073741824\n",
" elif unit == 'GB':\n",
" size = factor * 1000000000\n",
" elif unit == 'KiB':\n",
" size = factor * 1024\n",
" elif unit == 'KB':\n",
" size = factor * 1000\n",
" elif unit == \"B\":\n",
" size = factor\n",
" else:\n",
" size = 0\n",
"\n",
" return size\n",
"\n",
"\n",
"def replace_all(text, dic, normalize=False):\n",
"\n",
" if not text:\n",
" return ''\n",
"\n",
" for i, j in dic.iteritems():\n",
" if normalize:\n",
" try:\n",
" if sys.platform == 'darwin':\n",
" j = unicodedata.normalize('NFD', j)\n",
" else:\n",
" j = unicodedata.normalize('NFC', j)\n",
" except TypeError:\n",
" j = unicodedata.normalize('NFC', j.decode(plexcs.SYS_ENCODING, 'replace'))\n",
" text = text.replace(i, j)\n",
" return text\n",
"\n",
"\n",
"def replace_illegal_chars(string, type=\"file\"):\n",
" if type == \"file\":\n",
" string = re.sub('[\\?\"*:|<>/]', '_', string)\n",
" if type == \"folder\":\n",
" string = re.sub('[:\\?<>\"|]', '_', string)\n",
"\n",
" return string\n",
"\n",
"\n",
"def cleanName(string):\n",
"\n",
" pass1 = latinToAscii(string).lower()\n",
" out_string = re.sub('[\\.\\-\\/\\!\\@\\#\\$\\%\\^\\&\\*\\(\\)\\+\\-\\\"\\'\\,\\;\\:\\[\\]\\{\\}\\<\\>\\=\\_]', '', pass1).encode('utf-8')\n",
"\n",
" return out_string\n",
"\n",
"\n",
"def cleanTitle(title):\n",
"\n",
" title = re.sub('[\\.\\-\\/\\_]', ' ', title).lower()\n",
"\n",
" # Strip out extra whitespace\n",
" title = ' '.join(title.split())\n",
"\n",
" title = title.title()\n",
"\n",
" return title\n",
"\n",
"\n",
"def split_path(f):\n",
" \"\"\"\n",
" Split a path into components, starting with the drive letter (if any). Given\n",
" a path, os.path.join(*split_path(f)) should be path equal to f.\n",
" \"\"\"\n",
"\n",
" components = []\n",
" drive, path = os.path.splitdrive(f)\n",
"\n",
" # Strip the folder from the path, iterate until nothing is left\n",
" while True:\n",
" path, folder = os.path.split(path)\n",
"\n",
" if folder:\n",
" components.append(folder)\n",
" else:\n",
" if path:\n",
" components.append(path)\n",
"\n",
" break\n",
"\n",
" # Append the drive (if any)\n",
" if drive:\n",
" components.append(drive)\n",
"\n",
" # Reverse components\n",
" components.reverse()\n",
"\n",
" # Done\n",
" return components\n",
"\n",
"\n",
"def extract_logline(s):\n",
" # Default log format\n",
" pattern = re.compile(r'(?P<timestamp>.*?)\\s\\-\\s(?P<level>.*?)\\s*\\:\\:\\s(?P<thread>.*?)\\s\\:\\s(?P<message>.*)', re.VERBOSE)\n",
" match = pattern.match(s)\n",
" if match:\n",
" timestamp = match.group(\"timestamp\")\n",
" level = match.group(\"level\")\n",
" thread = match.group(\"thread\")\n",
" message = match.group(\"message\")\n",
" return (timestamp, level, thread, message)\n",
" else:\n",
" return None\n",
"\n",
"\n",
"def split_string(mystring, splitvar=','):\n",
" mylist = []\n",
" for each_word in mystring.split(splitvar):\n",
" mylist.append(each_word.strip())\n",
" return mylist\n",
"\n",
"def create_https_certificates(ssl_cert, ssl_key):\n",
" \"\"\"\n",
" Create a pair of self-signed HTTPS certificares and store in them in\n",
" 'ssl_cert' and 'ssl_key'. Method assumes pyOpenSSL is installed.\n",
"\n",
" This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard).\n",
" \"\"\"\n",
"\n",
" from plexcs import logger\n",
"\n",
" from OpenSSL import crypto\n",
" from certgen import createKeyPair, createCertRequest, createCertificate, \\\n",
" TYPE_RSA, serial\n",
"\n",
" # Create the CA Certificate\n",
" cakey = createKeyPair(TYPE_RSA, 2048)\n",
" careq = createCertRequest(cakey, CN=\"Certificate Authority\")\n",
" cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years\n",
"\n",
" pkey = createKeyPair(TYPE_RSA, 2048)\n",
" req = createCertRequest(pkey, CN=\"Plex:CS\")\n",
" cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years\n",
"\n",
" # Save the key and certificate to disk\n",
" try:\n",
" with open(ssl_key, \"w\") as fp:\n",
" fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))\n",
" with open(ssl_cert, \"w\") as fp:\n",
" fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n",
" except IOError as e:\n",
" logger.error(\"Error creating SSL key and certificate: %s\", e)\n",
" return False\n",
"\n",
" return True\n",
"\n",
"\n",
"def cast_to_float(s):\n",
" try:\n",
" return float(s)\n",
" except ValueError:\n",
" return -1\n",
"\n",
"\n",
"def convert_xml_to_json(xml):\n",
" o = xmltodict.parse(xml)\n",
" return json.dumps(o)\n",
"\n",
"\n",
"def convert_xml_to_dict(xml):\n",
" o = xmltodict.parse(xml)\n",
" return o\n",
"\n",
"\n",
"def get_percent(value1, value2):\n",
"\n",
" if str(value1).isdigit() and str(value2).isdigit():\n",
" value1 = cast_to_float(value1)\n",
" value2 = cast_to_float(value2)\n",
" else:\n",
" return 0\n",
"\n",
" if value1 != 0 and value2 != 0:\n",
" percent = (value1 / value2) * 100\n",
" else:\n",
" percent = 0\n",
"\n",
" return math.trunc(percent)\n",
"\n",
"def parse_xml(unparsed=None):\n",
" from plexcs import logger\n",
"\n",
" if unparsed:\n",
" try:\n",
" xml_parse = minidom.parseString(unparsed)\n",
" return xml_parse\n",
" except Exception as e:\n",
" logger.warn(\"Error parsing XML. %s\" % e)\n",
" return []\n",
" except:\n",
" logger.warn(\"Error parsing XML.\")\n",
" return []\n",
" else:\n",
" logger.warn(\"XML parse request made but no data received.\")\n",
" return []\n",
"\n",
"\"\"\"\n",
"Validate xml keys to make sure they exist and return their attribute value, return blank value is none found\n",
"\"\"\"\n",
"def get_xml_attr(xml_key, attribute, return_bool=False, default_return=''):\n",
" if xml_key.getAttribute(attribute):\n",
" if return_bool:\n",
" return True\n",
" else:\n",
" return xml_key.getAttribute(attribute)\n",
" else:\n",
" if return_bool:\n",
" return False\n",
" else:\n",
" return default_return\n",
"\n",
"def process_json_kwargs(json_kwargs):\n",
" params = {}\n",
" if json_kwargs:\n",
" params = json.loads(json_kwargs)\n",
"\n",
" return params\n",
"\n",
"def sanitize(string):\n",
" if string:\n",
" return unicode(string).replace('<','<').replace('>','>')\n",
" else:\n",
" return ''\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0547945205479452,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0.23893805309734514,
0,
0,
0,
0,
0,
0,
0.07547169811320754,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0.019801980198019802,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0.25,
0.009174311926605505,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0.028169014084507043,
0,
0
] | 438 | 0.002839 | false |
"""
Represents the Depend settings
"""
# Always try to import cElementTree since it's faster if it exists
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import platform
from os.path import join, abspath, exists
from pylib.logwrapper import LogWrapper
from pylib.depend.depsource import DepSource
# XML Settings for Download of Depends
class DependSettings(object):
def __init__(self):
"""Dependency Settings"""
super().__init__()
self.log = LogWrapper.getlogger()
# Path to the config file
self.ConfigPath = None
self.platform = None
# XML Root Tag
self.xmlroot = None
# custom properties
self.DepsDirectory = None
self.ArchiveDirectory = None
self.SoxVersion = None
self.CMakeGenerator = None
# List of Sources
self.sources = []
def read_element(self, tag):
"""Read XML Value Element"""
nextval = next(self.xmlroot.iter(tag), None)
if nextval == None : raise ValueError('Element not found: ' + tag)
return nextval.text
def loadxml(self):
"""Load XML"""
# Load in the xml
tree = ET.ElementTree(file=self.ConfigPath)
self.xmlroot = tree.getroot()
if self.xmlroot.tag != 'Settings':
raise ValueError('Root Element is not Settings')
# Custom Settings
self.DepsDirectory = self.read_element('DepsDirectory')
self.DepsDirectory = abspath(self.DepsDirectory)
self.ArchiveDirectory = self.read_element('ArchiveDirectory')
self.ArchiveDirectory = join(self.DepsDirectory, self.ArchiveDirectory)
self.SoxVersion = self.read_element('SoxVersion')
self.CMakeGenerator = self.read_element('CMakeGenerator')
# Set the Archive directory for downloaded sources
DepSource.ArchiveDir = self.ArchiveDirectory
# Set the root Extract directory for extracting sources
DepSource.RootExtractDir = self.DepsDirectory
# Load in the list of download sources
self.sources = DepSource.parsexml(self.xmlroot)
return
def getdeps(self):
"""Download and Extract Sources"""
for source in self.sources:
self.log.info("")
self.log.info("#####################################################")
# Skip anything already extracted
extractdir = abspath(join(DepSource.RootExtractDir, source.destsubdir))
if exists(extractdir):
self.log.warn("Deps Subdir: " + source.destsubdir + " already exists, skipping")
continue
source.Extracted = False
downloaded = source.download()
if downloaded == False:
self.log.error("Download Failed")
else:
source.Extracted = source.extract()
# Remove the archive file
source.remove_archivefile()
# Re-jig the directories for those that need it
for source in self.sources:
if source.Extracted == True:
source.movetoparent_multiple()
return
def get_configpath(self):
log = LogWrapper.getlogger()
"""Determine which config filename / path to use"""
self.platform = platform.system()
settingsfile = ""
if self.platform == "Windows":
settingsfile = "Settings_win32.xml"
elif self.platform == "Linux":
settingsfile = "Settings_linux.xml"
else:
log.critical("Unsupported platform")
self.ConfigPath = None
self.log.info("Platform identified as: " + self.platform)
self.log.info("Settings file: " + settingsfile)
self.ConfigPath = abspath(settingsfile)
return self.ConfigPath
| [
"\"\"\"\n",
"Represents the Depend settings\n",
"\"\"\"\n",
"\n",
"# Always try to import cElementTree since it's faster if it exists\n",
"try:\n",
" import xml.etree.cElementTree as ET\n",
"except ImportError:\n",
" import xml.etree.ElementTree as ET\n",
"\n",
"import platform\n",
"from os.path import join, abspath, exists\n",
"from pylib.logwrapper import LogWrapper\n",
"from pylib.depend.depsource import DepSource\n",
"\n",
"# XML Settings for Download of Depends\n",
"class DependSettings(object):\n",
"\n",
" def __init__(self):\n",
" \"\"\"Dependency Settings\"\"\"\n",
" super().__init__()\n",
" self.log = LogWrapper.getlogger()\n",
"\n",
" # Path to the config file\n",
" self.ConfigPath = None\n",
" self.platform = None\n",
"\n",
" # XML Root Tag\n",
" self.xmlroot = None\n",
"\n",
" # custom properties\n",
" self.DepsDirectory = None\n",
" self.ArchiveDirectory = None\n",
" self.SoxVersion = None\n",
" self.CMakeGenerator = None\n",
"\n",
" # List of Sources\n",
" self.sources = []\n",
"\n",
" def read_element(self, tag):\n",
" \"\"\"Read XML Value Element\"\"\"\n",
" nextval = next(self.xmlroot.iter(tag), None)\n",
" if nextval == None : raise ValueError('Element not found: ' + tag)\n",
" return nextval.text\n",
"\n",
" def loadxml(self):\n",
" \"\"\"Load XML\"\"\"\n",
" # Load in the xml\n",
" tree = ET.ElementTree(file=self.ConfigPath)\n",
" self.xmlroot = tree.getroot()\n",
" if self.xmlroot.tag != 'Settings':\n",
" raise ValueError('Root Element is not Settings')\n",
"\n",
" # Custom Settings\n",
" self.DepsDirectory = self.read_element('DepsDirectory')\n",
" self.DepsDirectory = abspath(self.DepsDirectory)\n",
" self.ArchiveDirectory = self.read_element('ArchiveDirectory')\n",
" self.ArchiveDirectory = join(self.DepsDirectory, self.ArchiveDirectory)\n",
" self.SoxVersion = self.read_element('SoxVersion')\n",
" self.CMakeGenerator = self.read_element('CMakeGenerator')\n",
"\n",
" # Set the Archive directory for downloaded sources\n",
" DepSource.ArchiveDir = self.ArchiveDirectory\n",
" # Set the root Extract directory for extracting sources\n",
" DepSource.RootExtractDir = self.DepsDirectory\n",
"\n",
" # Load in the list of download sources\n",
" self.sources = DepSource.parsexml(self.xmlroot)\n",
" return\n",
"\n",
" def getdeps(self):\n",
" \"\"\"Download and Extract Sources\"\"\"\n",
" for source in self.sources:\n",
" self.log.info(\"\")\n",
" self.log.info(\"#####################################################\")\n",
"\n",
" # Skip anything already extracted\n",
" extractdir = abspath(join(DepSource.RootExtractDir, source.destsubdir))\n",
" if exists(extractdir):\n",
" self.log.warn(\"Deps Subdir: \" + source.destsubdir + \" already exists, skipping\")\n",
" continue\n",
"\n",
" source.Extracted = False\n",
" downloaded = source.download()\n",
" if downloaded == False:\n",
" self.log.error(\"Download Failed\")\n",
" else:\n",
" source.Extracted = source.extract()\n",
"\n",
" # Remove the archive file\n",
" source.remove_archivefile()\n",
"\n",
" # Re-jig the directories for those that need it\n",
" for source in self.sources:\n",
" if source.Extracted == True:\n",
" source.movetoparent_multiple()\n",
" return\n",
"\n",
" def get_configpath(self):\n",
" log = LogWrapper.getlogger()\n",
" \"\"\"Determine which config filename / path to use\"\"\"\n",
" self.platform = platform.system()\n",
" settingsfile = \"\"\n",
" if self.platform == \"Windows\":\n",
" settingsfile = \"Settings_win32.xml\"\n",
" elif self.platform == \"Linux\":\n",
" settingsfile = \"Settings_linux.xml\"\n",
" else:\n",
" log.critical(\"Unsupported platform\")\n",
" self.ConfigPath = None\n",
" self.log.info(\"Platform identified as: \" + self.platform)\n",
" self.log.info(\"Settings file: \" + settingsfile)\n",
" self.ConfigPath = abspath(settingsfile)\n",
" return self.ConfigPath\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0.011904761904761904,
0,
0.010309278350515464,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 114 | 0.001588 | false |
# coding: UTF-8
# Name: 配置信息
# Author: LYC
# Created: 2014-04-08
import re
class Configuration(object):
"""
配置
"""
UserDeclarator = ":"
VarPrefix = "$"
FuncPrefix = "#"
ConstantPrefix = "$$"
VarRealPrefix = "_"
FuncRealPrefix = "func_"
UnknownNumber = "$$"
AnswerConstant = "$$ans"
AutoNumFunc = "_"
UserVarRegex = re.compile("^\s*(\$[a-z]+\d*)\s*$")
UserFuncRegex = re.compile("^\s*(#[a-z]+\d*)\s*$")
HexRegex = re.compile("0x[0-9a-f]+")
OctRegex = re.compile("0o[0-8]+")
class OPLEVEL(object):
"""
运算符优先级与权值
"""
LBK = 00,
CMM = 10,
ADD = 20,
SUB = 20,
MUL = 30,
DIV = 30,
MOD = 40,
POW = 40,
UOP = 50,
class OPRegex(object):
"""
运算符正则
"""
UOPRegex = re.compile(r"^(\-|\+|[a-z]\w*)$")
NUMRegex = re.compile(r"^(\.|\d)+[jl]?$")
BOPRegex = re.compile(r"^([^\w\(\)\[\]]+|[a-z]+)$")
VARRegex = re.compile(r"^_[a-z_0-9]+$")
LBKRegex = re.compile(r"^[\(\[]$")
RBKRegex = re.compile(r"^[\)\]]$")
NONRegex = re.compile(r"^$") | [
"# coding: UTF-8\n",
"# Name: 配置信息\n",
"# Author: LYC\n",
"# Created: 2014-04-08\n",
"\n",
"import re\n",
"\n",
"class Configuration(object):\n",
" \"\"\"\n",
" 配置\n",
" \"\"\"\n",
" UserDeclarator = \":\"\n",
" VarPrefix = \"$\"\n",
" FuncPrefix = \"#\"\n",
" ConstantPrefix = \"$$\"\n",
"\n",
" VarRealPrefix = \"_\"\n",
" FuncRealPrefix = \"func_\"\n",
"\n",
" UnknownNumber = \"$$\"\n",
" AnswerConstant = \"$$ans\"\n",
"\n",
" AutoNumFunc = \"_\"\n",
"\n",
" UserVarRegex = re.compile(\"^\\s*(\\$[a-z]+\\d*)\\s*$\")\n",
" UserFuncRegex = re.compile(\"^\\s*(#[a-z]+\\d*)\\s*$\")\n",
" HexRegex = re.compile(\"0x[0-9a-f]+\")\n",
" OctRegex = re.compile(\"0o[0-8]+\")\n",
"\n",
"class OPLEVEL(object):\n",
" \"\"\"\n",
" 运算符优先级与权值\n",
" \"\"\"\n",
" LBK = 00,\n",
" CMM = 10,\n",
" ADD = 20,\n",
" SUB = 20,\n",
" MUL = 30,\n",
" DIV = 30,\n",
" MOD = 40,\n",
" POW = 40,\n",
" UOP = 50,\n",
"\n",
"\n",
"class OPRegex(object):\n",
" \"\"\"\n",
" 运算符正则\n",
" \"\"\"\n",
"\n",
" UOPRegex = re.compile(r\"^(\\-|\\+|[a-z]\\w*)$\")\n",
" NUMRegex = re.compile(r\"^(\\.|\\d)+[jl]?$\")\n",
" BOPRegex = re.compile(r\"^([^\\w\\(\\)\\[\\]]+|[a-z]+)$\")\n",
" VARRegex = re.compile(r\"^_[a-z_0-9]+$\")\n",
" LBKRegex = re.compile(r\"^[\\(\\[]$\")\n",
" RBKRegex = re.compile(r\"^[\\)\\]]$\")\n",
" NONRegex = re.compile(r\"^$\")"
] | [
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07272727272727272,
0.05454545454545454,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125
] | 56 | 0.004223 | false |
# -*- coding: utf-8 -*-
"""
Region - descendant class of BaseRegion.
"""
import datetime
import os
import time
from common_exceptions import FailExit, FindFailed
from Location import Location
from BaseRegion import BaseRegion, logger, DELAY_BETWEEN_CV_ATTEMPT
from Match import Match
from Pattern import Pattern
from Screen import Screen
from Settings import settings
class Region(BaseRegion):
def offset(self, *args, **kwargs):
"""
Return area moved relates to self.
Option 1 (Sikuli-like):
loc_offs := args[0] - Location type; where to move; (w,h) don't change
Option 2:
x_offs := args[0] - int; where to move by x
y_offs := args[1] - int; where to move by y
(w,h) don't change
"""
if len(kwargs) != 0:
raise FailExit('Unknown keys in kwargs = %s' % str(kwargs))
offset_title = 'Offset of {}'.format(self.title)
if len(args) == 2 and \
(isinstance(args[0], int) or isinstance(args[0], float)) and \
(isinstance(args[1], int) or isinstance(args[1], float)):
return Region(self.x + int(args[0]),
self.y + int(args[1]),
self.w, self.h, find_timeout=self._find_timeout,
title=offset_title)
elif len(args) == 1 and isinstance(args[0], Location):
return Region(self.x + args[0].x,
self.y + args[0].y,
self.w, self.h, find_timeout=self._find_timeout,
title=offset_title)
else:
raise FailExit('Incorrect offset() method call:\n\targs = {}'.format(args))
def right(self, length=None):
"""
Return right area relates to self, do not including self.
Height of new area is equal to height of self.
Width of new area is equal to 'length' or till the end of the screen
"""
right_title = 'Region right of {}'.format(self.title)
try:
if length is None:
scr = Region(*Screen(self.screen_number).area)
reg = Region(self.x + self.w, self.y,
(scr.x + scr.w - 1) - (self.x + self.w) + 1,
self.h, find_timeout=self._find_timeout,
title=right_title)
elif isinstance(length, int) and length > 0:
reg = Region(self.x + self.w, self.y,
length,
self.h, find_timeout=self._find_timeout,
title=right_title)
else:
raise FailExit('Incorrect length: type is {type}; value is {length}'.format(
typr=str(type(length)), length=str(length)))
except FailExit:
raise FailExit('Incorrect right() method call:\n\tlength = {length}'.format(
length=length))
return reg
def left(self, length=None):
"""
Return left area relates to self, do not including self.
Height of new area is equal to height of self.
Width of new area is equal to 'length' or till the end of the screen
if 'length' is not set
"""
left_title = 'Region left of {}'.format(self.title)
try:
if length is None:
scr = Region(*Screen(self.screen_number).area)
reg = Region(scr.x, self.y, (self.x - 1) - scr.x + 1,
self.h, find_timeout=self._find_timeout,
title=left_title)
elif isinstance(length, int) and length > 0:
reg = Region(self.x - length, self.y, length,
self.h, find_timeout=self._find_timeout,
title=left_title)
else:
raise FailExit('Incorrect length: type is {type}; value is {length}'.format(
typr=str(type(length)), length=str(length)))
except FailExit:
raise FailExit('Incorrect left() method call:\n\tlength = {length}'.format(
length=length))
return reg
def above(self, length=None):
"""
Return top area relates to self, do not including self.
Width of new area is equal to width of self.
Height of new area is equal to 'length' or till the end of the screen
if 'length' is not set
"""
try:
if length is None:
scr = Region(*Screen(self.screen_number).area)
reg = Region(self.x, scr.y, self.w, (self.y - 1) - scr.y + 1,
find_timeout=self._find_timeout,
title='Region top of %s' % self.title)
elif isinstance(length, int) and length > 0:
reg = Region(self.x, self.y - length, self.w, length,
find_timeout=self._find_timeout,
title='Region top of %s' % self.title)
else:
raise FailExit('Incorrect length: type is {type}; value is {length}'.format(
typr=str(type(length)), length=str(length)))
except FailExit:
raise FailExit('Incorrect above() method call:\n\tlength = {length}'.format(
length=length))
return reg
def below(self, length=None):
"""
Return bottom area relates to self, do not including self.
Width of new area is equal to width of self.
Height of new area is equal to 'length' or till the end of the screen
if 'length' is not set
"""
try:
if length is None:
scr = Region(*Screen(self.screen_number).area)
reg = Region(self.x, self.y + self.h, self.w, (scr.y + scr.h - 1) - (self.y + self.h) + 1,
find_timeout=self._find_timeout,
title='Region bottom of %s' % self.title)
elif isinstance(length, int) and length > 0:
reg = Region(self.x, self.y + self.h, self.w, length,
find_timeout=self._find_timeout,
title='Region bottom of %s' % self.title)
else:
raise FailExit('Incorrect length: type is {type}; value is {length}'.format(
typr=str(type(length)), length=str(length)))
except FailExit:
raise FailExit('Incorrect below() method call:\n\tlength = {length}'.format(
length=length))
return reg
def nearby(self, length=0):
"""
Return area around self, including self.
"""
try:
if isinstance(length, int) and ((length >= 0) or
(length < 0 and
(-2 * length) < self.w and
(-2 * length) < self.h)):
reg = Region(self.x - length, self.y - length, self.w + 2 * length,
self.h + 2 * length, find_timeout=self._find_timeout,
title='Nearby region of {}'.format(self.title))
else:
raise FailExit('Incorrect length: type is {type}; value is {length}'.format(
typr=str(type(length)), length=str(length)))
except FailExit:
raise FailExit('Incorrect nearby() method call:\n\tlength = {length}'.format(
length=length))
return reg
def find_all(self, pattern, delay_before=0):
err_msg = 'Incorrect find_all() method call:' \
'\n\tpattern = {pattern}\n\tdelay_before = {delay}'.format(
pattern=str(pattern).split(os.pathsep)[-1], delay=delay_before)
try:
delay_before = float(delay_before)
except ValueError:
raise FailExit(err_msg)
if isinstance(pattern, str):
pattern = Pattern(pattern)
if not isinstance(pattern, Pattern):
raise FailExit(err_msg)
time.sleep(delay_before)
results = self._find(pattern, self.search_area)
self._last_match = map(lambda pt: Match(pt[0], pt[1],
pattern.get_w, pattern.get_h,
pt[2], pattern), results)
logger.info('total found {count} matches of "{file}"'.format(
count=len(self._last_match), file=pattern.get_filename(full_path=False)))
return self._last_match
def _wait_for_appear_or_vanish(self, pattern, timeout, condition):
"""
pattern - could be String or List.
If isinstance(pattern, list), the first element will return.
It can be used when it's necessary to find one of the several images
"""
fail_exit_text = 'bad "pattern" argument; it should be a string (path to image file) or Pattern object: {}'
if not isinstance(pattern, list):
pattern = [pattern]
for (_i, p) in enumerate(pattern):
if isinstance(p, str):
pattern[_i] = Pattern(p)
elif not isinstance(p, Pattern):
raise FailExit(fail_exit_text.format(p))
if timeout is None:
timeout = self._find_timeout
else:
try:
timeout = float(timeout)
if timeout < 0:
raise ValueError
except ValueError:
raise FailExit('Incorrect argument: timeout = {}'.format(timeout))
prev_field = None
elaps_time = 0
while True:
if prev_field is None or (prev_field != self.search_area).all():
for ptn in pattern:
results = self._find(ptn, self.search_area)
if condition == 'appear':
if len(results) != 0:
# Found something. Choose one result with best 'score'.
# If several results has the same 'score' a first found result will choose
res = max(results, key=lambda x: x[2])
logger.info(' "%s" has been found in(%i, %i)' % (
ptn.get_filename(full_path=False), res[0], res[1]))
return Match(int(res[0] / self.scaling_factor),
int(res[1] / self.scaling_factor),
int(ptn.get_w / self.scaling_factor),
int(ptn.get_h / self.scaling_factor),
res[2], ptn)
elif condition == 'vanish':
if len(results) == 0:
logger.info('"{}" has vanished'.format(ptn.get_filename(full_path=False)))
return
else:
raise FailExit('unknown condition: "{}"'.format(condition))
time.sleep(DELAY_BETWEEN_CV_ATTEMPT)
elaps_time += DELAY_BETWEEN_CV_ATTEMPT
if elaps_time >= timeout:
logger.warning('{} hasn`t been found'.format(ptn.get_filename(full_path=False)))
failed_images = ', '.join(map(lambda _p: _p.get_filename(full_path=False), pattern))
raise FindFailed('Unable to find "{file}" in {region}'.format(
file=failed_images, region=str(self)))
def find(self, image_path, timeout=None, similarity=settings.min_similarity,
exception_on_find_fail=True):
"""
Waits for pattern appear during timeout (in seconds)
if timeout = 0 - one search iteration will perform
if timeout = None - default value will use
Returns Region if pattern found.
If pattern did not found returns None if exception_on_find_fail is False
else raises FindFailed exception
"""
logger.info(' try to find "{img}" with similarity {s}'.format(
img=str(image_path).split(os.path.sep)[-1], s=similarity))
try:
self._last_match = self._wait_for_appear_or_vanish(Pattern(image_path, similarity), timeout, 'appear')
except FailExit:
self._last_match = None
raise
except FindFailed as ex:
if exception_on_find_fail:
self.save_as_jpg(os.path.join(
settings.find_failed_dir,
'%s_%s.jpg' % (datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
str(image_path).split('/')[-1])))
raise ex
else:
return None
else:
return self._last_match
def wait_vanish(self, image_path, timeout=None, similarity=settings.min_similarity):
"""
Waits for pattern vanish during timeout (in seconds).
If pattern already vanished before method call it return True
if timeout = 0 - one search iteration will perform
if timeout = None - default value will use
"""
logger.info('Check if "{file}" vanish during {t} with similarity {s}'.format(
file=str(image_path).split(os.path.sep)[-1],
t=timeout if timeout else str(self._find_timeout),
s=similarity))
try:
self._wait_for_appear_or_vanish(Pattern(image_path, similarity), timeout, 'vanish')
except FailExit:
raise FailExit('Incorrect wait_vanish() method call:'
'\n\tregion = {region}\n\timage_path = {path}\n\ttimeout = {t}'.format(
region=str(self), path=image_path, t=timeout))
except FindFailed:
logger.info('"{}" not vanished'.format(str(image_path).split(os.path.sep)[-1]))
return False
else:
logger.info('"{}" vanished'.format(str(image_path).split(os.path.sep)[-1]))
return True
finally:
self._last_match = None
def exists(self, image_path):
self._last_match = None
try:
self._last_match = self._wait_for_appear_or_vanish(image_path, 0, 'appear')
except FailExit:
raise FailExit('Incorrect exists() method call:'
'\n\tregion = {region}\n\timage_path = {path}'.format(
region=str(self), path=image_path))
except FindFailed:
return False
else:
return True
def wait(self, image_path=None, timeout=None):
"""
For compatibility with Sikuli.
Wait for pattern appear or just wait
"""
if image_path is None:
if timeout:
time.sleep(timeout)
else:
try:
self._last_match = self._wait_for_appear_or_vanish(image_path, timeout, 'appear')
except FailExit:
self._last_match = None
raise FailExit('Incorrect wait() method call:'
'\n\tregion = {region}\n\timage_path = {path}\n\ttimeout = {t}'.format(
region=str(self), path=image_path, t=timeout))
else:
return self._last_match
| [
"# -*- coding: utf-8 -*-\r\n",
"\r\n",
"\"\"\"\r\n",
" Region - descendant class of BaseRegion.\r\n",
"\"\"\"\r\n",
"\r\n",
"import datetime\r\n",
"import os\r\n",
"import time\r\n",
"\r\n",
"from common_exceptions import FailExit, FindFailed\r\n",
"from Location import Location\r\n",
"from BaseRegion import BaseRegion, logger, DELAY_BETWEEN_CV_ATTEMPT\r\n",
"from Match import Match\r\n",
"from Pattern import Pattern\r\n",
"from Screen import Screen\r\n",
"from Settings import settings\r\n",
"\r\n",
"\r\n",
"class Region(BaseRegion):\r\n",
" def offset(self, *args, **kwargs):\r\n",
" \"\"\"\r\n",
" Return area moved relates to self.\r\n",
" Option 1 (Sikuli-like):\r\n",
" loc_offs := args[0] - Location type; where to move; (w,h) don't change\r\n",
" Option 2:\r\n",
" x_offs := args[0] - int; where to move by x\r\n",
" y_offs := args[1] - int; where to move by y\r\n",
" (w,h) don't change\r\n",
" \"\"\"\r\n",
" if len(kwargs) != 0:\r\n",
" raise FailExit('Unknown keys in kwargs = %s' % str(kwargs))\r\n",
"\r\n",
" offset_title = 'Offset of {}'.format(self.title)\r\n",
" if len(args) == 2 and \\\r\n",
" (isinstance(args[0], int) or isinstance(args[0], float)) and \\\r\n",
" (isinstance(args[1], int) or isinstance(args[1], float)):\r\n",
" return Region(self.x + int(args[0]),\r\n",
" self.y + int(args[1]),\r\n",
" self.w, self.h, find_timeout=self._find_timeout,\r\n",
" title=offset_title)\r\n",
" elif len(args) == 1 and isinstance(args[0], Location):\r\n",
" return Region(self.x + args[0].x,\r\n",
" self.y + args[0].y,\r\n",
" self.w, self.h, find_timeout=self._find_timeout,\r\n",
" title=offset_title)\r\n",
" else:\r\n",
" raise FailExit('Incorrect offset() method call:\\n\\targs = {}'.format(args))\r\n",
"\r\n",
" def right(self, length=None):\r\n",
" \"\"\"\r\n",
" Return right area relates to self, do not including self.\r\n",
" Height of new area is equal to height of self.\r\n",
" Width of new area is equal to 'length' or till the end of the screen\r\n",
" \"\"\"\r\n",
" right_title = 'Region right of {}'.format(self.title)\r\n",
" try:\r\n",
" if length is None:\r\n",
" scr = Region(*Screen(self.screen_number).area)\r\n",
" reg = Region(self.x + self.w, self.y,\r\n",
" (scr.x + scr.w - 1) - (self.x + self.w) + 1,\r\n",
" self.h, find_timeout=self._find_timeout,\r\n",
" title=right_title)\r\n",
" elif isinstance(length, int) and length > 0:\r\n",
" reg = Region(self.x + self.w, self.y,\r\n",
" length,\r\n",
" self.h, find_timeout=self._find_timeout,\r\n",
" title=right_title)\r\n",
" else:\r\n",
" raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n",
" typr=str(type(length)), length=str(length)))\r\n",
" except FailExit:\r\n",
" raise FailExit('Incorrect right() method call:\\n\\tlength = {length}'.format(\r\n",
" length=length))\r\n",
" return reg\r\n",
"\r\n",
" def left(self, length=None):\r\n",
" \"\"\"\r\n",
" Return left area relates to self, do not including self.\r\n",
" Height of new area is equal to height of self.\r\n",
" Width of new area is equal to 'length' or till the end of the screen\r\n",
" if 'length' is not set\r\n",
" \"\"\"\r\n",
" left_title = 'Region left of {}'.format(self.title)\r\n",
" try:\r\n",
" if length is None:\r\n",
" scr = Region(*Screen(self.screen_number).area)\r\n",
" reg = Region(scr.x, self.y, (self.x - 1) - scr.x + 1,\r\n",
" self.h, find_timeout=self._find_timeout,\r\n",
" title=left_title)\r\n",
" elif isinstance(length, int) and length > 0:\r\n",
" reg = Region(self.x - length, self.y, length,\r\n",
" self.h, find_timeout=self._find_timeout,\r\n",
" title=left_title)\r\n",
" else:\r\n",
" raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n",
" typr=str(type(length)), length=str(length)))\r\n",
" except FailExit:\r\n",
" raise FailExit('Incorrect left() method call:\\n\\tlength = {length}'.format(\r\n",
" length=length))\r\n",
" return reg\r\n",
"\r\n",
" def above(self, length=None):\r\n",
" \"\"\"\r\n",
" Return top area relates to self, do not including self.\r\n",
" Width of new area is equal to width of self.\r\n",
" Height of new area is equal to 'length' or till the end of the screen\r\n",
" if 'length' is not set\r\n",
" \"\"\"\r\n",
" try:\r\n",
" if length is None:\r\n",
" scr = Region(*Screen(self.screen_number).area)\r\n",
" reg = Region(self.x, scr.y, self.w, (self.y - 1) - scr.y + 1,\r\n",
" find_timeout=self._find_timeout,\r\n",
" title='Region top of %s' % self.title)\r\n",
" elif isinstance(length, int) and length > 0:\r\n",
" reg = Region(self.x, self.y - length, self.w, length,\r\n",
" find_timeout=self._find_timeout,\r\n",
" title='Region top of %s' % self.title)\r\n",
" else:\r\n",
" raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n",
" typr=str(type(length)), length=str(length)))\r\n",
" except FailExit:\r\n",
" raise FailExit('Incorrect above() method call:\\n\\tlength = {length}'.format(\r\n",
" length=length))\r\n",
" return reg\r\n",
"\r\n",
" def below(self, length=None):\r\n",
" \"\"\"\r\n",
" Return bottom area relates to self, do not including self.\r\n",
" Width of new area is equal to width of self.\r\n",
" Height of new area is equal to 'length' or till the end of the screen\r\n",
" if 'length' is not set\r\n",
" \"\"\"\r\n",
" try:\r\n",
" if length is None:\r\n",
" scr = Region(*Screen(self.screen_number).area)\r\n",
" reg = Region(self.x, self.y + self.h, self.w, (scr.y + scr.h - 1) - (self.y + self.h) + 1,\r\n",
" find_timeout=self._find_timeout,\r\n",
" title='Region bottom of %s' % self.title)\r\n",
" elif isinstance(length, int) and length > 0:\r\n",
" reg = Region(self.x, self.y + self.h, self.w, length,\r\n",
" find_timeout=self._find_timeout,\r\n",
" title='Region bottom of %s' % self.title)\r\n",
" else:\r\n",
" raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n",
" typr=str(type(length)), length=str(length)))\r\n",
" except FailExit:\r\n",
" raise FailExit('Incorrect below() method call:\\n\\tlength = {length}'.format(\r\n",
" length=length))\r\n",
" return reg\r\n",
"\r\n",
" def nearby(self, length=0):\r\n",
" \"\"\"\r\n",
" Return area around self, including self.\r\n",
" \"\"\"\r\n",
" try:\r\n",
" if isinstance(length, int) and ((length >= 0) or\r\n",
" (length < 0 and\r\n",
" (-2 * length) < self.w and\r\n",
" (-2 * length) < self.h)):\r\n",
" reg = Region(self.x - length, self.y - length, self.w + 2 * length,\r\n",
" self.h + 2 * length, find_timeout=self._find_timeout,\r\n",
" title='Nearby region of {}'.format(self.title))\r\n",
" else:\r\n",
" raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n",
" typr=str(type(length)), length=str(length)))\r\n",
" except FailExit:\r\n",
" raise FailExit('Incorrect nearby() method call:\\n\\tlength = {length}'.format(\r\n",
" length=length))\r\n",
" return reg\r\n",
"\r\n",
" def find_all(self, pattern, delay_before=0):\r\n",
" err_msg = 'Incorrect find_all() method call:' \\\r\n",
" '\\n\\tpattern = {pattern}\\n\\tdelay_before = {delay}'.format(\r\n",
" pattern=str(pattern).split(os.pathsep)[-1], delay=delay_before)\r\n",
" try:\r\n",
" delay_before = float(delay_before)\r\n",
" except ValueError:\r\n",
" raise FailExit(err_msg)\r\n",
"\r\n",
" if isinstance(pattern, str):\r\n",
" pattern = Pattern(pattern)\r\n",
" if not isinstance(pattern, Pattern):\r\n",
" raise FailExit(err_msg)\r\n",
"\r\n",
" time.sleep(delay_before)\r\n",
" results = self._find(pattern, self.search_area)\r\n",
" self._last_match = map(lambda pt: Match(pt[0], pt[1],\r\n",
" pattern.get_w, pattern.get_h,\r\n",
" pt[2], pattern), results)\r\n",
" logger.info('total found {count} matches of \"{file}\"'.format(\r\n",
" count=len(self._last_match), file=pattern.get_filename(full_path=False)))\r\n",
" return self._last_match\r\n",
"\r\n",
" def _wait_for_appear_or_vanish(self, pattern, timeout, condition):\r\n",
" \"\"\"\r\n",
" pattern - could be String or List.\r\n",
" If isinstance(pattern, list), the first element will return.\r\n",
" It can be used when it's necessary to find one of the several images\r\n",
" \"\"\"\r\n",
" fail_exit_text = 'bad \"pattern\" argument; it should be a string (path to image file) or Pattern object: {}'\r\n",
"\r\n",
" if not isinstance(pattern, list):\r\n",
" pattern = [pattern]\r\n",
"\r\n",
" for (_i, p) in enumerate(pattern):\r\n",
" if isinstance(p, str):\r\n",
" pattern[_i] = Pattern(p)\r\n",
" elif not isinstance(p, Pattern):\r\n",
" raise FailExit(fail_exit_text.format(p))\r\n",
"\r\n",
" if timeout is None:\r\n",
" timeout = self._find_timeout\r\n",
" else:\r\n",
" try:\r\n",
" timeout = float(timeout)\r\n",
" if timeout < 0:\r\n",
" raise ValueError\r\n",
" except ValueError:\r\n",
" raise FailExit('Incorrect argument: timeout = {}'.format(timeout))\r\n",
"\r\n",
" prev_field = None\r\n",
" elaps_time = 0\r\n",
" while True:\r\n",
" if prev_field is None or (prev_field != self.search_area).all():\r\n",
" for ptn in pattern:\r\n",
" results = self._find(ptn, self.search_area)\r\n",
" if condition == 'appear':\r\n",
" if len(results) != 0:\r\n",
" # Found something. Choose one result with best 'score'.\r\n",
" # If several results has the same 'score' a first found result will choose\r\n",
" res = max(results, key=lambda x: x[2])\r\n",
" logger.info(' \"%s\" has been found in(%i, %i)' % (\r\n",
" ptn.get_filename(full_path=False), res[0], res[1]))\r\n",
" return Match(int(res[0] / self.scaling_factor),\r\n",
" int(res[1] / self.scaling_factor),\r\n",
" int(ptn.get_w / self.scaling_factor),\r\n",
" int(ptn.get_h / self.scaling_factor),\r\n",
" res[2], ptn)\r\n",
" elif condition == 'vanish':\r\n",
" if len(results) == 0:\r\n",
" logger.info('\"{}\" has vanished'.format(ptn.get_filename(full_path=False)))\r\n",
" return\r\n",
" else:\r\n",
" raise FailExit('unknown condition: \"{}\"'.format(condition))\r\n",
"\r\n",
" time.sleep(DELAY_BETWEEN_CV_ATTEMPT)\r\n",
" elaps_time += DELAY_BETWEEN_CV_ATTEMPT\r\n",
" if elaps_time >= timeout:\r\n",
" logger.warning('{} hasn`t been found'.format(ptn.get_filename(full_path=False)))\r\n",
" failed_images = ', '.join(map(lambda _p: _p.get_filename(full_path=False), pattern))\r\n",
" raise FindFailed('Unable to find \"{file}\" in {region}'.format(\r\n",
" file=failed_images, region=str(self)))\r\n",
"\r\n",
" def find(self, image_path, timeout=None, similarity=settings.min_similarity,\r\n",
" exception_on_find_fail=True):\r\n",
" \"\"\"\r\n",
" Waits for pattern appear during timeout (in seconds)\r\n",
" if timeout = 0 - one search iteration will perform\r\n",
" if timeout = None - default value will use\r\n",
"\r\n",
" Returns Region if pattern found.\r\n",
" If pattern did not found returns None if exception_on_find_fail is False\r\n",
" else raises FindFailed exception\r\n",
" \"\"\"\r\n",
" logger.info(' try to find \"{img}\" with similarity {s}'.format(\r\n",
" img=str(image_path).split(os.path.sep)[-1], s=similarity))\r\n",
" try:\r\n",
" self._last_match = self._wait_for_appear_or_vanish(Pattern(image_path, similarity), timeout, 'appear')\r\n",
" except FailExit:\r\n",
" self._last_match = None\r\n",
" raise\r\n",
" except FindFailed as ex:\r\n",
" if exception_on_find_fail:\r\n",
" self.save_as_jpg(os.path.join(\r\n",
" settings.find_failed_dir,\r\n",
" '%s_%s.jpg' % (datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"),\r\n",
" str(image_path).split('/')[-1])))\r\n",
" raise ex\r\n",
" else:\r\n",
" return None\r\n",
" else:\r\n",
" return self._last_match\r\n",
"\r\n",
" def wait_vanish(self, image_path, timeout=None, similarity=settings.min_similarity):\r\n",
" \"\"\"\r\n",
" Waits for pattern vanish during timeout (in seconds).\r\n",
" If pattern already vanished before method call it return True\r\n",
"\r\n",
" if timeout = 0 - one search iteration will perform\r\n",
" if timeout = None - default value will use\r\n",
" \"\"\"\r\n",
" logger.info('Check if \"{file}\" vanish during {t} with similarity {s}'.format(\r\n",
" file=str(image_path).split(os.path.sep)[-1],\r\n",
" t=timeout if timeout else str(self._find_timeout),\r\n",
" s=similarity))\r\n",
" try:\r\n",
" self._wait_for_appear_or_vanish(Pattern(image_path, similarity), timeout, 'vanish')\r\n",
" except FailExit:\r\n",
" raise FailExit('Incorrect wait_vanish() method call:'\r\n",
" '\\n\\tregion = {region}\\n\\timage_path = {path}\\n\\ttimeout = {t}'.format(\r\n",
" region=str(self), path=image_path, t=timeout))\r\n",
" except FindFailed:\r\n",
" logger.info('\"{}\" not vanished'.format(str(image_path).split(os.path.sep)[-1]))\r\n",
" return False\r\n",
" else:\r\n",
" logger.info('\"{}\" vanished'.format(str(image_path).split(os.path.sep)[-1]))\r\n",
" return True\r\n",
" finally:\r\n",
" self._last_match = None\r\n",
"\r\n",
" def exists(self, image_path):\r\n",
" self._last_match = None\r\n",
" try:\r\n",
" self._last_match = self._wait_for_appear_or_vanish(image_path, 0, 'appear')\r\n",
" except FailExit:\r\n",
" raise FailExit('Incorrect exists() method call:'\r\n",
" '\\n\\tregion = {region}\\n\\timage_path = {path}'.format(\r\n",
" region=str(self), path=image_path))\r\n",
" except FindFailed:\r\n",
" return False\r\n",
" else:\r\n",
" return True\r\n",
"\r\n",
" def wait(self, image_path=None, timeout=None):\r\n",
" \"\"\"\r\n",
" For compatibility with Sikuli.\r\n",
" Wait for pattern appear or just wait\r\n",
" \"\"\"\r\n",
" if image_path is None:\r\n",
" if timeout:\r\n",
" time.sleep(timeout)\r\n",
" else:\r\n",
" try:\r\n",
" self._last_match = self._wait_for_appear_or_vanish(image_path, timeout, 'appear')\r\n",
" except FailExit:\r\n",
" self._last_match = None\r\n",
" raise FailExit('Incorrect wait() method call:'\r\n",
" '\\n\\tregion = {region}\\n\\timage_path = {path}\\n\\ttimeout = {t}'.format(\r\n",
" region=str(self), path=image_path, t=timeout))\r\n",
" else:\r\n",
" return self._last_match\r\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0.011904761904761904,
0,
0,
0.010638297872340425,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0.011904761904761904,
0.010869565217391304,
0,
0.008547008547008548,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0.009615384615384616,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0.01020408163265306,
0.00980392156862745,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0.008620689655172414,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0.01,
0,
0,
0.010752688172043012,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0.009615384615384616,
0.012048192771084338,
0,
0
] | 343 | 0.001366 | false |
# Used for perf measurements. Defines a simple function and makes 100 calls in a row.
# I ran 10K calls, total 1 million invocations, total time is around 250ms on my laptop (64bit 2.7GHz, Release mode, running .exe directly, no debugger);
# 0.4 microseconds per call.
m = 5
def addm(x, y):
x + y + m
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
x = addm(1, 2)
x = addm(2, 3)
x = addm(3, 4)
x = addm(4, 5)
x = addm(5, 6)
x = addm(6, 7)
x = addm(7, 8)
x = addm(8, 9)
x = addm(9, 10)
x = addm(10, 11)
| [
"# Used for perf measurements. Defines a simple function and makes 100 calls in a row.\n",
"# I ran 10K calls, total 1 million invocations, total time is around 250ms on my laptop (64bit 2.7GHz, Release mode, running .exe directly, no debugger);\n",
"# 0.4 microseconds per call. \n",
"\n",
"m = 5\n",
"\n",
"def addm(x, y):\n",
" x + y + m\n",
" \n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n",
"x = addm(1, 2)\n",
"x = addm(2, 3)\n",
"x = addm(3, 4)\n",
"x = addm(4, 5)\n",
"x = addm(5, 6)\n",
"x = addm(6, 7)\n",
"x = addm(7, 8)\n",
"x = addm(8, 9)\n",
"x = addm(9, 10)\n",
"x = addm(10, 11)\n",
"\n"
] | [
0.011627906976744186,
0.006493506493506494,
0.03333333333333333,
0,
0,
0,
0.0625,
0.08333333333333333,
0.3333333333333333,
0.06666666666666667,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1
] | 119 | 0.013423 | false |
from django import template
register = template.Library()
"""
Will be included in 1.4
"""
@register.filter()
def truncatechars(value, length):
"""
truncate after a certain number of characters,
if the last character is not space truncate at the next space
Edit by Taco: Now adds three dots at the end, and takes those dots
in to account when checking where to truncate and add the dots.
"""
le = length-3
if value == None:
return value
if len(value) > length:
try:
if value[le] == " ":
return '{0}...'.format(value[:le])
else:
while value[le] != " ":
le += 1
else:
return '{0}...'.format(value[:le])
except IndexError:
return value[:length]
else:
return value
def truncatewords(value, length):
"""
truncate after a certain number of characters,
if the last character is not space truncate at the next space
Edit by Taco: Now adds three dots at the end, and takes those dots
in to account when checking where to truncate and add the dots.
"""
if value == None or value == '':
return value
value_array = []
try:
value_array = str(value).lsplit(' ')
except Exception:
return value
try:
return_value = value_array[0]
le = 0
while le < length:
return_value += value_array[le]
le += 1
return_value += '...'
return return_value
except IndexError:
return value | [
"from django import template\n",
"\n",
"register = template.Library()\n",
"\n",
"\"\"\"\n",
" Will be included in 1.4\n",
"\"\"\"\n",
"\n",
"@register.filter()\n",
"def truncatechars(value, length):\n",
" \"\"\"\n",
" truncate after a certain number of characters,\n",
" if the last character is not space truncate at the next space\n",
" \n",
" Edit by Taco: Now adds three dots at the end, and takes those dots\n",
" in to account when checking where to truncate and add the dots.\n",
" \"\"\"\n",
" le = length-3\n",
" if value == None:\n",
" return value\n",
" if len(value) > length:\n",
" try:\n",
" if value[le] == \" \":\n",
" return '{0}...'.format(value[:le])\n",
" else:\n",
" while value[le] != \" \":\n",
" le += 1\n",
" else:\n",
" return '{0}...'.format(value[:le])\n",
"\n",
" except IndexError:\n",
" return value[:length]\n",
" else:\n",
" return value\n",
"\n",
"def truncatewords(value, length):\n",
" \"\"\"\n",
" truncate after a certain number of characters,\n",
" if the last character is not space truncate at the next space\n",
" \n",
" Edit by Taco: Now adds three dots at the end, and takes those dots\n",
" in to account when checking where to truncate and add the dots.\n",
" \"\"\"\n",
" if value == None or value == '':\n",
" return value\n",
" value_array = []\n",
" try:\n",
" value_array = str(value).lsplit(' ')\n",
" except Exception:\n",
" return value\n",
" \n",
" try:\n",
" return_value = value_array[0]\n",
" le = 0\n",
" while le < length:\n",
" return_value += value_array[le]\n",
" le += 1\n",
" return_value += '...'\n",
" return return_value\n",
" except IndexError:\n",
" return value"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0.2,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0,
0.05
] | 61 | 0.010695 | false |
#!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: a hack to generate XML containing CDATA by ElementTree
# Created: 26.05.2012
# Copyright (C) 2012, Manfred Moitzi
# License: GPLv3
# usage:
#
# from svgwrite.etree import etree, CDATA
#
# element = etree.Element('myTag')
# element.append(CDATA("< and >"))
#
# assert etree.tostring(element) == "<myTag><![CDATA[< and >]]></myTag>"
import sys
PY3 = sys.version_info[0] > 2
import xml.etree.ElementTree as etree
CDATA_TPL = "<![CDATA[%s]]>"
CDATA_TAG = CDATA_TPL
def CDATA(text):
element = etree.Element(CDATA_TAG)
element.text = text
return element
try:
original_serialize_xml = etree._serialize_xml
except AttributeError, e:
print 'etree patch error', str(e)
if PY3:
def _serialize_xml_with_CDATA_support(write, elem, qnames, namespaces):
if elem.tag == CDATA_TAG:
write(CDATA_TPL % elem.text)
else:
original_serialize_xml(write, elem, qnames, namespaces)
else:
def _serialize_xml_with_CDATA_support(write, elem, encoding, qnames, namespaces):
if elem.tag == CDATA_TAG:
write(CDATA_TPL % elem.text.encode(encoding))
else:
original_serialize_xml(write, elem, encoding, qnames, namespaces)
# ugly, ugly, ugly patching
try:
etree._serialize_xml = _serialize_xml_with_CDATA_support
except AttributeError, e:
print 'etree patch error', str(e)
| [
"#!/usr/bin/env python\r\n",
"#coding:utf-8\r\n",
"# Author: mozman\r\n",
"# Purpose: a hack to generate XML containing CDATA by ElementTree\r\n",
"# Created: 26.05.2012\r\n",
"# Copyright (C) 2012, Manfred Moitzi\r\n",
"# License: GPLv3\r\n",
"\r\n",
"# usage:\r\n",
"#\r\n",
"# from svgwrite.etree import etree, CDATA\r\n",
"#\r\n",
"# element = etree.Element('myTag')\r\n",
"# element.append(CDATA(\"< and >\"))\r\n",
"#\r\n",
"# assert etree.tostring(element) == \"<myTag><![CDATA[< and >]]></myTag>\"\r\n",
"\r\n",
"\r\n",
"import sys\r\n",
"PY3 = sys.version_info[0] > 2\r\n",
"\r\n",
"import xml.etree.ElementTree as etree\r\n",
"\r\n",
"CDATA_TPL = \"<![CDATA[%s]]>\"\r\n",
"CDATA_TAG = CDATA_TPL\r\n",
"\r\n",
"\r\n",
"def CDATA(text):\r\n",
" element = etree.Element(CDATA_TAG)\r\n",
" element.text = text\r\n",
" return element\r\n",
"\r\n",
"try:\r\n",
" original_serialize_xml = etree._serialize_xml\r\n",
"except AttributeError, e:\r\n",
" print 'etree patch error', str(e)\r\n",
"\r\n",
"if PY3:\r\n",
" def _serialize_xml_with_CDATA_support(write, elem, qnames, namespaces):\r\n",
" if elem.tag == CDATA_TAG:\r\n",
" write(CDATA_TPL % elem.text)\r\n",
" else:\r\n",
" original_serialize_xml(write, elem, qnames, namespaces)\r\n",
"else:\r\n",
" def _serialize_xml_with_CDATA_support(write, elem, encoding, qnames, namespaces):\r\n",
" if elem.tag == CDATA_TAG:\r\n",
" write(CDATA_TPL % elem.text.encode(encoding))\r\n",
" else:\r\n",
" original_serialize_xml(write, elem, encoding, qnames, namespaces)\r\n",
"\r\n",
"# ugly, ugly, ugly patching\r\n",
"try:\r\n",
" etree._serialize_xml = _serialize_xml_with_CDATA_support\r\n",
"except AttributeError, e:\r\n",
" print 'etree patch error', str(e)\r\n",
" \r\n"
] | [
0,
0.06666666666666667,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.16666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.16666666666666666
] | 56 | 0.007806 | false |
# -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2017
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware
# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.twagenthelper import twAgentGetPage
# Impersonate mobile Safari so beeg.com serves its mobile pages and API.
IPhone5Agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'
MyHeaders= {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
			'Accept-Language':'en-US,en;q=0.5'}
# Fallback cover image shipped in the plugin's icon cache directory.
default_cover = "file://%s/beeg.png" % (config.mediaportal.iconcachepath.value + "logos")
# API version number and URL-decryption salt scraped from the site's JS at
# runtime by beegGenreScreen.getApiKeys*(); empty until the first screen opens.
beeg_apikey = ''
beeg_salt = ''
class beegGenreScreen(MPScreen):
	"""Tag/genre selection screen for beeg.com.

	On first use the screen scrapes the site's mobile JS bundle to obtain
	the API version and the decryption salt, caching them in the
	module-level globals ``beeg_apikey``/``beeg_salt`` before requesting
	the genre index.
	"""

	def __init__(self, session):
		MPScreen.__init__(self, session, skin='MP_Plugin')

		self["actions"] = ActionMap(["MP_Actions"], {
			"ok" : self.keyOK,
			"0" : self.closeAll,
			"cancel" : self.keyCancel,
			"up" : self.keyUp,
			"down" : self.keyDown,
			"right" : self.keyRight,
			"left" : self.keyLeft
		}, -1)

		self['title'] = Label("beeg.com")
		self['ContentTitle'] = Label("Genre:")
		self.keyLocked = True  # blocks keyOK until the list has loaded
		self.suchString = ''
		self.tags = 'popular'  # section to list; switched to 'nonpopular' for all tags
		self.looplock = False

		self.genreliste = []
		self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
		self['liste'] = self.ml

		# Scrape the API key/salt only once per plugin session.
		if beeg_salt == '' or beeg_apikey == '':
			self.onLayoutFinish.append(self.getApiKeys)
		else:
			self.onLayoutFinish.append(self.layoutFinished)

	def getApiKeys(self):
		# Step 1: fetch an arbitrary video page to locate the versioned JS bundle.
		CoverHelper(self['coverArt']).getCover(default_cover)
		url = "http://beeg.com/1000000"
		twAgentGetPage(url, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.getApiKeys2).addErrback(self.dataError)

	def getApiKeys2(self, data):
		# Step 2: extract the cpl/<version>.js script URL from the page markup.
		cpl = re.findall('<script[^>]+src=["\']((?:/static|(?:https?:)?//static\.beeg\.com)/cpl/\d+\.js.*?)["\']', data, re.S)[0]
		if cpl.startswith('/static'):
			cpl = 'http://beeg.com' + cpl
		twAgentGetPage(cpl, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.getApiKeys3).addErrback(self.dataError)

	def getApiKeys3(self, data):
		# Step 3: pull API version and salt out of the JS and cache them in the
		# module-level globals used by all screens.
		global beeg_apikey
		beeg_apikey = str(re.findall('beeg_version=(\d+)', data, re.S)[0])
		global beeg_salt
		beeg_salt = re.findall('beeg_salt="(.*?)"', data, re.S)[0]
		if beeg_salt == '' or beeg_apikey == '':
			message = self.session.open(MessageBoxExt, _("Broken URL parsing, please report to the developers."), MessageBoxExt.TYPE_INFO, timeout=3)
		else:
			self.layoutFinished()

	def layoutFinished(self):
		# Request the genre index; genreData() fills the menu when it arrives.
		self.keyLocked = True
		CoverHelper(self['coverArt']).getCover(default_cover)
		url = "http://beeg.com/api/v6/%s/index/main/0/mobile" % beeg_apikey
		twAgentGetPage(url, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.genreData).addErrback(self.dataError)

	def genreData(self, data):
		# Extract the tag names of the current section ('popular' or
		# 'nonpopular') from the JSON index using regexes.
		parse = re.search('"%s":\[(.*?)\]' % self.tags, data, re.S)
		if parse:
			Cats = re.findall('"(.*?)"', parse.group(1), re.S)
			if Cats:
				for Title in Cats:
					# $PAGE$ is substituted with the page number by beegFilmScreen.
					Url = 'http://beeg.com/api/v6/%s/index/tag/$PAGE$/mobile?tag=%s' % (beeg_apikey, Title)
					Title = Title.title()
					self.genreliste.append((Title, Url))
			self.genreliste.sort()
			self.genreliste.insert(0, ("Longest", "http://beeg.com/api/v6/%s/index/tag/$PAGE$/mobile?tag=long%svideos" % (beeg_apikey, "%20")))
			self.genreliste.insert(0, ("Newest", "http://beeg.com/api/v6/%s/index/main/$PAGE$/mobile" % beeg_apikey))
			if self.tags == 'popular':
				self.genreliste.insert(0, ("- Show all Tags -", ""))
			self.genreliste.insert(0, ("--- Search ---", "callSuchen"))
			self.ml.setList(map(self._defaultlistcenter, self.genreliste))
			self.keyLocked = False
		else:
			message = self.session.open(MessageBoxExt, _("Broken URL parsing, please report to the developers."), MessageBoxExt.TYPE_INFO, timeout=3)

	def keyOK(self):
		if self.keyLocked:
			return
		Name = self['liste'].getCurrent()[0][0]
		if Name == "- Show all Tags -":
			# Reload the menu with the full (non-popular) tag section.
			self.tags = 'nonpopular'
			self.genreliste = []
			self.layoutFinished()
		elif Name == "--- Search ---":
			self.suchen()
		else:
			Link = self['liste'].getCurrent()[0][1]
			self.session.open(beegFilmScreen, Link, Name)

	def SuchenCallback(self, callback = None, entry = None):
		# Invoked with the text entered in the search keyboard (opened by
		# suchen(), inherited from the base screen — TODO confirm).
		if callback is not None and len(callback):
			self.suchString = callback.replace(' ', '+')
			Link = self.suchString
			Name = "--- Search ---"
			self.session.open(beegFilmScreen, Link, Name)
class beegFilmScreen(MPScreen, ThumbsHelper):
	"""Paged video listing for a tag, the main index, or a search query.

	``Link`` is either a listing URL template containing a ``$PAGE$``
	placeholder or, in search mode (``Name`` matching "...Search"), the
	raw query string; ``Name`` is shown as the screen title.
	"""

	def __init__(self, session, Link, Name):
		self.Link = Link
		self.Name = Name

		MPScreen.__init__(self, session, skin='MP_PluginDescr')
		ThumbsHelper.__init__(self)

		self["actions"] = ActionMap(["MP_Actions"], {
			"ok" : self.keyOK,
			"0" : self.closeAll,
			"cancel" : self.keyCancel,
			"5" : self.keyShowThumb,
			"up" : self.keyUp,
			"down" : self.keyDown,
			"right" : self.keyRight,
			"left" : self.keyLeft,
			"nextBouquet" : self.keyPageUp,
			"prevBouquet" : self.keyPageDown,
			"green" : self.keyPageNumber
		}, -1)

		self['title'] = Label("beeg.com")
		self['ContentTitle'] = Label("Genre: %s" % self.Name)
		self['F2'] = Label(_("Page"))

		self['Page'] = Label(_("Page:"))

		self.keyLocked = True  # blocks keyOK until the page has loaded
		self.page = 1          # UI pages are 1-based; the API is 0-based
		self.lastpage = 1

		self.filmliste = []
		self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
		self['liste'] = self.ml

		self.onLayoutFinish.append(self.loadPage)

	def loadPage(self):
		# Fetch the current page of the listing (search or tag/main index).
		self.keyLocked = True
		self['name'].setText(_('Please wait...'))
		self.filmliste = []
		if re.match(".*Search", self.Name):
			url = 'http://beeg.com/api/v6/%s/index/search/$PAGE$/mobile?query=%s' % (beeg_apikey, self.Link)
			url = url.replace('$PAGE$', '%s' % str(self.page-1))
		else:
			url = self.Link.replace('$PAGE$', '%s' % str(self.page-1))
		twAgentGetPage(url, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.loadData).addErrback(self.dataError)

	def loadData(self, data):
		# Parse titles/ids out of the JSON response and build the menu list
		# of (title, per-video API url, thumbnail url) tuples.
		self.getLastPage(data, '', '"pages":(.*?),')
		Videos = re.findall('\{"title":"(.*?)","id":"(.*?)"', data, re.S)
		if Videos:
			for (Title, VideoId) in Videos:
				Url = 'http://beeg.com/api/v6/%s/video/%s' % (beeg_apikey, VideoId)
				Image = 'http://img.beeg.com/800x450/%s.jpg' % VideoId
				self.filmliste.append((decodeHtml(Title), Url, Image))
		if len(self.filmliste) == 0:
			self.filmliste.append((_('No videos found!'), '', None))
		self.ml.setList(map(self._defaultlistleft, self.filmliste))
		self.ml.moveToIndex(0)
		self.keyLocked = False
		self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)
		self.showInfos()

	def showInfos(self):
		# Refresh title label and cover art for the highlighted entry.
		title = self['liste'].getCurrent()[0][0]
		pic = self['liste'].getCurrent()[0][2]
		self['name'].setText(title)
		CoverHelper(self['coverArt']).getCover(pic)

	def keyOK(self):
		# Request the per-video API document for the current selection.
		if self.keyLocked:
			return
		url = self['liste'].getCurrent()[0][1]
		twAgentGetPage(url, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.getVideoPage).addErrback(self.dataError)

	def decrypt_key(self, key):
		# Reproduce the site's JS key de-obfuscation by running the original
		# algorithm under node via PyExecJS; requires python-pyexecjs and
		# nodejs on the receiver.
		try:
			import execjs
			node = execjs.get("Node")
		except:
			printl('nodejs not found',self,'E')
			self.session.open(MessageBoxExt, _("This plugin requires packages python-pyexecjs and nodejs."), MessageBoxExt.TYPE_INFO)
			return
		js = 'var jsalt="%s";' % beeg_salt + 'String.prototype.str_split=function(e,t){var n=this;e=e||1,t=!!t;var r=[];if(t){var a=n.length%e;a>0&&(r.push(n.substring(0,a)),n=n.substring(a))}for(;n.length>e;)r.push(n.substring(0,e)),n=n.substring(e);return r.push(n),r}; function jsaltDecode(e){e=decodeURIComponent(e);for(var s=jsalt.length,t="",o=0;o<e.length;o++){var l=e.charCodeAt(o),n=o%s,i=jsalt.charCodeAt(n)%21;t+=String.fromCharCode(l-i)}return t.str_split(3,!0).reverse().join("")};' + 'var key = "%s";' % key + 'vidurl = jsaltDecode(key); return vidurl;'
		key = str(node.exec_(js))
		return key

	def decrypt_url(self, encrypted_url):
		# Fill in the data markers and replace the obfuscated key segment of
		# the stream URL with its decrypted form.
		encrypted_url = 'https:' + encrypted_url.replace('/{DATA_MARKERS}/', '/data=pc_XX__%s/' % beeg_apikey)
		key = re.search('/key=(.*?)%2Cend=', encrypted_url).group(1)
		if not key:
			return encrypted_url
		return encrypted_url.replace(key, self.decrypt_key(key))

	def getVideoPage(self, data):
		# Pick the highest listed resolution ([-1]) and start playback.
		streamlinks = re.findall('\d{3}p":"(.*?)"', data , re.S)
		if streamlinks:
			streamlink = self.decrypt_url(streamlinks[-1])
			Title = self['liste'].getCurrent()[0][0]
			Cover = self['liste'].getCurrent()[0][2]
			mp_globals.player_agent = IPhone5Agent
			self.session.open(SimplePlayer, [(Title, streamlink, Cover)], showPlaylist=False, ltype='beeg', cover=True)
		else:
			message = self.session.open(MessageBoxExt, _("Stream not found"), MessageBoxExt.TYPE_INFO, timeout=3)
"# -*- coding: utf-8 -*-\n",
"###############################################################################################\n",
"#\n",
"# MediaPortal for Dreambox OS\n",
"#\n",
"# Coded by MediaPortal Team (c) 2013-2017\n",
"#\n",
"# This plugin is open source but it is NOT free software.\n",
"#\n",
"# This plugin may only be distributed to and executed on hardware which\n",
"# is licensed by Dream Property GmbH. This includes commercial distribution.\n",
"# In other words:\n",
"# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way\n",
"# to hardware which is NOT licensed by Dream Property GmbH.\n",
"# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way\n",
"# on hardware which is NOT licensed by Dream Property GmbH.\n",
"#\n",
"# This applies to the source code as a whole as well as to parts of it, unless\n",
"# explicitely stated otherwise.\n",
"#\n",
"# If you want to use or modify the code or parts of it,\n",
"# you have to keep OUR license and inform us about the modifications, but it may NOT be\n",
"# commercially distributed other than under the conditions noted above.\n",
"#\n",
"# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware\n",
"# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.\n",
"#\n",
"# As an exception regarding modifcations, you are NOT permitted to remove\n",
"# any copy protections implemented in this plugin or change them for means of disabling\n",
"# or working around the copy protections, unless the change has been explicitly permitted\n",
"# by the original authors. Also decompiling and modification of the closed source\n",
"# parts is NOT permitted.\n",
"#\n",
"# Advertising with this plugin is NOT allowed.\n",
"# For other uses, permission from the authors is necessary.\n",
"#\n",
"###############################################################################################\n",
"\n",
"from Plugins.Extensions.MediaPortal.plugin import _\n",
"from Plugins.Extensions.MediaPortal.resources.imports import *\n",
"from Plugins.Extensions.MediaPortal.resources.twagenthelper import twAgentGetPage\n",
"\n",
"IPhone5Agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'\n",
"MyHeaders= {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n",
"\t\t\t'Accept-Language':'en-US,en;q=0.5'}\n",
"default_cover = \"file://%s/beeg.png\" % (config.mediaportal.iconcachepath.value + \"logos\")\n",
"\n",
"beeg_apikey = ''\n",
"beeg_salt = ''\n",
"\n",
"class beegGenreScreen(MPScreen):\n",
"\n",
"\tdef __init__(self, session):\n",
"\t\tMPScreen.__init__(self, session, skin='MP_Plugin')\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"cancel\" : self.keyCancel,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"beeg.com\")\n",
"\t\tself['ContentTitle'] = Label(\"Genre:\")\n",
"\t\tself.keyLocked = True\n",
"\t\tself.suchString = ''\n",
"\t\tself.tags = 'popular'\n",
"\t\tself.looplock = False\n",
"\n",
"\t\tself.genreliste = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tif beeg_salt == '' or beeg_apikey == '':\n",
"\t\t\tself.onLayoutFinish.append(self.getApiKeys)\n",
"\t\telse:\n",
"\t\t\tself.onLayoutFinish.append(self.layoutFinished)\n",
"\n",
"\tdef getApiKeys(self):\n",
"\t\tCoverHelper(self['coverArt']).getCover(default_cover)\n",
"\t\turl = \"http://beeg.com/1000000\"\n",
"\t\ttwAgentGetPage(url, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.getApiKeys2).addErrback(self.dataError)\n",
"\n",
"\tdef getApiKeys2(self, data):\n",
"\t\tcpl = re.findall('<script[^>]+src=[\"\\']((?:/static|(?:https?:)?//static\\.beeg\\.com)/cpl/\\d+\\.js.*?)[\"\\']', data, re.S)[0]\n",
"\t\tif cpl.startswith('/static'):\n",
"\t\t\tcpl = 'http://beeg.com' + cpl\n",
"\t\ttwAgentGetPage(cpl, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.getApiKeys3).addErrback(self.dataError)\n",
"\n",
"\tdef getApiKeys3(self, data):\n",
"\t\tglobal beeg_apikey\n",
"\t\tbeeg_apikey = str(re.findall('beeg_version=(\\d+)', data, re.S)[0])\n",
"\t\tglobal beeg_salt\n",
"\t\tbeeg_salt = re.findall('beeg_salt=\"(.*?)\"', data, re.S)[0]\n",
"\t\tif beeg_salt == '' or beeg_apikey == '':\n",
"\t\t\tmessage = self.session.open(MessageBoxExt, _(\"Broken URL parsing, please report to the developers.\"), MessageBoxExt.TYPE_INFO, timeout=3)\n",
"\t\telse:\n",
"\t\t\tself.layoutFinished()\n",
"\n",
"\tdef layoutFinished(self):\n",
"\t\tself.keyLocked = True\n",
"\t\tCoverHelper(self['coverArt']).getCover(default_cover)\n",
"\t\turl = \"http://beeg.com/api/v6/%s/index/main/0/mobile\" % beeg_apikey\n",
"\t\ttwAgentGetPage(url, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.genreData).addErrback(self.dataError)\n",
"\n",
"\tdef genreData(self, data):\n",
"\t\tparse = re.search('\"%s\":\\[(.*?)\\]' % self.tags, data, re.S)\n",
"\t\tif parse:\n",
"\t\t\tCats = re.findall('\"(.*?)\"', parse.group(1), re.S)\n",
"\t\t\tif Cats:\n",
"\t\t\t\tfor Title in Cats:\n",
"\t\t\t\t\tUrl = 'http://beeg.com/api/v6/%s/index/tag/$PAGE$/mobile?tag=%s' % (beeg_apikey, Title)\n",
"\t\t\t\t\tTitle = Title.title()\n",
"\t\t\t\t\tself.genreliste.append((Title, Url))\n",
"\t\t\tself.genreliste.sort()\n",
"\t\t\tself.genreliste.insert(0, (\"Longest\", \"http://beeg.com/api/v6/%s/index/tag/$PAGE$/mobile?tag=long%svideos\" % (beeg_apikey, \"%20\")))\n",
"\t\t\tself.genreliste.insert(0, (\"Newest\", \"http://beeg.com/api/v6/%s/index/main/$PAGE$/mobile\" % beeg_apikey))\n",
"\t\t\tif self.tags == 'popular':\n",
"\t\t\t\tself.genreliste.insert(0, (\"- Show all Tags -\", \"\"))\n",
"\t\t\tself.genreliste.insert(0, (\"--- Search ---\", \"callSuchen\"))\n",
"\t\t\tself.ml.setList(map(self._defaultlistcenter, self.genreliste))\n",
"\t\t\tself.keyLocked = False\n",
"\t\telse:\n",
"\t\t\tmessage = self.session.open(MessageBoxExt, _(\"Broken URL parsing, please report to the developers.\"), MessageBoxExt.TYPE_INFO, timeout=3)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tName = self['liste'].getCurrent()[0][0]\n",
"\t\tif Name == \"- Show all Tags -\":\n",
"\t\t\tself.tags = 'nonpopular'\n",
"\t\t\tself.genreliste = []\n",
"\t\t\tself.layoutFinished()\n",
"\t\telif Name == \"--- Search ---\":\n",
"\t\t\tself.suchen()\n",
"\t\telse:\n",
"\t\t\tLink = self['liste'].getCurrent()[0][1]\n",
"\t\t\tself.session.open(beegFilmScreen, Link, Name)\n",
"\n",
"\tdef SuchenCallback(self, callback = None, entry = None):\n",
"\t\tif callback is not None and len(callback):\n",
"\t\t\tself.suchString = callback.replace(' ', '+')\n",
"\t\t\tLink = self.suchString\n",
"\t\t\tName = \"--- Search ---\"\n",
"\t\t\tself.session.open(beegFilmScreen, Link, Name)\n",
"\n",
"class beegFilmScreen(MPScreen, ThumbsHelper):\n",
"\n",
"\tdef __init__(self, session, Link, Name):\n",
"\t\tself.Link = Link\n",
"\t\tself.Name = Name\n",
"\n",
"\t\tMPScreen.__init__(self, session, skin='MP_PluginDescr')\n",
"\t\tThumbsHelper.__init__(self)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"cancel\" : self.keyCancel,\n",
"\t\t\t\"5\" : self.keyShowThumb,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft,\n",
"\t\t\t\"nextBouquet\" : self.keyPageUp,\n",
"\t\t\t\"prevBouquet\" : self.keyPageDown,\n",
"\t\t\t\"green\" : self.keyPageNumber\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"beeg.com\")\n",
"\t\tself['ContentTitle'] = Label(\"Genre: %s\" % self.Name)\n",
"\t\tself['F2'] = Label(_(\"Page\"))\n",
"\n",
"\t\tself['Page'] = Label(_(\"Page:\"))\n",
"\n",
"\t\tself.keyLocked = True\n",
"\t\tself.page = 1\n",
"\t\tself.lastpage = 1\n",
"\n",
"\t\tself.filmliste = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tself.keyLocked = True\n",
"\t\tself['name'].setText(_('Please wait...'))\n",
"\t\tself.filmliste = []\n",
"\t\tif re.match(\".*Search\", self.Name):\n",
"\t\t\turl = 'http://beeg.com/api/v6/%s/index/search/$PAGE$/mobile?query=%s' % (beeg_apikey, self.Link)\n",
"\t\t\turl = url.replace('$PAGE$', '%s' % str(self.page-1))\n",
"\t\telse:\n",
"\t\t\turl = self.Link.replace('$PAGE$', '%s' % str(self.page-1))\n",
"\t\ttwAgentGetPage(url, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.loadData).addErrback(self.dataError)\n",
"\n",
"\tdef loadData(self, data):\n",
"\t\tself.getLastPage(data, '', '\"pages\":(.*?),')\n",
"\t\tVideos = re.findall('\\{\"title\":\"(.*?)\",\"id\":\"(.*?)\"', data, re.S)\n",
"\t\tif Videos:\n",
"\t\t\tfor (Title, VideoId) in Videos:\n",
"\t\t\t\tUrl = 'http://beeg.com/api/v6/%s/video/%s' % (beeg_apikey, VideoId)\n",
"\t\t\t\tImage = 'http://img.beeg.com/800x450/%s.jpg' % VideoId\n",
"\t\t\t\tself.filmliste.append((decodeHtml(Title), Url, Image))\n",
"\t\tif len(self.filmliste) == 0:\n",
"\t\t\tself.filmliste.append((_('No videos found!'), '', None))\n",
"\t\tself.ml.setList(map(self._defaultlistleft, self.filmliste))\n",
"\t\tself.ml.moveToIndex(0)\n",
"\t\tself.keyLocked = False\n",
"\t\tself.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)\n",
"\t\tself.showInfos()\n",
"\n",
"\tdef showInfos(self):\n",
"\t\ttitle = self['liste'].getCurrent()[0][0]\n",
"\t\tpic = self['liste'].getCurrent()[0][2]\n",
"\t\tself['name'].setText(title)\n",
"\t\tCoverHelper(self['coverArt']).getCover(pic)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\turl = self['liste'].getCurrent()[0][1]\n",
"\t\ttwAgentGetPage(url, agent=IPhone5Agent, headers=MyHeaders).addCallback(self.getVideoPage).addErrback(self.dataError)\n",
"\n",
"\tdef decrypt_key(self, key):\n",
"\t\ttry:\n",
"\t\t\timport execjs\n",
"\t\t\tnode = execjs.get(\"Node\")\n",
"\t\texcept:\n",
"\t\t\tprintl('nodejs not found',self,'E')\n",
"\t\t\tself.session.open(MessageBoxExt, _(\"This plugin requires packages python-pyexecjs and nodejs.\"), MessageBoxExt.TYPE_INFO)\n",
"\t\t\treturn\n",
"\t\tjs = 'var jsalt=\"%s\";' % beeg_salt + 'String.prototype.str_split=function(e,t){var n=this;e=e||1,t=!!t;var r=[];if(t){var a=n.length%e;a>0&&(r.push(n.substring(0,a)),n=n.substring(a))}for(;n.length>e;)r.push(n.substring(0,e)),n=n.substring(e);return r.push(n),r}; function jsaltDecode(e){e=decodeURIComponent(e);for(var s=jsalt.length,t=\"\",o=0;o<e.length;o++){var l=e.charCodeAt(o),n=o%s,i=jsalt.charCodeAt(n)%21;t+=String.fromCharCode(l-i)}return t.str_split(3,!0).reverse().join(\"\")};' + 'var key = \"%s\";' % key + 'vidurl = jsaltDecode(key); return vidurl;'\n",
"\t\tkey = str(node.exec_(js))\n",
"\t\treturn key\n",
"\n",
"\tdef decrypt_url(self, encrypted_url):\n",
"\t\tencrypted_url = 'https:' + encrypted_url.replace('/{DATA_MARKERS}/', '/data=pc_XX__%s/' % beeg_apikey)\n",
"\t\tkey = re.search('/key=(.*?)%2Cend=', encrypted_url).group(1)\n",
"\t\tif not key:\n",
"\t\t\treturn encrypted_url\n",
"\t\treturn encrypted_url.replace(key, self.decrypt_key(key))\n",
"\n",
"\tdef getVideoPage(self, data):\n",
"\t\tstreamlinks = re.findall('\\d{3}p\":\"(.*?)\"', data , re.S)\n",
"\t\tif streamlinks:\n",
"\t\t\tstreamlink = self.decrypt_url(streamlinks[-1])\n",
"\t\t\tTitle = self['liste'].getCurrent()[0][0]\n",
"\t\t\tCover = self['liste'].getCurrent()[0][2]\n",
"\t\t\tmp_globals.player_agent = IPhone5Agent\n",
"\t\t\tself.session.open(SimplePlayer, [(Title, streamlink, Cover)], showPlaylist=False, ltype='beeg', cover=True)\n",
"\t\telse:\n",
"\t\t\tmessage = self.session.open(MessageBoxExt, _(\"Stream not found\"), MessageBoxExt.TYPE_INFO, timeout=3)"
] | [
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0.009174311926605505,
0.011627906976744186,
0,
0,
0.011235955056179775,
0.01098901098901099,
0.012048192771084338,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0.012195121951219513,
0,
0.006578947368421052,
0.03409090909090909,
0.07692307692307693,
0.011111111111111112,
0,
0,
0,
0,
0.030303030303030304,
0,
0.03333333333333333,
0.018867924528301886,
0,
0.020833333333333332,
0.09090909090909091,
0.08333333333333333,
0.06666666666666667,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.08,
0.1111111111111111,
0,
0.027777777777777776,
0.024390243902439025,
0.041666666666666664,
0.043478260869565216,
0.041666666666666664,
0.041666666666666664,
0,
0.043478260869565216,
0.023809523809523808,
0.038461538461538464,
0,
0.023255813953488372,
0.02127659574468085,
0.125,
0.0196078431372549,
0,
0.043478260869565216,
0.017857142857142856,
0.029411764705882353,
0.01694915254237288,
0,
0.03333333333333333,
0.04838709677419355,
0.03125,
0.030303030303030304,
0.01694915254237288,
0,
0.03333333333333333,
0.047619047619047616,
0.028985507246376812,
0.05263157894736842,
0.01639344262295082,
0.023255813953488372,
0.014184397163120567,
0.125,
0.04,
0,
0.037037037037037035,
0.041666666666666664,
0.017857142857142856,
0.014285714285714285,
0.017241379310344827,
0,
0.03571428571428571,
0.04838709677419355,
0.08333333333333333,
0.018518518518518517,
0.08333333333333333,
0.043478260869565216,
0.021505376344086023,
0.037037037037037035,
0.023809523809523808,
0.038461538461538464,
0.014814814814814815,
0.01834862385321101,
0.03333333333333333,
0.017543859649122806,
0.015873015873015872,
0.015151515151515152,
0.038461538461538464,
0.125,
0.014184397163120567,
0,
0.05555555555555555,
0.047619047619047616,
0.1,
0.023809523809523808,
0.029411764705882353,
0.03571428571428571,
0.041666666666666664,
0.04,
0.030303030303030304,
0.058823529411764705,
0.125,
0.023255813953488372,
0.02040816326530612,
0,
0.08620689655172414,
0.022222222222222223,
0.020833333333333332,
0.038461538461538464,
0.037037037037037035,
0.02040816326530612,
0,
0.021739130434782608,
0,
0.023809523809523808,
0.05263157894736842,
0.05263157894736842,
0,
0.017241379310344827,
0.03333333333333333,
0,
0.020833333333333332,
0.09090909090909091,
0.08333333333333333,
0.06666666666666667,
0.07142857142857142,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.07692307692307693,
0.05714285714285714,
0.05405405405405406,
0.0625,
0.1111111111111111,
0,
0.027777777777777776,
0.017857142857142856,
0.03125,
0,
0.02857142857142857,
0,
0.041666666666666664,
0.0625,
0.05,
0,
0.045454545454545456,
0.023809523809523808,
0.038461538461538464,
0,
0.022727272727272728,
0,
0.047619047619047616,
0.041666666666666664,
0.022727272727272728,
0.045454545454545456,
0.02631578947368421,
0.02,
0.017857142857142856,
0.125,
0.016129032258064516,
0.017391304347826087,
0,
0.037037037037037035,
0.02127659574468085,
0.029411764705882353,
0.07692307692307693,
0.02857142857142857,
0.013888888888888888,
0.01694915254237288,
0.01694915254237288,
0.03225806451612903,
0.016666666666666666,
0.016129032258064516,
0.04,
0.04,
0.021505376344086023,
0.05263157894736842,
0,
0.045454545454545456,
0.023255813953488372,
0.024390243902439025,
0.03333333333333333,
0.021739130434782608,
0,
0.05555555555555555,
0.047619047619047616,
0.1,
0.024390243902439025,
0.01680672268907563,
0,
0.034482758620689655,
0.14285714285714285,
0.058823529411764705,
0.034482758620689655,
0.2,
0.07692307692307693,
0.016,
0.1,
0.005319148936170213,
0.03571428571428571,
0.07692307692307693,
0,
0.02564102564102564,
0.01904761904761905,
0.015873015873015872,
0.07142857142857142,
0.041666666666666664,
0.01694915254237288,
0,
0.03225806451612903,
0.05084745762711865,
0.05555555555555555,
0.02,
0.022727272727272728,
0.022727272727272728,
0.023809523809523808,
0.018018018018018018,
0.125,
0.028846153846153848
] | 256 | 0.031371 | false |
# -*- coding: UTF-8 -*-
# Task 8, variant 15.
#
# "Anagrams" game (after M. Dawson, "Python Programming for the Absolute
# Beginner", ch. 4) extended with per-word hints: a stuck player may ask
# for a hint, which reveals one more letter of the answer.  The scoring
# system rewards players who solve the word without hints.
# Mochalov V. V.
# 14.05.2016

import random

# Pool of answer words; random.choice picks one per game.
WORDS=("питон","анаграмма","простая","сложная","ответ","подстаканник","абсолютный","честность",
       "авторитетный","мнение", "поступить","академия", "оранжевый","апельсин", "громкий","аплодисменты",
       "цирк","арена", "приятный","аромат", "атака", "благоприятный","атмосфера", "старинный","балкон",
       "бархатный","скатерть", "играть","баскетбол", "безвкусный","конфета", "беречь","имущество", "беседовать",
       "профессия", "размешиват","бетон", "разглядывать", "бинокль", "благородный", "поступок", "прочитать","брошюра")

word = random.choice(WORDS)
correct = word   # the answer guesses are checked against
jumble = ""      # scrambled presentation of the word
k = -1           # index of the last revealed hint letter (-1 = none yet)
j = ""           # revealed prefix of the answer
p = 1000         # score: starts at 1000, shrinks with hints and retries

# Scramble: repeatedly move a random remaining letter into the jumble.
while word:
    position = random.randrange(len(word))
    jumble += word[position]
    word = word[:position] + word[(position+1):]

# Typos fixed in the welcome text: "пожаловатив" -> "пожаловать в",
# "пререставить" -> "переставить".
print("Добро пожаловать в игру 'Анаграммы'!",
      "\nНадо переставить буквы так, чтобы получилось слово.\n(Для выхода нажмите Enter, не вводя своей версии.)")
print("Вот анаграмма: ", jumble)
guess = input("\nПопробуйте отгадать слово: ")
while guess != correct and guess != "":
    # 'answer' replaces the original name 'help', which shadowed the builtin.
    answer = input("К сожалению, вы неправы. Хотите получить подсказку (напишите 'да')?")
    if answer == "да":
        if k <= (len(correct)-2):
            # Reveal the next letter; later hints cost progressively more.
            k = k + 1
            j = j + correct[k]
            print(j)
            p = p - k
        else:
            print("Превышено максимальное число подсказок")
    p = p - 1   # every wrong attempt costs one point
    guess = input("Попробуйте еще раз: ")
if guess == correct:
    print("Вы угадали!!!\n")
print("Спасибо за игру!!! Вы набрали ", p, " очков.")
input("\n\nНажмите Enter, чтобы выйти.")
| [
"# -*- coding: UTF-8 -*-\r\n",
"# Задача 8. Вариант 15.\r\n",
"# \r\n",
"# Доработайте игру \"Анаграммы\" (см. М.Доусон Программируем на Python. Гл.4) так, чтобы к каждому слову полагалась подсказка. Игрок должен получать право на подсказку в том случае, если у него нет никаких предположений. Разработайте систему начисления очков, по которой бы игроки, отгадавшие слово без подсказки, получали больше тех, кто запросил подсказку.\r\n",
"# Mochalov V. V.\r\n",
"# 14.05.2016\r\n",
"\r\n",
"import random\r\n",
"WORDS=(\"питон\",\"анаграмма\",\"простая\",\"сложная\",\"ответ\",\"подстаканник\",\"абсолютный\",\"честность\",\r\n",
" \"авторитетный\",\"мнение\", \"поступить\",\"академия\", \"оранжевый\",\"апельсин\", \"громкий\",\"аплодисменты\",\r\n",
" \"цирк\",\"арена\", \"приятный\",\"аромат\", \"атака\", \"благоприятный\",\"атмосфера\", \"старинный\",\"балкон\",\r\n",
" \"бархатный\",\"скатерть\", \"играть\",\"баскетбол\", \"безвкусный\",\"конфета\", \"беречь\",\"имущество\", \"беседовать\",\r\n",
" \"профессия\", \"размешиват\",\"бетон\", \"разглядывать\", \"бинокль\", \"благородный\", \"поступок\", \"прочитать\",\"брошюра\")\r\n",
"word=random.choice(WORDS)\r\n",
"correct=word\r\n",
"jumble=\"\"\r\n",
"k=-1\r\n",
"j=\"\"\r\n",
"p=1000\r\n",
"while word:\r\n",
" position=random.randrange(len(word))\r\n",
" jumble+=word[position]\r\n",
" word=word[:position]+word[(position+1):]\r\n",
"print(\"Добро пожаловатив игру 'Анаграммы'!\",\r\n",
" \"\\nНадо пререставить буквы так, чтобы получилось слово.\\n(Для выхода нажмите Enter, не вводя своей версии.)\")\r\n",
"print(\"Вот анаграмма: \", jumble)\r\n",
"guess=input(\"\\nПопробуйте отгадать слово: \")\r\n",
"while guess!=correct and guess!=\"\":\r\n",
" help=input(\"К сожалению, вы неправы. Хотите получить подсказку (напишите 'да')?\")\r\n",
" if help==\"да\":\r\n",
" if k<=(len(correct)-2):\r\n",
" k=k+1\r\n",
" j=j+correct[k]\r\n",
" print (j)\r\n",
" p=p-k\r\n",
" else:\r\n",
" print(\"Превышено максимальное число подсказок\")\r\n",
" p=p-1\r\n",
" guess=input(\"Попробуйте еще раз: \")\r\n",
"if guess==correct:\r\n",
" print(\"Вы угадали!!!\\n\")\r\n",
"print(\"Спасибо за игру!!! Вы набрали \", p,\" очков.\")\r\n",
"input(\"\\n\\nНажмите Enter, чтобы выйти.\")\r\n"
] | [
0,
0,
0.25,
0.002793296089385475,
0,
0,
0,
0,
0.09278350515463918,
0.04672897196261682,
0.047619047619047616,
0.043859649122807015,
0.025,
0.037037037037037035,
0.07142857142857142,
0.09090909090909091,
0.16666666666666666,
0.16666666666666666,
0.125,
0,
0.023809523809523808,
0.03571428571428571,
0.021739130434782608,
0,
0.008547008547008548,
0,
0.021739130434782608,
0.05405405405405406,
0.022988505747126436,
0.05,
0.030303030303030304,
0.125,
0.08,
0.1,
0.125,
0,
0.017241379310344827,
0.09090909090909091,
0.024390243902439025,
0.05,
0,
0.018518518518518517,
0
] | 43 | 0.048057 | false |
# -*- coding: utf-8 -*-
# Copyright (C) 2009 PSchem Contributors (see CONTRIBUTORS for details)
# This file is part of PSchem Database
# PSchem is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PSchem is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with PSchem Database. If not, see <http://www.gnu.org/licenses/>.
#print 'CellViews in'
from Index import Index
from Primitives import *
#from Design import *
from xml.etree import ElementTree as et
#print 'CellViews out'
class CellView():
    """A named view of a Cell (e.g. a diagram or symbol).

    Registers itself with the owning cell on construction and
    unregisters via remove().
    """

    def __init__(self, name, cell):
        self._name = name
        self._attribs = {}   # free-form per-view attributes
        self._cell = cell
        cell.cellViewAdded(self)

    @property
    def name(self):
        """Name of this view."""
        return self._name

    @property
    def path(self):
        """Full path of the view: '<cell path>/<view name>'."""
        return self.cell.path + '/' + self.name

    @property
    def cell(self):
        """Owning cell (None after remove())."""
        return self._cell

    @property
    def attributes(self):
        """Mutable dict of view attributes."""
        return self._attribs

    @property
    def library(self):
        """Library of the owning cell."""
        return self.cell.library

    @property
    def database(self):
        """Database of the owning cell."""
        return self.cell.database

    def save(self):
        """Persist the view; no-op here, overridden by subclasses."""
        pass

    def restore(self):
        """Reload the view; no-op here, overridden by subclasses."""
        pass

    def remove(self):
        """Unregister this view from its cell and drop the back-reference."""
        #for a in list(self.attributes):
        #    a.remove()
        self.cell.cellViewRemoved(self)
        # Bug fix: 'cell' is a getter-only property, so the original
        # 'self.cell = None' raised AttributeError; clear the backing
        # attribute instead.
        self._cell = None

    def __repr__(self):
        return "<CellView '" + self.path + "'>"
class Diagram(CellView):
    def __init__(self, name, cell):
        """Create a diagram view holding sets of primitive drawing elements."""
        CellView.__init__(self, name, cell)
        #self._elems = set()
        self._items = set()           # graphics items showing this diagram
        self._lines = set()
        self._rects = set()
        self._customPaths = set()
        self._ellipses = set()
        self._ellipseArcs = set()
        self._labels = set()
        self._attributeLabels = set()
        #self._uu = 160 # default DB units per user units
        self._attribs['uu'] = 160 # default DB units per user units
        #self._name = 'diagram'
        self._designUnits = set()     # design units displaying this diagram
    @property
    def designUnits(self):
        """Set of design units currently displaying this diagram."""
        return self._designUnits

    @property
    def items(self):
        """Set of instance graphics items attached to this diagram."""
        return self._items

    @property
    def elems(self):
        """Union of all primitive element sets of the diagram."""
        return self.lines | self.rects | self.labels | \
            self.attributeLabels | self.customPaths | \
            self.ellipses | self.ellipseArcs

    @property
    def lines(self):
        return self._lines

    @property
    def rects(self):
        return self._rects

    @property
    def customPaths(self):
        return self._customPaths

    @property
    def ellipses(self):
        return self._ellipses

    @property
    def ellipseArcs(self):
        return self._ellipseArcs

    @property
    def labels(self):
        return self._labels

    @property
    def attributeLabels(self):
        return self._attributeLabels

    @property
    def uu(self):
        """Database units per user unit (stored in attributes, default 160)."""
        return self._attribs['uu']

    @uu.setter
    def uu(self, uu):
        self._attribs['uu'] = uu
    def instanceItemAdded(self, view):
        # A new instance graphics item was created: register it and add
        # every existing element of the diagram to it.
        self.items.add(view)
        for elem in self.elems:
            elem.addToView(view)
        #for designUnit in self._designUnits:
        #    elem.addToDesignUnit(designUnit)

    def instanceItemRemoved(self, view):
        # Unregister the item and detach the elements from it.
        self.items.remove(view)
        for elem in self.elems:
            elem.removeFromView()

    def designUnitAdded(self, designUnit):
        # Register a design unit and populate its scene with all elements.
        self.designUnits.add(designUnit)
        scene = designUnit.scene()
        for e in self.elems:
            e.addToView(scene)

    def designUnitRemoved(self, designUnit):
        self.designUnits.remove(designUnit)

    #def updateDesignUnits(self):
    #    for d in self._designUnits:
    #        d.updateDesignUnit()
    #        #v.updateItem()
def elementAdded(self, elem):
pass
#for designUnit in self._designUnits:
# elem.addToDesignUnit(designUnit)
def elementChanged(self, elem):
pass
#for designUnit in self._designUnits:
# elem.addToDesignUnit(designUnit)
def elementRemoved(self, elem):
pass
#for designUnit in self._designUnits:
# elem.removeFromDesignUnit(designUnit)
#def addElem(self, elem):
# "main entry point for adding new elements to diagram"
# #self._elems.add(elem)
# elem.addToDiagram(self)
# for designUnit in self._designUnits:
# elem.addToDesignUnit(designUnit)
#def removeElem(self, elem):
# "main entry point for removing elements from diagram"
# for designUnit in self._designUnits:
# elem.removeFromDesignUnit(designUnit)
# elem.removeFromDiagram(self)
def lineAdded(self, line):
self.lines.add(line)
def lineRemoved(self, line):
self.lines.remove(line)
def rectAdded(self, rect):
self.rects.add(rect)
def rectRemoved(self, rect):
self.rects.remove(rect)
def customPathAdded(self, customPath):
self.customPaths.add(customPath)
def customPathRemoved(self, customPath):
self.customPaths.remove(customPath)
def ellipseAdded(self, ellipse):
self.ellipses.add(ellipse)
def ellipseRemoved(self, ellipse):
self.ellipses.remove(ellipse)
def ellipseArcAdded(self, ellipseArc):
self.ellipseArcs.add(ellipseArc)
def ellipseArcRemoved(self, ellipseArc):
self.ellipseArcs.remove(ellipseArc)
def labelAdded(self, label):
self.labels.add(label)
def labelRemoved(self, label):
self.labels.remove(label)
def attributeLabelAdded(self, attributeLabel):
self.attributeLabels.add(attributeLabel)
def attributeLabelRemoved(self, attributeLabel):
self.attributeLabels.remove(attributeLabel)
def remove(self):
for e in list(self.elems):
e.remove()
#self.removeElem(e)
for du in list(self.designUnits):
du.remove()
#self.removeDesignUnit(o)
CellView.remove(self)
def save(self):
root = et.Element(self.name)
tree = et.ElementTree(root)
for a in sorted(self.attributes):
root.attrib[str(a)] = str(self.attributes[a])
for e in sorted(self.elems, key=Element.name):
xElem = e.toXml()
root.append(xElem)
self._indentET(tree.getroot())
et.dump(tree)
#return tree
def restore(self):
pass
def _indentET(self, elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self._indentET(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def __repr__(self):
return "<Diagram '" + self.path + "'>"
class Schematic(Diagram):
    """Schematic cell view: a Diagram extended with pins, instances,
    net segments and solder dots, plus connectivity bookkeeping
    (segment splitting/merging and solder-dot insertion) that runs as
    a deferred process via the database.
    """
    def __init__(self, name, cell):
        """Create the schematic containers and the spatial index."""
        Diagram.__init__(self, name, cell)
        #self._name = 'schematic'
        self._pins = set()
        self._instances = set()
        self._netSegments = set()
        self._solderDots = set()
        self._nets = set()
        self._index = Index()
        #self._netSegmentsAdded = set()
        #self._netSegmentsRemoved = set()
    def designUnitAdded(self, designUnit):
        """Register *designUnit*; unlike Diagram, elements are not
        pushed into its scene here (that code is disabled below)."""
        self.designUnits.add(designUnit)
        #scene = designUnit.scene
        #for e in self.elems-self.instances:
        #for e in self.elems:
        #    e.addToView(scene)
        #for i in self.instances():
        #    i.addToView(designUnit)
        #for ns in self.netSegments():
        #    ns.addToDesignUnit(designUnit)
        #designUnit.checkNets()
    #def components(self):
    #    components = map(lambda i: i.cell(), self.instances())
    #    return components.sort()
    @property
    def elems(self):
        """All drawing elements, including pins, instances, net
        segments and solder dots (triggers deferred net checks)."""
        return self.lines | self.rects | self.labels | \
            self.attributeLabels | self.customPaths | \
            self.ellipses | self.ellipseArcs | \
            self.pins | self.instances | self.netSegments | self.solderDots
    @property
    def pins(self):
        """Set of schematic pins."""
        return self._pins
    @property
    def instances(self):
        """Set of component instances placed on this schematic."""
        return self._instances
    @property
    def netSegments(self):
        """Net segments; pending deferred net checks run first so the
        returned set is consistent."""
        self.database.runDeferredProcesses(self)
        return self._netSegments
    @property
    def solderDots(self):
        """Solder dots; pending deferred net checks run first."""
        self.database.runDeferredProcesses(self)
        return self._solderDots
    @property
    def index(self):
        """Spatial index of net segments and solder dots."""
        return self._index
    def pinAdded(self, pin):
        """Track a new schematic pin."""
        self.pins.add(pin)
    def pinRemoved(self, pin):
        """Stop tracking a schematic pin."""
        self.pins.remove(pin)
    def instanceAdded(self, instance):
        """Track a new component instance."""
        self.instances.add(instance)
    def instanceRemoved(self, instance):
        """Stop tracking a component instance."""
        self.instances.remove(instance)
    #def addNet(self, net):
    #    "call only from addToDiagram"
    #    self.nets.add(net)
    #def removeNet(self, net):
    #    "call only from removeFromDiagram"
    #    self.nets.remove(net)
    #@property
    #def nets(self):
    #    return self._nets #filter(lambda e: isinstance(e, CNet), self.elems)
    def netSegmentAdded(self, netSegment):
        """Index a new segment and schedule deferred net checks.

        Writes to _netSegments directly so the netSegments property's
        deferred processing is not re-triggered here.
        """
        #print self.__class__.__name__, "ns added", netSegment
        self.index.netSegmentAdded(netSegment)
        self._netSegments.add(netSegment) #don't trigger deferred processing
        #self._netSegmentsAdded.add(netSegment)
        self.database.requestDeferredProcessing(self)
        #self.splitNetSegment(netSegment)
        #for designUnit in self._designUnits:
        #    netSegment.addToDesignUnit(designUnit)
        #    if designUnit.scene():
        #        netSegment.addToView(designUnit.scene())
    def netSegmentRemoved(self, netSegment):
        """Drop a segment from the index and schedule net checks."""
        #print self.__class__.__name__, "ns removed", netSegment
        self.index.netSegmentRemoved(netSegment)
        self._netSegments.remove(netSegment) #don't trigger deferred processing
        #self._netSegmentsRemoved.add(netSegment)
        self.database.requestDeferredProcessing(self)
    def solderDotAdded(self, solderDot):
        """Index and track a new solder dot."""
        self.index.solderDotAdded(solderDot)
        #for designUnit in self._designUnits:
        #    #solderDot.addToDesignUnit(designUnit)
        #    if designUnit.scene():
        #        solderDot.addToView(designUnit.scene())
        self._solderDots.add(solderDot) #don't trigger deferred processing
    def solderDotRemoved(self, solderDot):
        """Drop a solder dot from the index and the tracking set."""
        self.index.solderDotRemoved(solderDot)
        self._solderDots.remove(solderDot) #don't trigger deferred processing
    def splitNetSegment(self, netSegment):
        """
        Check if (newly added) netSegment should be split or if it requires
        other net segments to split.
        """
        idx = self.index
        (p1, p2) = idx.coordsOfNetSegments[netSegment]
        n = 0
        #first split other segments
        for p in (p1, p2):
            segments = idx.netSegmentsMidPointsAt(p[0], p[1])
            for s in list(segments):
                if s in idx.coordsOfNetSegments:
                    #print "split ", s, p, idx.coordsOfNetSegments()[s]
                    s.splitAt(p)
                    n += 1
        #then, if necessary, split the netSegment
        for p in list(idx.netSegmentsEndPoints):
            if netSegment in idx.netSegmentsMidPointsAt(p[0], p[1]):
                #print "split ", netSegment, p
                netSegment.splitAt(p)
                n += 1
                break
        #print self.__class__.__name__, "split", n, "segments"
    def splitNetSegments(self):
        """
        Go through all net segments in the design unit and make sure that
        none of them crosses an end point (of a segment), an instance pin
        or a port.
        """
        idx = self.index
        n = 0
        for p in list(idx.netSegmentsEndPoints):
            segments = idx.netSegmentsMidPointsAt(p[0], p[1])
            for s in list(segments):
                if s in idx.coordsOfNetSegments:
                    #print "split ", s, p
                    s.splitAt(p)
                    n += 1
        #print self.__class__.__name__, "split", n, "segments"
    def mergeNetSegments(self):
        """
        Go through all net segments in the design unit and make sure that
        there are no two or more segments being just a continuation of each other.
        """
        idx = self.index
        n = 0
        for p in list(idx.netSegmentsEndPoints):
            segments = list(idx.netSegmentsEndPointsAt(p[0], p[1]))
            if len(segments) > 1:
                # merge only when every meeting segment has the same
                # orientation (a pure continuation, not a junction)
                if all(s.isHorizontal for s in segments) or \
                   all(s.isVertical for s in segments) or \
                   all(s.isDiagonal45 for s in segments) or \
                   all(s.isDiagonal135 for s in segments):
                    n += len(segments)
                    segments[0].mergeSegments(segments)
        #print self.__class__.__name__, "merged", n, "segments"
    def checkSolderDots(self):
        """
        Goes through all endpoints and counts the number of segments connected
        there.  If it is larger than 2, check whether a solder dot exists
        and if not, add it.
        """
        idx = self.index
        n = 0
        for p in list(idx.netSegmentsEndPoints):
            segments = idx.netSegmentsEndPointsAt(p[0], p[1])
            if len(segments) > 2:
                if len(idx.solderDotsAt(p[0], p[1])) == 0:
                    # NOTE(review): SolderDot presumably registers itself
                    # with this schematic in its constructor -- confirm
                    # in Primitives.
                    SolderDot(self, self.database.layers, p[0], p[1])
                    n += 1
        #print self.__class__.__name__, "added", n, "solder dots"
    def checkNets(self):
        """Run the full net consistency pass: split, merge, solder dots."""
        self.splitNetSegments()
        self.mergeNetSegments()
        self.checkSolderDots()
    def runDeferredProcess(self):
        """
        Runs deferred processes of the Schematic class.
        Do not call it directly; use Database.runDeferredProcesses(object).
        """
        self.checkNets()
    def __repr__(self):
        return "<Schematic '" + self.path + "'>"
class Symbol(Diagram):
    """Symbol cell view: a Diagram extended with symbol pins."""

    def __init__(self, name, cell):
        """Initialize the diagram part and the symbol pin container."""
        Diagram.__init__(self, name, cell)
        self._symbolPins = set()

    def designUnitAdded(self, designUnit):
        """Register a design unit that displays this symbol."""
        self._designUnits.add(designUnit)

    @property
    def elems(self):
        """All drawing elements of the symbol, pins included."""
        return self.lines.union(
            self.rects, self.labels, self.attributeLabels,
            self.customPaths, self.ellipses, self.ellipseArcs,
            self.symbolPins)

    @property
    def symbolPins(self):
        """Set of pins attached to this symbol."""
        return self._symbolPins

    def symbolPinAdded(self, symbolPin):
        """Track a new symbol pin."""
        self._symbolPins.add(symbolPin)

    def symbolPinRemoved(self, symbolPin):
        """Stop tracking a symbol pin."""
        self._symbolPins.remove(symbolPin)

    def __repr__(self):
        return "<Symbol '%s'>" % self.path
class Netlist(CellView):
    """Netlist cell view; currently a bare CellView specialization."""

    def __init__(self, name, cell):
        """Create the netlist view and register it with *cell*."""
        CellView.__init__(self, name, cell)

    def __repr__(self):
        return "<Netlist '%s'>" % self.path
| [
"# -*- coding: utf-8 -*-\n",
"\n",
"# Copyright (C) 2009 PSchem Contributors (see CONTRIBUTORS for details)\n",
"\n",
"# This file is part of PSchem Database\n",
" \n",
"# PSchem is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU Lesser General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"\n",
"# PSchem is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU Lesser General Public License for more details.\n",
"\n",
"# You should have received a copy of the GNU Lesser General Public License\n",
"# along with PSchem Database. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"#print 'CellViews in'\n",
"\n",
"from Index import Index\n",
"from Primitives import *\n",
"#from Design import *\n",
"from xml.etree import ElementTree as et\n",
"\n",
"#print 'CellViews out'\n",
"\n",
"class CellView():\n",
" def __init__(self, name, cell):\n",
" self._name = name\n",
" self._attribs = {}\n",
" self._cell = cell\n",
" cell.cellViewAdded(self)\n",
" \n",
" @property\n",
" def name(self):\n",
" return self._name\n",
" \n",
" @property\n",
" def path(self):\n",
" return self.cell.path + '/' + self.name\n",
"\n",
" @property\n",
" def cell(self):\n",
" return self._cell\n",
"\n",
" @property\n",
" def attributes(self):\n",
" return self._attribs\n",
"\n",
" @property\n",
" def library(self):\n",
" return self.cell.library\n",
" \n",
" @property\n",
" def database(self):\n",
" return self.cell.database\n",
" \n",
" def save(self):\n",
" pass\n",
" \n",
" def restore(self):\n",
" pass\n",
"\n",
" def remove(self):\n",
" #for a in list(self.attributes):\n",
" # a.remove()\n",
" self.cell.cellViewRemoved(self)\n",
" self.cell = None\n",
"\n",
" def __repr__(self):\n",
" return \"<CellView '\" + self.path + \"'>\"\n",
"\n",
"\n",
"class Diagram(CellView):\n",
" def __init__(self, name, cell):\n",
" CellView.__init__(self, name, cell)\n",
" #self._elems = set()\n",
" self._items = set()\n",
" self._lines = set()\n",
" self._rects = set()\n",
" self._customPaths = set()\n",
" self._ellipses = set()\n",
" self._ellipseArcs = set()\n",
" self._labels = set()\n",
" self._attributeLabels = set()\n",
" #self._uu = 160 # default DB units per user units\n",
" self._attribs['uu'] = 160 # default DB units per user units\n",
" #self._name = 'diagram'\n",
" self._designUnits = set()\n",
"\n",
" @property\n",
" def designUnits(self):\n",
" return self._designUnits\n",
"\n",
" @property\n",
" def items(self):\n",
" return self._items\n",
" \n",
" @property\n",
" def elems(self):\n",
" return self.lines | self.rects | self.labels | \\\n",
" self.attributeLabels | self.customPaths | \\\n",
" self.ellipses | self.ellipseArcs\n",
" \n",
" @property\n",
" def lines(self):\n",
" return self._lines\n",
"\n",
" @property\n",
" def rects(self):\n",
" return self._rects\n",
"\n",
" @property\n",
" def customPaths(self):\n",
" return self._customPaths\n",
"\n",
" @property\n",
" def ellipses(self):\n",
" return self._ellipses\n",
"\n",
" @property\n",
" def ellipseArcs(self):\n",
" return self._ellipseArcs\n",
"\n",
" @property\n",
" def labels(self):\n",
" return self._labels\n",
"\n",
" @property\n",
" def attributeLabels(self):\n",
" return self._attributeLabels\n",
"\n",
" @property\n",
" def uu(self):\n",
" return self._attribs['uu']\n",
"\n",
" @uu.setter\n",
" def uu(self, uu):\n",
" self._attribs['uu'] = uu\n",
"\n",
" def instanceItemAdded(self, view):\n",
" self.items.add(view)\n",
" for elem in self.elems:\n",
" elem.addToView(view)\n",
" #for designUnit in self._designUnits:\n",
" # elem.addToDesignUnit(designUnit)\n",
" \n",
" def instanceItemRemoved(self, view):\n",
" self.items.remove(view)\n",
" for elem in self.elems:\n",
" elem.removeFromView()\n",
" \n",
" def designUnitAdded(self, designUnit):\n",
" self.designUnits.add(designUnit)\n",
" scene = designUnit.scene()\n",
" for e in self.elems:\n",
" e.addToView(scene)\n",
"\n",
" def designUnitRemoved(self, designUnit):\n",
" self.designUnits.remove(designUnit)\n",
" \n",
" #def updateDesignUnits(self):\n",
" # for d in self._designUnits:\n",
" # d.updateDesignUnit()\n",
" # #v.updateItem()\n",
"\n",
" def elementAdded(self, elem):\n",
" pass\n",
" #for designUnit in self._designUnits:\n",
" # elem.addToDesignUnit(designUnit)\n",
"\n",
" def elementChanged(self, elem):\n",
" pass\n",
" #for designUnit in self._designUnits:\n",
" # elem.addToDesignUnit(designUnit)\n",
"\n",
" def elementRemoved(self, elem):\n",
" pass\n",
" #for designUnit in self._designUnits:\n",
" # elem.removeFromDesignUnit(designUnit)\n",
" \n",
" #def addElem(self, elem):\n",
" # \"main entry point for adding new elements to diagram\"\n",
" # #self._elems.add(elem)\n",
" # elem.addToDiagram(self)\n",
" # for designUnit in self._designUnits:\n",
" # elem.addToDesignUnit(designUnit)\n",
"\n",
" #def removeElem(self, elem):\n",
" # \"main entry point for removing elements from diagram\"\n",
" # for designUnit in self._designUnits:\n",
" # elem.removeFromDesignUnit(designUnit)\n",
" # elem.removeFromDiagram(self)\n",
"\n",
" def lineAdded(self, line):\n",
" self.lines.add(line)\n",
" \n",
" def lineRemoved(self, line):\n",
" self.lines.remove(line)\n",
" \n",
" def rectAdded(self, rect):\n",
" self.rects.add(rect)\n",
" \n",
" def rectRemoved(self, rect):\n",
" self.rects.remove(rect)\n",
" \n",
" def customPathAdded(self, customPath):\n",
" self.customPaths.add(customPath)\n",
" \n",
" def customPathRemoved(self, customPath):\n",
" self.customPaths.remove(customPath)\n",
" \n",
" def ellipseAdded(self, ellipse):\n",
" self.ellipses.add(ellipse)\n",
" \n",
" def ellipseRemoved(self, ellipse):\n",
" self.ellipses.remove(ellipse)\n",
" \n",
" def ellipseArcAdded(self, ellipseArc):\n",
" self.ellipseArcs.add(ellipseArc)\n",
" \n",
" def ellipseArcRemoved(self, ellipseArc):\n",
" self.ellipseArcs.remove(ellipseArc)\n",
" \n",
" def labelAdded(self, label):\n",
" self.labels.add(label)\n",
" \n",
" def labelRemoved(self, label):\n",
" self.labels.remove(label)\n",
" \n",
" def attributeLabelAdded(self, attributeLabel):\n",
" self.attributeLabels.add(attributeLabel)\n",
" \n",
" def attributeLabelRemoved(self, attributeLabel):\n",
" self.attributeLabels.remove(attributeLabel)\n",
" \n",
" def remove(self):\n",
" for e in list(self.elems):\n",
" e.remove()\n",
" #self.removeElem(e)\n",
" for du in list(self.designUnits):\n",
" du.remove()\n",
" #self.removeDesignUnit(o)\n",
" CellView.remove(self)\n",
"\n",
" def save(self):\n",
" root = et.Element(self.name)\n",
" tree = et.ElementTree(root)\n",
" for a in sorted(self.attributes):\n",
" root.attrib[str(a)] = str(self.attributes[a])\n",
" for e in sorted(self.elems, key=Element.name):\n",
" xElem = e.toXml()\n",
" root.append(xElem)\n",
" self._indentET(tree.getroot())\n",
" et.dump(tree)\n",
" #return tree\n",
" \n",
" def restore(self):\n",
" pass\n",
" \n",
" def _indentET(self, elem, level=0):\n",
" i = \"\\n\" + level*\" \"\n",
" if len(elem):\n",
" if not elem.text or not elem.text.strip():\n",
" elem.text = i + \" \"\n",
" if not elem.tail or not elem.tail.strip():\n",
" elem.tail = i\n",
" for elem in elem:\n",
" self._indentET(elem, level+1)\n",
" if not elem.tail or not elem.tail.strip():\n",
" elem.tail = i\n",
" else:\n",
" if level and (not elem.tail or not elem.tail.strip()):\n",
" elem.tail = i\n",
"\n",
" def __repr__(self):\n",
" return \"<Diagram '\" + self.path + \"'>\"\n",
"\n",
" \n",
"class Schematic(Diagram):\n",
" def __init__(self, name, cell):\n",
" Diagram.__init__(self, name, cell)\n",
" #self._name = 'schematic'\n",
" self._pins = set()\n",
" self._instances = set()\n",
" self._netSegments = set()\n",
" self._solderDots = set()\n",
" self._nets = set()\n",
" self._index = Index()\n",
" \n",
" #self._netSegmentsAdded = set()\n",
" #self._netSegmentsRemoved = set()\n",
" \n",
" def designUnitAdded(self, designUnit):\n",
" self.designUnits.add(designUnit)\n",
" #scene = designUnit.scene\n",
" #for e in self.elems-self.instances:\n",
" #for e in self.elems:\n",
" # e.addToView(scene)\n",
" #for i in self.instances():\n",
" # i.addToView(designUnit)\n",
" #for ns in self.netSegments():\n",
" # ns.addToDesignUnit(designUnit)\n",
" #designUnit.checkNets()\n",
"\n",
" #def components(self):\n",
" # components = map(lambda i: i.cell(), self.instances())\n",
" # return components.sort()\n",
"\n",
" @property\n",
" def elems(self):\n",
" return self.lines | self.rects | self.labels | \\\n",
" self.attributeLabels | self.customPaths | \\\n",
" self.ellipses | self.ellipseArcs | \\\n",
" self.pins | self.instances | self.netSegments | self.solderDots\n",
"\n",
" @property\n",
" def pins(self):\n",
" return self._pins\n",
"\n",
" @property\n",
" def instances(self):\n",
" return self._instances\n",
"\n",
" @property\n",
" def netSegments(self):\n",
" self.database.runDeferredProcesses(self)\n",
" return self._netSegments\n",
"\n",
" @property\n",
" def solderDots(self):\n",
" self.database.runDeferredProcesses(self)\n",
" return self._solderDots\n",
"\n",
" @property\n",
" def index(self):\n",
" return self._index\n",
"\n",
" def pinAdded(self, pin):\n",
" self.pins.add(pin)\n",
" \n",
" def pinRemoved(self, pin):\n",
" self.pins.remove(pin)\n",
" \n",
" def instanceAdded(self, instance):\n",
" self.instances.add(instance)\n",
" \n",
" def instanceRemoved(self, instance):\n",
" self.instances.remove(instance)\n",
" \n",
" #def addNet(self, net):\n",
" # \"call only from addToDiagram\"\n",
" # self.nets.add(net)\n",
" \n",
" #def removeNet(self, net):\n",
" # \"call only from removeFromDiagram\"\n",
" # self.nets.remove(net)\n",
" \n",
" #@property\n",
" #def nets(self):\n",
" # return self._nets #filter(lambda e: isinstance(e, CNet), self.elems)\n",
"\n",
" def netSegmentAdded(self, netSegment):\n",
" #print self.__class__.__name__, \"ns added\", netSegment\n",
" self.index.netSegmentAdded(netSegment)\n",
" self._netSegments.add(netSegment) #don't trigger deferred processing\n",
" #self._netSegmentsAdded.add(netSegment)\n",
" self.database.requestDeferredProcessing(self)\n",
" #self.splitNetSegment(netSegment)\n",
" #for designUnit in self._designUnits:\n",
" # netSegment.addToDesignUnit(designUnit)\n",
" # if designUnit.scene():\n",
" # netSegment.addToView(designUnit.scene())\n",
" \n",
" def netSegmentRemoved(self, netSegment):\n",
" #print self.__class__.__name__, \"ns removed\", netSegment\n",
" self.index.netSegmentRemoved(netSegment)\n",
" self._netSegments.remove(netSegment) #don't trigger deferred processing\n",
" #self._netSegmentsRemoved.add(netSegment)\n",
" self.database.requestDeferredProcessing(self)\n",
" \n",
" def solderDotAdded(self, solderDot):\n",
" self.index.solderDotAdded(solderDot)\n",
" #for designUnit in self._designUnits:\n",
" # #solderDot.addToDesignUnit(designUnit)\n",
" # if designUnit.scene():\n",
" # solderDot.addToView(designUnit.scene())\n",
" self._solderDots.add(solderDot) #don't trigger deferred processing\n",
" \n",
" def solderDotRemoved(self, solderDot):\n",
" self.index.solderDotRemoved(solderDot)\n",
" self._solderDots.remove(solderDot) #don't trigger deferred processing\n",
" \n",
" def splitNetSegment(self, netSegment):\n",
" \"\"\"\n",
" Check if (newly added) netSegment should be split or if it requires\n",
" other net segments to split.\n",
" \"\"\"\n",
" idx = self.index\n",
" (p1, p2) = idx.coordsOfNetSegments[netSegment]\n",
" n = 0\n",
" #first split other segments\n",
" for p in (p1, p2):\n",
" segments = idx.netSegmentsMidPointsAt(p[0], p[1])\n",
" for s in list(segments):\n",
" if s in idx.coordsOfNetSegments:\n",
" #print \"split \", s, p, idx.coordsOfNetSegments()[s]\n",
" s.splitAt(p)\n",
" n += 1\n",
" #then, if necessary, split the netSegment\n",
" for p in list(idx.netSegmentsEndPoints):\n",
" if netSegment in idx.netSegmentsMidPointsAt(p[0], p[1]):\n",
" #print \"split \", netSegment, p\n",
" netSegment.splitAt(p)\n",
" n += 1\n",
" break\n",
" #print self.__class__.__name__, \"split\", n, \"segments\"\n",
"\n",
" def splitNetSegments(self):\n",
" \"\"\"\n",
" Go through all net segments in the design unit and make sure that\n",
" none of them crosses an end point (of a segment), an instance pin\n",
" or a port.\n",
" \"\"\"\n",
" idx = self.index\n",
" n = 0\n",
" for p in list(idx.netSegmentsEndPoints):\n",
" segments = idx.netSegmentsMidPointsAt(p[0], p[1])\n",
" for s in list(segments):\n",
" if s in idx.coordsOfNetSegments:\n",
" #print \"split \", s, p\n",
" s.splitAt(p)\n",
" n += 1\n",
" #print self.__class__.__name__, \"split\", n, \"segments\"\n",
" \n",
" def mergeNetSegments(self):\n",
" \"\"\"\n",
" Go through all net segments in the design unit and make sure that\n",
" there are no two or more segments being just a continuation of each other.\n",
" \"\"\"\n",
" idx = self.index\n",
" n = 0\n",
" for p in list(idx.netSegmentsEndPoints):\n",
" segments = list(idx.netSegmentsEndPointsAt(p[0], p[1]))\n",
" if len(segments) > 1:\n",
" if all(s.isHorizontal for s in segments) or \\\n",
" all(s.isVertical for s in segments) or \\\n",
" all(s.isDiagonal45 for s in segments) or \\\n",
" all(s.isDiagonal135 for s in segments):\n",
" n += len(segments)\n",
" segments[0].mergeSegments(segments)\n",
" #print self.__class__.__name__, \"merged\", n, \"segments\"\n",
" \n",
" def checkSolderDots(self):\n",
" \"\"\"\n",
" Goes through all endpoints and counts the number of segments connected there.\n",
" If it larger than 2 check if a solder dot exists\n",
" and if not, add it.\n",
" \"\"\"\n",
" idx = self.index\n",
" n = 0\n",
" for p in list(idx.netSegmentsEndPoints):\n",
" segments = idx.netSegmentsEndPointsAt(p[0], p[1])\n",
" if len(segments) > 2:\n",
" if len(idx.solderDotsAt(p[0], p[1])) == 0:\n",
" SolderDot(self, self.database.layers, p[0], p[1])\n",
" n += 1\n",
" #print self.__class__.__name__, \"added\", n, \"solder dots\"\n",
" \n",
" def checkNets(self):\n",
" self.splitNetSegments()\n",
" self.mergeNetSegments()\n",
" self.checkSolderDots()\n",
"\n",
" def runDeferredProcess(self):\n",
" \"\"\"\n",
" Runs deferred processes of the Schematic class.\n",
" Do not call it directly, Use Database.runDeferredProcesses(object)\n",
" \"\"\"\n",
" self.checkNets()\n",
" \n",
" def __repr__(self):\n",
" return \"<Schematic '\" + self.path + \"'>\"\n",
"\n",
"class Symbol(Diagram):\n",
" def __init__(self, name, cell):\n",
" Diagram.__init__(self, name, cell)\n",
" #self._name = 'symbol'\n",
" self._symbolPins = set()\n",
"\n",
" def designUnitAdded(self, designUnit):\n",
" self.designUnits.add(designUnit)\n",
" #scene = designUnit.scene\n",
" #for e in self.elems:\n",
" # e.addToView(scene)\n",
"\n",
" @property\n",
" def elems(self):\n",
" return self.lines | self.rects | self.labels | \\\n",
" self.attributeLabels | self.customPaths | \\\n",
" self.ellipses | self.ellipseArcs | \\\n",
" self.symbolPins\n",
" \n",
" @property\n",
" def symbolPins(self):\n",
" return self._symbolPins\n",
"\n",
" def symbolPinAdded(self, symbolPin):\n",
" self.symbolPins.add(symbolPin)\n",
" \n",
" def symbolPinRemoved(self, symbolPin):\n",
" self.symbolPins.remove(symbolPin)\n",
" \n",
" def __repr__(self):\n",
" return \"<Symbol '\" + self.path + \"'>\"\n",
"\n",
"class Netlist(CellView):\n",
" def __init__(self, name, cell):\n",
" CellView.__init__(self, name, cell)\n",
" #self._name = 'netlist'\n",
"\n",
" def __repr__(self):\n",
" return \"<Netlist '\" + self.path + \"'>\"\n",
"\n"
] | [
0,
0,
0,
0,
0,
0.5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0.045454545454545456,
0,
0,
0.043478260869565216,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0.014705882352941176,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0.07692307692307693,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0.021739130434782608,
0,
0.1111111111111111,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0.03125,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.1111111111111111,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.025,
0.023809523809523808,
0.1111111111111111,
0,
0,
0.029411764705882353,
0.022222222222222223,
0.03333333333333333,
0,
0.027777777777777776,
0,
0.02564102564102564,
0,
0.03125,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.125,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0.03571428571428571,
0,
0,
0.1111111111111111,
0.03225806451612903,
0,
0,
0.1111111111111111,
0.06666666666666667,
0.047619047619047616,
0,
0,
0,
0.015873015873015872,
0,
0.025974025974025976,
0.020833333333333332,
0,
0.023809523809523808,
0.021739130434782608,
0,
0,
0,
0.1111111111111111,
0,
0.015384615384615385,
0,
0.025,
0.02,
0,
0.1111111111111111,
0,
0,
0.021739130434782608,
0,
0,
0,
0.02666666666666667,
0.1111111111111111,
0,
0,
0.02564102564102564,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0.013888888888888888,
0,
0,
0.02,
0,
0,
0.02127659574468085,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0.015873015873015872,
0.1111111111111111,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016666666666666666,
0.023255813953488372,
0,
0.015625,
0.1111111111111111,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015151515151515152,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.043478260869565216,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0.029411764705882353,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0.125,
0,
0,
0.1111111111111111,
0,
0,
0,
0.04,
0,
0,
0.03125,
0,
0,
0,
1
] | 526 | 0.015904 | false |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import os
import os.path
import sys
from Utils import HandlerUtil
from CommandExecuter import CommandExecuter
from Common import CommonVariables
class CronUtil(object):
    """Utility for maintaining the extension's daily '-chkrdma' cron job."""

    def __init__(self, logger):
        """Store the handler logger and the crontab/service constants."""
        self.logger = logger
        self.crontab = '/etc/crontab'
        self.cron_restart_cmd = 'service cron restart'

    def check_update_cron_config(self):
        """Ensure /etc/crontab contains exactly one entry that runs
        'main/handle.py -chkrdma' daily at midnight.

        Any existing crontab line containing '<script> -chkrdma' is
        dropped before the fresh entry is appended; the file is
        replaced atomically via the waagent helper.
        """
        script_file_path = os.path.realpath(sys.argv[0])
        script_dir = os.path.dirname(script_file_path)
        script_file = os.path.basename(script_file_path)
        # Marker used to find (and remove) any previously installed entry.
        old_line_end = ' '.join([script_file, '-chkrdma'])
        new_line = ' '.join(['\n0 0 * * *', 'root cd', script_dir + "/..", '&& python main/handle.py -chkrdma >/dev/null 2>&1\n'])
        HandlerUtil.waagent.ReplaceFileContentsAtomic(
            self.crontab,
            '\n'.join(filter(lambda a: a and (old_line_end not in a),
                             HandlerUtil.waagent.GetFileContents(self.crontab).split('\n'))) + new_line)

    def restart_cron(self):
        """Restart the cron service; log an error if the command fails."""
        commandExecuter = CommandExecuter(self.logger)
        returnCode = commandExecuter.Execute(self.cron_restart_cmd)
        if(returnCode != CommonVariables.process_success):
            # Bug fix: the original logged an empty message, making
            # cron-restart failures invisible in the handler log.
            self.logger.log(msg="restart cron failed with return code: {0}".format(returnCode),
                            level=CommonVariables.ErrorLevel)
| [
"#!/usr/bin/env python\n",
"#\n",
"# VM Backup extension\n",
"#\n",
"# Copyright 2014 Microsoft Corporation\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"#\n",
"# Requires Python 2.7+\n",
"#\n",
"import os\n",
"import os.path\n",
"import sys\n",
"from Utils import HandlerUtil\n",
"from CommandExecuter import CommandExecuter\n",
"from Common import CommonVariables\n",
"class CronUtil(object):\n",
" \"\"\"description of class\"\"\"\n",
" def __init__(self,logger):\n",
" self.logger = logger\n",
" self.crontab = '/etc/crontab'\n",
" self.cron_restart_cmd = 'service cron restart'\n",
"\n",
" def check_update_cron_config(self):\n",
" script_file_path = os.path.realpath(sys.argv[0])\n",
" script_dir = os.path.dirname(script_file_path)\n",
" script_file = os.path.basename(script_file_path)\n",
" old_line_end = ' '.join([script_file, '-chkrdma'])\n",
"\n",
" new_line = ' '.join(['\\n0 0 * * *', 'root cd', script_dir + \"/..\", '&& python main/handle.py -chkrdma >/dev/null 2>&1\\n'])\n",
"\n",
" HandlerUtil.waagent.ReplaceFileContentsAtomic(self.crontab, \\\n",
" '\\n'.join(filter(lambda a: a and (old_line_end not in a), HandlerUtil.waagent.GetFileContents(self.crontab).split('\\n')))+ new_line)\n",
" \n",
" def restart_cron(self):\n",
" commandExecuter = CommandExecuter(self.logger)\n",
" returnCode = commandExecuter.Execute(self.cron_restart_cmd)\n",
" if(returnCode != CommonVariables.process_success):\n",
" self.logger.log(msg=\"\",level=CommonVariables.ErrorLevel)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007633587786259542,
0,
0.014285714285714285,
0.020689655172413793,
0.2,
0,
0,
0,
0,
0.014492753623188406
] | 49 | 0.006756 | false |
#!/usr/bin/env python3.2
import http.client
#class InvalidResponse(http.client.HTTPException):
# """ Exception raised when the Closure Compilder URL
# returned an HTTP return code other than 200 """
# def __init__(self, code, message):
# self.code = code
# self.message = message
# def __str__(self):
# return "%s : %s" % (self.code, self.message)
# That kinda sucks (see notes about the state var in ClosureResult's constructor)... Better idea anyone ?
class _Namespace: pass
class ServerError(http.client.HTTPException):
    """Raised when the Closure Compiler service reports server-side errors.

    The ``errors`` attribute carries the error data extracted from the
    service's XML response.
    """

    def __init__(self, data):
        # Keep the raw server-error payload for callers to inspect.
        self.errors = data
class ClosureMessage:
    """A single diagnostic (error or warning) reported by the Closure Compiler."""

    def __init__(self, line, char, description, text, id, file, is_error):
        # Source position of the diagnostic.
        self.line = line
        self.char = char
        # Human-readable description and the offending source line.
        self.description = description
        self.text = text
        # Compiler-assigned identifier, originating file, and severity flag.
        self.id = id
        self.file = file
        self.is_error = is_error

    def __str__(self):
        # Caret line pointing at the offending character.
        marker = ' ' * self.char + '^'
        return """%s: %s at line %d character %d\n%s\n%s""" % (
            self.id, self.description, self.line, self.char, self.text, marker)
class ClosureResult:
    """Closure Compiler output parsed from the service's XML response.

    Attributes:
        errors / warnings: lists of ClosureMessage instances.
        compiled_code: the minified script, or None if absent.
        original_size / compressed_size / compile_time: statistics (ints).

    Raises:
        ServerError: if the response contains a <serverErrors> section.
    """
    def __init__(self, closure_message):
        import xml.parsers.expat

        # Initialize result state
        self.errors = []
        self.warnings = []
        self.compiled_code = None
        self.original_size = 0
        self.compressed_size = 0
        self.compile_time = 0

        # Parsing state shared with the nested expat handlers; a mutable
        # namespace lets xmlstart rebind fields without nonlocal gymnastics.
        state = _Namespace()
        state.current_section_type = None
        state.current_section_attrs = None
        state.server_error = False
        state.server_errors_list = []

        # XML handler functions
        def xmlstart(name, attrs):
            state.current_section_type = name
            state.current_section_attrs = attrs

        def xmlcontent(text):
            # NOTE(review): no EndElementHandler is registered, so text after
            # a closing tag is still credited to the last-opened element
            # (e.g. inter-tag whitespace can land in compiled_code) — confirm
            # against real service responses.
            if state.current_section_type == "error":
                if state.server_error:
                    # BUG FIX: list.append takes exactly one argument; the
                    # original passed two and raised TypeError. Store the
                    # (code, message) pair as a tuple instead.
                    state.server_errors_list.append(
                        (state.current_section_attrs['code'], text))
                else:
                    csa = state.current_section_attrs
                    self.errors.append(ClosureMessage(
                                            int(csa["lineno"]),
                                            int(csa['charno']),
                                            text,
                                            csa['line'],
                                            csa['type'],
                                            csa['file'],
                                            True))
            elif state.current_section_type == "warning":
                csa = state.current_section_attrs
                self.warnings.append(ClosureMessage(
                                        int(csa["lineno"]),
                                        int(csa['charno']),
                                        text,
                                        csa['line'],
                                        csa['type'],
                                        csa['file'],
                                        False))
            elif state.current_section_type == "compiledCode":
                # compiledCode may arrive in multiple character-data chunks.
                self.compiled_code = self.compiled_code or ''
                self.compiled_code += text
            elif state.current_section_type == "serverErrors":
                state.server_error = True
            elif state.current_section_type == "originalSize":
                self.original_size = int(text)
            elif state.current_section_type == "compressedSize":
                self.compressed_size = int(text)
            elif state.current_section_type == "compileTime":
                self.compile_time = int(text)

        parser = xml.parsers.expat.ParserCreate()
        parser.StartElementHandler = xmlstart
        parser.CharacterDataHandler = xmlcontent
        parser.Parse(closure_message)

        # BUG FIX: this check used to run BEFORE parser.Parse(), so
        # server_error could never be True and ServerError was never raised.
        if state.server_error:
            raise ServerError(state.server_errors_list)
def compile(files, debug=False):
    """ Shrinks the specified list of Javascript files into a single one.

    If the debug parameter is False, the script is processed by Google's
    Closure Compiler service, with a SIMPLE_OPTIMIZATIONS compilation level.

    If the debug parameter is True, then the files will only be joined
    without any minimizing. This was made to allow easier debugging during
    the development process. Note that this will still call the Closure
    Compiler service in order to get the statistics, warnings and errors.
    The compiled script, however, will not be the script minimized by
    the compiler.

    Returns a ClosureResult; may raise ServerError (via ClosureResult)
    or urllib.error.URLError on network failure.
    """
    import urllib.parse
    import urllib.request

    # Allow a single path to be passed as a bare string.
    if isinstance(files, str):
        files = files,

    # Static parameters
    params = [("compilation_level", "SIMPLE_OPTIMIZATIONS"),
              ("output_format", "xml"), ("output_info", "errors"),
              ("output_info", "warnings"), ("output_info", "compiled_code"),
              ("output_info", "statistics")]

    # Source files — BUG FIX: close each handle promptly instead of leaking
    # file objects opened by a bare open(f).read().
    for path in files:
        with open(path) as source:
            params.append(("js_code", source.read()))
    params = urllib.parse.urlencode(params).encode('utf-8')

    # Submit to the service and make sure the connection is released.
    response = urllib.request.urlopen("http://closure-compiler.appspot.com/compile", params)
    try:
        payload = response.read().decode('utf-8')
    finally:
        response.close()

    result = ClosureResult(payload)

    # In debug mode keep the unminified concatenation; statistics and
    # diagnostics above still come from the real compiler run.
    if not result.errors and debug:
        parts = []
        for path in files:
            with open(path) as source:
                parts.append(source.read())
        result.compiled_code = ''.join(parts)

    return result
"#!/usr/bin/env python3.2\r\n",
"\r\n",
"import http.client\r\n",
"\r\n",
"#class InvalidResponse(http.client.HTTPException):\r\n",
"# \"\"\" Exception raised when the Closure Compilder URL\r\n",
"# returned an HTTP return code other than 200 \"\"\"\r\n",
"# def __init__(self, code, message):\r\n",
"# self.code = code\r\n",
"# self.message = message\r\n",
"\r\n",
"# def __str__(self):\r\n",
"# return \"%s : %s\" % (self.code, self.message)\r\n",
"\r\n",
"# That kinda sucks (see notes about the state var in ClosureResult's constructor)... Better idea anyone ?\r\n",
"class _Namespace: pass\r\n",
"\r\n",
"class ServerError(http.client.HTTPException):\r\n",
" \"\"\" Exception raised when the Closure Compiler \r\n",
" returned a server error \r\n",
" \r\n",
" \"\"\"\r\n",
" def __init__(self, data):\r\n",
" self.errors = data\r\n",
"\r\n",
"class ClosureMessage:\r\n",
" \"\"\" Represents a message sent by the Closure Compiler \"\"\"\r\n",
" def __init__(self, line, char, description, text, id, file, is_error):\r\n",
" self.line = line\r\n",
" self.char = char\r\n",
" self.description = description\r\n",
" self.text = text\r\n",
" self.id = id\r\n",
" self.file = file\r\n",
" self.is_error = is_error\r\n",
"\r\n",
" def __str__(self):\r\n",
" char_marker = ''.join(' ' for i in range(self.char)) + '^'\r\n",
" return \"\"\"%s: %s at line %d character %d\\n%s\\n%s\"\"\" % (self.id, self.description, self.line, self.char, self.text, char_marker)\r\n",
"\r\n",
"class ClosureResult:\r\n",
" \"\"\" Contains the Closure Compiler output, returned by \r\n",
" a call to the compile method\r\n",
" \r\n",
" \"\"\"\r\n",
" def __init__(self, closure_message):\r\n",
" import xml.parsers.expat\r\n",
"\r\n",
" # Initialize state\r\n",
" self.errors = []\r\n",
" self.warnings = []\r\n",
" self.compiled_code = None\r\n",
" self.original_size = 0\r\n",
" self.compressed_size = 0\r\n",
" self.compile_time = 0\r\n",
"\r\n",
" # Reading state holders\r\n",
" # Here I made this state object so xmlstart can modify \r\n",
" # current_section_type without creating its own local value\r\n",
" # I don't really like creating a class just for that purpose,\r\n",
" # but I don't see any other way yet\r\n",
" state = _Namespace()\r\n",
" state.current_section_type = None\r\n",
" state.current_section_attrs = None\r\n",
" state.server_error = False\r\n",
" state.server_errors_list = []\r\n",
"\r\n",
" # XML handler functions\r\n",
" def xmlstart(name, attrs):\r\n",
" state.current_section_type = name\r\n",
" state.current_section_attrs = attrs\r\n",
"\r\n",
" def xmlcontent(text):\r\n",
" if state.current_section_type == \"error\":\r\n",
" if state.server_error:\r\n",
" state.server_errors_list.append(state.current_section_attrs['code'], text)\r\n",
" else:\r\n",
" csa = state.current_section_attrs\r\n",
" self.errors.append(ClosureMessage(\r\n",
" int(csa[\"lineno\"]),\r\n",
" int(state.current_section_attrs['charno']),\r\n",
" text,\r\n",
" state.current_section_attrs['line'],\r\n",
" state.current_section_attrs['type'],\r\n",
" state.current_section_attrs['file'],\r\n",
" True))\r\n",
" elif state.current_section_type == \"warning\":\r\n",
" self.warnings.append(ClosureMessage(\r\n",
" int(state.current_section_attrs[\"lineno\"]),\r\n",
" int(state.current_section_attrs['charno']),\r\n",
" text,\r\n",
" state.current_section_attrs['line'],\r\n",
" state.current_section_attrs['type'],\r\n",
" state.current_section_attrs['file'],\r\n",
" False))\r\n",
" elif state.current_section_type == \"compiledCode\":\r\n",
" self.compiled_code = self.compiled_code or ''\r\n",
" self.compiled_code += text\r\n",
" elif state.current_section_type == \"serverErrors\":\r\n",
" state.server_error = True\r\n",
" elif state.current_section_type == \"originalSize\":\r\n",
" self.original_size = int(text)\r\n",
" elif state.current_section_type == \"compressedSize\":\r\n",
" self.compressed_size = int(text)\r\n",
" elif state.current_section_type == \"compileTime\":\r\n",
" self.compile_time = int(text)\r\n",
"\r\n",
" #self.section_managers[state.current_section_type](text)\r\n",
"\r\n",
" if state.server_error:\r\n",
" raise ServerError(state.server_errors_list)\r\n",
"\r\n",
" parser = xml.parsers.expat.ParserCreate()\r\n",
"\r\n",
" parser.StartElementHandler = xmlstart\r\n",
" parser.CharacterDataHandler = xmlcontent\r\n",
"\r\n",
" parser.Parse(closure_message)\r\n",
"\r\n",
"\r\n",
"\r\n",
"def compile(files, debug=False):\r\n",
" \"\"\" Shrinks the specified list of Javascript files into a single one.\r\n",
"\r\n",
" If the debug parameter is false, the script is processed by Google's\r\n",
" Closure Compiler service, with a SIMPLE_OPTIMIZATIONS compilation level.\r\n",
" \r\n",
" If the debug parameter is True, then the files will only be joined\r\n",
" without any minimizing. This was made to allow easier debugging during\r\n",
" the developpement process. Note that this will still call the Closure\r\n",
" Compiler service in order to get the statistics, warnings and errors.\r\n",
" The compiled script, however, will not be the script minimized by \r\n",
" the compiler.\r\n",
" \r\n",
" \"\"\"\r\n",
" import urllib.parse\r\n",
" import urllib.request\r\n",
"\r\n",
" if isinstance(files, str):\r\n",
" files = files,\r\n",
"\r\n",
" # Static parameters\r\n",
" params = [(\"compilation_level\", \"SIMPLE_OPTIMIZATIONS\"),\r\n",
" (\"output_format\", \"xml\"), (\"output_info\", \"errors\"),\r\n",
" (\"output_info\", \"warnings\"), (\"output_info\", \"compiled_code\"), \\\r\n",
" (\"output_info\", \"statistics\")]\r\n",
" # Source files\r\n",
" params.extend((\"js_code\", open(f).read()) for f in files)\r\n",
" params = urllib.parse.urlencode(params)\r\n",
" params = params.encode('utf-8');\r\n",
"\r\n",
" # Setting up the request\r\n",
" response = urllib.request.urlopen(\"http://closure-compiler.appspot.com/compile\", params)\r\n",
"\r\n",
" result = ClosureResult(response.read().decode('utf-8'))\r\n",
"\r\n",
" if not result.errors and debug:\r\n",
" result.compiled_code = ''.join(open(f).read() for f in files)\r\n",
"\r\n",
" return result"
] | [
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0.08333333333333333,
0,
0.02127659574468085,
0.018867924528301886,
0.029411764705882353,
0.1,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0072992700729927005,
0,
0.045454545454545456,
0.016666666666666666,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015384615384615385,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015151515151515152,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0.012195121951219513,
0.1,
0,
0,
0,
0,
0.013157894736842105,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0.013513513513513514,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 160 | 0.005722 | false |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import uuid
import random
import io
import os
import time
from azure.storage.file import (
ContentSettings,
)
class FileSamples():
    """Runnable samples demonstrating the Azure File storage service API.

    Each sample method creates its own share (and usually a directory),
    exercises one area of the API, and deletes the share afterwards.
    """

    def __init__(self, account):
        self.account = account

    def run_all_samples(self):
        """Execute every sample against a freshly created file service."""
        self.service = self.account.create_file_service()

        self.create_file()
        self.delete_file()
        self.file_metadata()
        self.file_properties()
        self.file_exists()
        self.resize_file()
        self.copy_file()
        self.file_range()

        self.file_with_bytes()
        self.file_with_stream()
        self.file_with_path()
        self.file_with_text()

    def _get_resource_reference(self, prefix):
        # Unique name: prefix + 32-char hex uuid4 (dashes stripped).
        return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))

    def _get_file_reference(self, prefix='file'):
        return self._get_resource_reference(prefix)

    def _create_share(self, prefix='share'):
        # Create a uniquely named share and return its name.
        share_name = self._get_resource_reference(prefix)
        self.service.create_share(share_name)
        return share_name

    def _create_directory(self, share_name, prefix='dir'):
        # Create a uniquely named directory in the share and return its name.
        dir_name = self._get_resource_reference(prefix)
        self.service.create_directory(share_name, dir_name)
        return dir_name

    def _get_random_bytes(self, size):
        # Deterministically-typed (bytes) pseudo-random payload of `size` bytes.
        rand = random.Random()
        result = bytearray(size)
        for i in range(size):
            result[i] = rand.randint(0, 255)
        return bytes(result)

    def create_file(self):
        """Create files: basic, with content settings, and with metadata."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)

        # Basic
        file_name1 = self._get_file_reference()
        self.service.create_file(share_name, directory_name, file_name1, 512)

        # Properties
        settings = ContentSettings(content_type='html', content_language='fr')
        file_name2 = self._get_file_reference()
        self.service.create_file(share_name, directory_name, file_name2, 512, content_settings=settings)

        # Metadata
        metadata = {'val1': 'foo', 'val2': 'blah'}
        file_name2 = self._get_file_reference()
        self.service.create_file(share_name, directory_name, file_name2, 512, metadata=metadata)

        self.service.delete_share(share_name)

    def delete_file(self):
        """Delete a file from a directory."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)
        file_name = self._get_file_reference()
        self.service.create_file(share_name, directory_name, file_name, 512)

        # Basic
        self.service.delete_file(share_name, directory_name, file_name)

        self.service.delete_share(share_name)

    def file_metadata(self):
        """Set, get, replace, and clear file metadata."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)
        file_name = self._get_file_reference()
        self.service.create_file(share_name, directory_name, file_name, 512)
        metadata = {'val1': 'foo', 'val2': 'blah'}

        # Basic
        self.service.set_file_metadata(share_name, directory_name, file_name, metadata=metadata)
        metadata = self.service.get_file_metadata(share_name, directory_name, file_name) # metadata={'val1': 'foo', 'val2': 'blah'}

        # Replaces values, does not merge
        metadata = {'new': 'val'}
        self.service.set_file_metadata(share_name, directory_name, file_name, metadata=metadata)
        metadata = self.service.get_file_metadata(share_name, directory_name, file_name) # metadata={'new': 'val'}

        # Capital letters
        metadata = {'NEW': 'VAL'}
        self.service.set_file_metadata(share_name, directory_name, file_name, metadata=metadata)
        metadata = self.service.get_file_metadata(share_name, directory_name, file_name) # metadata={'new': 'VAL'}

        # Clearing
        self.service.set_file_metadata(share_name, directory_name, file_name)
        metadata = self.service.get_file_metadata(share_name, directory_name, file_name) # metadata={}

        self.service.delete_share(share_name)

    def file_properties(self):
        """Set and get content settings and other file properties."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)
        file_name = self._get_file_reference()

        metadata = {'val1': 'foo', 'val2': 'blah'}
        self.service.create_file(share_name, directory_name, file_name, 512, metadata=metadata)

        settings = ContentSettings(content_type='html', content_language='fr')

        # Basic
        self.service.set_file_properties(share_name, directory_name, file_name, content_settings=settings)
        file = self.service.get_file_properties(share_name, directory_name, file_name)
        content_language = file.properties.content_settings.content_language # fr
        content_type = file.properties.content_settings.content_type # html
        content_length = file.properties.content_length # 512

        # Metadata
        # Can't set metadata, but get will return metadata already on the file
        file = self.service.get_file_properties(share_name, directory_name, file_name)
        metadata = file.metadata # metadata={'val1': 'foo', 'val2': 'blah'}

        # Replaces values, does not merge
        settings = ContentSettings(content_encoding='utf-8')
        self.service.set_file_properties(share_name, directory_name, file_name, content_settings=settings)
        file = self.service.get_file_properties(share_name, directory_name, file_name)
        content_encoding = file.properties.content_settings.content_encoding # utf-8
        content_language = file.properties.content_settings.content_language # None

        self.service.delete_share(share_name)

    def file_exists(self):
        """Check whether a file exists before and after creation."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)
        file_name = self._get_file_reference()

        # Basic
        exists = self.service.exists(share_name, directory_name, file_name) # False
        self.service.create_file(share_name, directory_name, file_name, 512)
        exists = self.service.exists(share_name, directory_name, file_name) # True

        self.service.delete_share(share_name)

    def resize_file(self):
        """Resize an existing file and read back its new length."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)
        file_name = self._get_file_reference()

        # Basic
        self.service.create_file(share_name, directory_name, file_name, 512)
        self.service.resize_file(share_name, directory_name, file_name, 1024)
        file = self.service.get_file_properties(share_name, directory_name, file_name)
        length = file.properties.content_length # 1024

        self.service.delete_share(share_name)

    def copy_file(self):
        """Copy a file within a share and poll for copy completion."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)
        source_file_name = self._get_file_reference()
        self.service.create_file(share_name, directory_name, source_file_name, 512)

        # Basic
        # Copy the file from the directory to the root of the share
        source = self.service.make_file_url(share_name, directory_name, source_file_name)
        copy = self.service.copy_file(share_name, None, 'file1copy', source)

        # Poll for copy completion
        # BUG FIX: 'count' was read before assignment (NameError), the loop
        # never stopped after the timeout message, and it referenced the
        # undefined name 'dir_name'.
        count = 0
        while copy.status != 'success':
            count = count + 1
            if count > 5:
                print('Timed out waiting for async copy to complete.')
                break
            time.sleep(30)
            # The copy destination is the share root, so the directory is None.
            copy = self.service.get_file_properties(share_name, None, 'file1copy').properties.copy

        # With SAS from a remote account to local file
        # Commented out as remote share, directory, file, and sas would need to be created
        '''
        source_file_url = self.service.make_file_url(
            remote_share_name,
            remote_directory_name,
            remote_file_name,
            sas_token=remote_sas_token,
        )
        copy = self.service.copy_file(destination_sharename,
                                      destination_directory_name,
                                      destination_file_name,
                                      source_file_url)
        '''

        # Abort copy
        # Commented out as this involves timing the abort to be sent while the copy is still running
        # Abort copy is useful to do along with polling
        # self.service.abort_copy_file(share_name, dir_name, file_name, copy.id)

        self.service.delete_share(share_name)

    def file_with_bytes(self):
        """Upload/download file content as byte strings."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)

        # Basic
        data = self._get_random_bytes(15)
        file_name = self._get_file_reference()
        self.service.create_file_from_bytes(share_name, directory_name, file_name, data)
        file = self.service.get_file_to_bytes(share_name, directory_name, file_name)
        content = file.content # data

        # Download range
        file = self.service.get_file_to_bytes(share_name, directory_name, file_name,
                                              start_range=3, end_range=10)
        content = file.content # data from 3-10

        # Upload from index in byte array
        file_name = self._get_file_reference()
        self.service.create_file_from_bytes(share_name, directory_name, file_name, data, index=3)

        # Content settings, metadata
        settings = ContentSettings(content_type='html', content_language='fr')
        metadata={'val1': 'foo', 'val2': 'blah'}
        file_name = self._get_file_reference()
        self.service.create_file_from_bytes(share_name, directory_name, file_name, data,
                                            content_settings=settings,
                                            metadata=metadata)
        file = self.service.get_file_to_bytes(share_name, directory_name, file_name)
        metadata = file.metadata # metadata={'val1': 'foo', 'val2': 'blah'}
        content_language = file.properties.content_settings.content_language # fr
        content_type = file.properties.content_settings.content_type # html

        # Progress
        # Use slightly larger data so the chunking is more visible
        data = self._get_random_bytes(8 * 1024 * 1024)

        def upload_callback(current, total):
            print('({}, {})'.format(current, total))

        def download_callback(current, total):
            print('({}, {}) '.format(current, total))

        file_name = self._get_file_reference()
        print('upload: ')
        self.service.create_file_from_bytes(share_name, directory_name, file_name, data,
                                            progress_callback=upload_callback)
        print('download: ')
        file = self.service.get_file_to_bytes(share_name, directory_name, file_name,
                                              progress_callback=download_callback)

        self.service.delete_share(share_name)

    def file_with_stream(self):
        """Upload/download file content through streams."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)

        # Basic
        input_stream = io.BytesIO(self._get_random_bytes(15))
        output_stream = io.BytesIO()
        file_name = self._get_file_reference()
        self.service.create_file_from_stream(share_name, directory_name, file_name,
                                             input_stream, 15)
        file = self.service.get_file_to_stream(share_name, directory_name, file_name,
                                               output_stream)
        content_length = file.properties.content_length

        # Download range
        # Content settings, metadata
        # Progress
        # Parallelism
        # See file_with_bytes for these examples. The code will be very similar.

        self.service.delete_share(share_name)

    def file_with_path(self):
        """Upload/download file content via local file paths."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)
        INPUT_FILE_PATH = 'file_input.temp.dat'
        OUTPUT_FILE_PATH = 'file_output.temp.dat'

        data = self._get_random_bytes(4 * 1024)
        with open(INPUT_FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Basic
        file_name = self._get_file_reference()
        self.service.create_file_from_path(share_name, directory_name, file_name, INPUT_FILE_PATH)
        file = self.service.get_file_to_path(share_name, directory_name, file_name, OUTPUT_FILE_PATH)
        content_length = file.properties.content_length

        # Open mode
        # Append to the file instead of starting from the beginning
        # Append streams are not seekable and so must be downloaded serially by setting max_connections=1.
        file = self.service.get_file_to_path(share_name, directory_name, file_name, OUTPUT_FILE_PATH, open_mode='ab',
                                             max_connections=1)
        content_length = file.properties.content_length # will be the same, but local file length will be longer

        # Download range
        # Content settings, metadata
        # Progress
        # Parallelism
        # See file_with_bytes for these examples. The code will be very similar.

        self.service.delete_share(share_name)

        # Best-effort cleanup of the temporary local files.
        if os.path.isfile(INPUT_FILE_PATH):
            try:
                os.remove(INPUT_FILE_PATH)
            except:
                pass

        if os.path.isfile(OUTPUT_FILE_PATH):
            try:
                os.remove(OUTPUT_FILE_PATH)
            except:
                pass

    def file_with_text(self):
        """Upload/download file content as text, with optional encodings."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)

        # Basic
        data = u'hello world'
        file_name = self._get_file_reference()
        self.service.create_file_from_text(share_name, directory_name, file_name, data)
        file = self.service.get_file_to_text(share_name, directory_name, file_name)
        content = file.content # 'hello world'

        # Encoding
        # (the service encodes the text itself; no need to pre-encode here)
        text = u'hello 啊齄丂狛狜 world'
        file_name = self._get_file_reference()
        self.service.create_file_from_text(share_name, directory_name, file_name, text, 'utf-16')
        file = self.service.get_file_to_text(share_name, directory_name, file_name, 'utf-16')
        content = file.content # 'hello 啊齄丂狛狜 world'

        # Download range
        # Content settings, metadata
        # Progress
        # Parallelism
        # See file_with_bytes for these examples. The code will be very similar.

        self.service.delete_share(share_name)

    def file_range(self):
        """Write, list, and clear byte ranges within a file."""
        share_name = self._create_share()
        directory_name = self._create_directory(share_name)
        file_name = self._get_file_reference()
        self.service.create_file(share_name, directory_name, file_name, 2048)

        # Update the file between offsets 512 and 1535 (inclusive)
        data = b'abcdefghijklmnop' * 64
        self.service.update_range(share_name, directory_name, file_name, data, 512, 1535)

        # List ranges
        print('list ranges: ')
        ranges = self.service.list_ranges(share_name, directory_name, file_name)
        for byte_range in ranges:
            print('({}, {}) '.format(byte_range.start, byte_range.end)) # (512, 1535)

        # Clear part of that range
        self.service.clear_range(share_name, directory_name, file_name, 600, 800)

        self.service.delete_share(share_name)
"# coding: utf-8\n",
"\n",
"#-------------------------------------------------------------------------\n",
"# Copyright (c) Microsoft. All rights reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"#--------------------------------------------------------------------------\n",
"import uuid\n",
"import random\n",
"import io\n",
"import os\n",
"import time\n",
"\n",
"from azure.storage.file import (\n",
" ContentSettings,\n",
")\n",
"\n",
"class FileSamples(): \n",
"\n",
" def __init__(self, account):\n",
" self.account = account\n",
"\n",
" def run_all_samples(self):\n",
" self.service = self.account.create_file_service()\n",
"\n",
" self.create_file()\n",
" self.delete_file()\n",
" self.file_metadata() \n",
" self.file_properties()\n",
" self.file_exists()\n",
" self.resize_file()\n",
" self.copy_file()\n",
" self.file_range()\n",
"\n",
" self.file_with_bytes()\n",
" self.file_with_stream()\n",
" self.file_with_path()\n",
" self.file_with_text()\n",
"\n",
" def _get_resource_reference(self, prefix):\n",
" return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))\n",
"\n",
" def _get_file_reference(self, prefix='file'):\n",
" return self._get_resource_reference(prefix)\n",
"\n",
" def _create_share(self, prefix='share'):\n",
" share_name = self._get_resource_reference(prefix)\n",
" self.service.create_share(share_name)\n",
" return share_name\n",
"\n",
" def _create_directory(self, share_name, prefix='dir'):\n",
" dir_name = self._get_resource_reference(prefix)\n",
" self.service.create_directory(share_name, dir_name)\n",
" return dir_name\n",
"\n",
" def _get_random_bytes(self, size):\n",
" rand = random.Random()\n",
" result = bytearray(size)\n",
" for i in range(size):\n",
" result[i] = rand.randint(0, 255)\n",
" return bytes(result)\n",
"\n",
" def create_file(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
"\n",
" # Basic\n",
" file_name1 = self._get_file_reference()\n",
" self.service.create_file(share_name, directory_name, file_name1, 512)\n",
"\n",
" # Properties\n",
" settings = ContentSettings(content_type='html', content_language='fr')\n",
" file_name2 = self._get_file_reference()\n",
" self.service.create_file(share_name, directory_name, file_name2, 512, content_settings=settings)\n",
"\n",
" # Metadata\n",
" metadata = {'val1': 'foo', 'val2': 'blah'}\n",
" file_name2 = self._get_file_reference()\n",
" self.service.create_file(share_name, directory_name, file_name2, 512, metadata=metadata)\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def delete_file(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file(share_name, directory_name, file_name, 512)\n",
"\n",
" # Basic\n",
" self.service.delete_file(share_name, directory_name, file_name)\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def file_metadata(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file(share_name, directory_name, file_name, 512)\n",
" metadata = {'val1': 'foo', 'val2': 'blah'}\n",
"\n",
" # Basic\n",
" self.service.set_file_metadata(share_name, directory_name, file_name, metadata=metadata)\n",
" metadata = self.service.get_file_metadata(share_name, directory_name, file_name) # metadata={'val1': 'foo', 'val2': 'blah'}\n",
"\n",
" # Replaces values, does not merge\n",
" metadata = {'new': 'val'}\n",
" self.service.set_file_metadata(share_name, directory_name, file_name, metadata=metadata)\n",
" metadata = self.service.get_file_metadata(share_name, directory_name, file_name) # metadata={'new': 'val'}\n",
"\n",
" # Capital letters\n",
" metadata = {'NEW': 'VAL'}\n",
" self.service.set_file_metadata(share_name, directory_name, file_name, metadata=metadata)\n",
" metadata = self.service.get_file_metadata(share_name, directory_name, file_name) # metadata={'new': 'VAL'}\n",
"\n",
" # Clearing\n",
" self.service.set_file_metadata(share_name, directory_name, file_name)\n",
" metadata = self.service.get_file_metadata(share_name, directory_name, file_name) # metadata={}\n",
" \n",
" self.service.delete_share(share_name)\n",
"\n",
" def file_properties(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
" file_name = self._get_file_reference()\n",
"\n",
" metadata = {'val1': 'foo', 'val2': 'blah'}\n",
" self.service.create_file(share_name, directory_name, file_name, 512, metadata=metadata)\n",
"\n",
" settings = ContentSettings(content_type='html', content_language='fr') \n",
"\n",
" # Basic\n",
" self.service.set_file_properties(share_name, directory_name, file_name, content_settings=settings)\n",
" file = self.service.get_file_properties(share_name, directory_name, file_name)\n",
" content_language = file.properties.content_settings.content_language # fr\n",
" content_type = file.properties.content_settings.content_type # html\n",
" content_length = file.properties.content_length # 512\n",
"\n",
" # Metadata\n",
" # Can't set metadata, but get will return metadata already on the file\n",
" file = self.service.get_file_properties(share_name, directory_name, file_name)\n",
" metadata = file.metadata # metadata={'val1': 'foo', 'val2': 'blah'}\n",
"\n",
" # Replaces values, does not merge\n",
" settings = ContentSettings(content_encoding='utf-8')\n",
" self.service.set_file_properties(share_name, directory_name, file_name, content_settings=settings)\n",
" file = self.service.get_file_properties(share_name, directory_name, file_name)\n",
" content_encoding = file.properties.content_settings.content_encoding # utf-8\n",
" content_language = file.properties.content_settings.content_language # None\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def file_exists(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
" file_name = self._get_file_reference()\n",
"\n",
" # Basic\n",
" exists = self.service.exists(share_name, directory_name, file_name) # False\n",
" self.service.create_file(share_name, directory_name, file_name, 512)\n",
" exists = self.service.exists(share_name, directory_name, file_name) # True\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def resize_file(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
" file_name = self._get_file_reference()\n",
"\n",
" # Basic\n",
" self.service.create_file(share_name, directory_name, file_name, 512)\n",
" self.service.resize_file(share_name, directory_name, file_name, 1024)\n",
" file = self.service.get_file_properties(share_name, directory_name, file_name)\n",
" length = file.properties.content_length # 1024\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def copy_file(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
" source_file_name = self._get_file_reference()\n",
" self.service.create_file(share_name, directory_name, source_file_name, 512)\n",
"\n",
" # Basic\n",
" # Copy the file from the directory to the root of the share\n",
" source = self.service.make_file_url(share_name, directory_name, source_file_name)\n",
" copy = self.service.copy_file(share_name, None, 'file1copy', source)\n",
"\n",
" # Poll for copy completion\n",
" while copy.status != 'success':\n",
" count = count + 1\n",
" if count > 5:\n",
" print('Timed out waiting for async copy to complete.')\n",
" time.sleep(30)\n",
" copy = self.service.get_file_properties(share_name, dir_name, 'file1copy').properties.copy\n",
"\n",
" # With SAS from a remote account to local file\n",
" # Commented out as remote share, directory, file, and sas would need to be created\n",
" '''\n",
" source_file_url = self.service.make_file_url(\n",
" remote_share_name,\n",
" remote_directory_name,\n",
" remote_file_name,\n",
" sas_token=remote_sas_token,\n",
" )\n",
" copy = self.service.copy_file(destination_sharename, \n",
" destination_directory_name, \n",
" destination_file_name, \n",
" source_file_url)\n",
" '''\n",
"\n",
" # Abort copy\n",
" # Commented out as this involves timing the abort to be sent while the copy is still running\n",
" # Abort copy is useful to do along with polling\n",
" # self.service.abort_copy_file(share_name, dir_name, file_name, copy.id)\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def file_with_bytes(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
"\n",
" # Basic\n",
" data = self._get_random_bytes(15)\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file_from_bytes(share_name, directory_name, file_name, data)\n",
" file = self.service.get_file_to_bytes(share_name, directory_name, file_name)\n",
" content = file.content # data\n",
"\n",
" # Download range\n",
" file = self.service.get_file_to_bytes(share_name, directory_name, file_name,\n",
" start_range=3, end_range=10)\n",
" content = file.content # data from 3-10\n",
"\n",
" # Upload from index in byte array\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file_from_bytes(share_name, directory_name, file_name, data, index=3)\n",
"\n",
" # Content settings, metadata\n",
" settings = ContentSettings(content_type='html', content_language='fr') \n",
" metadata={'val1': 'foo', 'val2': 'blah'}\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file_from_bytes(share_name, directory_name, file_name, data, \n",
" content_settings=settings,\n",
" metadata=metadata)\n",
" file = self.service.get_file_to_bytes(share_name, directory_name, file_name)\n",
" metadata = file.metadata # metadata={'val1': 'foo', 'val2': 'blah'}\n",
" content_language = file.properties.content_settings.content_language # fr\n",
" content_type = file.properties.content_settings.content_type # html\n",
"\n",
" # Progress\n",
" # Use slightly larger data so the chunking is more visible\n",
" data = self._get_random_bytes(8 * 1024 *1024)\n",
" def upload_callback(current, total):\n",
" print('({}, {})'.format(current, total))\n",
" def download_callback(current, total):\n",
" print('({}, {}) '.format(current, total))\n",
" file_name = self._get_file_reference()\n",
"\n",
" print('upload: ')\n",
" self.service.create_file_from_bytes(share_name, directory_name, file_name, data, \n",
" progress_callback=upload_callback)\n",
"\n",
" print('download: ')\n",
" file = self.service.get_file_to_bytes(share_name, directory_name, file_name, \n",
" progress_callback=download_callback)\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def file_with_stream(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
"\n",
" # Basic\n",
" input_stream = io.BytesIO(self._get_random_bytes(15))\n",
" output_stream = io.BytesIO()\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file_from_stream(share_name, directory_name, file_name, \n",
" input_stream, 15)\n",
" file = self.service.get_file_to_stream(share_name, directory_name, file_name, \n",
" output_stream)\n",
" content_length = file.properties.content_length\n",
"\n",
" # Download range\n",
" # Content settings, metadata\n",
" # Progress\n",
" # Parallelism\n",
" # See file_with_bytes for these examples. The code will be very similar.\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def file_with_path(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
" INPUT_FILE_PATH = 'file_input.temp.dat'\n",
" OUTPUT_FILE_PATH = 'file_output.temp.dat'\n",
"\n",
" data = self._get_random_bytes(4 * 1024)\n",
" with open(INPUT_FILE_PATH, 'wb') as stream:\n",
" stream.write(data)\n",
"\n",
" # Basic\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file_from_path(share_name, directory_name, file_name, INPUT_FILE_PATH)\n",
" file = self.service.get_file_to_path(share_name, directory_name, file_name, OUTPUT_FILE_PATH)\n",
" content_length = file.properties.content_length\n",
"\n",
" # Open mode\n",
" # Append to the file instead of starting from the beginning\n",
" # Append streams are not seekable and so must be downloaded serially by setting max_connections=1.\n",
" file = self.service.get_file_to_path(share_name, directory_name, file_name, OUTPUT_FILE_PATH, open_mode='ab',\n",
" max_connections=1)\n",
" content_length = file.properties.content_length # will be the same, but local file length will be longer\n",
"\n",
" # Download range\n",
" # Content settings, metadata\n",
" # Progress\n",
" # Parallelism\n",
" # See file_with_bytes for these examples. The code will be very similar.\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" if os.path.isfile(INPUT_FILE_PATH):\n",
" try:\n",
" os.remove(INPUT_FILE_PATH)\n",
" except:\n",
" pass\n",
"\n",
" if os.path.isfile(OUTPUT_FILE_PATH):\n",
" try:\n",
" os.remove(OUTPUT_FILE_PATH)\n",
" except:\n",
" pass\n",
"\n",
" def file_with_text(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
"\n",
" # Basic\n",
" data = u'hello world'\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file_from_text(share_name, directory_name, file_name, data)\n",
" file = self.service.get_file_to_text(share_name, directory_name, file_name)\n",
" content = file.content # 'hello world'\n",
"\n",
" # Encoding\n",
" text = u'hello 啊齄丂狛狜 world'\n",
" data = text.encode('utf-16')\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file_from_text(share_name, directory_name, file_name, text, 'utf-16')\n",
" file = self.service.get_file_to_text(share_name, directory_name, file_name, 'utf-16')\n",
" content = file.content # 'hello 啊齄丂狛狜 world'\n",
"\n",
" # Download range\n",
" # Content settings, metadata\n",
" # Progress\n",
" # Parallelism\n",
" # See file_with_bytes for these examples. The code will be very similar.\n",
"\n",
" self.service.delete_share(share_name)\n",
"\n",
" def file_range(self):\n",
" share_name = self._create_share()\n",
" directory_name = self._create_directory(share_name)\n",
" file_name = self._get_file_reference()\n",
" self.service.create_file(share_name, directory_name, file_name, 2048)\n",
"\n",
" # Update the file between offset 512 and 15351535\n",
" data = b'abcdefghijklmnop' * 64\n",
" self.service.update_range(share_name, directory_name, file_name, data, 512, 1535)\n",
"\n",
" # List ranges\n",
" print('list ranges: ')\n",
" ranges = self.service.list_ranges(share_name, directory_name, file_name)\n",
" for range in ranges:\n",
" print('({}, {}) '.format(range.start, range.end)) # (512, 1535)\n",
"\n",
" # Clear part of that range\n",
" self.service.clear_range(share_name, directory_name, file_name, 600, 800)\n",
"\n",
" self.service.delete_share(share_name)"
] | [
0,
0,
0.013333333333333334,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08695652173913043,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0.015151515151515152,
0,
0,
0,
0.010309278350515464,
0.017391304347826087,
0,
0,
0,
0.010309278350515464,
0.017391304347826087,
0,
0,
0,
0.019417475728155338,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0.011627906976744186,
0,
0,
0.009345794392523364,
0.011494252873563218,
0.024390243902439025,
0.013157894736842105,
0.016129032258064516,
0,
0,
0,
0.011494252873563218,
0.013157894736842105,
0,
0,
0,
0.009345794392523364,
0.011494252873563218,
0.023529411764705882,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0.024096385542168676,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.01818181818181818,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0.016129032258064516,
0.017543859649122806,
0,
0,
0,
0,
0.009900990099009901,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0.011764705882352941,
0.02631578947368421,
0,
0,
0.011764705882352941,
0,
0.020833333333333332,
0,
0,
0,
0.01020408163265306,
0,
0,
0.012195121951219513,
0.02040816326530612,
0,
0.022222222222222223,
0.015151515151515152,
0.017241379310344827,
0.011764705882352941,
0.013157894736842105,
0.024390243902439025,
0.013157894736842105,
0,
0,
0,
0.018518518518518517,
0.022222222222222223,
0,
0.02127659574468085,
0,
0,
0,
0,
0.022222222222222223,
0.013513513513513514,
0,
0,
0.023255813953488372,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023529411764705882,
0,
0.022988505747126436,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0.00980392156862745,
0,
0,
0,
0,
0.009345794392523364,
0.00847457627118644,
0,
0.017699115044247787,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0.011904761904761904,
0.02127659574468085,
0,
0,
0,
0,
0,
0.01020408163265306,
0.010638297872340425,
0.018867924528301886,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0.012345679012345678,
0,
0.013157894736842105,
0,
0,
0.012195121951219513,
0,
0.022222222222222223
] | 389 | 0.004203 | false |
elif msg.text in ["/listgroup","/Listgroup"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[=> %s\n" % (cl.getGroup(i).name)
cl.sendText(msg.to,h)
def autolike():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like by Sora\n\nhttp://line.me/ti/p/%40dsd5411x")
kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like by Sora\n\nhttp://line.me/ti/p/%40dsd5411x")
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(500)
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
# -------- VIEW SEEN ----------
elif msg.text.lower() == rname+' setview':
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
cl.sendMessage(msg.to, "Checkpoint checked!")
print "@setview"
elif msg.text.lower() == rname+' viewseen':
with open('dataSeen/'+group_id+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
conName.append('nones')
pass
contactId = cl.getContact(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "List Viewer\n* "
grp = '\n* '.join(str(f) for f in dataResult)
total = '\n\nTotal %i viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )
cl.sendMessage(msg.to, "%s %s %s" % (tukang, grp, total))
else:
cl.sendMessage(msg.to, "Belum ada viewers")
print "@viewseen"
except Exception as e:
print "ERROR : " + str(e)
# ---------------- CREATOR
elif msg.text in ["Creator","creator"]:
msg.contentType = 13
cl.sendText(msg.to, "ADD MY CREATOR BOT SAGIRI\nPROTECT GROUP\nline.me/ti/p/~alifp.sikumbang")
msg.contentMetadata = {'mid': 'uaa75cafbc718153f1b0e69998c51f4e7'}
cl.sendMessage(msg)
pass
cl.sendText(msg.to, "ADD MY STAFF BOT SAGIRI\nPROTECT GROUP")
msg.contentMetadata = {'mid': 'u3661447738b7d0766102618c427115d8'}
cl.sendMessage(msg)
# ----------------- NOTIFED MEMBER OUT GROUP
if op.type == 15:
if op.param2 in Bots:
return
kk.sendText(op.param1, "Good Bye\n(*´・ω・*)")
print "MEMBER HAS LEFT THE GROUP"
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 17:
if op.param2 in Bots:
return
kk.sendText(op.param1, "Welcome\n(*´・ω・*)")
print "MEMBER HAS JOIN THE GROUP"
# ----------------- NOTIFED PROTECT INV MEMBER GROUP
if op.type == 13:
if op.param2 in Bots:
return
if op.param2 in admin:
return
if wait ["protectinv"] == True:
try:
X = cl.getGroup(op.param1)
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
print gInviMids + "INVITE CANCEL"
except:
try:
print "RETRY CANCEL INVITATION"
X = cl.getGroup(op.param1)
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(op.param1, gInviMids)
print gInviMids + "INVITE CANCELED"
except:
print "BOT CAN'T CANCEL THE INVITATION"
pass
# ----------------- Me By tag / Me @ and Mid By tag/ Mid @
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif "Me @" in msg.text:
msg.contentType = 13
_name = msg.text.replace("Me @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
# ----------------- Mid By tag/ Mid @ PART 2
if "Mid:" in msg.text:
midd = eval(msg.contentMetadata["MENTION"])
key1 = midd["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"mid:"+key1)
# ----------------- KICK MEMBER BY TAG 2TAG ATAU 10TAG MEMBER
elif ("Bye " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
pass
# ----------------- BAN MEMBER BY TAG 2TAG ATAU 10TAG MEMBER
elif ("Ban " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned")
except:
pass
# -----------------
elif msg.text in ["Banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user")
xname = ""
for mi_d in wait["blacklist"]:
xname = cl.getContact(mi_d).displayName + "\n"
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+"\n" " "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(mi_d)+'}]}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
# -----------------
if op.type == 19:
if op.param3 in mid:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[mid])
except:
pass
if op.param3 in Amid:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kd.inviteIntoGroup(op.param1,[Amid])
except:
pass
if op.param3 in Bmid:
try:
ke.kickoutFromGroup(op.param1,[op.param2])
kf.inviteIntoGroup(op.param1,[Bmid])
except:
pass
if op.param3 in Cmid:
try:
kd.kickoutFromGroup(op.param1,[op.param2])
ke.inviteIntoGroup(op.param1,[Cmid])
except:
pass
if op.param3 in Dmid:
try:
kf.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[Dmid])
except:
pass
if op.param3 in Emid:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[Emid]
except:
pass
if op.param3 in Fmid:
try:
ke.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[Fmid]
except:
pass
| [
"elif msg.text in [\"/listgroup\",\"/Listgroup\"]:\r\n",
" gid = cl.getGroupIdsJoined()\r\n",
" h = \"\"\r\n",
" for i in gid:\r\n",
" h += \"[=> %s\\n\" % (cl.getGroup(i).name)\r\n",
" cl.sendText(msg.to,h)\r\n",
" \r\n",
"def autolike():\r\n",
" for zx in range(0,20):\r\n",
" hasil = cl.activity(limit=20)\r\n",
" if hasil['result']['posts'][zx]['postInfo']['liked'] == False:\r\n",
" try: \r\n",
" cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)\r\n",
" cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],\"Auto Like by Sora\\n\\nhttp://line.me/ti/p/%40dsd5411x\")\r\n",
" kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)\r\n",
" kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],\"Auto Like by Sora\\n\\nhttp://line.me/ti/p/%40dsd5411x\")\r\n",
" print \"Like\"\r\n",
" except:\r\n",
" pass\r\n",
" else:\r\n",
" print \"Already Liked\"\r\n",
" time.sleep(500)\r\n",
"thread2 = threading.Thread(target=autolike)\r\n",
"thread2.daemon = True\r\n",
"thread2.start()\r\n",
"\r\n",
"# -------- VIEW SEEN ---------- \r\n",
"elif msg.text.lower() == rname+' setview':\r\n",
" subprocess.Popen(\"echo '' > dataSeen/\"+msg.to+\".txt\", shell=True, stdout=subprocess.PIPE)\r\n",
" cl.sendMessage(msg.to, \"Checkpoint checked!\")\r\n",
" print \"@setview\"\r\n",
" elif msg.text.lower() == rname+' viewseen':\r\n",
" with open('dataSeen/'+group_id+'.txt','r') as rr:\r\n",
" contactArr = rr.readlines()\r\n",
" for v in xrange(len(contactArr) -1,0,-1):\r\n",
" num = re.sub(r'\\n', \"\", contactArr[v])\r\n",
" contacts.append(num)\r\n",
" pass\r\n",
" contacts = list(set(contacts))\r\n",
" for z in range(len(contacts)):\r\n",
" arg = contacts[z].split('|')\r\n",
" userList.append(arg[0])\r\n",
" timelist.append(arg[1])\r\n",
" uL = list(set(userList))\r\n",
" for ll in range(len(uL)):\r\n",
" try:\r\n",
" getIndexUser = userList.index(uL[ll])\r\n",
" timeSeen.append(time.strftime(\"%H:%M:%S\", time.localtime(int(timelist[getIndexUser]) / 1000)))\r\n",
" recheckData.append(userList[getIndexUser])\r\n",
" except IndexError:\r\n",
" conName.append('nones')\r\n",
" pass\r\n",
" contactId = cl.getContact(recheckData)\r\n",
" for v in range(len(recheckData)):\r\n",
" dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')\r\n",
" pass\r\n",
" if len(dataResult) > 0:\r\n",
" tukang = \"List Viewer\\n* \"\r\n",
" grp = '\\n* '.join(str(f) for f in dataResult)\r\n",
" total = '\\n\\nTotal %i viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )\r\n",
" cl.sendMessage(msg.to, \"%s %s %s\" % (tukang, grp, total))\r\n",
" else:\r\n",
" cl.sendMessage(msg.to, \"Belum ada viewers\")\r\n",
" print \"@viewseen\"\r\n",
" except Exception as e:\r\n",
" print \"ERROR : \" + str(e)\r\n",
"\r\n",
"# ---------------- CREATOR\r\n",
"elif msg.text in [\"Creator\",\"creator\"]:\r\n",
" msg.contentType = 13\r\n",
" cl.sendText(msg.to, \"ADD MY CREATOR BOT SAGIRI\\nPROTECT GROUP\\nline.me/ti/p/~alifp.sikumbang\")\r\n",
" msg.contentMetadata = {'mid': 'uaa75cafbc718153f1b0e69998c51f4e7'}\r\n",
" cl.sendMessage(msg)\r\n",
" pass\r\n",
" cl.sendText(msg.to, \"ADD MY STAFF BOT SAGIRI\\nPROTECT GROUP\")\r\n",
" msg.contentMetadata = {'mid': 'u3661447738b7d0766102618c427115d8'}\r\n",
" cl.sendMessage(msg)\r\n",
"# ----------------- NOTIFED MEMBER OUT GROUP\r\n",
" if op.type == 15:\r\n",
" if op.param2 in Bots:\r\n",
" return\r\n",
" kk.sendText(op.param1, \"Good Bye\\n(*´・ω・*)\")\r\n",
" print \"MEMBER HAS LEFT THE GROUP\"\r\n",
"# ----------------- NOTIFED MEMBER JOIN GROUP\r\n",
" if op.type == 17:\r\n",
" if op.param2 in Bots:\r\n",
" return\r\n",
" kk.sendText(op.param1, \"Welcome\\n(*´・ω・*)\")\r\n",
" print \"MEMBER HAS JOIN THE GROUP\"\r\n",
" \r\n",
"# ----------------- NOTIFED PROTECT INV MEMBER GROUP\r\n",
" if op.type == 13:\r\n",
" if op.param2 in Bots:\r\n",
" return\r\n",
" if op.param2 in admin:\r\n",
" return\r\n",
"\t if wait [\"protectinv\"] == True:\r\n",
" try:\r\n",
" X = cl.getGroup(op.param1)\r\n",
" gInviMids = [contact.mid for contact in X.invitee]\r\n",
" cl.cancelGroupInvitation(msg.to, gInviMids)\r\n",
" print gInviMids + \"INVITE CANCEL\"\r\n",
" except:\r\n",
" try:\r\n",
" print \"RETRY CANCEL INVITATION\"\r\n",
" X = cl.getGroup(op.param1)\r\n",
" gInviMids = [contact.mid for contact in X.invitee]\r\n",
" cl.cancelGroupInvitation(op.param1, gInviMids)\r\n",
" print gInviMids + \"INVITE CANCELED\"\r\n",
" except:\r\n",
" print \"BOT CAN'T CANCEL THE INVITATION\"\r\n",
" pass\r\n",
"# ----------------- Me By tag / Me @ and Mid By tag/ Mid @\r\n",
" elif \"Mid @\" in msg.text:\r\n",
" _name = msg.text.replace(\"Mid @\",\"\")\r\n",
" _nametarget = _name.rstrip(' ')\r\n",
" gs = cl.getGroup(msg.to)\r\n",
" for g in gs.members:\r\n",
" if _nametarget == g.displayName:\r\n",
" cl.sendText(msg.to, g.mid)\r\n",
" else:\r\n",
" pass\r\n",
" elif \"Me @\" in msg.text:\r\n",
" msg.contentType = 13\r\n",
" _name = msg.text.replace(\"Me @\",\"\")\r\n",
" _nametarget = _name.rstrip(' ')\r\n",
" gs = cl.getGroup(msg.to)\r\n",
" for g in gs.members:\r\n",
" if _nametarget == g.displayName:\r\n",
" msg.contentMetadata = {'mid': g.mid}\r\n",
" cl.sendMessage(msg)\r\n",
" else:\r\n",
" pass\r\n",
"# ----------------- Mid By tag/ Mid @ PART 2\r\n",
" if \"Mid:\" in msg.text:\r\n",
" midd = eval(msg.contentMetadata[\"MENTION\"])\r\n",
" key1 = midd[\"MENTIONEES\"][0][\"M\"]\r\n",
" cl.sendText(msg.to,\"mid:\"+key1)\r\n",
"# ----------------- KICK MEMBER BY TAG 2TAG ATAU 10TAG MEMBER\r\n",
" elif (\"Bye \" in msg.text):\r\n",
" if msg.from_ in admin:\r\n",
" key = eval(msg.contentMetadata[\"MENTION\"])\r\n",
" key[\"MENTIONEES\"][0][\"M\"]\r\n",
" targets = []\r\n",
" for x in key[\"MENTIONEES\"]:\r\n",
" targets.append(x[\"M\"])\r\n",
" for target in targets:\r\n",
" try:\r\n",
" cl.kickoutFromGroup(msg.to,[target])\r\n",
" except:\r\n",
" pass\r\n",
"# ----------------- BAN MEMBER BY TAG 2TAG ATAU 10TAG MEMBER\r\n",
" elif (\"Ban \" in msg.text):\r\n",
" if msg.from_ in admin:\r\n",
" key = eval(msg.contentMetadata[\"MENTION\"])\r\n",
" key[\"MENTIONEES\"][0][\"M\"]\r\n",
" targets = []\r\n",
" for x in key[\"MENTIONEES\"]:\r\n",
" targets.append(x[\"M\"])\r\n",
" for target in targets:\r\n",
" try:\r\n",
" wait[\"blacklist\"][target] = True\r\n",
" f=codecs.open('st2__b.json','w','utf-8')\r\n",
" json.dump(wait[\"blacklist\"], f, sort_keys=True, indent=4,ensure_ascii=False)\r\n",
" cl.sendText(msg.to,\"Succes Banned\")\r\n",
" except:\r\n",
" pass\r\n",
"# -----------------\r\n",
" elif msg.text in [\"Banlist\"]:\r\n",
" if wait[\"blacklist\"] == {}:\r\n",
" cl.sendText(msg.to,\"nothing\")\r\n",
" else:\r\n",
" cl.sendText(msg.to,\"Blacklist user\")\r\n",
" xname = \"\"\r\n",
" for mi_d in wait[\"blacklist\"]:\r\n",
" xname = cl.getContact(mi_d).displayName + \"\\n\"\r\n",
" xlen = str(len(xname)+1)\r\n",
" msg.contentType = 0\r\n",
" msg.text = \"@\"+xname+\"\\n\" \" \"\r\n",
" msg.contentMetadata ={'MENTION':'{\"MENTIONEES\":[{\"S\":\"0\",\"E\":'+json.dumps(xlen)+',\"M\":'+json.dumps(mi_d)+'}]}','EMTVER':'4'}\r\n",
" try:\r\n",
" cl.sendMessage(msg)\r\n",
" except Exception as error:\r\n",
" print error\r\n",
"# -----------------\r\n",
"\tif op.type == 19:\r\n",
" if op.param3 in mid:\r\n",
" try:\r\n",
" ki.kickoutFromGroup(op.param1,[op.param2])\r\n",
" kk.inviteIntoGroup(op.param1,[mid])\r\n",
" except:\r\n",
" pass\r\n",
" if op.param3 in Amid:\r\n",
" try:\r\n",
" kc.kickoutFromGroup(op.param1,[op.param2])\r\n",
" kd.inviteIntoGroup(op.param1,[Amid])\r\n",
" except:\r\n",
" pass\r\n",
" if op.param3 in Bmid:\r\n",
" try:\r\n",
" ke.kickoutFromGroup(op.param1,[op.param2])\r\n",
" kf.inviteIntoGroup(op.param1,[Bmid])\r\n",
" except:\r\n",
" pass\r\n",
" if op.param3 in Cmid:\r\n",
" try:\r\n",
" kd.kickoutFromGroup(op.param1,[op.param2])\r\n",
" ke.inviteIntoGroup(op.param1,[Cmid])\r\n",
" except:\r\n",
" pass\r\n",
" if op.param3 in Dmid:\r\n",
" try:\r\n",
" kf.kickoutFromGroup(op.param1,[op.param2])\r\n",
" ki.inviteIntoGroup(op.param1,[Dmid])\r\n",
" except:\r\n",
" pass\r\n",
" if op.param3 in Emid:\r\n",
" try:\r\n",
" ki.kickoutFromGroup(op.param1,[op.param2])\r\n",
" kk.inviteIntoGroup(op.param1,[Emid]\r\n",
" except:\r\n",
" pass\r\n",
" if op.param3 in Fmid:\r\n",
" try:\r\n",
" ke.kickoutFromGroup(op.param1,[op.param2])\r\n",
" cl.inviteIntoGroup(op.param1,[Fmid]\r\n",
" except:\r\n",
" pass\r\n"
] | [
0.02127659574468085,
0.021739130434782608,
0,
0,
0,
0.02564102564102564,
0.05555555555555555,
0.058823529411764705,
0.03571428571428571,
0,
0.014705882352941176,
0.06666666666666667,
0.022222222222222223,
0.01675977653631285,
0.022222222222222223,
0.01675977653631285,
0,
0.058823529411764705,
0,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0.029411764705882353,
0,
0.018691588785046728,
0,
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 228 | 0.002302 | false |
import wsgi_intercept.webtest_intercept
from applications.sahana.modules.s3.s3test import WSGI_Test
class CR_Test(WSGI_Test):
    """
    Extended version of WSGI_Test for Shelter Registry.
    Designed to be run from Web2Py shell
    Copy to modules folder & run as:
    from applications.sahana.modules.test_cr import *
    test_cr(db)
    NB This doesn't yet work: NameError: global name 'create_fn' is not defined
    """

    # Module prefix used to build Sahana URLs (/sahana/cr/...).
    module = "cr"

    def __init__(self, db):
        # Delegate all setup to the base WSGI_Test; db is the web2py DAL handle.
        WSGI_Test.__init__(self, db)
        return

    def setUp(self,db):
        """
        Populate Shelter Registry with Test Data to make each test case separable
        Intercept WSGI calls to do in-process testing
        """
        # Not working :/
        #db = self._globals()
        #self.setUpShelter(db)
        # NOTE(review): 'create_fn' is never defined in this file — this is the
        # NameError the class docstring warns about; confirm where it should come from.
        wsgi_intercept.add_wsgi_intercept(self.HOST, self.PORT, create_fn)
        return

    def runTest(self,db):
        """ Unit Test all functions within Shelter Registry """
        self.runTestLogin()
        self.runTestShelter()
        return

    def runTestLogin(self):
        # Fetch the login page and check it renders.
        # NOTE(review): 'test' and 'module' are read as globals here, not via
        # self — presumably bound elsewhere (e.g. by test_cr); verify intent.
        if "200 OK" in test.getPage("/sahana/%s/login" % module):
            test.assertHeader("Content-Type", "text/html")
            test.assertInBody("Login")
        return

    def runTestShelter(self):
        # Exercise the shelter list view in HTML, JSON and CSV representations.
        resource = "shelter"
        if "200 OK" in test.getPage("/sahana/%s/%s" % (module, resource)):
            test.assertHeader("Content-Type", "text/html")
            test.assertInBody("List Shelters")
        # Need to login
        #if "200 OK" in test.getPage("/sahana/%s/%s/create" % (module, resource)):
        #    test.assertHeader("Content-Type", "text/html")
        #    test.assertInBody("Add Shelter")
        # NOTE(review): JSON response is asserted as text/html — looks like it
        # mirrors the server's actual (arguably wrong) content type; confirm.
        if "200 OK" in test.getPage("/sahana/%s/%s?format=json" % (module, resource)):
            test.assertHeader("Content-Type", "text/html")
            test.assertInBody("[")
        if "200 OK" in test.getPage("/sahana/%s/%s?format=csv" % (module, resource)):
            test.assertHeader("Content-Type", "text/csv")
        return

    def setUpShelter(self, db):
        """ Create test Data for Shelter Registry """
        resource = "shelter"
        table = module + "_" + resource
        # Insert one fixture row only if the table is currently empty.
        if not len(db().select(db[table].ALL)):
            db[table].insert(
                name = "Test Shelter",
                description = "Just a test",
                location_id = 1,
                person_id = 1,
                address = "52 Test Street",
                capacity = 100,
                #dwellings=10,
                #persons_per_dwelling=10,
                #area="1 sq km"
                )
        return
def test_cr(db):
    """Build a CR_Test suite against *db*, prepare it, and run it."""
    suite = CR_Test(db)
    suite.setUp(db)
    suite.runTest(db)
    return
# Entry point when executed directly; 'db' is expected to already exist in the
# global scope (this module is meant to be run from the Web2Py shell).
if __name__ == "__main__":
    test_cr(db)
| [
"import wsgi_intercept.webtest_intercept\n",
"from applications.sahana.modules.s3.s3test import WSGI_Test\n",
"\n",
"class CR_Test(WSGI_Test):\n",
" \"\"\"\n",
" Extended version of WSGI_Test for Shelter Registry.\n",
" Designed to be run from Web2Py shell\n",
" Copy to modules folder & run as:\n",
" from applications.sahana.modules.test_cr import *\n",
" test_cr(db)\n",
" NB This doesn't yet work: NameError: global name 'create_fn' is not defined\n",
" \"\"\"\n",
" \n",
" module = \"cr\"\n",
"\n",
" def __init__(self, db):\n",
" WSGI_Test.__init__(self, db)\n",
" return\n",
" \n",
" def setUp(self,db):\n",
" \"\"\"\n",
" Populate Shelter Registry with Test Data to make each test case separable\n",
" Intercept WSGI calls to do in-process testing\n",
" \"\"\"\n",
" # Not working :/\n",
" #db = self._globals()\n",
" #self.setUpShelter(db)\n",
" wsgi_intercept.add_wsgi_intercept(self.HOST, self.PORT, create_fn)\n",
" return\n",
"\n",
" def runTest(self,db):\n",
" \"\"\" Unit Test all functions within Shelter Registry \"\"\"\n",
" self.runTestLogin()\n",
" self.runTestShelter()\n",
" return\n",
" \n",
" def runTestLogin(self):\n",
" if \"200 OK\" in test.getPage(\"/sahana/%s/login\" % module):\n",
" test.assertHeader(\"Content-Type\", \"text/html\")\n",
" test.assertInBody(\"Login\")\n",
" return\n",
"\n",
" def runTestShelter(self):\n",
" resource = \"shelter\"\n",
" if \"200 OK\" in test.getPage(\"/sahana/%s/%s\" % (module, resource)):\n",
" test.assertHeader(\"Content-Type\", \"text/html\")\n",
" test.assertInBody(\"List Shelters\")\n",
" # Need to login\n",
" #if \"200 OK\" in test.getPage(\"/sahana/%s/%s/create\" % (module, resource)):\n",
" # test.assertHeader(\"Content-Type\", \"text/html\")\n",
" # test.assertInBody(\"Add Shelter\")\n",
" if \"200 OK\" in test.getPage(\"/sahana/%s/%s?format=json\" % (module, resource)):\n",
" test.assertHeader(\"Content-Type\", \"text/html\")\n",
" test.assertInBody(\"[\")\n",
" if \"200 OK\" in test.getPage(\"/sahana/%s/%s?format=csv\" % (module, resource)):\n",
" test.assertHeader(\"Content-Type\", \"text/csv\")\n",
" return\n",
"\n",
" def setUpShelter(self, db):\n",
" \"\"\" Create test Data for Shelter Registry \"\"\"\n",
" resource = \"shelter\"\n",
" table = module + \"_\" + resource\n",
" if not len(db().select(db[table].ALL)):\n",
" db[table].insert(\n",
" name = \"Test Shelter\",\n",
" description = \"Just a test\",\n",
" location_id = 1,\n",
" person_id = 1,\n",
" address = \"52 Test Street\",\n",
" capacity = 100,\n",
" #dwellings=10,\n",
" #persons_per_dwelling=10,\n",
" #area=\"1 sq km\"\n",
" )\n",
" return\n",
"\n",
"def test_cr(db):\n",
" test = CR_Test(db)\n",
" test.setUp(db)\n",
" test.runTest(db)\n",
" return\n",
" \n",
"if __name__ == \"__main__\":\n",
" test_cr(db)\n"
] | [
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0.2,
0,
0,
0,
0,
0,
0.1111111111111111,
0.041666666666666664,
0,
0.011627906976744186,
0,
0,
0,
0.03333333333333333,
0.03225806451612903,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024096385542168676,
0,
0,
0.011494252873563218,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05128205128205128,
0.044444444444444446,
0.06060606060606061,
0.06451612903225806,
0.045454545454545456,
0.0625,
0.03225806451612903,
0.023809523809523808,
0.03125,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0.07692307692307693,
0.037037037037037035,
0
] | 84 | 0.01613 | false |
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#pylint: disable=line-too-long
import azure.cli.core.commands.parameters #pylint: disable=unused-import
from azure.cli.core.commands import CliArgumentType
from azure.cli.core.commands import register_cli_argument
# --- 'az ad app' (AAD application) argument registrations -------------------
register_cli_argument('ad app', 'application_object_id', options_list=('--object-id',))
register_cli_argument('ad app', 'display_name', help=' the display name of the application')
register_cli_argument('ad app', 'homepage', help='the url where users can sign in and use your app.')
register_cli_argument('ad app', 'identifier', options_list=('--id',), help='identifier uri, application id, or object id')
register_cli_argument('ad app', 'identifier_uris', nargs='+', help='space separated unique URIs that Azure AD can use for this app.')
register_cli_argument('ad app', 'reply_urls', nargs='+',
                      help='space separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request. The value does not need to be a physical endpoint, but must be a valid URI.')
register_cli_argument('ad app', 'start_date', help='the start date after which password or key would be valid. Default value is current time')
register_cli_argument('ad app', 'end_date', help='the end date till which password or key is valid. Default value is one year after current time')
register_cli_argument('ad app', 'key_value', help='the value for the key credentials associated with the application')
register_cli_argument('ad app', 'key_type', choices=['AsymmetricX509Cert', 'Password', 'Symmetric'], default='AsymmetricX509Cert',
                      help='the type of the key credentials associated with the application')
register_cli_argument('ad app', 'key_usage', choices=['Sign', 'Verify'], default='Verify',
                      help='the usage of the key credentials associated with the application.')
# --- 'az ad sp' (service principal) argument registrations ------------------
# Shared argument type so every 'create-for-rbac'/'reset-credentials' name
# option is spelled identically (--name/-n).
sp_name_type = CliArgumentType(
    options_list=('--name', '-n')
)
register_cli_argument('ad sp', 'identifier', options_list=('--id',), help='service principal name, or object id')
register_cli_argument('ad sp create', 'identifier', options_list=('--id',), help='identifier uri, application id, or object id of the associated application')
register_cli_argument('ad sp create-for-rbac', 'name', sp_name_type)
register_cli_argument('ad sp create-for-rbac', 'years', type=int)
register_cli_argument('ad sp create-for-rbac', 'scopes', nargs='+')
register_cli_argument('ad sp reset-credentials', 'name', sp_name_type)
register_cli_argument('ad sp reset-credentials', 'years', type=int)
# --- generic 'az ad' arguments shared by several subcommands ----------------
register_cli_argument('ad', 'display_name', help='object\'s display name or its prefix')
register_cli_argument('ad', 'identifier_uri',
                      help='graph application identifier, must be in uri format')
register_cli_argument('ad', 'spn', help='service principal name')
register_cli_argument('ad', 'upn', help='user principal name, e.g. john.doe@contoso.com')
register_cli_argument('ad', 'query_filter', options_list=('--filter',), help='OData filter')
register_cli_argument('ad user', 'mail_nickname',
                      help='mail alias. Defaults to user principal name')
register_cli_argument('ad user', 'force_change_password_next_login', action='store_true')
# --- 'az role assignment' / 'az role' (RBAC) argument registrations ---------
register_cli_argument('role assignment', 'role_assignment_name',
                      options_list=('--role-assignment-name', '-n'))
register_cli_argument('role assignment', 'role', help='role name or id')
register_cli_argument('role assignment', 'show_all', options_list=('--all',), action='store_true',
                      help='show all assignments under the current subscription')
register_cli_argument('role assignment', 'include_inherited', action='store_true',
                      help='include assignments applied on parent scopes')
register_cli_argument('role assignment', 'assignee', help='represent a user, group, or service principal. supported format: object id, user sign-in name, or service principal name')
register_cli_argument('role assignment', 'ids', nargs='+', help='space separated role assignment ids')
register_cli_argument('role', 'role_id', help='the role id')
register_cli_argument('role', 'resource_group_name', options_list=('--resource-group', '-g'),
                      help='use it only if the role or assignment was added at the level of a resource group')
register_cli_argument('role', 'scope', help='scope at which this role assignment applies to, e.g., /subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333, /subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup, or /subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup/providers/Microsoft.Compute/virtualMachines/myVM')
register_cli_argument('role', 'custom_role_only', action='store_true', help='custom roles only(vs. build-in ones)')
register_cli_argument('role', 'role_definition', help="json formatted content which defines the new role. run 'show-create-template' to get samples")
register_cli_argument('role', 'name', options_list=('--name', '-n'), help="the role's logical name")
| [
"#---------------------------------------------------------------------------------------------\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n",
"# Licensed under the MIT License. See License.txt in the project root for license information.\n",
"#---------------------------------------------------------------------------------------------\n",
"#pylint: disable=line-too-long\n",
"import azure.cli.core.commands.parameters #pylint: disable=unused-import\n",
"from azure.cli.core.commands import CliArgumentType\n",
"from azure.cli.core.commands import register_cli_argument\n",
"\n",
"register_cli_argument('ad app', 'application_object_id', options_list=('--object-id',))\n",
"register_cli_argument('ad app', 'display_name', help=' the display name of the application')\n",
"register_cli_argument('ad app', 'homepage', help='the url where users can sign in and use your app.')\n",
"register_cli_argument('ad app', 'identifier', options_list=('--id',), help='identifier uri, application id, or object id')\n",
"register_cli_argument('ad app', 'identifier_uris', nargs='+', help='space separated unique URIs that Azure AD can use for this app.')\n",
"register_cli_argument('ad app', 'reply_urls', nargs='+',\n",
" help='space separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request. The value does not need to be a physical endpoint, but must be a valid URI.')\n",
"register_cli_argument('ad app', 'start_date', help='the start date after which password or key would be valid. Default value is current time')\n",
"register_cli_argument('ad app', 'end_date', help='the end date till which password or key is valid. Default value is one year after current time')\n",
"register_cli_argument('ad app', 'key_value', help='the value for the key credentials associated with the application')\n",
"register_cli_argument('ad app', 'key_type', choices=['AsymmetricX509Cert', 'Password', 'Symmetric'], default='AsymmetricX509Cert',\n",
" help='the type of the key credentials associated with the application')\n",
"register_cli_argument('ad app', 'key_usage', choices=['Sign', 'Verify'], default='Verify',\n",
" help='the usage of the key credentials associated with the application.')\n",
"\n",
"sp_name_type = CliArgumentType(\n",
" options_list=('--name', '-n')\n",
")\n",
"register_cli_argument('ad sp', 'identifier', options_list=('--id',), help='service principal name, or object id')\n",
"register_cli_argument('ad sp create', 'identifier', options_list=('--id',), help='identifier uri, application id, or object id of the associated application')\n",
"register_cli_argument('ad sp create-for-rbac', 'name', sp_name_type)\n",
"register_cli_argument('ad sp create-for-rbac', 'years', type=int)\n",
"register_cli_argument('ad sp create-for-rbac', 'scopes', nargs='+')\n",
"register_cli_argument('ad sp reset-credentials', 'name', sp_name_type)\n",
"register_cli_argument('ad sp reset-credentials', 'years', type=int)\n",
"\n",
"register_cli_argument('ad', 'display_name', help='object\\'s display name or its prefix')\n",
"register_cli_argument('ad', 'identifier_uri',\n",
" help='graph application identifier, must be in uri format')\n",
"register_cli_argument('ad', 'spn', help='service principal name')\n",
"register_cli_argument('ad', 'upn', help='user principal name, e.g. john.doe@contoso.com')\n",
"register_cli_argument('ad', 'query_filter', options_list=('--filter',), help='OData filter')\n",
"register_cli_argument('ad user', 'mail_nickname',\n",
" help='mail alias. Defaults to user principal name')\n",
"register_cli_argument('ad user', 'force_change_password_next_login', action='store_true')\n",
"\n",
"register_cli_argument('role assignment', 'role_assignment_name',\n",
" options_list=('--role-assignment-name', '-n'))\n",
"register_cli_argument('role assignment', 'role', help='role name or id')\n",
"register_cli_argument('role assignment', 'show_all', options_list=('--all',), action='store_true',\n",
" help='show all assignments under the current subscription')\n",
"register_cli_argument('role assignment', 'include_inherited', action='store_true',\n",
" help='include assignments applied on parent scopes')\n",
"register_cli_argument('role assignment', 'assignee', help='represent a user, group, or service principal. supported format: object id, user sign-in name, or service principal name')\n",
"register_cli_argument('role assignment', 'ids', nargs='+', help='space separated role assignment ids')\n",
"register_cli_argument('role', 'role_id', help='the role id')\n",
"register_cli_argument('role', 'resource_group_name', options_list=('--resource-group', '-g'),\n",
" help='use it only if the role or assignment was added at the level of a resource group')\n",
"register_cli_argument('role', 'scope', help='scope at which this role assignment applies to, e.g., /subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333, /subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup, or /subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup/providers/Microsoft.Compute/virtualMachines/myVM')\n",
"register_cli_argument('role', 'custom_role_only', action='store_true', help='custom roles only(vs. build-in ones)')\n",
"register_cli_argument('role', 'role_definition', help=\"json formatted content which defines the new role. run 'show-create-template' to get samples\")\n",
"register_cli_argument('role', 'name', options_list=('--name', '-n'), help=\"the role's logical name\")\n",
"\n"
] | [
0.021052631578947368,
0,
0.010526315789473684,
0.021052631578947368,
0.03225806451612903,
0.0273972602739726,
0,
0,
0,
0.011363636363636364,
0.010752688172043012,
0.00980392156862745,
0.008130081300813009,
0.007462686567164179,
0,
0.00510204081632653,
0.006993006993006993,
0.006802721088435374,
0.008403361344537815,
0.007633587786259542,
0.010638297872340425,
0.01098901098901099,
0.010416666666666666,
0,
0,
0,
0,
0.008771929824561403,
0.006289308176100629,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0.012195121951219513,
0,
0.011111111111111112,
0.010752688172043012,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0.010101010101010102,
0.012195121951219513,
0.012048192771084338,
0,
0.005494505494505495,
0.009708737864077669,
0,
0.010638297872340425,
0.009009009009009009,
0.0028011204481792717,
0.008620689655172414,
0.006666666666666667,
0.009900990099009901,
1
] | 62 | 0.022507 | false |
# coding:utf-8
'''
sudoku augmented reality v1.0 with python
solver from web
winxos 2016-03-02
'''
import cv2
import numpy as np
import time
from datetime import datetime
import math
from sudokuSlove import Sudoku
# Digit template images for matching: slot i holds the grayscale template for
# digit i+1, read from "1.png".."9.png" (slot 9 is an unused placeholder 0).
digits=[0 for i in range(10)]
for i in range(9):
    digits[i]=cv2.imread("%d.png"%(i+1),0)  # NOTE(review): imread yields None on a missing file — confirm templates exist
def find(img):
#t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)
m=0
ans=0
for i in xrange(9):
res=cv2.matchTemplate(digits[i],img,cv2.TM_CCOEFF_NORMED)
if res>m:
m=res
ans= i+1
#print m,ans
if m[0]<0.5:
print "recognition miss ",m[0],ans
ans=0
return "%d"%ans
def load_args(data):
    """Turn a whitespace-separated string of digit rows into a 9-slot grid.

    Each token becomes a list of its digits as ints; slots with no
    corresponding token remain None.
    """
    grid = [None] * 9
    position = 0
    for token in data.split():
        grid[position] = [int(ch) for ch in token]
        position += 1
    return grid
class board:
mask = None
board_field = None
showflag = True
solved=False
img_ans=None
sum=0 #for fps
pt=0 #for fps
def __init__(self):
self.cam = cv2.VideoCapture(0)
self.cam.set(3, 1280)
self.cam.set(4, 720)
w = self.cam.get(3)
h = self.cam.get(4)
print w, h
def on_mouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print (x, y)
if x < 100 and y < 100: #
im = None
if self.showflag:
im = self.img
else:
im = self.hsv
if im != None: #
cv2.imwrite(datetime.now().strftime(
"%m%d%H%M%S") + ".png", im) #
print "save png file to:\n", datetime.now().strftime("%m%d%H%M%S") + ".png"
def exact_img(self, win, img, cnt):#warp exact
if len(cnt) < 4:
return
pts = cnt.reshape(4, 2)
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
maxHeight = max(int(heightA), int(heightB))
ratio = max(maxHeight, maxWidth) / 600.0
#w, h = int(maxWidth / ratio), int(maxHeight / ratio)
w, h = int(maxWidth / ratio), int(maxWidth / ratio)
dst = np.array([
[0, 0],
[w - 1, 0],
[w - 1, h - 1],
[0, h - 1]], dtype="float32")
M = cv2.getPerspectiveTransform(rect, dst)
return cv2.warpPerspective(img, M, (w, h))
def add_warp_img(self,src,sub,cnt):
r,c,_=src.shape
pts = cnt.reshape(4, 2)
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
M = cv2.getPerspectiveTransform(np.array([[0,0],[599,0],[599,599],[0,599]],np.float32), rect)
return cv2.warpPerspective(sub, M,(c,r))
def get_state(self,img):
t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)
w = img.shape[1]
h = img.shape[0]
grids=9
gaph=h//grids
gapw=w//grids
for i in range(grids-1):
cv2.line(img,(0,(i+1)*gaph),(w,(i+1)*gaph),(0,0,255),2)
cv2.line(img,(0+(i+1)*gapw,0),((i+1)*gapw,h),(0,0,255),2)
margin=10
ans=""
for i in xrange(9):
for j in xrange(9):
if cv2.countNonZero(img[i*gapw+margin:i*gapw+gapw-margin,j*gapw+margin:j*gapw+gapw-margin])<2100:
s=img[i*gapw+margin:i*gapw+gapw-margin,j*gapw+margin:j*gapw+gapw-margin]
mask = cv2.bitwise_not(s)
kernel = np.ones((5,5), np.uint8)
mask = cv2.dilate(mask, kernel)
mask = cv2.erode(mask, kernel)
kernel = np.ones((3,3), np.uint8)
mask = cv2.dilate(mask, kernel)
#mask = cv2.erode(mask, kernel)
#cv2.imshow("m%d"%(i*9+j+1),mask)
cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
x, y, w, h = cv2.boundingRect(cnts[0])
sub=cv2.resize(s[y:y+h,x:x+w],(40,40))
#cv2.imshow("%d"%(i*9+j+1),sub)
#print "%d"%(i*9+j+1),w,h,
ans+=find(sub)
#cv2.resizeWindow("m%d"%(i*9+j+1),200,100)
else:
ans+="0"
ans+=" "
#cv2.imshow("after", img)
return ans
def add_numbers(self,img,n,ans):#draw number
w = img.shape[1]
h = img.shape[0]
grids=9
gap=h//grids
margin=10
for i in xrange(9):
for j in xrange(9):
if ans[j*10+i]=="0":
cv2.putText(img, n[j*9+i],(i*gap+margin+gap/5, j*gap+margin+gap/2), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0,255, 255), 3)
def find_contours(self, img):#find main board
img = cv2.GaussianBlur(img, (3, 3), 1) #
t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)
#t, img = cv2.threshold(img,97,255,cv2.THRESH_BINARY)
#img = cv2.Canny(img, 100, 250) #
kernel = np.ones((1,1), np.uint8)
img = cv2.erode(img, kernel)
#img = cv2.dilate(img, kernel)
#img = cv2.erode(img, kernel)
#img=cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
cnts, _ = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[1:2]
cv2.drawContours(self.img, cnts,-1, (0,0,255),2)
for cnt in cnts:
cnt_len = cv2.arcLength(cnt, True)
cnt = cv2.approxPolyDP(cnt, 0.01 * cnt_len, True) #
if len(cnt)==4 and cv2.isContourConvex(cnt) and cv2.contourArea(cnt) > 50000: # 4 cv2.contourArea(cnt)
x0, y0, w0, h0 = cv2.boundingRect(cnt)
if not self.solved and x0>10 and y0>10:
ans=self.get_state(self.exact_img("sudoku",self.gray,cnt))
if ans!="":
if not self.solved:
s=Sudoku(load_args(ans))
if not s.solve():
print "can't solved"
self.solved=False
else:
s=s.dump()
print ans," > ",s
sub=np.zeros((600,600,3), np.uint8)
self.add_numbers(sub,s,ans)
sub = cv2.GaussianBlur(sub, (1, 1), 0)
self.img_ans=self.add_warp_img(self.img,sub,cnt)
else:
self.solved=False
break
else:
self.solved=False
self.img_ans=None
def get_fps(self, t):
self.sum += t
self.pt += 1
if self.pt > 100:
self.pt = 1
self.sum = t
return int(self.pt / self.sum)
def run(self): # main capture / solve / display loop
    """Grab camera frames, detect and solve the sudoku, and show the frame
    with the solution overlaid.  ESC (key 27) exits; the camera and all
    windows are released on the way out."""
    while True:
        st = time.clock()
        ret, self.img = self.cam.read()  # grab one frame
        try:
            self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
            self.find_contours(self.gray)
            cv2.putText(self.img, "fps:" + str(self.get_fps((time.clock() - st))),
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 250, 0), 1)
            key = cv2.waitKey(20)
            if key == 27:
                break  # ESC quits
            if self.showflag:
                # BUGFIX: `!= None` on a numpy array is an elementwise
                # comparison whose truth value is ambiguous; an identity
                # test is required here.
                if self.img_ans is not None:
                    ret, mask = cv2.threshold(cv2.cvtColor(self.img_ans, cv2.COLOR_BGR2GRAY), 1, 255, cv2.THRESH_BINARY)
                    mask_inv = cv2.bitwise_not(mask)  # selects the non-overlay region
                    img1_bg = cv2.bitwise_and(self.img, self.img, mask=mask_inv)  # frame with overlay area blanked
                    img2_fg = cv2.bitwise_and(self.img_ans, self.img_ans, mask=mask)  # overlay digits only
                    self.img = cv2.add(img1_bg, img2_fg)
                    self.solved = True
                cv2.imshow("sudoku ar v0.1", self.img)
                cv2.setMouseCallback("sudoku ar v0.1", self.on_mouse)  # click saves a snapshot
        except Exception as e:  # Py2.6+/Py3-compatible form of `except Exception, e`
            print(e)
    self.cam.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Entry point: start the webcam sudoku-AR loop.
    b = board()
    b.run()
| [
"# coding:utf-8\n",
"'''\n",
"sudoku augmented reality v1.0 with python\n",
"solver from web\n",
"winxos 2016-03-02\n",
"'''\n",
"import cv2\n",
"import numpy as np\n",
"import time\n",
"from datetime import datetime\n",
"import math\n",
"from sudokuSlove import Sudoku\n",
"digits=[0 for i in range(10)]\n",
"for i in range(9):\n",
" digits[i]=cv2.imread(\"%d.png\"%(i+1),0)\n",
"def find(img):\n",
" #t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)\n",
" m=0\n",
" ans=0\n",
" for i in xrange(9):\n",
" res=cv2.matchTemplate(digits[i],img,cv2.TM_CCOEFF_NORMED)\n",
" if res>m:\n",
" m=res\n",
" ans= i+1\n",
" #print m,ans\n",
" if m[0]<0.5:\n",
" print \"recognition miss \",m[0],ans\n",
" ans=0\n",
" return \"%d\"%ans\n",
"def load_args(data):\n",
" a = [None] * 9\n",
" for i, d in enumerate(data.split()):\n",
" a[i] = [int(c) for c in d]\n",
" return a\n",
"class board:\n",
" mask = None\n",
" board_field = None\n",
" showflag = True\n",
" solved=False\n",
" img_ans=None\n",
" sum=0 #for fps\n",
" pt=0 #for fps\n",
" def __init__(self):\n",
" self.cam = cv2.VideoCapture(0)\n",
" self.cam.set(3, 1280)\n",
" self.cam.set(4, 720)\n",
" w = self.cam.get(3)\n",
" h = self.cam.get(4)\n",
" print w, h\n",
"\n",
" def on_mouse(self, event, x, y, flags, param):\n",
" if event == cv2.EVENT_LBUTTONDOWN:\n",
" print (x, y)\n",
" if x < 100 and y < 100: #\n",
" im = None\n",
" if self.showflag:\n",
" im = self.img\n",
" else:\n",
" im = self.hsv\n",
" if im != None: #\n",
" cv2.imwrite(datetime.now().strftime(\n",
" \"%m%d%H%M%S\") + \".png\", im) #\n",
" print \"save png file to:\\n\", datetime.now().strftime(\"%m%d%H%M%S\") + \".png\"\n",
"\n",
" def exact_img(self, win, img, cnt):#warp exact\n",
" if len(cnt) < 4:\n",
" return\n",
" pts = cnt.reshape(4, 2)\n",
" rect = np.zeros((4, 2), dtype=\"float32\")\n",
" s = pts.sum(axis=1)\n",
" rect[0] = pts[np.argmin(s)]\n",
" rect[2] = pts[np.argmax(s)]\n",
" diff = np.diff(pts, axis=1)\n",
" rect[1] = pts[np.argmin(diff)]\n",
" rect[3] = pts[np.argmax(diff)]\n",
" (tl, tr, br, bl) = rect\n",
" widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n",
" widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n",
" heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n",
" heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n",
"\n",
" maxWidth = max(int(widthA), int(widthB))\n",
" maxHeight = max(int(heightA), int(heightB))\n",
" ratio = max(maxHeight, maxWidth) / 600.0\n",
" #w, h = int(maxWidth / ratio), int(maxHeight / ratio)\n",
" w, h = int(maxWidth / ratio), int(maxWidth / ratio)\n",
" dst = np.array([\n",
" [0, 0],\n",
" [w - 1, 0],\n",
" [w - 1, h - 1],\n",
" [0, h - 1]], dtype=\"float32\")\n",
" M = cv2.getPerspectiveTransform(rect, dst)\n",
" return cv2.warpPerspective(img, M, (w, h))\n",
" def add_warp_img(self,src,sub,cnt):\n",
" r,c,_=src.shape\n",
" pts = cnt.reshape(4, 2)\n",
" rect = np.zeros((4, 2), dtype=\"float32\")\n",
" s = pts.sum(axis=1)\n",
" rect[0] = pts[np.argmin(s)]\n",
" rect[2] = pts[np.argmax(s)]\n",
" diff = np.diff(pts, axis=1)\n",
" rect[1] = pts[np.argmin(diff)]\n",
" rect[3] = pts[np.argmax(diff)]\n",
" M = cv2.getPerspectiveTransform(np.array([[0,0],[599,0],[599,599],[0,599]],np.float32), rect)\n",
" return cv2.warpPerspective(sub, M,(c,r))\n",
" def get_state(self,img):\n",
" t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)\n",
" w = img.shape[1]\n",
" h = img.shape[0]\n",
" grids=9\n",
" gaph=h//grids\n",
" gapw=w//grids\n",
" for i in range(grids-1):\n",
" cv2.line(img,(0,(i+1)*gaph),(w,(i+1)*gaph),(0,0,255),2)\n",
" cv2.line(img,(0+(i+1)*gapw,0),((i+1)*gapw,h),(0,0,255),2)\n",
" margin=10\n",
" ans=\"\"\n",
" for i in xrange(9):\n",
" for j in xrange(9):\n",
" if cv2.countNonZero(img[i*gapw+margin:i*gapw+gapw-margin,j*gapw+margin:j*gapw+gapw-margin])<2100:\n",
" s=img[i*gapw+margin:i*gapw+gapw-margin,j*gapw+margin:j*gapw+gapw-margin]\n",
" mask = cv2.bitwise_not(s)\n",
" kernel = np.ones((5,5), np.uint8)\n",
" mask = cv2.dilate(mask, kernel)\n",
" mask = cv2.erode(mask, kernel)\n",
" kernel = np.ones((3,3), np.uint8)\n",
" mask = cv2.dilate(mask, kernel)\n",
" #mask = cv2.erode(mask, kernel)\n",
" #cv2.imshow(\"m%d\"%(i*9+j+1),mask)\n",
" cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" x, y, w, h = cv2.boundingRect(cnts[0])\n",
" sub=cv2.resize(s[y:y+h,x:x+w],(40,40))\n",
" #cv2.imshow(\"%d\"%(i*9+j+1),sub)\n",
" #print \"%d\"%(i*9+j+1),w,h,\n",
" ans+=find(sub)\n",
" #cv2.resizeWindow(\"m%d\"%(i*9+j+1),200,100)\n",
" else:\n",
" ans+=\"0\"\n",
" ans+=\" \"\n",
" #cv2.imshow(\"after\", img)\n",
" return ans\n",
" def add_numbers(self,img,n,ans):#draw number\n",
" w = img.shape[1]\n",
" h = img.shape[0]\n",
" grids=9\n",
" gap=h//grids\n",
" margin=10\n",
" for i in xrange(9):\n",
" for j in xrange(9):\n",
" if ans[j*10+i]==\"0\":\n",
" cv2.putText(img, n[j*9+i],(i*gap+margin+gap/5, j*gap+margin+gap/2), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0,255, 255), 3) \n",
" def find_contours(self, img):#find main board\n",
" img = cv2.GaussianBlur(img, (3, 3), 1) #\n",
" t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)\n",
" #t, img = cv2.threshold(img,97,255,cv2.THRESH_BINARY)\n",
" #img = cv2.Canny(img, 100, 250) #\n",
" kernel = np.ones((1,1), np.uint8)\n",
" img = cv2.erode(img, kernel)\n",
" #img = cv2.dilate(img, kernel)\n",
" #img = cv2.erode(img, kernel)\n",
" #img=cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n",
" cnts, _ = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
" cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[1:2]\n",
" cv2.drawContours(self.img, cnts,-1, (0,0,255),2)\n",
" for cnt in cnts:\n",
" cnt_len = cv2.arcLength(cnt, True)\n",
" cnt = cv2.approxPolyDP(cnt, 0.01 * cnt_len, True) #\n",
" if len(cnt)==4 and cv2.isContourConvex(cnt) and cv2.contourArea(cnt) > 50000: # 4 cv2.contourArea(cnt)\n",
" x0, y0, w0, h0 = cv2.boundingRect(cnt)\n",
" if not self.solved and x0>10 and y0>10:\n",
" ans=self.get_state(self.exact_img(\"sudoku\",self.gray,cnt))\n",
" if ans!=\"\":\n",
" if not self.solved:\n",
" s=Sudoku(load_args(ans))\n",
" if not s.solve():\n",
" print \"can't solved\"\n",
" self.solved=False\n",
" else:\n",
" s=s.dump()\n",
" print ans,\" > \",s\n",
" sub=np.zeros((600,600,3), np.uint8)\n",
" self.add_numbers(sub,s,ans)\n",
" sub = cv2.GaussianBlur(sub, (1, 1), 0)\n",
" self.img_ans=self.add_warp_img(self.img,sub,cnt)\n",
" else:\n",
" self.solved=False\n",
" break\n",
" else:\n",
" self.solved=False\n",
" self.img_ans=None\n",
" def get_fps(self, t):\n",
" self.sum += t\n",
" self.pt += 1\n",
" if self.pt > 100:\n",
" self.pt = 1\n",
" self.sum = t\n",
" return int(self.pt / self.sum)\n",
"\n",
" def run(self): #\n",
" while True:\n",
" st = time.clock()\n",
" ret, self.img = self.cam.read() #\n",
" try:\n",
" self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n",
" self.find_contours(self.gray)\n",
" cv2.putText(self.img, \"fps:\" + str(self.get_fps((time.clock() - st))),\n",
" (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 250, 0), 1)\n",
" key = cv2.waitKey(20)\n",
" if(key == 27):\n",
" break #\n",
" if self.showflag: #\n",
" if self.img_ans!=None:\n",
" ret, mask = cv2.threshold(cv2.cvtColor(self.img_ans,cv2.COLOR_BGR2GRAY), 1, 255, cv2.THRESH_BINARY)\n",
" mask_inv=cv2.bitwise_not(mask) #\n",
" img1_bg = cv2.bitwise_and(self.img,self.img,mask = mask_inv)#\n",
" img2_fg = cv2.bitwise_and(self.img_ans,self.img_ans,mask = mask) #\n",
" self.img=cv2.add(img1_bg,img2_fg)\n",
" self.solved=True\n",
" cv2.imshow(\"sudoku ar v0.1\", self.img)\n",
" cv2.setMouseCallback(\"sudoku ar v0.1\", self.on_mouse) #\n",
" except Exception,e:\n",
" print(e)\n",
" self.cam.release()\n",
" cv2.destroyAllWindows()\n",
"\n",
"if __name__ == '__main__':\n",
" b = board()\n",
" b.run()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0.06976744186046512,
0.06666666666666667,
0.01818181818181818,
0.125,
0.1,
0,
0.045454545454545456,
0.05555555555555555,
0.05555555555555555,
0.047619047619047616,
0.058823529411764705,
0.058823529411764705,
0.046511627906976744,
0.07142857142857142,
0.05,
0.047619047619047616,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.058823529411764705,
0.058823529411764705,
0.15789473684210525,
0.16666666666666666,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0.010416666666666666,
0,
0.0392156862745098,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0.125,
0,
0,
0,
0,
0,
0,
0,
0,
0.08823529411764706,
0.04081632653061224,
0.06896551724137931,
0.05172413793103448,
0,
0,
0.0625,
0.045454545454545456,
0.045454545454545456,
0,
0.11764705882352941,
0.11428571428571428,
0.05555555555555555,
0.06666666666666667,
0,
0,
0.02631578947368421,
0.03225806451612903,
0,
0.018518518518518517,
0,
0,
0.018518518518518517,
0,
0.019230769230769232,
0.018518518518518517,
0.010309278350515464,
0,
0.06779661016949153,
0.019230769230769232,
0.02127659574468085,
0.02857142857142857,
0.015873015873015872,
0,
0.034482758620689655,
0.047619047619047616,
0.029411764705882353,
0,
0.12244897959183673,
0,
0,
0.0625,
0.047619047619047616,
0.05555555555555555,
0,
0,
0.02702702702702703,
0.024539877300613498,
0.06,
0,
0.05172413793103448,
0.016129032258064516,
0.023255813953488372,
0.023809523809523808,
0,
0.02564102564102564,
0.02631578947368421,
0.016666666666666666,
0.011494252873563218,
0,
0.07017543859649122,
0,
0,
0,
0.025423728813559324,
0,
0.03571428571428571,
0.0379746835443038,
0.03125,
0,
0.018867924528301886,
0,
0,
0.02,
0,
0.023255813953488372,
0.04,
0.04411764705882353,
0.03333333333333333,
0,
0.04938271604938271,
0,
0.023809523809523808,
0,
0,
0.029411764705882353,
0.029411764705882353,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.012048192771084338,
0,
0,
0,
0,
0.046511627906976744,
0.016129032258064516,
0.03508771929824561,
0.06976744186046512,
0.06593406593406594,
0.034482758620689655,
0.024390243902439025,
0,
0,
0.03125,
0,
0,
0,
0,
0.037037037037037035,
0,
0
] | 228 | 0.020026 | false |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
# NOTE: these keys are fake, but valid base-64 data, they were generated using:
#   base64.b64encode(os.urandom(32))

# Authentication scheme identifiers selectable via SERVICEBUS_AUTH_TYPE.
AUTH_TYPE_SAS = "sas"  # shared-access-signature auth
AUTH_TYPE_ACS = "acs"  # ACS-key auth

# Service Bus test namespace and credentials.
SERVICEBUS_AUTH_TYPE = AUTH_TYPE_SAS
SERVICEBUS_NAME = "fakesbnamespace"
SERVICEBUS_ACS_KEY = "u4T5Ue9OBB35KAlAkoVVc6Tcr/a+Ei4Vl9o7wcyQuPY="
SERVICEBUS_SAS_KEY_NAME = "RootManageSharedAccessKey"
SERVICEBUS_SAS_KEY_VALUE = "WnFy94qL+8MHkWyb2vxnIIh3SomfV97F+u7sl2ULW7Q="

# Event Hubs test namespace and credentials.
EVENTHUB_NAME = "fakehubnamespace"
EVENTHUB_SAS_KEY_NAME = "RootManageSharedAccessKey"
EVENTHUB_SAS_KEY_VALUE = "ELT4OCAZT5jgnsKts1vvHZXSevv5uXf8yACEiqEhFH4="

# Optional HTTP proxy applied when USE_PROXY is True.
USE_PROXY = False
PROXY_HOST = "192.168.15.116"
PROXY_PORT = "8118"
PROXY_USER = ""
PROXY_PASSWORD = ""
| [
"#-------------------------------------------------------------------------\n",
"# Copyright (c) Microsoft. All rights reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"#--------------------------------------------------------------------------\n",
"\n",
"# NOTE: these keys are fake, but valid base-64 data, they were generated using:\n",
"# base64.b64encode(os.urandom(32))\n",
"\n",
"AUTH_TYPE_SAS = \"sas\"\n",
"AUTH_TYPE_ACS = \"acs\"\n",
"\n",
"SERVICEBUS_AUTH_TYPE = AUTH_TYPE_SAS\n",
"SERVICEBUS_NAME = \"fakesbnamespace\"\n",
"SERVICEBUS_ACS_KEY = \"u4T5Ue9OBB35KAlAkoVVc6Tcr/a+Ei4Vl9o7wcyQuPY=\"\n",
"SERVICEBUS_SAS_KEY_NAME = \"RootManageSharedAccessKey\"\n",
"SERVICEBUS_SAS_KEY_VALUE = \"WnFy94qL+8MHkWyb2vxnIIh3SomfV97F+u7sl2ULW7Q=\"\n",
"EVENTHUB_NAME = \"fakehubnamespace\"\n",
"EVENTHUB_SAS_KEY_NAME = \"RootManageSharedAccessKey\"\n",
"EVENTHUB_SAS_KEY_VALUE = \"ELT4OCAZT5jgnsKts1vvHZXSevv5uXf8yACEiqEhFH4=\"\n",
"\n",
"USE_PROXY = False\n",
"PROXY_HOST = \"192.168.15.116\"\n",
"PROXY_PORT = \"8118\"\n",
"PROXY_USER = \"\"\n",
"PROXY_PASSWORD = \"\"\n"
] | [
0.013333333333333334,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 35 | 0.000757 | false |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para nowvideo
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
# Credits:
# Unwise and main algorithm taken from Eldorado url resolver
# https://github.com/Eldorados/script.module.urlresolver/blob/master/lib/urlresolver/plugins/nowvideo.py
import urlparse, urllib2, urllib, re
import os
from core import scrapertools
from core import logger
from core import config
from core import unwise
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:20.0) Gecko/20100101 Firefox/20.0"
def test_video_exists(page_url):
    """Check whether the nowvideo page still hosts a playable file.

    Returns (exists, reason): reason is "" when the file is available,
    otherwise a user-facing (Spanish) explanation string.
    """
    logger.info("[nowvideo.py] test_video_exists(page_url='%s')" % page_url)
    page = scrapertools.cache_page(page_url)
    failure_markers = (
        ("The file is being converted", "El fichero está en proceso"),
        ("no longer exists", "El fichero ha sido borrado"),
    )
    for marker, reason in failure_markers:
        if marker in page:
            return False, reason
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the direct media URL(s) for a nowvideo video page.

    page_url -- page like http://www.nowvideo.<tld>/video/<id>
    premium  -- when True, log in with user/password and query the premium
                player API; otherwise use the anonymous API.
    Returns a list of [label, url] entries (pelisalacarta convention).
    """
    logger.info("[nowvideo.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    video_id = scrapertools.get_match(page_url, "http://www.nowvideo.../video/([a-z0-9]+)")

    if premium:
        # Fetch the login page first (primes session cookies)
        login_url = "http://www.nowvideo.eu/login.php"
        data = scrapertools.cache_page(login_url)

        # Perform the login
        login_url = "http://www.nowvideo.eu/login.php?return="
        # BUGFIX: the literal previously read "...password + <(R)-sign>ister=Login":
        # an HTML-entity pass had mangled the "&reg" of "&register" into the
        # registered-trademark sign, corrupting the POST body.
        post = "user=" + user + "&pass=" + password + "&register=Login"
        headers = []
        headers.append(["User-Agent", USER_AGENT])
        headers.append(["Referer", "http://www.nowvideo.eu/login.php"])
        data = scrapertools.cache_page(login_url, post=post, headers=headers)

        # Download the video page itself
        data = scrapertools.cache_page(page_url)
        logger.debug("data:" + data)

        # API URL to build: http://www.nowvideo.eu/api/player.api.php?user=aaa&file=...&pass=bbb&cid=1&...
        # The page embeds the needed values as flashvars:
        '''
        flashvars.domain="http://www.nowvideo.eu";
        flashvars.file="rxnwy9ku2nwx7";
        flashvars.filekey="83.46.246.226-c7e707c6e20a730c563e349d2333e788";
        flashvars.advURL="0";
        flashvars.autoplay="false";
        flashvars.cid="1";
        flashvars.user="aaa";
        flashvars.key="bbb";
        flashvars.type="1";
        '''
        flashvar_file = scrapertools.get_match(data, 'flashvars.file="([^"]+)"')
        # filekey is indirect: flashvars.filekey names a JS variable holding the value
        flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')
        flashvar_filekey = scrapertools.get_match(data, 'var ' + flashvar_filekey + '="([^"]+)"')
        flashvar_user = scrapertools.get_match(data, 'flashvars.user="([^"]+)"')
        flashvar_key = scrapertools.get_match(data, 'flashvars.key="([^"]+)"')
        flashvar_type = scrapertools.get_match(data, 'flashvars.type="([^"]+)"')

        url = "http://www.nowvideo.eu/api/player.api.php?user=" + flashvar_user + "&file=" + flashvar_file + "&pass=" + flashvar_key + "&cid=1&cid2=undefined&key=" + flashvar_filekey.replace(
            ".", "%2E").replace("-", "%2D") + "&cid3=undefined"
        data = scrapertools.cache_page(url)
        logger.info("data=" + data)

        location = scrapertools.get_match(data, 'url=([^\&]+)&')
        location = location + "?client=FLASH"
        video_urls.append([scrapertools.get_filename_from_url(location)[-4:] + " [premium][nowvideo]", location])
    else:
        data = scrapertools.cache_page(page_url)
        flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')
        filekey = scrapertools.get_match(data, 'var %s="([^"]+)"' % flashvar_filekey).replace(".", "%2E").replace("-", "%2D")

        # get stream url from api
        url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, video_id)
        data = scrapertools.cache_page(url)
        data = scrapertools.get_match(data, 'url=([^\&]+)&')
        res = scrapertools.get_header_from_response(url, header_to_get="content-type")
        if res == "text/html":
            # API answered with an error page: retry through the error endpoint
            data = urllib.quote_plus(data).replace(".", "%2E")
            url = 'http://www.nowvideo.sx/api/player.api.php?cid3=undefined&numOfErrors=1&user=undefined&errorUrl=%s&pass=undefined&errorCode=404&cid=1&cid2=undefined&file=%s&key=%s' % (
                data, video_id, filekey)
            data = scrapertools.cache_page(url)
            try:
                data = scrapertools.get_match(data, 'url=([^\&]+)&')
            except:  # NOTE(review): bare except kept — get_match's failure mode is project-defined
                url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, video_id)
                data = scrapertools.cache_page(url)
                data = scrapertools.get_match(data, 'url=([^\&]+)&')
        media_url = data
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [nowvideo]", media_url])

    return video_urls
# Find this server's videos inside the given page text.
def _find_nowvideo_ids(patronvideos, data, encontrados, devuelve):
    # Shared collector for the simple URL patterns: each captured id becomes
    # a http://www.nowvideo.sx/video/<id> entry.  `encontrados` (dedup set)
    # and `devuelve` (result list) are mutated in place.
    logger.info("[nowvideo.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        titulo = "[nowvideo]"
        url = "http://www.nowvideo.sx/video/" + match
        if url not in encontrados:
            logger.info(" url=" + url)
            devuelve.append([titulo, url, 'nowvideo'])
            encontrados.add(url)
        else:
            logger.info(" url duplicada=" + url)


def find_videos(data):
    """Scan `data` (page HTML) for nowvideo links and return a list of
    [title, url, 'nowvideo'] entries, de-duplicated by URL."""
    encontrados = set()
    devuelve = []

    # http://www.nowvideo.eu/video/4fd0757fd4592
    # TV-series source pages (cineblog / piratestreaming): when one of these
    # markers is present, the fetched <h4> heading is used as the title.
    page = scrapertools.find_single_match(data, 'canonical" href="http://www.cb01.tv/serietv/([^"]+)"')
    page2 = scrapertools.find_single_match(data, 'title">Telef([^"]+)</span>')
    page3 = scrapertools.find_single_match(data, 'content="http://www.piratestreaming.../serietv/([^"]+)"')

    patronvideos = 'nowvideo.../video/([a-z0-9]+)'
    logger.info("[nowvideo.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        titulo = "[nowvideo]"
        url = "http://www.nowvideo.sx/video/" + match
        # fetch the video page to read its <h4> heading
        d = scrapertools.cache_page(url)
        ma = scrapertools.find_single_match(d, '(?<=<h4>)([^<]+)(?=</h4>)')
        ma = titulo + " " + ma
        if url not in encontrados:
            logger.info(" url=" + url)
            if page != "" or page2 != "" or page3 != "":
                devuelve.append([ma, url, 'nowvideo'])
            else:
                devuelve.append([titulo, url, 'nowvideo'])
            encontrados.add(url)
        else:
            logger.info(" url duplicada=" + url)

    # http://www.player3k.info/nowvideo/?id=t1hkrf1bnf2ek
    _find_nowvideo_ids('player3k.info/nowvideo/\?id\=([a-z0-9]+)', data, encontrados, devuelve)

    # http://embed.nowvideo.eu/embed.php?v=obkqt27q712s9&width=600&height=480
    # http://embed.nowvideo.eu/embed.php?v=4grxvdgzh9fdw&width=568&height=340
    _find_nowvideo_ids('nowvideo.../embed.php\?v\=([a-z0-9]+)', data, encontrados, devuelve)

    # http://embed.nowvideo.eu/embed.php?width=600&height=480&v=9fb588463b2c8
    _find_nowvideo_ids('nowvideo.../embed.php\?.+?v\=([a-z0-9]+)', data, encontrados, devuelve)

    # Cineblog by be4t5: the page only links a redirect URL, so follow it
    # with mechanize and extract the video id from the landing page.
    patronvideos = '<a href="http://cineblog01.../HR/go.php\?id\=([0-9]+)'
    logger.info("[nowvideo.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    page = scrapertools.find_single_match(data, 'rel="canonical" href="([^"]+)"')
    from lib import mechanize
    br = mechanize.Browser()
    br.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0')]
    br.set_handle_robots(False)
    for match in matches:
        titulo = "[nowvideo]"
        url = "http://cineblog01.pw/HR/go.php?id=" + match
        r = br.open(page)
        req = br.click_link(url=url)
        html = br.open(req).read()
        vid = scrapertools.find_single_match(html, 'www.nowvideo.../video/([^"]+)"?')
        url = "http://www.nowvideo.sx/video/" + vid
        if url not in encontrados:
            logger.info(" url=" + url)
            devuelve.append([titulo, url, 'nowvideo'])
            encontrados.add(url)
        else:
            logger.info(" url duplicada=" + url)

    return devuelve
def test():
    """Smoke test: resolving a known video page should yield at least one URL."""
    resolved = get_video_url("http://www.nowvideo.eu/video/xuntu4pfq0qye")
    return bool(resolved)
| [
"# -*- coding: utf-8 -*-\n",
"# ------------------------------------------------------------\n",
"# pelisalacarta - XBMC Plugin\n",
"# Conector para nowvideo\n",
"# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n",
"# ------------------------------------------------------------\n",
"# Credits:\n",
"# Unwise and main algorithm taken from Eldorado url resolver\n",
"# https://github.com/Eldorados/script.module.urlresolver/blob/master/lib/urlresolver/plugins/nowvideo.py\n",
"\n",
"import urlparse, urllib2, urllib, re\n",
"import os\n",
"\n",
"from core import scrapertools\n",
"from core import logger\n",
"from core import config\n",
"from core import unwise\n",
"\n",
"USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:20.0) Gecko/20100101 Firefox/20.0\"\n",
"\n",
"\n",
"def test_video_exists(page_url):\n",
" logger.info(\"[nowvideo.py] test_video_exists(page_url='%s')\" % page_url)\n",
"\n",
" data = scrapertools.cache_page(page_url)\n",
"\n",
" if \"The file is being converted\" in data:\n",
" return False, \"El fichero está en proceso\"\n",
"\n",
" if \"no longer exists\" in data:\n",
" return False, \"El fichero ha sido borrado\"\n",
"\n",
" return True, \"\"\n",
"\n",
"\n",
"def get_video_url(page_url, premium=False, user=\"\", password=\"\", video_password=\"\"):\n",
" logger.info(\"[nowvideo.py] get_video_url(page_url='%s')\" % page_url)\n",
" video_urls = []\n",
"\n",
" video_id = scrapertools.get_match(page_url, \"http://www.nowvideo.../video/([a-z0-9]+)\")\n",
"\n",
" if premium:\n",
" # Lee la página de login\n",
" login_url = \"http://www.nowvideo.eu/login.php\"\n",
" data = scrapertools.cache_page(login_url)\n",
"\n",
" # Hace el login\n",
" login_url = \"http://www.nowvideo.eu/login.php?return=\"\n",
" post = \"user=\" + user + \"&pass=\" + password + \"®ister=Login\"\n",
" headers = []\n",
" headers.append([\"User-Agent\", USER_AGENT])\n",
" headers.append([\"Referer\", \"http://www.nowvideo.eu/login.php\"])\n",
" data = scrapertools.cache_page(login_url, post=post, headers=headers)\n",
"\n",
" # Descarga la página del vídeo\n",
" data = scrapertools.cache_page(page_url)\n",
" logger.debug(\"data:\" + data)\n",
"\n",
" # URL a invocar: http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined\n",
" # En la página:\n",
" '''\n",
" flashvars.domain=\"http://www.nowvideo.eu\";\n",
" flashvars.file=\"rxnwy9ku2nwx7\";\n",
" flashvars.filekey=\"83.46.246.226-c7e707c6e20a730c563e349d2333e788\";\n",
" flashvars.advURL=\"0\";\n",
" flashvars.autoplay=\"false\";\n",
" flashvars.cid=\"1\";\n",
" flashvars.user=\"aaa\";\n",
" flashvars.key=\"bbb\";\n",
" flashvars.type=\"1\";\n",
" '''\n",
" flashvar_file = scrapertools.get_match(data, 'flashvars.file=\"([^\"]+)\"')\n",
" flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')\n",
" flashvar_filekey = scrapertools.get_match(data, 'var ' + flashvar_filekey + '=\"([^\"]+)\"')\n",
" flashvar_user = scrapertools.get_match(data, 'flashvars.user=\"([^\"]+)\"')\n",
" flashvar_key = scrapertools.get_match(data, 'flashvars.key=\"([^\"]+)\"')\n",
" flashvar_type = scrapertools.get_match(data, 'flashvars.type=\"([^\"]+)\"')\n",
"\n",
" # http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined\n",
" url = \"http://www.nowvideo.eu/api/player.api.php?user=\" + flashvar_user + \"&file=\" + flashvar_file + \"&pass=\" + flashvar_key + \"&cid=1&cid2=undefined&key=\" + flashvar_filekey.replace(\n",
" \".\", \"%2E\").replace(\"-\", \"%2D\") + \"&cid3=undefined\"\n",
" data = scrapertools.cache_page(url)\n",
" logger.info(\"data=\" + data)\n",
"\n",
" location = scrapertools.get_match(data, 'url=([^\\&]+)&')\n",
" location = location + \"?client=FLASH\"\n",
"\n",
" video_urls.append([scrapertools.get_filename_from_url(location)[-4:] + \" [premium][nowvideo]\", location])\n",
"\n",
" else:\n",
"\n",
" data = scrapertools.cache_page(page_url)\n",
"\n",
" flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')\n",
" filekey = scrapertools.get_match(data, 'var %s=\"([^\"]+)\"' % flashvar_filekey).replace(\".\", \"%2E\").replace(\"-\", \"%2D\")\n",
"\n",
" # get stream url from api\n",
"\n",
" url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, video_id)\n",
" data = scrapertools.cache_page(url)\n",
"\n",
" data = scrapertools.get_match(data, 'url=([^\\&]+)&')\n",
" res = scrapertools.get_header_from_response(url, header_to_get=\"content-type\")\n",
" if res == \"text/html\":\n",
" data = urllib.quote_plus(data).replace(\".\", \"%2E\")\n",
" url = 'http://www.nowvideo.sx/api/player.api.php?cid3=undefined&numOfErrors=1&user=undefined&errorUrl=%s&pass=undefined&errorCode=404&cid=1&cid2=undefined&file=%s&key=%s' % (\n",
" data, video_id, filekey)\n",
" data = scrapertools.cache_page(url)\n",
" try:\n",
" data = scrapertools.get_match(data, 'url=([^\\&]+)&')\n",
" except:\n",
" url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, video_id)\n",
" data = scrapertools.cache_page(url)\n",
" data = scrapertools.get_match(data, 'url=([^\\&]+)&')\n",
"\n",
" media_url = data\n",
"\n",
" video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + \" [nowvideo]\", media_url])\n",
"\n",
" return video_urls\n",
"\n",
"\n",
"# Encuentra vídeos del servidor en el texto pasado\n",
"def find_videos(data):\n",
" encontrados = set()\n",
" devuelve = []\n",
"\n",
"\n",
" # http://www.nowvideo.eu/video/4fd0757fd4592\n",
" # serie tv cineblog\n",
" page = scrapertools.find_single_match(data, 'canonical\" href=\"http://www.cb01.tv/serietv/([^\"]+)\"')\n",
" page2 = scrapertools.find_single_match(data, 'title\">Telef([^\"]+)</span>')\n",
" page3 = scrapertools.find_single_match(data, 'content=\"http://www.piratestreaming.../serietv/([^\"]+)\"')\n",
" patronvideos = 'nowvideo.../video/([a-z0-9]+)'\n",
" logger.info(\"[nowvideo.py] find_videos #\" + patronvideos + \"#\")\n",
" matches = re.compile(patronvideos, re.DOTALL).findall(data)\n",
"\n",
" for match in matches:\n",
" titulo = \"[nowvideo]\"\n",
" url = \"http://www.nowvideo.sx/video/\" + match\n",
" d = scrapertools.cache_page(url)\n",
" ma = scrapertools.find_single_match(d, '(?<=<h4>)([^<]+)(?=</h4>)')\n",
" ma = titulo + \" \" + ma\n",
" if url not in encontrados:\n",
" logger.info(\" url=\" + url)\n",
" if page != \"\" or page2 != \"\" or page3 != \"\":\n",
" devuelve.append([ma, url, 'nowvideo'])\n",
" else:\n",
" devuelve.append([titulo, url, 'nowvideo'])\n",
" encontrados.add(url)\n",
" else:\n",
" logger.info(\" url duplicada=\" + url)\n",
"\n",
"\n",
"\n",
" # http://www.player3k.info/nowvideo/?id=t1hkrf1bnf2ek\n",
" patronvideos = 'player3k.info/nowvideo/\\?id\\=([a-z0-9]+)'\n",
" logger.info(\"[nowvideo.py] find_videos #\" + patronvideos + \"#\")\n",
" matches = re.compile(patronvideos, re.DOTALL).findall(data)\n",
"\n",
" for match in matches:\n",
" titulo = \"[nowvideo]\"\n",
" url = \"http://www.nowvideo.sx/video/\" + match\n",
" if url not in encontrados:\n",
" logger.info(\" url=\" + url)\n",
" devuelve.append([titulo, url, 'nowvideo'])\n",
" encontrados.add(url)\n",
" else:\n",
" logger.info(\" url duplicada=\" + url)\n",
"\n",
" # http://embed.nowvideo.eu/embed.php?v=obkqt27q712s9&width=600&height=480\n",
" # http://embed.nowvideo.eu/embed.php?v=4grxvdgzh9fdw&width=568&height=340\n",
" patronvideos = 'nowvideo.../embed.php\\?v\\=([a-z0-9]+)'\n",
" logger.info(\"[nowvideo.py] find_videos #\" + patronvideos + \"#\")\n",
" matches = re.compile(patronvideos, re.DOTALL).findall(data)\n",
"\n",
" for match in matches:\n",
" titulo = \"[nowvideo]\"\n",
" url = \"http://www.nowvideo.sx/video/\" + match\n",
" if url not in encontrados:\n",
" logger.info(\" url=\" + url)\n",
" devuelve.append([titulo, url, 'nowvideo'])\n",
" encontrados.add(url)\n",
" else:\n",
" logger.info(\" url duplicada=\" + url)\n",
"\n",
" # http://embed.nowvideo.eu/embed.php?width=600&height=480&v=9fb588463b2c8\n",
" patronvideos = 'nowvideo.../embed.php\\?.+?v\\=([a-z0-9]+)'\n",
" logger.info(\"[nowvideo.py] find_videos #\" + patronvideos + \"#\")\n",
" matches = re.compile(patronvideos, re.DOTALL).findall(data)\n",
"\n",
" for match in matches:\n",
" titulo = \"[nowvideo]\"\n",
" url = \"http://www.nowvideo.sx/video/\" + match\n",
" if url not in encontrados:\n",
" logger.info(\" url=\" + url)\n",
" devuelve.append([titulo, url, 'nowvideo'])\n",
" encontrados.add(url)\n",
" else:\n",
" logger.info(\" url duplicada=\" + url)\n",
"\n",
" # Cineblog by be4t5\n",
" patronvideos = '<a href=\"http://cineblog01.../HR/go.php\\?id\\=([0-9]+)'\n",
" logger.info(\"[nowvideo.py] find_videos #\" + patronvideos + \"#\")\n",
" matches = re.compile(patronvideos, re.DOTALL).findall(data)\n",
" page = scrapertools.find_single_match(data, 'rel=\"canonical\" href=\"([^\"]+)\"')\n",
" from lib import mechanize\n",
" br = mechanize.Browser()\n",
" br.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0')]\n",
" br.set_handle_robots(False)\n",
"\n",
" for match in matches:\n",
" titulo = \"[nowvideo]\"\n",
" url = \"http://cineblog01.pw/HR/go.php?id=\" + match\n",
" r = br.open(page)\n",
" req = br.click_link(url=url)\n",
" data = br.open(req)\n",
" data = data.read()\n",
" data = scrapertools.find_single_match(data, 'www.nowvideo.../video/([^\"]+)\"?')\n",
" url = \"http://www.nowvideo.sx/video/\" + data\n",
" if url not in encontrados:\n",
" logger.info(\" url=\" + url)\n",
" devuelve.append([titulo, url, 'nowvideo'])\n",
" encontrados.add(url)\n",
" else:\n",
" logger.info(\" url duplicada=\" + url)\n",
"\n",
" return devuelve\n",
"\n",
"\n",
"def test():\n",
" video_urls = get_video_url(\"http://www.nowvideo.eu/video/xuntu4pfq0qye\")\n",
"\n",
" return len(video_urls) > 0\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.005025125628140704,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.011627906976744186,
0.01020408163265306,
0.012345679012345678,
0,
0.012345679012345678,
0,
0,
0.005208333333333333,
0,
0,
0,
0,
0.015384615384615385,
0,
0,
0.008771929824561403,
0,
0,
0,
0,
0,
0.011627906976744186,
0.007936507936507936,
0,
0,
0,
0.010526315789473684,
0,
0,
0.01639344262295082,
0.011494252873563218,
0,
0,
0.0053475935828877,
0.02702702702702703,
0,
0,
0.014492753623188406,
0.05,
0.009708737864077669,
0,
0.014492753623188406,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0,
0.009615384615384616,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03389830508474576,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02666666666666667,
0,
0,
0.012195121951219513,
0,
0,
0.008849557522123894,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 234 | 0.002375 | false |
# -*- coding: utf-8 -*-
# Copyright 2015 Ivebo.
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#Library from Google
from google.appengine.ext import ndb
from google.appengine.api import search
#Library
from manager.utils.utils import *
from manager.utils.genID import PushID
import os
INDEX_PAGES = 'pages'
class PageCategory(ndb.Model):
    """Datastore model for a page category; categories may nest via
    `category_parent` (self-referencing key)."""
    name = ndb.StringProperty(required=True)  # full display name
    name_short = ndb.StringProperty(required=True)  # abbreviated/slug-style name
    category_parent = ndb.KeyProperty(kind='PageCategory')  # optional parent category
    summary = ndb.StringProperty()
    idcategory = ndb.StringProperty(required=True)  # external/app-level id (see GenId)
    image_url = ndb.StringProperty()
    date_publication = ndb.DateTimeProperty()
    # NOTE(review): auto_now_add only records creation time; for a true
    # "last updated" timestamp, auto_now=True may be intended — confirm.
    date_updated = ndb.DateTimeProperty(auto_now_add=True)
    userID = ndb.StringProperty(required=True)  # id of the user who created it
    language = ndb.StringProperty(default='es')  # ISO language code, Spanish default
class Page(ndb.Model):
    """Datastore model for a CMS page (content, status and metadata)."""
    title = ndb.StringProperty(required=True)
    title_short = ndb.StringProperty(required=True)  # abbreviated title/slug
    idpage = ndb.StringProperty(required=True)  # external/app-level id (see GenId)
    image_url = ndb.StringProperty()
    gs_key = ndb.StringProperty()       # Google Cloud Storage blob key for the image
    gs_filename = ndb.StringProperty()  # original filename in Cloud Storage
    content = ndb.TextProperty()        # page body (unindexed, unlimited length)
    summary = ndb.StringProperty()
    order = ndb.IntegerProperty()       # manual sort position
    date_publication = ndb.DateTimeProperty()
    # NOTE(review): auto_now_add only records creation time; auto_now=True
    # may be intended for an "updated" field — confirm.
    date_updated = ndb.DateTimeProperty(auto_now_add=True)
    userID = ndb.StringProperty(required=True)
    visibility = ndb.StringProperty(choices=set(['public','private']),required=True,default='public')
    status = ndb.StringProperty(choices=set(['published','unpublished','archived','trashed']),required=True,default='published')
    featured = ndb.BooleanProperty(default=False)  # highlight on listings
    language = ndb.StringProperty(default='es')
class PagesinCategory(ndb.Model):
    """Join model: links a Page to a PageCategory (many-to-many)."""
    page = ndb.KeyProperty(kind=Page)
    category = ndb.KeyProperty(kind=PageCategory)
    date_publication = ndb.DateTimeProperty()
    # NOTE(review): auto_now_add records creation only — confirm intent.
    date_updated = ndb.DateTimeProperty(auto_now_add=True)
def title_short(string):
    """Return a shortened form of *string* (delegates to ``short_text``)."""
    return short_text(string)
def GenId():
    """Generate a new push-style unique id string."""
    return PushID().next_id()
def site_key():
    """Ancestor key for the current site, named by the ``site`` env var."""
    site_name = os.environ['site']
    return ndb.Key('Site', site_name)
def IndexPages(title, idpage, summary, content):
    """Build a full-text search Document for a page, keyed by its id."""
    doc_fields = [
        search.TextField(name='title', value=title),
        search.AtomField(name='idpage', value=idpage),
        search.TextField(name='summary', value=summary),
        search.HtmlField(name='content', value=content),
    ]
    return search.Document(doc_id=idpage, fields=doc_fields)
| [
"# -*- coding: utf-8 -*-\n",
"# Copyright 2015 Ivebo.\n",
"#\n",
"#This program is free software: you can redistribute it and/or modify\n",
"#it under the terms of the GNU General Public License as published by\n",
"#the Free Software Foundation, either version 3 of the License, or\n",
"#(at your option) any later version.\n",
"\n",
"#This program is distributed in the hope that it will be useful,\n",
"#but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"#GNU General Public License for more details.\n",
"\n",
"#You should have received a copy of the GNU General Public License\n",
"#along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\n",
"#Library from Google\n",
"from google.appengine.ext import ndb\n",
"from google.appengine.api import search\n",
"\n",
"#Library\n",
"from manager.utils.utils import *\n",
"from manager.utils.genID import PushID\n",
"\n",
"import os\n",
"\n",
"INDEX_PAGES = 'pages'\n",
"\n",
"class PageCategory(ndb.Model):\n",
" name = ndb.StringProperty(required=True)\n",
" name_short = ndb.StringProperty(required=True)\n",
" category_parent = ndb.KeyProperty(kind='PageCategory')\n",
" summary = ndb.StringProperty()\n",
" idcategory = ndb.StringProperty(required=True)\n",
" image_url = ndb.StringProperty()\n",
" date_publication = ndb.DateTimeProperty()\n",
" date_updated = ndb.DateTimeProperty(auto_now_add=True)\n",
" userID = ndb.StringProperty(required=True)\n",
" language = ndb.StringProperty(default='es')\n",
" \n",
"class Page(ndb.Model):\n",
" title = ndb.StringProperty(required=True)\n",
" title_short = ndb.StringProperty(required=True)\n",
" idpage = ndb.StringProperty(required=True)\n",
" image_url = ndb.StringProperty()\n",
" gs_key = ndb.StringProperty()\n",
" gs_filename = ndb.StringProperty()\n",
" content = ndb.TextProperty()\n",
" summary = ndb.StringProperty()\n",
" order = ndb.IntegerProperty()\n",
" date_publication = ndb.DateTimeProperty()\n",
" date_updated = ndb.DateTimeProperty(auto_now_add=True)\n",
" userID = ndb.StringProperty(required=True)\n",
" visibility = ndb.StringProperty(choices=set(['public','private']),required=True,default='public')\n",
" status = ndb.StringProperty(choices=set(['published','unpublished','archived','trashed']),required=True,default='published')\n",
" featured = ndb.BooleanProperty(default=False)\n",
" language = ndb.StringProperty(default='es')\n",
"\n",
"class PagesinCategory(ndb.Model):\n",
" page = ndb.KeyProperty(kind=Page)\n",
" category = ndb.KeyProperty(kind=PageCategory)\n",
" date_publication = ndb.DateTimeProperty()\n",
" date_updated = ndb.DateTimeProperty(auto_now_add=True)\n",
"\n",
"\n",
"def title_short(string):\n",
" string = short_text(string)\n",
" return string\n",
"\n",
"def GenId():\n",
" p = PushID()\n",
" p = p.next_id()\n",
" return p\n",
"\n",
"def site_key():\n",
" return ndb.Key('Site', os.environ['site'])\n",
"\n",
"def IndexPages(title, idpage, summary, content):\n",
" return search.Document(\n",
" doc_id = idpage,\n",
" fields=[search.TextField(name='title', value=title),\n",
" search.AtomField(name='idpage',value=idpage),\n",
" search.TextField(name='summary', value=summary),\n",
" search.HtmlField(name='content', value=content)\n",
" ]\n",
" )\n"
] | [
0,
0,
0,
0.014285714285714285,
0.014285714285714285,
0.014925373134328358,
0.02702702702702703,
0,
0.015384615384615385,
0.015625,
0.015873015873015872,
0.021739130434782608,
0,
0.014925373134328358,
0.014084507042253521,
0,
0,
0.047619047619047616,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0392156862745098,
0.046511627906976744,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0.0625,
0.022222222222222223,
0,
0.02040816326530612,
0,
0.08,
0,
0.016129032258064516,
0,
0,
0.07142857142857142,
0
] | 87 | 0.012269 | false |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: Telugu OCR
Author : Rakeshvara Rao
License: GNU GPL 3.0
This module contains functions that take a unicode Telugu string and
break it up in to pieces. E.g:- the letter స is broken in to a ✓ and
a bottom part represented as స again.
"""
import re
# Consonants that take an inherent vattu form (currently unreferenced).
VATTU_HAVERS = 'ఖఘఛఝటఢథధఫభ' # unused
# Consonants whose glyphs are drawn as two separate pieces (tick + base).
TWO_PIECERS = 'ఘపఫషసహ'
GHO_STYLE = 'TEL'
# This variable is font dependent as ఘో is rendered in two different ways.
# TELugu, KANnada

# aksh_pattern includes space also
# Alternatives are tried in order; each group captures one akshara class.
aksh_pattern = re.compile(
    "([ఁ-ఔృౄౢౣ])|" # Duals, Vowels
    "([0-9౦-౯])|" # Numbers
    "([!(),\-.=?'\"।॥:;%&+<>])|" # Punctuations
    "( )|" # Space
    "(([క-హ]్)*[క-హ][ా-ూె-ౌ])|" # Compounds
    "(([క-హ]్)*[క-హ](?![ా-ూె-్]))|" # Compounds in 'a'
    "(([క-హ]్)+(?=\s))") # Pollu
def process_two_piecers(akshara):
    """Split a two-piece akshara into its separately-drawn glyph parts."""
    tick_class = '''ఘాఘుఘూఘౌపుపూఫుఫూషుషూసుసూహా
    హుహూహొహోహౌ'''
    base_class = '''ఘిఘీఘెఘేపిపీపెపేఫిఫీఫెఫేషిషీషెషేసిసీసె
    సేహిహీహెహేఘ్ప్ఫ్ష్స్హ్ '''

    # Rendered as a tick plus the full consonant+vowel glyph.
    if akshara in tick_class:
        return ['✓', akshara]

    # Rendered as the vowel mark plus the underlying consonant base.
    if akshara in base_class:
        return [akshara[1], akshara[0]]

    # ai-karam detaches: an e-mark, the bare consonant, then the ai sign.
    if akshara in 'ఘైపైఫైషైసైహై':
        return ['ె', akshara[0], 'ై']

    # ఘొ/ఘో rendering differs between Telugu- and Kannada-style fonts.
    if akshara in 'ఘొఘో':
        if GHO_STYLE == 'TEL':
            return ['✓', akshara]
        return ['ె', 'ఘా' if akshara == 'ఘో' else 'ఘు']

    # Everything else (combining marks like saa, pau, ...) stays whole.
    return [akshara]
def process_sans_vattu(akshara):
    """Break an independent symbol or simple consonant-vowel pair into glyphs."""
    # ఏ is drawn as an ఎ body plus an extra stroke.
    if akshara == 'ఏ':
        return ['ఏ', 'ఎ']

    # Lone symbol: punctuation, vowel, or bare consonant.
    if len(akshara) == 1:
        pieces = ['✓'] if akshara in TWO_PIECERS else []
        pieces.append(akshara)
        return pieces

    # Consonant + vowel-mark pair.
    if len(akshara) == 2:
        consonant, mark = akshara[0], akshara[1]
        if consonant in TWO_PIECERS:
            return process_two_piecers(akshara)
        if mark in 'ై':
            # ai-karam splits off; the consonant keeps an e-mark.
            return [consonant + ('ె' if mark == 'ై' else ''), mark]
        return [akshara]

    # Anything longer is handled upstream (vattu pairs in process_akshara).
    return []
def process_akshara(akshara):
    """Split one akshara (syllable) into its component glyph pieces."""
    # Odd length => bare consonant cluster (no vowel mark at the end);
    # even length => the final code point is the vowel mark.
    if len(akshara) % 2:
        core = akshara[0]
    else:
        core = akshara[0] + akshara[-1]

    pieces = process_sans_vattu(core)
    # Each interior (virama, consonant) pair is a subscript vattu glyph.
    pieces.extend(akshara[i] + akshara[i + 1]
                  for i in range(1, len(akshara) - 1, 2))
    return pieces
def process_line(line, pattern=aksh_pattern):
    """Parse one chunk of Telugu text into a flat list of glyph pieces."""
    pieces = []
    for found in pattern.finditer(line):
        pieces.extend(process_akshara(found.group()))
    return pieces
# #############################################################################
def main():
    """Demo: parse a few sample Telugu lines and print their glyph pieces."""
    samples = ['ఏతస్మిన్ సిద్ధాశ్రమే దేశే మందాకిన్యా',
               'శైలస్య చిత్రకూటస్య పాదే పూర్వోత్తరే ',
               'ఘోరోఽపేయ పైత్యకారిణ్ లినక్స్ ']
    for sample in samples:
        print(sample)
        for piece in process_line(sample):
            print(piece, end=', ')
        print()
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
"#! /usr/bin/env python3\n",
"# -*- coding: utf-8 -*-\n",
"\"\"\"\n",
" Project: Telugu OCR\n",
" Author : Rakeshvara Rao\n",
" License: GNU GPL 3.0\n",
"\n",
" This module contains functions that take a unicode Telugu string and \n",
" break it up in to pieces. E.g:- the letter స is broken in to a ✓ and\n",
" a bottom part represented as స again.\n",
"\"\"\"\n",
"import re\n",
"\n",
"VATTU_HAVERS = 'ఖఘఛఝటఢథధఫభ' # unused\n",
"TWO_PIECERS = 'ఘపఫషసహ'\n",
"GHO_STYLE = 'TEL'\n",
"# This variable is font dependent as ఘో is rendered in two different ways.\n",
"# TELugu, KANnada\n",
"\n",
"# aksh_pattern includes space also\n",
"aksh_pattern = re.compile(\n",
" \"([ఁ-ఔృౄౢౣ])|\" # Duals, Vowels\n",
" \"([0-9౦-౯])|\" # Numbers\n",
" \"([!(),\\-.=?'\\\"।॥:;%&+<>])|\" # Punctuations\n",
" \"( )|\" # Space\n",
" \"(([క-హ]్)*[క-హ][ా-ూె-ౌ])|\" # Compounds\n",
" \"(([క-హ]్)*[క-హ](?![ా-ూె-్]))|\" # Compounds in 'a'\n",
" \"(([క-హ]్)+(?=\\s))\") # Pollu\n",
"\n",
"\n",
"def process_two_piecers(akshara):\n",
" \"\"\" Process the aksharas that are written in two pieces\"\"\"\n",
" # Class of tick & consonant+vowel \n",
" if '''ఘాఘుఘూఘౌపుపూఫుఫూషుషూసుసూహా\n",
" హుహూహొహోహౌ'''.find(akshara) >= 0:\n",
" return ['✓', akshara]\n",
"\n",
" # Class of vowel-mark & underlying consonant base\n",
" if '''ఘిఘీఘెఘేపిపీపెపేఫిఫీఫెఫేషిషీషెషేసిసీసె\n",
" సేహిహీహెహేఘ్ప్ఫ్ష్స్హ్ '''.find(akshara) >= 0:\n",
" return [akshara[1], akshara[0]]\n",
"\n",
" # Detached ai-karams\n",
" if 'ఘైపైఫైషైసైహై'.find(akshara) >= 0:\n",
" return ['ె', akshara[0], 'ై']\n",
"\n",
" # gho\n",
" if 'ఘొఘో'.find(akshara) >= 0:\n",
" if GHO_STYLE == 'TEL': # Telugu style ఘొఘో\n",
" return ['✓', akshara]\n",
" else: # Kannada style\n",
" return ['ె', 'ఘా' if akshara == 'ఘో' else 'ఘు']\n",
"\n",
" # Combining marks like saa, pau etc.\n",
" return [akshara]\n",
"\n",
"\n",
"def process_sans_vattu(akshara):\n",
" \"\"\"Process one independent symbol or a simple CV pair\"\"\"\n",
" glps = []\n",
"\n",
" # ఏ Special Case\n",
" if akshara == 'ఏ':\n",
" glps += ['ఏ', 'ఎ']\n",
"\n",
" # Punc, Single Letters\n",
" elif len(akshara) == 1:\n",
" if akshara in TWO_PIECERS:\n",
" glps += ['✓']\n",
"\n",
" glps += [akshara]\n",
"\n",
" # Cons + Vowel\n",
" elif len(akshara) == 2:\n",
" if akshara[0] in TWO_PIECERS:\n",
" glps += process_two_piecers(akshara)\n",
"\n",
" elif akshara[1] in 'ై':\n",
" glps += [akshara[0] + ('ె' if akshara[1] == 'ై' else '')]\n",
" glps += [akshara[1]]\n",
"\n",
" else:\n",
" glps += [akshara]\n",
"\n",
" return glps\n",
"\n",
"\n",
"def process_akshara(akshara):\n",
" \"\"\" Processes an Akshara at a time (i.e. syllable by syllable)\"\"\"\n",
" aksh_wo_vattulu = akshara[0] + ('' if len(akshara) % 2 else akshara[-1])\n",
"\n",
" glps = process_sans_vattu(aksh_wo_vattulu)\n",
"\n",
" for i in range(1, len(akshara) - 1, 2): # Add each vattu, usually just one\n",
" glps += [akshara[i] + akshara[i + 1]]\n",
"\n",
" return glps\n",
"\n",
"\n",
"def process_line(line, pattern=aksh_pattern):\n",
" \"\"\"The main function of this module; Used to parse one chunk of Telugu text\n",
" \"\"\"\n",
" glps = []\n",
" for a in pattern.finditer(line):\n",
" glps += process_akshara(a.group())\n",
" return glps\n",
"\n",
"\n",
"# #############################################################################\n",
"\n",
"\n",
"def main():\n",
" dump = ['ఏతస్మిన్ సిద్ధాశ్రమే దేశే మందాకిన్యా',\n",
" 'శైలస్య చిత్రకూటస్య పాదే పూర్వోత్తరే ',\n",
" 'ఘోరోఽపేయ పైత్యకారిణ్ లినక్స్ ']\n",
" for line in dump:\n",
" print(line)\n",
" for aks in process_line(line):\n",
" print(aks, end=', ')\n",
" print()\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" main()"
] | [
0,
0,
0,
0,
0,
0,
0,
0.013513513513513514,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1
] | 124 | 0.001524 | false |
from django.shortcuts import get_object_or_404, render
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views import generic
from django.utils import timezone
from .models import Question, Choice
class IndexView(generic.ListView):
    """Front page: lists the five most recently published questions."""

    template_name = 'myhealth/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions, excluding any whose
        publication date is in the future."""
        published = Question.objects.filter(pub_date__lte=timezone.now())
        return published.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Detail page for a single question."""

    model = Question
    template_name = 'myhealth/detail.html'

    def get_queryset(self):
        """Exclude questions that aren't published yet."""
        cutoff = timezone.now()
        return Question.objects.filter(pub_date__lte=cutoff)
class ResultsView(generic.DetailView):
    """Results page showing vote tallies for a single question."""

    model = Question
    template_name = 'myhealth/results.html'

    def get_queryset(self):
        """Exclude questions that aren't published yet."""
        cutoff = timezone.now()
        return Question.objects.filter(pub_date__lte=cutoff)
''' part 3 version:
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list': latest_question_list,
}
return render(request, 'myhealth/index.html', context)
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'myhealth/detail.html', {'question': question})
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'myhealth/results.html', {'question': question})
'''
def vote(request, question_id):
    """Record a vote for one of the question's choices (POST handler)."""
    question = get_object_or_404(Question, pk=question_id)
    try:
        choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # No choice submitted, or an unknown one: redisplay the form
        # with an error message.
        context = {
            'question': question,
            'error_message': "You didn't select a choice.",
        }
        return render(request, 'myhealth/detail.html', context)

    choice.votes += 1
    choice.save()
    # Redirect after a successful POST so that hitting Back does not
    # resubmit the vote.
    results_url = reverse('myhealth:results', args=(question.id,))
    return HttpResponseRedirect(results_url)
| [
"from django.shortcuts import get_object_or_404, render\n",
"from django.http import Http404, HttpResponse, HttpResponseRedirect\n",
"from django.core.urlresolvers import reverse\n",
"from django.views import generic\n",
"from django.utils import timezone\n",
"\n",
"from .models import Question, Choice\n",
"\n",
"class IndexView(generic.ListView):\n",
" template_name = 'myhealth/index.html'\n",
" context_object_name = 'latest_question_list'\n",
"\n",
" def get_queryset(self):\n",
" \"\"\" \n",
" Return the last five published question. (not including those set to be\n",
" published in the future)\n",
" \"\"\"\n",
" return Question.objects.filter(pub_date__lte=timezone.now()\n",
" ).order_by('-pub_date')[:5]\n",
"\n",
"class DetailView(generic.DetailView):\n",
" model = Question\n",
" template_name = 'myhealth/detail.html'\n",
"\n",
" def get_queryset(self):\n",
" \"\"\"\n",
" Excludes any questions that aren't published yet.\n",
" \"\"\"\n",
" return Question.objects.filter(pub_date__lte=timezone.now())\n",
"\n",
"class ResultsView(generic.DetailView):\n",
" model = Question\n",
" template_name = 'myhealth/results.html'\n",
"\n",
" def get_queryset(self):\n",
" \"\"\"\n",
" Excludes any questions that aren't published yet.\n",
" \"\"\"\n",
" return Question.objects.filter(pub_date__lte=timezone.now())\n",
"\n",
"''' part 3 version: \n",
"def index(request):\n",
" latest_question_list = Question.objects.order_by('-pub_date')[:5]\n",
" context = {\n",
" 'latest_question_list': latest_question_list,\n",
" }\n",
" return render(request, 'myhealth/index.html', context)\n",
"\n",
"def detail(request, question_id):\n",
" question = get_object_or_404(Question, pk=question_id)\n",
" return render(request, 'myhealth/detail.html', {'question': question})\n",
"\n",
"def results(request, question_id):\n",
" question = get_object_or_404(Question, pk=question_id)\n",
" return render(request, 'myhealth/results.html', {'question': question})\n",
"\n",
"'''\n",
"\n",
"def vote(request, question_id):\n",
" question = get_object_or_404(Question, pk=question_id)\n",
" try:\n",
" selected_choice = question.choice_set.get(pk=request.POST['choice'])\n",
" except (KeyError, Choice.DoesNotExist):\n",
" # Redisplay the question voting form.\n",
" return render(request, 'myhealth/detail.html', {\n",
" 'question': question,\n",
" 'error_message': \"You didn't select a choice.\",\n",
" })\n",
" else:\n",
" selected_choice.votes += 1\n",
" selected_choice.save()\n",
" # Always return an HttpResponseRedirect after successfully dealing\n",
" # with POST data. This prevents data from being posted twice if a\n",
" # user hits the Back button.\n",
" return HttpResponseRedirect(reverse('myhealth:results', args=(question.id,)))\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0.015151515151515152,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.09523809523809523,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186
] | 75 | 0.004143 | false |
"""
This script runs the application using a development server.
"""
import bottle
import os
import sys
# routes contains the HTTP handlers for our server and must be imported.
from routes import index
from routes.dtos import crud
from routes.dtos import operations as operationsDtos
from routes.entities import operations as operationsEntities
# Enable bottle's debug mode when requested via CLI flag or environment.
if '--debug' in sys.argv[1:] or 'SERVER_DEBUG' in os.environ:
    # Debug mode will enable more verbose output in the console window.
    # It must be set at the beginning of the script.
    bottle.debug(True)
def wsgi_app():
    """Return the default bottle WSGI application.

    Exposed so the app can be served through wfastcgi, which is used
    when the site is published to Microsoft Azure.
    """
    return bottle.default_app()
if __name__ == '__main__':
    # Static assets live in a sibling 'Client' directory; normalize to
    # forward slashes for bottle's static_file root.
    PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
    STATIC_ROOT = os.path.join(PROJECT_ROOT, '..', 'Client').replace('\\', '/')
    # Host/port are configurable via environment; fall back to a local default.
    HOST = os.environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(os.environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # Non-numeric SERVER_PORT: fall back to the default port.
        PORT = 5555

    @bottle.route('/')
    def server_root():
        """Handler for static files, used with the development server.
        When running under a production server such as IIS or Apache,
        the server should be configured to serve the static files."""
        return bottle.static_file("index.html", root=STATIC_ROOT)

    @bottle.route('/static/<filepath:path>')
    def server_static(filepath):
        """Handler for static files, used with the development server.
        When running under a production server such as IIS or Apache,
        the server should be configured to serve the static files."""
        return bottle.static_file(filepath, root=STATIC_ROOT)

    # Starts a local test server.
    bottle.run(server='wsgiref', host=HOST, port=PORT)
| [
"\"\"\"\n",
"This script runs the application using a development server.\n",
"\"\"\"\n",
"\n",
"import bottle\n",
"import os\n",
"import sys\n",
"\n",
"# routes contains the HTTP handlers for our server and must be imported.\n",
"from routes import index\n",
"from routes.dtos import crud\n",
"from routes.dtos import operations as operationsDtos\n",
"from routes.entities import operations as operationsEntities\n",
"\n",
"if '--debug' in sys.argv[1:] or 'SERVER_DEBUG' in os.environ:\n",
" # Debug mode will enable more verbose output in the console window.\n",
" # It must be set at the beginning of the script.\n",
" bottle.debug(True)\n",
"\n",
"def wsgi_app():\n",
" \"\"\"Returns the application to make available through wfastcgi. This is used\n",
" when the site is published to Microsoft Azure.\"\"\"\n",
" return bottle.default_app()\n",
"\n",
"if __name__ == '__main__':\n",
" PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\n",
" STATIC_ROOT = os.path.join(PROJECT_ROOT, '..', 'Client').replace('\\\\', '/')\n",
" HOST = os.environ.get('SERVER_HOST', 'localhost')\n",
" try:\n",
" PORT = int(os.environ.get('SERVER_PORT', '5555'))\n",
" except ValueError:\n",
" PORT = 5555\n",
"\n",
" @bottle.route('/')\n",
" def server_root():\n",
" \"\"\"Handler for static files, used with the development server.\n",
" When running under a production server such as IIS or Apache,\n",
" the server should be configured to serve the static files.\"\"\"\n",
" return bottle.static_file(\"index.html\", root=STATIC_ROOT)\n",
"\n",
" @bottle.route('/static/<filepath:path>')\n",
" def server_static(filepath):\n",
" \"\"\"Handler for static files, used with the development server.\n",
" When running under a production server such as IIS or Apache,\n",
" the server should be configured to serve the static files.\"\"\"\n",
" return bottle.static_file(filepath, root=STATIC_ROOT)\n",
"\n",
" # Starts a local test server.\n",
" bottle.run(server='wsgiref', host=HOST, port=PORT)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 49 | 0.002031 | false |
import common, rechat, twitch
import xbmcaddon
import xbmcgui
import xbmc
import xbmcvfs
import stuff
import classes
import time
from common import Cache, Debugger
d = Debugger()
Cache.CACHE_PATH = 'special://temp/reXChat/'
# d.dialog('script started')
addon = xbmcaddon.Addon()
# addon.openSettings()
addonname = addon.getAddonInfo('name')
addonpath = xbmc.translatePath(addon.getAddonInfo('path'))
ACTION_PREVIOUS_MENU = 10
class OverlayChat():
def __init__(self):
self.showing = False
self.window = xbmcgui.Window(12005)
#main window
chatBackground = xbmc.translatePath(classes.CACHE_PATH + 'ChatBackground.png')
# chatBackgroundResized = xbmc.translatePath(twitchvideo.cachePath + 'ChatBackgroundResized.jpeg')
# im = Image.open(chatBackground)
# im.thumbnail((320, 600), Image.ANTIALIAS)
# im.save(chatBackgroundResized, "JPEG")
self.background = xbmcgui.ControlImage(-110, 0, 418, 564, chatBackground, aspectRatio=0)
# w*22 font12 width 320, itemTextXOffset -2
self.clist = xbmcgui.ControlList(0, 0, 320, 600, 'font12', 'FFFFFFFF', 'IrcChat/ChatArrowFO.png', 'pstvButtonFocus.png', 'FFFFFFFF', 0, 0, -2, 0, 20, 0, 0)
self.window.addControl(self.background)
self.window.addControl(self.clist)
self.id = self.clist.getId()
# listItem = xbmcgui.ListItem('yoyoyo', 'asdfasdf')
# listItem1 = xbmcgui.ListItem('yoyoyo', 'asdfasdf2')
# listItem2 = xbmcgui.ListItem('yoyoyo', 'asdfasdf3')
# listItem3 = xbmcgui.ListItem('yoyoyo', 'asdfasdf4')
# self.clist.addItem('uau')
# self.clist.addItem(listItem1)
# self.clist.addItem(listItem2)
# self.clist.addItem(listItem3)
# self.clist.selectItem(0)
# self.clist.selectItem(1)
# self.window.setFocus(self.clist)
# listItem.select(True)
# d.dialog(self.clist.size())
# self.clist.setAnimations([('focus', 'effect=zoom end=90,247,220,56 time=0',)])
def addLine(self, line):
self.clist.addItem(line)
xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "Input.Down", "id": %s }' %(self.id))
def show(self):
self.showing=True
def hide(self):
self.showing=False
self.window.removeControl(self.background)
self.window.removeControl(self.clist)
def scrollTo(self, index):
index = int(index)
self.clist.selectItem(index)
# c_size = self.clist.size()
# xbmc.executebuiltin("Control.move(" + str(self.clist.getId()) + ", %s)" %'1')
# xbmc.executebuiltin("Control.move(" + str(self.clist.getId()) + ", %s)" %index)
# xbmc.executebuiltin("Control.move(1331, %s)" %c_size)
def resizeBackground(self, x, y, width, height):
self.window.removeControl(self.background)
self.background = xbmcgui.ControlImage(x, y, width, height, xbmc.translatePath(classes.CACHE_PATH + 'ChatBackground.png'))
self.window.addControl(self.background)
def _close(self):
if self.showing:
self.hide()
else:
pass
try:
self.window.clearProperties()
print("OverlayText window closed")
except: pass
class PlaybackController(xbmc.Monitor):
def __init__(self):
self.settings = Settings()
classes.Message.lineLength = self.settings.characters
self.chat = ChatRenderer(self.settings)
def kill(self):
self.chat.close()
def now(self):
self.chat.scroll(xbmc.Player().getTime() * 1000)
def onSettingsChanged(self):
self.settings.update()
# lat123.resizeBackground(settings.backgroundX, settings.backgroundY, settings.backgroundWidth, settings.backgroundHeight)
class ChatRenderer():
def __init__(self, settings):
self.messageIndex = {}
self.video = twitch.ArchiveVideo(None, 1, streamId='v3725446')
self.chat = OverlayChat()
self.chat.show()
self.delay = settings.delay
# self.chat.populate() #1
# self.chat.clist.reset()
def addMessage(self, message):
for line in message.getLines():
self.chat.addLine(line)
message.isRendered = True
self.messageIndex[message.id] = self.chat.clist.size() - 1
def close(self):
self.chat._close()
def populate(self):
for message in self.video.messages:
self.addMessage(message)
def scroll(self, playerTime):
scroll = None
delayedTime = playerTime + self.delay
index = self.chat.clist.size()
id = self.chat.clist.getId()
for message in self.video.messages:
if message.absoluteTimeMs < delayedTime and message.isRendered == False:
d.dialog('before')
self.addMessage(message) #1
d.dialog('after')
# d.dialog('added')
# xbmc.sleep(50)
pass #1
elif message.absoluteTimeMs > delayedTime:
if scroll:
self.chat.scrollTo(self.messageIndex[scroll.id])
# d.dialog('scroll')
# xbmc.sleep(2000)
return
# else: # Wonders happen
# self.addMessage(message) #1
# self.chat.scrollTo(self.messageIndex[message.id])
# return
scroll = message
class Settings():
def __init__(self):
self.update()
def update(self):
self.backgroundX = int(addon.getSetting('backgroundX'))
self.backgroundY = int(addon.getSetting('backgroundY'))
self.backgroundWidth = int(addon.getSetting('backgroundWidth'))
self.backgroundHeight = int(addon.getSetting('backgroundHeight'))
self.backgroundOpacity = addon.getSetting('backgroundOpacity')
self.chatX = int(addon.getSetting('chatX'))
self.chatY = int(addon.getSetting('chatY'))
self.chatWidth = int(addon.getSetting('chatWidth'))
self.chatHeight = int(addon.getSetting('chatHeight'))
self.characters = int(addon.getSetting('characters'))
self.delay = int(addon.getSetting('delay')) * 1000
def drawUI():
playbackController = PlaybackController()
#try:
count = 0
while(count < 25):
xbmc.sleep(500)
playbackController.now()
count += 1
#except: d.dialog('exception')
playbackController.kill()
def rechatServiceTest():
twitchAPI = twitch.CachedAPI()
# streamInfo = twitchAPI.getStreamInfo(videoId='a611375915')
# d.dialog(streamInfo.streamId)
# d.dialog(streamInfo.recordedAt)
# d.dialog(streamInfo.recordedAtMs)
# streamInfo = twitchAPI.getStreamInfo(streamId='v3832313')
# streamInfo = twitchAPI.getStreamInfo(streamId='v3676602') # checks findLoose on start
streamInfo = twitchAPI.getStreamInfo(streamId='v3800416')
# d.dialog(streamInfo.streamId)
# d.dialog(streamInfo.recordedAt)
# d.dialog(streamInfo.recordedAtMs)
# rechatService = rechat.Service('v3800416', 1424183641000)
# rechatService = rechat.Service(streamInfo)
# rMessages = rechatService.next()
# d.dialog(rMessages[-1])
# d.dialog(rechatService.next()[-1])
# d.dialog(rechatService.afterMs(1424183641000)[-1])
# d.dialog(rechatService.next()[-1])
cachedRechatService = rechat.CachedService(streamInfo)
cRMessages = cachedRechatService.next()
d.dialog(cRMessages[-1])
cRMessages = cachedRechatService.next()
d.dialog(cRMessages[-1])
cRMessages = cachedRechatService.afterMs(1424183641000)
d.dialog(cRMessages[-1])
#drawUI()
rechatServiceTest()
| [
"import common, rechat, twitch\n",
"import xbmcaddon\n",
"import xbmcgui\n",
"import xbmc\n",
"import xbmcvfs\n",
"\n",
"import stuff\n",
"import classes\n",
"\n",
"import time\n",
"\n",
"from common import Cache, Debugger\n",
"\n",
"d = Debugger()\n",
"Cache.CACHE_PATH = 'special://temp/reXChat/'\n",
"\n",
"# d.dialog('script started')\n",
"addon = xbmcaddon.Addon()\n",
"# addon.openSettings()\n",
"addonname = addon.getAddonInfo('name')\n",
"addonpath = xbmc.translatePath(addon.getAddonInfo('path'))\n",
"\n",
"ACTION_PREVIOUS_MENU = 10\n",
"\n",
"class OverlayChat():\n",
" def __init__(self):\n",
" self.showing = False\n",
" self.window = xbmcgui.Window(12005)\n",
"\n",
" #main window\n",
" chatBackground = xbmc.translatePath(classes.CACHE_PATH + 'ChatBackground.png')\n",
" # chatBackgroundResized = xbmc.translatePath(twitchvideo.cachePath + 'ChatBackgroundResized.jpeg')\n",
" # im = Image.open(chatBackground)\n",
" # im.thumbnail((320, 600), Image.ANTIALIAS)\n",
" # im.save(chatBackgroundResized, \"JPEG\")\n",
" self.background = xbmcgui.ControlImage(-110, 0, 418, 564, chatBackground, aspectRatio=0)\n",
" # w*22 font12 width 320, itemTextXOffset -2\n",
" self.clist = xbmcgui.ControlList(0, 0, 320, 600, 'font12', 'FFFFFFFF', 'IrcChat/ChatArrowFO.png', 'pstvButtonFocus.png', 'FFFFFFFF', 0, 0, -2, 0, 20, 0, 0)\n",
" self.window.addControl(self.background)\n",
" self.window.addControl(self.clist)\n",
" self.id = self.clist.getId()\n",
" # listItem = xbmcgui.ListItem('yoyoyo', 'asdfasdf')\n",
" # listItem1 = xbmcgui.ListItem('yoyoyo', 'asdfasdf2')\n",
" # listItem2 = xbmcgui.ListItem('yoyoyo', 'asdfasdf3')\n",
" # listItem3 = xbmcgui.ListItem('yoyoyo', 'asdfasdf4')\n",
" # self.clist.addItem('uau')\n",
" # self.clist.addItem(listItem1)\n",
" # self.clist.addItem(listItem2)\n",
" # self.clist.addItem(listItem3)\n",
" # self.clist.selectItem(0)\n",
" # self.clist.selectItem(1)\n",
" # self.window.setFocus(self.clist)\n",
" # listItem.select(True)\n",
" # d.dialog(self.clist.size())\n",
" # self.clist.setAnimations([('focus', 'effect=zoom end=90,247,220,56 time=0',)])\n",
"\n",
" def addLine(self, line):\n",
" self.clist.addItem(line)\n",
" xbmc.executeJSONRPC('{ \"jsonrpc\": \"2.0\", \"method\": \"Input.Down\", \"id\": %s }' %(self.id))\n",
"\n",
" def show(self):\n",
" self.showing=True\n",
"\n",
" def hide(self):\n",
" self.showing=False\n",
" self.window.removeControl(self.background)\n",
" self.window.removeControl(self.clist)\n",
"\n",
" def scrollTo(self, index):\n",
" index = int(index)\n",
" self.clist.selectItem(index)\n",
"# c_size = self.clist.size()\n",
"# xbmc.executebuiltin(\"Control.move(\" + str(self.clist.getId()) + \", %s)\" %'1')\n",
"# xbmc.executebuiltin(\"Control.move(\" + str(self.clist.getId()) + \", %s)\" %index)\n",
"# xbmc.executebuiltin(\"Control.move(1331, %s)\" %c_size)\n",
"\n",
" def resizeBackground(self, x, y, width, height):\n",
" self.window.removeControl(self.background)\n",
" self.background = xbmcgui.ControlImage(x, y, width, height, xbmc.translatePath(classes.CACHE_PATH + 'ChatBackground.png'))\n",
" self.window.addControl(self.background)\n",
"\n",
" def _close(self):\n",
" if self.showing:\n",
" self.hide()\n",
" else:\n",
" pass\n",
" try:\n",
" self.window.clearProperties()\n",
" print(\"OverlayText window closed\")\n",
" except: pass\n",
"\n",
"\n",
"class PlaybackController(xbmc.Monitor):\n",
" def __init__(self):\n",
" self.settings = Settings()\n",
" classes.Message.lineLength = self.settings.characters\n",
" self.chat = ChatRenderer(self.settings)\n",
" def kill(self):\n",
" self.chat.close()\n",
" def now(self):\n",
" self.chat.scroll(xbmc.Player().getTime() * 1000)\n",
" def onSettingsChanged(self):\n",
" self.settings.update()\n",
" # lat123.resizeBackground(settings.backgroundX, settings.backgroundY, settings.backgroundWidth, settings.backgroundHeight)\n",
"\n",
"class ChatRenderer():\n",
" def __init__(self, settings):\n",
" self.messageIndex = {}\n",
" self.video = twitch.ArchiveVideo(None, 1, streamId='v3725446')\n",
" self.chat = OverlayChat()\n",
" self.chat.show()\n",
" self.delay = settings.delay\n",
"# self.chat.populate() #1\n",
"# self.chat.clist.reset()\n",
" def addMessage(self, message):\n",
" for line in message.getLines():\n",
" self.chat.addLine(line)\n",
" message.isRendered = True\n",
" self.messageIndex[message.id] = self.chat.clist.size() - 1\n",
" def close(self):\n",
" self.chat._close()\n",
" def populate(self):\n",
" for message in self.video.messages:\n",
" self.addMessage(message)\n",
" def scroll(self, playerTime):\n",
" scroll = None\n",
" delayedTime = playerTime + self.delay\n",
" index = self.chat.clist.size()\n",
" id = self.chat.clist.getId()\n",
" for message in self.video.messages:\n",
" if message.absoluteTimeMs < delayedTime and message.isRendered == False:\n",
" d.dialog('before')\n",
" self.addMessage(message) #1\n",
" d.dialog('after')\n",
"# d.dialog('added')\n",
"# xbmc.sleep(50)\n",
" pass #1\n",
" elif message.absoluteTimeMs > delayedTime:\n",
" if scroll:\n",
" self.chat.scrollTo(self.messageIndex[scroll.id])\n",
"# d.dialog('scroll')\n",
"# xbmc.sleep(2000)\n",
" return\n",
"# else: # Wonders happen\n",
"# self.addMessage(message) #1\n",
"# self.chat.scrollTo(self.messageIndex[message.id])\n",
"# return\n",
" scroll = message\n",
"\n",
"class Settings():\n",
" def __init__(self):\n",
" self.update()\n",
" def update(self):\n",
" self.backgroundX = int(addon.getSetting('backgroundX'))\n",
" self.backgroundY = int(addon.getSetting('backgroundY'))\n",
" self.backgroundWidth = int(addon.getSetting('backgroundWidth'))\n",
" self.backgroundHeight = int(addon.getSetting('backgroundHeight'))\n",
" self.backgroundOpacity = addon.getSetting('backgroundOpacity')\n",
" self.chatX = int(addon.getSetting('chatX'))\n",
" self.chatY = int(addon.getSetting('chatY'))\n",
" self.chatWidth = int(addon.getSetting('chatWidth'))\n",
" self.chatHeight = int(addon.getSetting('chatHeight'))\n",
" self.characters = int(addon.getSetting('characters'))\n",
" self.delay = int(addon.getSetting('delay')) * 1000\n",
"\n",
"\n",
"def drawUI():\n",
" playbackController = PlaybackController()\n",
" #try:\n",
" count = 0\n",
" while(count < 25):\n",
" xbmc.sleep(500)\n",
" playbackController.now()\n",
" count += 1\n",
" #except: d.dialog('exception')\n",
" playbackController.kill()\n",
"\n",
"def rechatServiceTest():\n",
" twitchAPI = twitch.CachedAPI()\n",
"# streamInfo = twitchAPI.getStreamInfo(videoId='a611375915')\n",
"# d.dialog(streamInfo.streamId)\n",
"# d.dialog(streamInfo.recordedAt)\n",
"# d.dialog(streamInfo.recordedAtMs)\n",
"# streamInfo = twitchAPI.getStreamInfo(streamId='v3832313')\n",
"# streamInfo = twitchAPI.getStreamInfo(streamId='v3676602') # checks findLoose on start\n",
" streamInfo = twitchAPI.getStreamInfo(streamId='v3800416')\n",
"# d.dialog(streamInfo.streamId)\n",
"# d.dialog(streamInfo.recordedAt)\n",
"# d.dialog(streamInfo.recordedAtMs)\n",
"# rechatService = rechat.Service('v3800416', 1424183641000)\n",
"# rechatService = rechat.Service(streamInfo)\n",
"# rMessages = rechatService.next()\n",
"# d.dialog(rMessages[-1])\n",
"# d.dialog(rechatService.next()[-1])\n",
"# d.dialog(rechatService.afterMs(1424183641000)[-1])\n",
"# d.dialog(rechatService.next()[-1])\n",
" cachedRechatService = rechat.CachedService(streamInfo)\n",
" cRMessages = cachedRechatService.next()\n",
" d.dialog(cRMessages[-1])\n",
" cRMessages = cachedRechatService.next()\n",
" d.dialog(cRMessages[-1])\n",
" cRMessages = cachedRechatService.afterMs(1424183641000)\n",
" d.dialog(cRMessages[-1])\n",
"\n",
"#drawUI()\n",
"rechatServiceTest()\n",
"\n",
"\n"
] | [
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0.047619047619047616,
0.011494252873563218,
0.009345794392523364,
0,
0,
0,
0.010309278350515464,
0,
0.006097560975609756,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0.020618556701030927,
0,
0,
0.038461538461538464,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.011235955056179775,
0,
0,
0,
0,
0.007633587786259542,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.09523809523809523,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0.05263157894736842,
0,
0.030303030303030304,
0,
0.007633587786259542,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0.047619047619047616,
0,
0.041666666666666664,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0.023529411764705882,
0,
0.045454545454545456,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0.05,
0,
1
] | 208 | 0.011499 | false |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Tristan Fischer (sphere@dersphere.de)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import json
import re
from datetime import date
from urllib import quote
from urllib2 import urlopen, Request, HTTPError, URLError
API_URL = 'http://app.4players.de/services/app/data.php'
USER_AGENT = 'VuBox4PlayersApi'
SYSTEMS = (
'360', 'PC-CDROM', 'iPhone', 'iPad', 'Android', '3DS', 'NDS', 'Wii_U',
'PlayStation3', 'PlayStation4', 'PSP', 'PS_Vita', 'Spielkultur',
'WindowsPhone7', 'XBox', 'Wii', 'PlayStation2',
)
class NetworkError(Exception):
pass
class VuBox4PlayersApi(object):
LIMIT = 50
def __init__(self):
self._game_infos = {}
self._systems = []
pass
def set_systems(self, system_list):
self._systems = system_list
def get_latest_videos(self, limit=LIMIT, older_than=0):
params = (
0, # video_id
limit, # limit
0, # newer_than
older_than, # older_than
0, # reviews_only
self.systems, # system filter
1, # include spielinfo
)
videos = self.__api_call('getVideos', *params)['Video']
return self.__format_videos(videos)
def get_latest_reviews(self, limit=LIMIT, older_than=0):
params = (
0, # video_id
limit, # limit
0, # newer_than
older_than, # older_than
1, # reviews_only
self.systems, # system filter
1, # include spielinfo
)
videos = self.__api_call('getVideos', *params)['Video']
return self.__format_videos(videos)
def get_popular_videos(self, limit=LIMIT, page=1):
offset = int(limit) * (int(page) - 1)
params = (
limit, # limit
offset, # offset
self.systems, # system filter
1, # include spielinfo
)
videos = self.__api_call('getVideosByViews', *params)['Video']
return self.__format_videos(videos)
def get_videos_by_game(self, game_id, limit=LIMIT, older_than=0):
params = (
game_id, # game_id
limit, # limit
0, # newer_than
older_than, # older_than
)
videos = self.__api_call('getVideosBySpiel', *params)['Video']
return self.__format_videos(videos)
def get_games(self, search_string, limit=LIMIT):
params = (
search_string, # search_string
limit # limit
)
games = self.__api_call('getSpieleBySuchbegriff', *params)['GameInfo']
return self.__format_games(games)
def _get_game_info(self, game_id):
params = (
game_id, # game_id
0, # newer than
)
return self.__api_call('getSpielinfo', *params)['GameInfo']
def __format_videos(self, raw_videos):
videos = [{
'id': video['id'],
'video_title': video['beschreibung'],
'rating': video['rating'],
'ts': video['datum'],
'date': self.__format_date(video['datum']),
'duration': self.__format_duration(video['laufzeit']),
'thumb': self.__format_thumb(video['thumb']),
'game': self.__format_game(video['spielinfo']),
'streams': {
'normal': {
'url': video['url'],
'size': video['filesize']
},
'hq': {
'url': video['urlHQ'],
'size': video['filesizeHQ']
}
}
} for video in raw_videos]
return videos
def __format_games(self, raw_games):
games = [{
'id': game['id'],
'title': game['name'],
'thumb': game['systeme'][0]['cover_big'],
'genre': game['subgenre'],
'studio': game['hersteller']
} for game in raw_games]
return games
def __format_game(self, game_info):
if not isinstance(game_info, list):
game_id = game_info['id']
if game_id in self._game_infos:
game_info = self._game_infos[game_id]
else:
self._game_infos[game_id] = self._get_game_info(game_id)
game_info = self._game_infos[game_id]
else:
self._game_infos[game_info[0]['id']] = game_info
game = {
'id': game_info[0]['id'],
'title': game_info[0]['name'],
'genre': game_info[0]['subgenre'],
'studio': game_info[0]['hersteller'],
}
return game
@property
def systems(self):
if self._systems and not self._systems == SYSTEMS:
return ','.join((s for s in self._systems if s in SYSTEMS))
else:
return 0
@staticmethod
def __format_thumb(url):
return url.replace('-thumb160x90.jpg', '-screenshot.jpg')
@staticmethod
def __format_date(timestamp):
f = '%d.%m.%Y'
return date.fromtimestamp(timestamp).strftime(f)
@staticmethod
def __format_duration(duration_str):
if ':' in duration_str:
time = re.search('(\d+):.*?(\d+)', duration_str)
if time:
sec = int(int(time.group(1)) * 60 + int(time.group(2)))
else:
sec = 0
else:
sec = 0
return sec
@staticmethod
def __api_call(method, *params):
parts = [API_URL, method] + [quote(str(p)) for p in params]
url = '/'.join(parts)
req = Request(url)
req.add_header('User Agent', USER_AGENT)
log('Opening URL: %s' % url)
try:
response = urlopen(req).read()
except HTTPError, error:
raise NetworkError('HTTPError: %s' % error)
except URLError, error:
raise NetworkError('URLError: %s' % error)
log('got %d bytes' % len(response))
json_data = json.loads(response)
return json_data
def log(msg):
print '[VuBox4PlayersApi]: %s' % msg | [
"# -*- coding: utf-8 -*-\n",
"#\n",
"# Copyright (C) 2013 Tristan Fischer (sphere@dersphere.de)\n",
"#\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"#\n",
"\n",
"import json\n",
"import re\n",
"from datetime import date\n",
"from urllib import quote\n",
"from urllib2 import urlopen, Request, HTTPError, URLError\n",
"\n",
"API_URL = 'http://app.4players.de/services/app/data.php'\n",
"USER_AGENT = 'VuBox4PlayersApi'\n",
"\n",
"SYSTEMS = (\n",
" '360', 'PC-CDROM', 'iPhone', 'iPad', 'Android', '3DS', 'NDS', 'Wii_U',\n",
" 'PlayStation3', 'PlayStation4', 'PSP', 'PS_Vita', 'Spielkultur',\n",
" 'WindowsPhone7', 'XBox', 'Wii', 'PlayStation2',\n",
")\n",
"\n",
"class NetworkError(Exception):\n",
" pass\n",
"\n",
"class VuBox4PlayersApi(object):\n",
"\n",
" LIMIT = 50\n",
"\n",
" def __init__(self):\n",
" self._game_infos = {}\n",
" self._systems = []\n",
" pass\n",
"\n",
" def set_systems(self, system_list):\n",
" self._systems = system_list\n",
"\n",
" def get_latest_videos(self, limit=LIMIT, older_than=0):\n",
" params = (\n",
" 0, # video_id\n",
" limit, # limit\n",
" 0, # newer_than\n",
" older_than, # older_than\n",
" 0, # reviews_only\n",
" self.systems, # system filter\n",
" 1, # include spielinfo\n",
" )\n",
" videos = self.__api_call('getVideos', *params)['Video']\n",
" return self.__format_videos(videos)\n",
"\n",
" def get_latest_reviews(self, limit=LIMIT, older_than=0):\n",
" params = (\n",
" 0, # video_id\n",
" limit, # limit\n",
" 0, # newer_than\n",
" older_than, # older_than\n",
" 1, # reviews_only\n",
" self.systems, # system filter\n",
" 1, # include spielinfo\n",
" )\n",
" videos = self.__api_call('getVideos', *params)['Video']\n",
" return self.__format_videos(videos)\n",
"\n",
" def get_popular_videos(self, limit=LIMIT, page=1):\n",
" offset = int(limit) * (int(page) - 1)\n",
" params = (\n",
" limit, # limit\n",
" offset, # offset\n",
" self.systems, # system filter\n",
" 1, # include spielinfo\n",
" )\n",
" videos = self.__api_call('getVideosByViews', *params)['Video']\n",
" return self.__format_videos(videos)\n",
"\n",
" def get_videos_by_game(self, game_id, limit=LIMIT, older_than=0):\n",
" params = (\n",
" game_id, # game_id\n",
" limit, # limit\n",
" 0, # newer_than\n",
" older_than, # older_than\n",
" )\n",
" videos = self.__api_call('getVideosBySpiel', *params)['Video']\n",
" return self.__format_videos(videos)\n",
"\n",
" def get_games(self, search_string, limit=LIMIT):\n",
" params = (\n",
" search_string, # search_string\n",
" limit # limit\n",
" )\n",
" games = self.__api_call('getSpieleBySuchbegriff', *params)['GameInfo']\n",
" return self.__format_games(games)\n",
"\n",
" def _get_game_info(self, game_id):\n",
" params = (\n",
" game_id, # game_id\n",
" 0, # newer than\n",
" )\n",
" return self.__api_call('getSpielinfo', *params)['GameInfo']\n",
"\n",
" def __format_videos(self, raw_videos):\n",
" videos = [{\n",
" 'id': video['id'],\n",
" 'video_title': video['beschreibung'],\n",
" 'rating': video['rating'],\n",
" 'ts': video['datum'],\n",
" 'date': self.__format_date(video['datum']),\n",
" 'duration': self.__format_duration(video['laufzeit']),\n",
" 'thumb': self.__format_thumb(video['thumb']),\n",
" 'game': self.__format_game(video['spielinfo']),\n",
" 'streams': {\n",
" 'normal': {\n",
" 'url': video['url'],\n",
" 'size': video['filesize']\n",
" },\n",
" 'hq': {\n",
" 'url': video['urlHQ'],\n",
" 'size': video['filesizeHQ']\n",
" }\n",
" }\n",
" } for video in raw_videos]\n",
" return videos\n",
"\n",
" def __format_games(self, raw_games):\n",
" games = [{\n",
" 'id': game['id'],\n",
" 'title': game['name'],\n",
" 'thumb': game['systeme'][0]['cover_big'],\n",
" 'genre': game['subgenre'],\n",
" 'studio': game['hersteller']\n",
" } for game in raw_games]\n",
" return games\n",
"\n",
" def __format_game(self, game_info):\n",
" if not isinstance(game_info, list):\n",
" game_id = game_info['id']\n",
" if game_id in self._game_infos:\n",
" game_info = self._game_infos[game_id]\n",
" else:\n",
" self._game_infos[game_id] = self._get_game_info(game_id)\n",
" game_info = self._game_infos[game_id]\n",
" else:\n",
" self._game_infos[game_info[0]['id']] = game_info\n",
" game = {\n",
" 'id': game_info[0]['id'],\n",
" 'title': game_info[0]['name'],\n",
" 'genre': game_info[0]['subgenre'],\n",
" 'studio': game_info[0]['hersteller'],\n",
" }\n",
" return game\n",
"\n",
" @property\n",
" def systems(self):\n",
" if self._systems and not self._systems == SYSTEMS:\n",
" return ','.join((s for s in self._systems if s in SYSTEMS))\n",
" else:\n",
" return 0\n",
"\n",
" @staticmethod\n",
" def __format_thumb(url):\n",
" return url.replace('-thumb160x90.jpg', '-screenshot.jpg')\n",
"\n",
" @staticmethod\n",
" def __format_date(timestamp):\n",
" f = '%d.%m.%Y'\n",
" return date.fromtimestamp(timestamp).strftime(f)\n",
"\n",
" @staticmethod\n",
" def __format_duration(duration_str):\n",
" if ':' in duration_str:\n",
" time = re.search('(\\d+):.*?(\\d+)', duration_str)\n",
" if time:\n",
" sec = int(int(time.group(1)) * 60 + int(time.group(2)))\n",
" else:\n",
" sec = 0\n",
" else:\n",
" sec = 0\n",
" return sec\n",
"\n",
" @staticmethod\n",
" def __api_call(method, *params):\n",
" parts = [API_URL, method] + [quote(str(p)) for p in params]\n",
" url = '/'.join(parts)\n",
" req = Request(url)\n",
" req.add_header('User Agent', USER_AGENT)\n",
" log('Opening URL: %s' % url)\n",
" try:\n",
" response = urlopen(req).read()\n",
" except HTTPError, error:\n",
" raise NetworkError('HTTPError: %s' % error)\n",
" except URLError, error:\n",
" raise NetworkError('URLError: %s' % error)\n",
" log('got %d bytes' % len(response))\n",
" json_data = json.loads(response)\n",
" return json_data\n",
"\n",
"def log(msg):\n",
" print '[VuBox4PlayersApi]: %s' % msg"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03278688524590164,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0.025
] | 208 | 0.000927 | false |
"""
xml2json.py
Kailash Nadh, http://nadh.in
December 2012
License: MIT License
Documentation: http://nadh.in/code/xmlutils.py
"""
import codecs
import xml.etree.ElementTree as et
import json
class xml2json:
    """Convert an XML file to a JSON string or a JSON file.

    Fixed to run on Python 3 (and 2): ``next(iterator)`` instead of the
    removed ``Iterator.next()``, ``list(elem)`` instead of the deprecated
    ``getchildren()``, and dict views are no longer indexed directly.
    The old ``return`` inside ``finally`` — which silently swallowed XML
    parse errors — has been removed; malformed input now raises.
    """

    def __init__(self, input_file, output_file=None, encoding='utf-8'):
        """Initialize the converter with input/output paths.

        Keyword arguments:
        input_file -- input xml filename
        output_file -- output json filename
        encoding -- character encoding of the output file
        """
        # Open the xml file for incremental iteration.
        self.context = et.iterparse(input_file, events=("start", "end"))
        self.output_file = output_file
        self.encoding = encoding

    def get_json(self, pretty=True):
        """Convert the XML file to a json string.

        Keyword arguments:
        pretty -- pretty print json (default=True)
        """
        root = None
        # Exhaust the iterator; the element yielded with the final "end"
        # event is the document root.
        for _event, root in self.context:
            pass
        return self._elem2json(root, pretty)

    def convert(self, pretty=True):
        """Convert the xml file to a json file at ``self.output_file``.

        Keyword arguments:
        pretty -- pretty print json (default=True)
        """
        # Renamed from ``json`` so the stdlib module is not shadowed.
        json_text = self.get_json(pretty)

        # Output file handle; narrow except instead of a bare one.
        try:
            output = codecs.open(self.output_file, "w", encoding=self.encoding)
        except (IOError, OSError):
            print("Failed to open the output file")
            raise
        try:
            output.write(json_text)
        finally:
            output.close()

    def _elem2list(self, elem):
        """Convert an ElementTree element to a nested dict structure."""
        block = {}

        # list(elem) replaces the removed/deprecated getchildren().
        children = list(elem)

        if children:
            # Materialise recursion results (map() is lazy on Python 3).
            cur = [self._elem2list(child) for child in children]

            scalar = False
            try:
                if elem[0].tag != elem[1].tag:
                    # [{a: 1}, {b: 2}, {c: 3}] => {a: 1, b: 2, c: 3}
                    cur = dict(
                        (next(iter(e.keys())), next(iter(e.values())))
                        for e in cur
                    )
                else:
                    scalar = True
            except IndexError:
                # Single child: handled like the repeated-tag case.
                scalar = True

            if scalar:
                if len(cur) > 1:
                    # [{a: 1}, {a: 2}, {a: 3}] => {a: [1, 2, 3]}
                    cur = {elem[0].tag: [next(iter(e.values()))
                                         for e in cur
                                         if next(iter(e.values())) is not None]}
                else:
                    cur = {elem[0].tag: next(iter(cur[0].values()))}

            block[elem.tag] = cur
        else:
            # Leaf node: prefer text content, fall back to attributes.
            val = None
            if elem.text:
                val = elem.text.strip()
                val = val if len(val) > 0 else None
            elif elem.attrib:
                val = elem.attrib
                val = val if len(val) > 0 else None

            block[elem.tag] = val

        return block

    def _elem2json(self, elem, pretty=True):
        """Convert an ElementTree Element (root) to a json string."""
        # If given an ElementTree rather than the root Element, unwrap it.
        if hasattr(elem, 'getroot'):
            elem = elem.getroot()

        return json.dumps(self._elem2list(elem), indent=(4 if pretty else None))
| [
"\"\"\"\n",
"\txml2json.py\n",
"\tKailash Nadh, http://nadh.in\n",
"\tDecember 2012\n",
"\t\n",
"\tLicense: MIT License\n",
"\tDocumentation: http://nadh.in/code/xmlutils.py\n",
"\"\"\"\n",
"\n",
"import codecs\n",
"import xml.etree.ElementTree as et\n",
"import json\n",
"\n",
"class xml2json:\n",
"\n",
"\tdef __init__(self, input_file, output_file = None, encoding='utf-8'):\n",
"\t\t\"\"\"Initialize the class with the paths to the input xml file\n",
"\t\tand the output json file\n",
"\n",
"\t\tKeyword arguments:\n",
"\t\tinput_file -- input xml filename\n",
"\t\toutput_file -- output json filename\n",
"\t\tencoding -- character encoding\n",
"\t\t\"\"\"\n",
"\n",
"\t\t# open the xml file for iteration\n",
"\t\tself.context = et.iterparse(input_file, events=(\"start\", \"end\"))\n",
"\t\tself.output_file = output_file\n",
"\t\tself.encoding = encoding\n",
"\n",
"\tdef get_json(self, pretty=True):\n",
"\t\t\"\"\"\n",
"\t\t\tConvert an XML file to json string (Tested with python 2.7.8 on Windows 7)\n",
"\n",
"\t\t \tKeyword arguments:\n",
"\t\t\tpretty -- pretty print json (default=True)\n",
"\t\t\"\"\"\n",
"\n",
"\t iterator = iter(self.context)\n",
"\n",
"\t try:\n",
"\t while True:\n",
"\t event, root = iterator.next()\n",
"\t except StopIteration:\n",
"\t print(\"Event StopIteration found, done!\")\n",
"\t finally:\n",
"\t return self._elem2json(root, pretty)\n",
"\n",
"\tdef convert(self, pretty=True):\n",
"\t\t\"\"\"\n",
"\t\t\tConvert xml file to a json file\n",
"\n",
"\t\t \tKeyword arguments:\n",
"\t\t\tpretty -- pretty print json (default=True)\n",
"\t\t\"\"\"\n",
"\n",
"\t\tjson = self.get_json(pretty)\n",
"\n",
"\t\t# output file handle\n",
"\t\ttry:\n",
"\t\t\toutput = codecs.open(self.output_file, \"w\", encoding=self.encoding)\n",
"\t\texcept:\n",
"\t\t\tprint(\"Failed to open the output file\")\n",
"\t\t\traise\n",
"\n",
"\t\toutput.write(json)\n",
"\t\toutput.close()\n",
"\n",
"\n",
"\tdef _elem2list(self, elem):\n",
"\t\t\"\"\"Convert an ElementTree element to a list\"\"\"\n",
"\n",
"\t\tblock = {}\n",
"\n",
"\t\t# get the element's children\n",
"\t\tchildren = elem.getchildren()\n",
"\n",
"\t\tif children:\n",
"\t\t\tcur = map(self._elem2list, children)\n",
"\n",
"\t\t\t# create meaningful lists\n",
"\t\t\tscalar = False\n",
"\t\t\ttry:\n",
"\t\t\t\tif elem[0].tag != elem[1].tag: # [{a: 1}, {b: 2}, {c: 3}] => {a: 1, b: 2, c: 3}\n",
"\t\t\t\t\tcur = dict(zip(\n",
"\t\t\t\t\t\tmap(lambda e: e.keys()[0], cur),\n",
"\t\t\t\t\t\tmap(lambda e: e.values()[0], cur)\n",
"\t\t\t\t\t))\n",
"\t\t\t\telse:\n",
"\t\t\t\t\tscalar = True\n",
"\t\t\texcept Exception as e: # [{a: 1}, {a: 2}, {a: 3}] => {a: [1, 2, 3]}\n",
"\t\t\t\tscalar = True\n",
"\n",
"\t\t\tif scalar:\n",
"\t\t\t\tif len(cur) > 1:\n",
"\t\t\t\t\tcur = {elem[0].tag: [e.values()[0] for e in cur if e.values()[0] is not None]}\n",
"\t\t\t\telse:\n",
"\t\t\t\t\tcur = {elem[0].tag: cur[0].values()[0] }\n",
"\n",
"\t\t\tblock[elem.tag] = cur\n",
"\t\telse:\n",
"\t\t\tval = None\n",
"\t\t\tif elem.text:\n",
"\t\t\t\tval = elem.text.strip()\n",
"\t\t\t\tval = val if len(val) > 0 else None\n",
"\t\t\telif elem.attrib:\n",
"\t\t\t\tval = elem.attrib\n",
"\t\t\t\tval = val if len(val) > 0 else None\n",
"\n",
"\t\t\tblock[elem.tag] = val \n",
"\t\t\n",
"\t\treturn block\n",
"\n",
"\n",
"\tdef _elem2json(self, elem, pretty=True):\n",
"\t\t\"\"\"\n",
"\t\tConvert an ElementTree Element (root) to json\n",
"\t\t\"\"\"\n",
"\t\t# if the given Element is not the root element, find it\n",
"\t\tif hasattr(elem, 'getroot'):\n",
"\t\t\telem = elem.getroot()\n",
"\n",
"\t\treturn json.dumps(self._elem2list(elem), indent=(4 if pretty else None))\n"
] | [
0,
0.07692307692307693,
0.03333333333333333,
0.06666666666666667,
1,
0.034482758620689655,
0.0196078431372549,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0.04225352112676056,
0.015873015873015872,
0.037037037037037035,
0,
0.047619047619047616,
0.02857142857142857,
0.02631578947368421,
0.030303030303030304,
0.16666666666666666,
0,
0.027777777777777776,
0.014925373134328358,
0.030303030303030304,
0.037037037037037035,
0,
0.029411764705882353,
0.16666666666666666,
0.01282051282051282,
0,
0.08695652173913043,
0.021739130434782608,
0.16666666666666666,
0,
0.05128205128205128,
0,
0.14285714285714285,
0.08,
0.0425531914893617,
0.06451612903225806,
0.03636363636363636,
0.1111111111111111,
0.04,
0,
0.030303030303030304,
0.16666666666666666,
0.02857142857142857,
0,
0.08695652173913043,
0.021739130434782608,
0.16666666666666666,
0,
0.03225806451612903,
0,
0.043478260869565216,
0.14285714285714285,
0.014084507042253521,
0.2,
0.023255813953488372,
0.1111111111111111,
0,
0.047619047619047616,
0.058823529411764705,
0,
0,
0.06896551724137931,
0.02040816326530612,
0,
0.07692307692307693,
0,
0.03225806451612903,
0.03125,
0,
0.06666666666666667,
0.025,
0,
0.034482758620689655,
0.05555555555555555,
0.125,
0.023529411764705882,
0.047619047619047616,
0.02564102564102564,
0.025,
0.125,
0.1,
0.05263157894736842,
0.013888888888888888,
0.05555555555555555,
0,
0.07142857142857142,
0.047619047619047616,
0.023809523809523808,
0.1,
0.043478260869565216,
0,
0.04,
0.125,
0.07142857142857142,
0.058823529411764705,
0.03571428571428571,
0.025,
0.047619047619047616,
0.045454545454545456,
0.025,
0,
0.07692307692307693,
0.6666666666666666,
0.06666666666666667,
0,
0,
0.047619047619047616,
0.16666666666666666,
0.020833333333333332,
0.16666666666666666,
0.017241379310344827,
0.03225806451612903,
0.04,
0,
0.013333333333333334
] | 123 | 0.057774 | false |
print(""".Language=English,English
.Options CtrlColorChar=\\
@Contents=RGB
@RGB""")
x_step = 16
y_step = 16
def print_color(r, g, b):
    """Emit one colour escape of the form ``\\(T0:TRRGGBB)`` (hex channels)."""
    print("\\(T0:T%02X%02X%02X)" % (r, g, b))
def line(color, light):
    """Print one gradient row for ``color``, brightness-shifted by ``light``."""

    def emit(red, green, blue):
        # Clamp each channel to 0..255 after applying the brightness offset.
        def shifted(channel):
            return max(0, min(int(channel + light), 255))
        print_color(shifted(red), shifted(green), shifted(blue))

    print(" ", end='')

    red = (color >> 16) & 0xFF
    green = (color >> 8) & 0xFF
    blue = color & 0xFF

    # Neutral grey reference cell, then the base colour itself.
    emit(128, 128, 128)
    emit(red, green, blue)

    steps = int(256 / x_step)
    channels = [red, green, blue]
    # Walk the hue wheel: each (channel index, delta) pass nudges one
    # channel by x_step per cell — g+, r-, b+, g-, r+, b- in order,
    # exactly reproducing the original six explicit loops.
    for index, delta in ((1, x_step), (0, -x_step), (2, x_step),
                         (1, -x_step), (0, x_step), (2, -x_step)):
        for _ in range(steps):
            channels[index] += delta
            emit(channels[0], channels[1], channels[2])

    print("\\-")
def lines():
    """Render the full table: base row, then a sweep of brightness offsets."""
    base = 0xff0000

    line(base, 0)

    rows = int(256 / y_step)
    # Positive (lighter) offsets from largest down to y_step, then the
    # base row again followed by increasingly negative (darker) offsets.
    for offset in range(rows, 0, -1):
        line(base, offset * y_step)
    for offset in range(0, -rows, -1):
        line(base, offset * y_step)
# Generate the colour table only when run as a script; importing this
# module still executes the header print above (pre-existing behaviour).
if __name__ == "__main__":
    lines()
| [
"print(\"\"\".Language=English,English\n",
".Options CtrlColorChar=\\\\\n",
"\n",
"@Contents=RGB\n",
"@RGB\"\"\")\n",
"\n",
"x_step = 16\n",
"y_step = 16\n",
"\n",
"def print_color(r, g, b):\n",
"\tprint(r\"\\(T0:T{:02X}{:02X}{:02X})\".format(r, g, b))\n",
"\n",
"def line(color, light):\n",
"\tdef out(r, g, b):\n",
"\t\tdef with_light(x):\n",
"\t\t\treturn max(0, min(int(x + light), 255))\n",
"\n",
"\t\tprint_color(\n",
"\t\t\twith_light(r),\n",
"\t\t\twith_light(g),\n",
"\t\t\twith_light(b)\n",
"\t\t)\n",
"\n",
"\tprint(\" \", end='')\n",
"\n",
"\tr = (color & 0xFF0000) >> 16\n",
"\tg = (color & 0x00FF00) >> 8\n",
"\tb = color & 0x0000FF\n",
"\n",
"\tout(128, 128, 128)\n",
"\tout(r, g, b)\n",
"\n",
"\tlimit = int(256 / x_step)\n",
"\n",
"\tfor i in range(0, limit):\n",
"\t\tg += x_step\n",
"\t\tout(r, g, b)\n",
"\n",
"\tfor i in range(0, limit):\n",
"\t\tr -= x_step\n",
"\t\tout(r, g, b)\n",
"\n",
"\tfor i in range(0, limit):\n",
"\t\tb += x_step\n",
"\t\tout(r, g, b)\n",
"\n",
"\tfor i in range(0, limit):\n",
"\t\tg -= x_step\n",
"\t\tout(r, g, b)\n",
"\n",
"\tfor i in range(0, limit):\n",
"\t\tr += x_step\n",
"\t\tout(r, g, b)\n",
"\n",
"\tfor i in range(0, limit):\n",
"\t\tb -= x_step\n",
"\t\tout(r, g, b)\n",
"\n",
"\tprint(r\"\\-\")\n",
"\n",
"def lines():\n",
"\tcolor = 0xff0000\n",
"\n",
"\tline(color, 0)\n",
"\n",
"\tlimit = int(256 / y_step)\n",
"\n",
"\tfor i in range(limit, 0, -1):\n",
"\t\tline(color, i * y_step)\n",
"\n",
"\tfor i in range(0, -limit, -1):\n",
"\t\tline(color, i * y_step)\n",
"\n",
"if __name__ == \"__main__\":\n",
"\tlines()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0.018867924528301886,
0,
0.041666666666666664,
0.05263157894736842,
0.047619047619047616,
0.023255813953488372,
0,
0.06666666666666667,
0.05555555555555555,
0.05555555555555555,
0.058823529411764705,
0.25,
0,
0.05,
0,
0.03333333333333333,
0.034482758620689655,
0.045454545454545456,
0,
0.05,
0.07142857142857142,
0,
0.037037037037037035,
0,
0.037037037037037035,
0.07142857142857142,
0.06666666666666667,
0,
0.037037037037037035,
0.07142857142857142,
0.06666666666666667,
0,
0.037037037037037035,
0.07142857142857142,
0.06666666666666667,
0,
0.037037037037037035,
0.07142857142857142,
0.06666666666666667,
0,
0.037037037037037035,
0.07142857142857142,
0.06666666666666667,
0,
0.037037037037037035,
0.07142857142857142,
0.06666666666666667,
0,
0.07142857142857142,
0,
0.07692307692307693,
0.05555555555555555,
0,
0.0625,
0,
0.037037037037037035,
0,
0.03225806451612903,
0.038461538461538464,
0,
0.03125,
0.038461538461538464,
0,
0.037037037037037035,
0.1111111111111111
] | 75 | 0.035649 | false |
import datetime
import aiohttp
import discord
from config import WarGamingAppID
async def wows(cmd, message, args):
    """Fetch a World of Warships player's profile and post it as an embed.

    :param cmd: command wrapper; only its ``log`` attribute is used here.
    :param message: triggering Discord message; replies go to its channel.
    :param args: ``[region, username...]``; region ``na`` maps to ``com``.
    """
    query = ' '.join(args).lower()
    pieces = query.split(maxsplit=1)
    if len(pieces) < 2:
        # Previously this crashed with an unpacking ValueError.
        await message.channel.send('Usage: `region username`.')
        return
    game_region, game_username = pieces
    if game_region == 'na':
        # WarGaming serves the NA region from the .com domain.
        game_region = 'com'
    api_base = 'https://api.worldofwarships.' + game_region
    try:
        url_base = api_base + '/wows/account/list/?application_id=' + WarGamingAppID + '&search=' + game_username
        async with aiohttp.ClientSession() as session:
            async with session.get(url_base) as data:
                initial_data = await data.json()
    except Exception:
        # An invalid region yields an unresolvable API hostname.
        await message.channel.send('`' + game_region + '` is not a valid region.')
        return
    try:
        # Bail out quietly on any non-OK API status.
        if initial_data['status'].lower() != 'ok':
            return
    except Exception as e:
        cmd.log.error(e)
        return
    try:
        game_nickname = initial_data['data'][0]['nickname']
    except (KeyError, IndexError, TypeError):
        await message.channel.send('User `' + game_username + '` not found.')
        return
    account_id = initial_data['data'][0]['account_id']
    url_second = api_base + '/wows/account/info/?application_id=' + WarGamingAppID + '&account_id=' + str(account_id)
    async with aiohttp.ClientSession() as session:
        async with session.get(url_second) as data:
            main_data = await data.json()
    try:
        if main_data['status'].lower() != 'ok':
            return
    except Exception as e:
        cmd.log.error(e)
        return
    data = main_data['data'][str(account_id)]
    last_battle = data['last_battle_time']
    last_battle_conv = datetime.datetime.fromtimestamp(last_battle).strftime('%B %d, %Y %H:%M')
    leveling_tier = data['leveling_tier']
    join_date = data['created_at']
    join_date_conv = datetime.datetime.fromtimestamp(join_date).strftime('%B %d, %Y %H:%M')

    stats = data['statistics']
    distance = stats['distance']
    battle_count = stats['battles']

    pvp_stats = stats['pvp']
    max_xp = pvp_stats['max_xp']
    max_spotted_dmg = pvp_stats['max_damage_scouting']

    main_battery = pvp_stats['main_battery']
    max_frags = main_battery['max_frags_battle']
    frags = main_battery['frags']
    hits = main_battery['hits']
    max_frags_ship_id = main_battery['max_frags_ship_id']
    shots = main_battery['shots']

    if max_frags_ship_id is not None:
        # Resolve the ship id to its display name and tier. Previously the
        # encyclopedia request was made even for a None id.
        max_frags_ship_url = api_base + '/wows/encyclopedia/ships/?application_id=' + WarGamingAppID + '&ship_id=' + str(max_frags_ship_id)
        async with aiohttp.ClientSession() as session:
            async with session.get(max_frags_ship_url) as data:
                max_frags_ship_data = await data.json()
        max_frags_ship_name = max_frags_ship_data['data'][str(max_frags_ship_id)]['name']
        max_frags_ship_tier = max_frags_ship_data['data'][str(max_frags_ship_id)]['tier']
    else:
        max_frags_ship_name = 'None'
        max_frags_ship_tier = '0'

    embed = discord.Embed(color=0x1abc9c)
    embed.add_field(name='Nickname', value='```python\n' + game_nickname + '\n```')
    embed.add_field(name='Level', value='```python\n' + str(leveling_tier) + '\n```')
    embed.add_field(name='Join Date', value='```python\n' + join_date_conv + '\n```', inline=False)
    embed.add_field(name='Distance', value='```python\n' + str(distance) + ' KM' + '\n```')
    embed.add_field(name='Battles', value='```python\n' + str(battle_count) + '\n```')
    embed.add_field(name='Max XP From a Battle', value='```python\n' + str(max_xp) + '\n```')
    # Typo fix in the user-facing title: 'Damange' -> 'Damage'.
    embed.add_field(name='Max Spotted Damage', value='```python\n' + str(max_spotted_dmg) + '\n```')
    embed.add_field(name='Max Kills In a Battle', value='```python\n' + str(max_frags) + '\n```')
    embed.add_field(name='Total Kills', value='```python\n' + str(frags) + '\n```')
    embed.add_field(name='Ship With Most Kills',
                    value='```python\n' + max_frags_ship_name + ' (Tier ' + str(max_frags_ship_tier) + ')' + '\n```')
    embed.add_field(name='Total Shots', value='```python\n' + str(shots) + '\n```')
    embed.add_field(name='Total Hits', value='```python\n' + str(hits) + '\n```')
    embed.add_field(name='Last Battle', value='```python\n' + last_battle_conv + '\n```')

    await message.channel.send(None, embed=embed)
| [
"import datetime\n",
"import aiohttp\n",
"import discord\n",
"from config import WarGamingAppID\n",
"\n",
"\n",
"async def wows(cmd, message, args):\n",
" q = ' '.join(args).lower()\n",
" game_region, game_username = q.split(maxsplit=1)\n",
" if game_region == 'na':\n",
" game_region = 'com'\n",
" try:\n",
" url_base = 'https://api.worldofwarships.' + game_region + '/wows/account/list/?application_id=' + WarGamingAppID + '&search=' + game_username\n",
" async with aiohttp.ClientSession() as session:\n",
" async with session.get(url_base) as data:\n",
" initial_data = await data.json()\n",
" except:\n",
" await message.channel.send('`' + game_region + '` is not a valid region.')\n",
" return\n",
" try:\n",
" if initial_data['status'].lower() == 'ok':\n",
" pass\n",
" else:\n",
" return\n",
" except Exception as e:\n",
" cmd.log.error(e)\n",
" return\n",
" try:\n",
" game_nickname = initial_data['data'][0]['nickname']\n",
" except:\n",
" await message.channel.send('User `' + game_username + '` not found.')\n",
" return\n",
" account_id = initial_data['data'][0]['account_id']\n",
" url_second = 'https://api.worldofwarships.' + game_region + '/wows/account/info/?application_id=' + WarGamingAppID + '&account_id=' + str(\n",
" account_id)\n",
" async with aiohttp.ClientSession() as session:\n",
" async with session.get(url_second) as data:\n",
" main_data = await data.json()\n",
" try:\n",
" if main_data['status'].lower() == 'ok':\n",
" pass\n",
" else:\n",
" return\n",
" except Exception as e:\n",
" cmd.log.error(e)\n",
" return\n",
" data = main_data['data'][str(account_id)]\n",
" last_battle = data['last_battle_time']\n",
" last_battle_conv = datetime.datetime.fromtimestamp(last_battle).strftime('%B %d, %Y %H:%M')\n",
" leveling_tier = data['leveling_tier']\n",
" join_date = data['created_at']\n",
" join_date_conv = datetime.datetime.fromtimestamp(join_date).strftime('%B %d, %Y %H:%M')\n",
"\n",
" stats = data['statistics']\n",
" distance = stats['distance']\n",
" battle_count = stats['battles']\n",
"\n",
" pvp_stats = stats['pvp']\n",
" max_xp = pvp_stats['max_xp']\n",
" max_spotted_dmg = pvp_stats['max_damage_scouting']\n",
"\n",
" main_battery = pvp_stats['main_battery']\n",
" max_frags = main_battery['max_frags_battle']\n",
" frags = main_battery['frags']\n",
" hits = main_battery['hits']\n",
" max_frags_ship_id = main_battery['max_frags_ship_id']\n",
" shots = main_battery['shots']\n",
"\n",
" max_frags_ship_url = 'https://api.worldofwarships.' + game_region + '/wows/encyclopedia/ships/?application_id=' + WarGamingAppID + '&ship_id=' + str(\n",
" max_frags_ship_id)\n",
" async with aiohttp.ClientSession() as session:\n",
" async with session.get(max_frags_ship_url) as data:\n",
" max_frags_ship_data = await data.json()\n",
"\n",
" if max_frags_ship_id is not None:\n",
" max_frags_ship_name = max_frags_ship_data['data'][str(max_frags_ship_id)]['name']\n",
" max_frags_ship_tier = max_frags_ship_data['data'][str(max_frags_ship_id)]['tier']\n",
" else:\n",
" max_frags_ship_name = 'None'\n",
" max_frags_ship_tier = '0'\n",
"\n",
" # Divider for clarity\n",
"\n",
" embed = discord.Embed(color=0x1abc9c)\n",
" embed.add_field(name='Nickname', value='```python\\n' + game_nickname + '\\n```')\n",
" embed.add_field(name='Level', value='```python\\n' + str(leveling_tier) + '\\n```')\n",
" embed.add_field(name='Join Date', value='```python\\n' + join_date_conv + '\\n```', inline=False)\n",
" embed.add_field(name='Distance', value='```python\\n' + str(distance) + ' KM' + '\\n```')\n",
" embed.add_field(name='Battles', value='```python\\n' + str(battle_count) + '\\n```')\n",
" embed.add_field(name='Max XP From a Battle', value='```python\\n' + str(max_xp) + '\\n```')\n",
" embed.add_field(name='Max Spotted Damange', value='```python\\n' + str(max_spotted_dmg) + '\\n```')\n",
" embed.add_field(name='Max Kills In a Battle', value='```python\\n' + str(max_frags) + '\\n```')\n",
" embed.add_field(name='Total Kills', value='```python\\n' + str(frags) + '\\n```')\n",
" embed.add_field(name='Ship With Most Kills',\n",
" value='```python\\n' + max_frags_ship_name + ' (Tier ' + str(max_frags_ship_tier) + ')' + '\\n```')\n",
" embed.add_field(name='Total Shots', value='```python\\n' + str(shots) + '\\n```')\n",
" embed.add_field(name='Total Hits', value='```python\\n' + str(hits) + '\\n```')\n",
" embed.add_field(name='Last Battle', value='```python\\n' + last_battle_conv + '\\n```')\n",
"\n",
" # Divider for clarity\n",
"\n",
" await message.channel.send(None, embed=embed)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.006666666666666667,
0,
0,
0,
0.08333333333333333,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.006993006993006993,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.006493506493506494,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.011627906976744186,
0.01,
0.010869565217391304,
0.011494252873563218,
0.010638297872340425,
0.00980392156862745,
0.01020408163265306,
0.011904761904761904,
0,
0.00847457627118644,
0.011904761904761904,
0.012195121951219513,
0.011111111111111112,
0,
0,
0,
0
] | 102 | 0.00377 | false |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import time
import uuid
from datetime import datetime
from azure.common import (
AzureHttpError,
AzureConflictHttpError,
AzureMissingResourceHttpError,
)
from azure.storage import (
Logging,
Metrics,
CorsRule,
)
from azure.storage.table import (
Entity,
TableBatch,
EdmType,
EntityProperty,
TablePayloadFormat,
)
class TableSamples():
    def __init__(self, account):
        """Store the storage account used to build the table service client.

        :param account: account object exposing ``create_table_service()``.
        """
        self.account = account
    def run_all_samples(self):
        """Create a table service client from the account and run every sample."""
        self.service = self.account.create_table_service()

        # Table-level samples.
        self.create_table()
        self.delete_table()
        self.exists()
        self.query_entities()
        self.batch()

        # Entity-level samples.
        self.create_entity_class()
        self.create_entity_dict()
        self.insert_entity()
        self.get_entity()
        self.update_entity()
        self.merge_entity()
        self.insert_or_merge_entity()
        self.insert_or_replace_entity()
        self.delete_entity()

        self.list_tables()

        # This method contains sleeps, so don't run by default
        # self.service_properties()
def _get_table_reference(self, prefix='table'):
table_name = '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
return table_name
def _create_table(self, prefix='table'):
table_name = self._get_table_reference(prefix)
self.service.create_table(table_name)
return table_name
def create_table(self):
# Basic
table_name1 = self._get_table_reference()
created = self.service.create_table(table_name1) # True
# Fail on exist
table_name2 = self._get_table_reference()
created = self.service.create_table(table_name2) # True
created = self.service.create_table(table_name2) # False
try:
self.service.create_table(table_name2, fail_on_exist=True)
except AzureConflictHttpError:
pass
self.service.delete_table(table_name1)
self.service.delete_table(table_name2)
def delete_table(self):
# Basic
table_name = self._create_table()
deleted = self.service.delete_table(table_name) # True
# Fail not exist
table_name = self._get_table_reference()
deleted = self.service.delete_table(table_name) # False
try:
self.service.delete_table(table_name, fail_not_exist=True)
except AzureMissingResourceHttpError:
pass
def exists(self):
table_name = self._get_table_reference()
# Does not exist
exists = self.service.exists(table_name) # False
# Exists
self.service.create_table(table_name)
exists = self.service.exists(table_name) # True
self.service.delete_table(table_name)
    def query_entities(self):
        """Demonstrate query_entities: paging, filter, select and accept formats."""
        table_name = self._create_table()
        entities = []
        # Seed four sibling entities under the same partition key.
        for i in range(1, 5):
            entity = {'PartitionKey': 'John',
                      'RowKey': 'Doe the {}'.format(i),
                      'deceased': False,
                      'birthday': datetime(1991, 10, i)}
            self.service.insert_entity(table_name, entity)
            entities.append(entity)

        # Basic
        # Can access properties as dict or like an object
        queried_entities = list(self.service.query_entities(table_name))
        for entity in queried_entities:
            print(entity.RowKey)  # All 4 John Doe characters

        # Num results: caps how many entities come back.
        queried_entities = list(self.service.query_entities(table_name, num_results=2))
        for entity in queried_entities:
            print(entity.RowKey)  # Doe the 1, Doe the 2

        # Filter: OData filter string narrows the result set server side.
        filter = "RowKey eq '{}'".format(entities[1]['RowKey'])
        queried_entities = list(self.service.query_entities(table_name, filter=filter))
        for entity in queried_entities:
            print(entity.RowKey)  # Doe the 2

        # Select
        # Get only the column(s) specified
        queried_entities = list(self.service.query_entities(table_name, select='birthday'))
        for entity in queried_entities:
            print(entity.birthday)  # All 4 John Doe character's birthdays
        queried_entities[0].get('RowKey')  # None

        # Accept
        # Default contains all necessary type info. JSON_NO_METADATA returns no type info, though we can guess some client side.
        # If type cannot be inferred, the value is simply returned as a string.
        queried_entities = list(self.service.query_entities(table_name,
                                                            accept=TablePayloadFormat.JSON_NO_METADATA))  # entities w/ all properties, missing type
        queried_entities[0].birthday  # (string)
        queried_entities[0].deceased  # (boolean)

        # Accept w/ Resolver
        # A resolver can be specified to give type info client side if JSON_NO_METADATA is used.
        def resolver(pk, rk, name, value, type):
            if name == 'birthday':
                return EdmType.DATETIME
        queried_entities = list(self.service.query_entities(table_name,
                                                            accept=TablePayloadFormat.JSON_NO_METADATA,
                                                            property_resolver=resolver))  # entities w/ all properties, type resolved client side
        queried_entities[0].birthday  # (datetime)
        queried_entities[0].deceased  # (boolean)

        self.service.delete_table(table_name)
    def batch(self):
        """Demonstrate batching: context-manager style and explicit commit style."""
        table_name = self._create_table()
        entity = Entity()
        entity.PartitionKey = 'batch'
        entity.test = True

        # All operations in the same batch must have the same partition key but different row keys
        # Batches can hold from 1 to 100 entities
        # Batches are atomic. All operations complete simultaneously. If one operation fails, they all fail.
        # Insert, update, merge, insert or merge, insert or replace, and delete entity operations are supported

        # Context manager style: committed automatically on exiting the block.
        with self.service.batch(table_name) as batch:
            for i in range(0, 5):
                entity.RowKey = 'context_{}'.format(i)
                batch.insert_entity(entity)

        # Commit style: build a TableBatch, then commit it explicitly.
        batch = TableBatch()
        for i in range(0, 5):
            entity.RowKey = 'commit_{}'.format(i)
            batch.insert_entity(entity)
        self.service.commit_batch(table_name, batch)

        self.service.delete_table(table_name)
def create_entity_class(self):
'''
Creates a class-based entity with fixed values, using all of the supported data types.
'''
entity = Entity()
# Partition key and row key must be strings and are required
entity.PartitionKey= 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))
entity.RowKey = 'rk{}'.format(str(uuid.uuid4()).replace('-', ''))
# Some basic types are inferred
entity.age = 39 # EdmType.INT64
entity.large = 933311100 # EdmType.INT64
entity.sex = 'male' # EdmType.STRING
entity.married = True # EdmType.BOOLEAN
entity.ratio = 3.1 # EdmType.DOUBLE
entity.birthday = datetime(1970, 10, 4) # EdmType.DATETIME
# Binary, Int32 and GUID must be explicitly typed
entity.binary = EntityProperty(EdmType.BINARY, b'xyz')
entity.other = EntityProperty(EdmType.INT32, 20)
entity.clsid = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
def create_entity_dict(self):
'''
Creates a dict-based entity with fixed values, using all of the supported data types.
'''
entity = {}
# Partition key and row key must be strings and are required
entity['PartitionKey'] = 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))
entity['RowKey'] = 'rk{}'.format(str(uuid.uuid4()).replace('-', ''))
# Some basic types are inferred
entity['age'] = 39 # EdmType.INT64
entity['large'] = 933311100 # EdmType.INT64
entity['sex'] = 'male' # EdmType.STRING
entity['married'] = True # EdmType.BOOLEAN
entity['ratio'] = 3.1 # EdmType.DOUBLE
entity['birthday'] = datetime(1970, 10, 4) # EdmType.DATETIME
# Binary, Int32 and GUID must be explicitly typed
entity['binary'] = EntityProperty(EdmType.BINARY, b'xyz')
entity['other'] = EntityProperty(EdmType.INT32, 20)
entity['clsid'] = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
def insert_entity(self):
table_name = self._create_table()
# Basic w/ dict
entity = self.create_entity_dict()
etag = self.service.insert_entity(table_name, entity)
# Basic w/ class
entity = self.create_entity_class()
etag = self.service.insert_entity(table_name, entity)
self.service.delete_table(table_name)
    def get_entity(self):
        """Demonstrate get_entity: select, accept formats and property resolvers."""
        table_name = self._create_table()
        insert_entity = self.create_entity_class()
        etag = self.service.insert_entity(table_name, insert_entity)

        # Basic
        # Can access properties as dict or like an object
        entity = self.service.get_entity(table_name, insert_entity.PartitionKey, insert_entity.RowKey)  # entity w/ all properties
        entity.age  # 39 (number)
        entity['age']  # 39 (number)
        entity.clsid.value  # 'c9da6455-213d-42c9-9a79-3e9149a57833' (string)
        entity.clsid.type  # Edm.Guid

        # Select
        # Get only the column(s) specified
        entity = self.service.get_entity(table_name, insert_entity.PartitionKey, insert_entity.RowKey,
                                         select='age')  # entity w/ just 'age'
        entity['age']  # 39 (number)
        entity.get('clsid')  # None

        # Accept
        # Default contains all necessary type info. JSON_NO_METADATA returns no type info, though we can guess some client side.
        # If type cannot be inferred, the value is simply returned as a string.
        entity = self.service.get_entity(table_name, insert_entity.PartitionKey, insert_entity.RowKey,
                                         accept=TablePayloadFormat.JSON_NO_METADATA)  # entity w/ all properties, missing type
        entity.age  # '39' (string)
        entity.clsid  # 'c9da6455-213d-42c9-9a79-3e9149a57833' (string)
        entity.married  # True (boolean)

        # Accept w/ Resolver
        # A resolver can be specified to give type info client side if JSON_NO_METADATA is used.
        def resolver(pk, rk, name, value, type):
            if name == 'large' or name == 'age':
                return EdmType.INT64
            if name == 'birthday':
                return EdmType.DATETIME
            if name == 'clsid':
                return EdmType.GUID
        entity = self.service.get_entity(table_name, insert_entity.PartitionKey, insert_entity.RowKey,
                                         accept=TablePayloadFormat.JSON_NO_METADATA,
                                         property_resolver=resolver)  # entity w/ all properties, missing type resolved client side
        entity.age  # 39 (number)
        entity.clsid.value  # 'c9da6455-213d-42c9-9a79-3e9149a57833' (string)
        entity.clsid.type  # Edm.Guid
        entity.married  # True (boolean)

        self.service.delete_table(table_name)
def update_entity(self):
table_name = self._create_table()
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'deceased': False,
'birthday': datetime(1991, 10, 4)}
etag = self.service.insert_entity(table_name, entity)
# Basic
# Replaces entity entirely
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'deceased': True}
etag = self.service.update_entity(table_name, entity)
received_entity = self.service.get_entity(table_name, entity['PartitionKey'], entity['RowKey'])
received_entity.get('deceased') # True
received_entity.get('birthday') # None
# If match
# Replaces entity entirely if etag matches
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'id': 'abc12345'}
self.service.update_entity(table_name, entity, if_match=etag) # Succeeds
try:
self.service.update_entity(table_name, entity, if_match=etag) # Throws as previous update changes etag
except AzureHttpError:
pass
self.service.delete_table(table_name)
def merge_entity(self):
table_name = self._create_table()
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'deceased': False,
'birthday': datetime(1991, 10, 4)}
etag = self.service.insert_entity(table_name, entity)
# Basic
# Replaces entity entirely
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'deceased': True}
etag = self.service.merge_entity(table_name, entity)
received_entity = self.service.get_entity(table_name, entity['PartitionKey'], entity['RowKey'])
received_entity.get('deceased') # True
received_entity.get('birthday') # datetime(1991, 10, 4)
# If match
# Merges entity if etag matches
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'id': 'abc12345'}
self.service.merge_entity(table_name, entity, if_match=etag) # Succeeds
try:
self.service.merge_entity(table_name, entity, if_match=etag) # Throws as previous update changes etag
except AzureHttpError:
pass
self.service.delete_table(table_name)
def insert_or_merge_entity(self):
table_name = self._create_table()
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'deceased': False,
'birthday': datetime(1991, 10, 4)}
# Basic
# Inserts if entity does not already exist
etag = self.service.insert_or_merge_entity(table_name, entity)
# Merges if entity already exists
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'id': 'abc12345'}
etag = self.service.insert_or_merge_entity(table_name, entity)
received_entity = self.service.get_entity(table_name, entity['PartitionKey'], entity['RowKey'])
received_entity.get('id') # 'abc12345'
received_entity.get('deceased') # False
self.service.delete_table(table_name)
def insert_or_replace_entity(self):
table_name = self._create_table()
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'deceased': False,
'birthday': datetime(1991, 10, 4)}
# Basic
# Inserts if entity does not already exist
etag = self.service.insert_or_replace_entity(table_name, entity)
# Replaces if entity already exists
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'id': 'abc12345'}
etag = self.service.insert_or_replace_entity(table_name, entity)
received_entity = self.service.get_entity(table_name, entity['PartitionKey'], entity['RowKey'])
received_entity.get('id') # 'abc12345'
received_entity.get('deceased') # None
self.service.delete_table(table_name)
def delete_entity(self):
table_name = self._create_table()
entity = {'PartitionKey': 'John',
'RowKey': 'Doe'}
etag = self.service.insert_entity(table_name, entity)
# Basic
# Deletes entity
self.service.delete_entity(table_name, entity['PartitionKey'], entity['RowKey'])
# If match
# Deletes entity only if etag matches
entity = {'PartitionKey': 'John',
'RowKey': 'Doe',
'id': 'abc12345'}
etag = self.service.insert_entity(table_name, entity)
self.service.update_entity(table_name, entity, if_match=etag) # Succeeds
try:
self.service.delete_entity(table_name, entity['PartitionKey'], entity['RowKey'], if_match=etag) # Throws as update changes etag
except AzureHttpError:
pass
self.service.delete_table(table_name)
def list_tables(self):
table_name1 = self._create_table('table1')
table_name2 = self._create_table('secondtable')
# Basic
# Commented out as this will list every table in your account
# tables = list(self.service.list_tables())
# for table in tables:
# print(table.name) # secondtable, table1, all other tables created in the self.service
# Num results
# Will return in alphabetical order.
tables = list(self.service.list_tables(num_results=2))
for table in tables:
print(table.name) # secondtable, table1, or whichever 2 queues are alphabetically first in your account
self.service.delete_table(table_name1)
self.service.delete_table(table_name2)
def service_properties(self):
# Basic
self.service.set_table_service_properties(logging=Logging(delete=True),
hour_metrics=Metrics(enabled=True, include_apis=True),
minute_metrics=Metrics(enabled=True, include_apis=False),
cors=[CorsRule(allowed_origins=['*'], allowed_methods=['GET'])])
# Wait 30 seconds for settings to propagate
time.sleep(30)
props = self.service.get_table_service_properties() # props = ServiceProperties() w/ all properties specified above
# Omitted properties will not overwrite what's already on the self.service
# Empty properties will clear
self.service.set_table_service_properties(cors=[])
# Wait 30 seconds for settings to propagate
time.sleep(30)
props = self.service.get_table_service_properties() # props = ServiceProperties() w/ CORS rules cleared
| [
"#-------------------------------------------------------------------------\n",
"# Copyright (c) Microsoft. All rights reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"#--------------------------------------------------------------------------\n",
"import time\n",
"import uuid\n",
"from datetime import datetime\n",
"\n",
"from azure.common import (\n",
" AzureHttpError,\n",
" AzureConflictHttpError,\n",
" AzureMissingResourceHttpError,\n",
")\n",
"from azure.storage import (\n",
" Logging,\n",
" Metrics,\n",
" CorsRule,\n",
")\n",
"from azure.storage.table import (\n",
" Entity,\n",
" TableBatch,\n",
" EdmType,\n",
" EntityProperty,\n",
" TablePayloadFormat,\n",
")\n",
"\n",
"class TableSamples(): \n",
"\n",
" def __init__(self, account):\n",
" self.account = account\n",
"\n",
" def run_all_samples(self):\n",
" self.service = self.account.create_table_service()\n",
"\n",
" self.create_table()\n",
" self.delete_table()\n",
" self.exists()\n",
" self.query_entities() \n",
" self.batch()\n",
"\n",
" self.create_entity_class()\n",
" self.create_entity_dict()\n",
" self.insert_entity()\n",
" self.get_entity()\n",
" self.update_entity()\n",
" self.merge_entity()\n",
" self.insert_or_merge_entity()\n",
" self.insert_or_replace_entity()\n",
" self.delete_entity() \n",
"\n",
" self.list_tables() \n",
"\n",
" # This method contains sleeps, so don't run by default\n",
" # self.service_properties()\n",
"\n",
" def _get_table_reference(self, prefix='table'):\n",
" table_name = '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))\n",
" return table_name\n",
"\n",
" def _create_table(self, prefix='table'):\n",
" table_name = self._get_table_reference(prefix)\n",
" self.service.create_table(table_name)\n",
" return table_name\n",
"\n",
" def create_table(self): \n",
" # Basic\n",
" table_name1 = self._get_table_reference()\n",
" created = self.service.create_table(table_name1) # True\n",
"\n",
" # Fail on exist\n",
" table_name2 = self._get_table_reference()\n",
" created = self.service.create_table(table_name2) # True \n",
" created = self.service.create_table(table_name2) # False\n",
" try:\n",
" self.service.create_table(table_name2, fail_on_exist=True)\n",
" except AzureConflictHttpError:\n",
" pass\n",
"\n",
" self.service.delete_table(table_name1)\n",
" self.service.delete_table(table_name2)\n",
"\n",
" def delete_table(self):\n",
" # Basic\n",
" table_name = self._create_table()\n",
" deleted = self.service.delete_table(table_name) # True \n",
"\n",
" # Fail not exist\n",
" table_name = self._get_table_reference()\n",
" deleted = self.service.delete_table(table_name) # False\n",
" try:\n",
" self.service.delete_table(table_name, fail_not_exist=True)\n",
" except AzureMissingResourceHttpError:\n",
" pass\n",
"\n",
" def exists(self):\n",
" table_name = self._get_table_reference()\n",
"\n",
" # Does not exist\n",
" exists = self.service.exists(table_name) # False\n",
"\n",
" # Exists\n",
" self.service.create_table(table_name)\n",
" exists = self.service.exists(table_name) # True\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def query_entities(self):\n",
" table_name = self._create_table()\n",
"\n",
" entities = []\n",
" for i in range(1, 5):\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe the {}'.format(i),\n",
" 'deceased': False,\n",
" 'birthday': datetime(1991, 10, i)}\n",
" self.service.insert_entity(table_name, entity)\n",
" entities.append(entity)\n",
"\n",
" # Basic\n",
" # Can access properties as dict or like an object\n",
" queried_entities = list(self.service.query_entities(table_name))\n",
" for entity in queried_entities:\n",
" print(entity.RowKey) # All 4 John Doe characters\n",
"\n",
" # Num results\n",
" queried_entities = list(self.service.query_entities(table_name, num_results=2))\n",
" for entity in queried_entities:\n",
" print(entity.RowKey) # Doe the 1, Doe the 2\n",
"\n",
" # Filter\n",
" filter = \"RowKey eq '{}'\".format(entities[1]['RowKey'])\n",
" queried_entities = list(self.service.query_entities(table_name, filter=filter))\n",
" for entity in queried_entities:\n",
" print(entity.RowKey) # Doe the 2\n",
"\n",
" # Select\n",
" # Get only the column(s) specified\n",
" queried_entities = list(self.service.query_entities(table_name, select='birthday'))\n",
" for entity in queried_entities:\n",
" print(entity.birthday) # All 4 John Doe character's birthdays\n",
" queried_entities[0].get('RowKey') # None\n",
"\n",
" # Accept\n",
" # Default contains all necessary type info. JSON_NO_METADATA returns no type info, though we can guess some client side.\n",
" # If type cannot be inferred, the value is simply returned as a string.\n",
" queried_entities = list(self.service.query_entities(table_name, \n",
" accept=TablePayloadFormat.JSON_NO_METADATA)) # entities w/ all properties, missing type\n",
" queried_entities[0].birthday # (string)\n",
" queried_entities[0].deceased # (boolean)\n",
"\n",
" # Accept w/ Resolver\n",
" # A resolver can be specified to give type info client side if JSON_NO_METADATA is used.\n",
" def resolver(pk, rk, name, value, type):\n",
" if name == 'birthday':\n",
" return EdmType.DATETIME\n",
" queried_entities = list(self.service.query_entities(table_name, \n",
" accept=TablePayloadFormat.JSON_NO_METADATA, \n",
" property_resolver=resolver)) # entityentities w/ all properties, missing type resolved client side\n",
" queried_entities[0].birthday # (datetime)\n",
" queried_entities[0].deceased # (boolean)\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def batch(self):\n",
" table_name = self._create_table()\n",
"\n",
" entity = Entity()\n",
" entity.PartitionKey = 'batch'\n",
" entity.test = True\n",
"\n",
" # All operations in the same batch must have the same partition key but different row keys\n",
" # Batches can hold from 1 to 100 entities\n",
" # Batches are atomic. All operations completed simulatenously. If one operation fails, they all fail.\n",
" # Insert, update, merge, insert or merge, insert or replace, and delete entity operations are supported\n",
"\n",
" # Context manager style\n",
" with self.service.batch(table_name) as batch:\n",
" for i in range(0, 5):\n",
" entity.RowKey = 'context_{}'.format(i)\n",
" batch.insert_entity(entity)\n",
"\n",
" # Commit style\n",
" batch = TableBatch()\n",
" for i in range(0, 5):\n",
" entity.RowKey = 'commit_{}'.format(i)\n",
" batch.insert_entity(entity)\n",
" self.service.commit_batch(table_name, batch)\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def create_entity_class(self):\n",
" '''\n",
" Creates a class-based entity with fixed values, using all of the supported data types.\n",
" '''\n",
" entity = Entity()\n",
"\n",
" # Partition key and row key must be strings and are required\n",
" entity.PartitionKey= 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))\n",
" entity.RowKey = 'rk{}'.format(str(uuid.uuid4()).replace('-', '')) \n",
"\n",
" # Some basic types are inferred\n",
" entity.age = 39 # EdmType.INT64\n",
" entity.large = 933311100 # EdmType.INT64\n",
" entity.sex = 'male' # EdmType.STRING\n",
" entity.married = True # EdmType.BOOLEAN\n",
" entity.ratio = 3.1 # EdmType.DOUBLE\n",
" entity.birthday = datetime(1970, 10, 4) # EdmType.DATETIME\n",
"\n",
" # Binary, Int32 and GUID must be explicitly typed\n",
" entity.binary = EntityProperty(EdmType.BINARY, b'xyz')\n",
" entity.other = EntityProperty(EdmType.INT32, 20)\n",
" entity.clsid = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833')\n",
" return entity\n",
"\n",
" def create_entity_dict(self):\n",
" '''\n",
" Creates a dict-based entity with fixed values, using all of the supported data types.\n",
" '''\n",
" entity = {}\n",
"\n",
" # Partition key and row key must be strings and are required\n",
" entity['PartitionKey'] = 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))\n",
" entity['RowKey'] = 'rk{}'.format(str(uuid.uuid4()).replace('-', '')) \n",
"\n",
" # Some basic types are inferred\n",
" entity['age'] = 39 # EdmType.INT64\n",
" entity['large'] = 933311100 # EdmType.INT64\n",
" entity['sex'] = 'male' # EdmType.STRING\n",
" entity['married'] = True # EdmType.BOOLEAN\n",
" entity['ratio'] = 3.1 # EdmType.DOUBLE\n",
" entity['birthday'] = datetime(1970, 10, 4) # EdmType.DATETIME\n",
"\n",
" # Binary, Int32 and GUID must be explicitly typed\n",
" entity['binary'] = EntityProperty(EdmType.BINARY, b'xyz')\n",
" entity['other'] = EntityProperty(EdmType.INT32, 20)\n",
" entity['clsid'] = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833')\n",
" return entity\n",
"\n",
" def insert_entity(self):\n",
" table_name = self._create_table()\n",
"\n",
" # Basic w/ dict\n",
" entity = self.create_entity_dict()\n",
" etag = self.service.insert_entity(table_name, entity)\n",
"\n",
" # Basic w/ class\n",
" entity = self.create_entity_class()\n",
" etag = self.service.insert_entity(table_name, entity)\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def get_entity(self):\n",
" table_name = self._create_table()\n",
" insert_entity = self.create_entity_class()\n",
" etag = self.service.insert_entity(table_name, insert_entity)\n",
"\n",
" # Basic\n",
" # Can access properties as dict or like an object\n",
" entity = self.service.get_entity(table_name, insert_entity.PartitionKey, insert_entity.RowKey) # entity w/ all properties\n",
" entity.age # 39 (number)\n",
" entity['age'] #39 (number)\n",
" entity.clsid.value # 'c9da6455-213d-42c9-9a79-3e9149a57833' (string)\n",
" entity.clsid.type # Edm.Guid\n",
"\n",
" # Select\n",
" entity = self.service.get_entity(table_name, insert_entity.PartitionKey, insert_entity.RowKey, \n",
" select='age') # entity w/ just 'age'\n",
" entity['age'] # 39 (number)\n",
" entity.get('clsid') # None\n",
"\n",
" # Accept\n",
" # Default contains all necessary type info. JSON_NO_METADATA returns no type info, though we can guess some client side.\n",
" # If type cannot be inferred, the value is simply returned as a string.\n",
" entity = self.service.get_entity(table_name, insert_entity.PartitionKey, insert_entity.RowKey, \n",
" accept=TablePayloadFormat.JSON_NO_METADATA) # entity w/ all properties, missing type\n",
" entity.age # '39' (string)\n",
" entity.clsid # 'c9da6455-213d-42c9-9a79-3e9149a57833' (string)\n",
" entity.married # True (boolean)\n",
"\n",
" # Accept w/ Resolver\n",
" # A resolver can be specified to give type info client side if JSON_NO_METADATA is used.\n",
" def resolver(pk, rk, name, value, type):\n",
" if name == 'large' or name == 'age':\n",
" return EdmType.INT64\n",
" if name == 'birthday':\n",
" return EdmType.DATETIME\n",
" if name == 'clsid':\n",
" return EdmType.GUID\n",
" entity = self.service.get_entity(table_name, insert_entity.PartitionKey, insert_entity.RowKey, \n",
" accept=TablePayloadFormat.JSON_NO_METADATA, \n",
" property_resolver=resolver) # entity w/ all properties, missing type\n",
" entity.age # 39 (number)\n",
" entity.clsid.value # 'c9da6455-213d-42c9-9a79-3e9149a57833' (string)\n",
" entity.clsid.type # Edm.Guid\n",
" entity.married # True (boolean)\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def update_entity(self):\n",
" table_name = self._create_table()\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'deceased': False,\n",
" 'birthday': datetime(1991, 10, 4)}\n",
" etag = self.service.insert_entity(table_name, entity)\n",
"\n",
" # Basic\n",
" # Replaces entity entirely\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'deceased': True}\n",
" etag = self.service.update_entity(table_name, entity)\n",
" received_entity = self.service.get_entity(table_name, entity['PartitionKey'], entity['RowKey'])\n",
" received_entity.get('deceased') # True\n",
" received_entity.get('birthday') # None\n",
"\n",
" # If match\n",
" # Replaces entity entirely if etag matches\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'id': 'abc12345'}\n",
"\n",
" self.service.update_entity(table_name, entity, if_match=etag) # Succeeds\n",
" try:\n",
" self.service.update_entity(table_name, entity, if_match=etag) # Throws as previous update changes etag\n",
" except AzureHttpError:\n",
" pass\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def merge_entity(self):\n",
" table_name = self._create_table()\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'deceased': False,\n",
" 'birthday': datetime(1991, 10, 4)}\n",
" etag = self.service.insert_entity(table_name, entity)\n",
"\n",
" # Basic\n",
" # Replaces entity entirely\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'deceased': True}\n",
" etag = self.service.merge_entity(table_name, entity)\n",
" received_entity = self.service.get_entity(table_name, entity['PartitionKey'], entity['RowKey'])\n",
" received_entity.get('deceased') # True\n",
" received_entity.get('birthday') # datetime(1991, 10, 4)\n",
"\n",
" # If match\n",
" # Merges entity if etag matches\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'id': 'abc12345'}\n",
"\n",
" self.service.merge_entity(table_name, entity, if_match=etag) # Succeeds\n",
" try:\n",
" self.service.merge_entity(table_name, entity, if_match=etag) # Throws as previous update changes etag\n",
" except AzureHttpError:\n",
" pass\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def insert_or_merge_entity(self):\n",
" table_name = self._create_table()\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'deceased': False,\n",
" 'birthday': datetime(1991, 10, 4)}\n",
"\n",
" # Basic\n",
" # Inserts if entity does not already exist\n",
" etag = self.service.insert_or_merge_entity(table_name, entity)\n",
"\n",
" # Merges if entity already exists\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'id': 'abc12345'}\n",
" etag = self.service.insert_or_merge_entity(table_name, entity)\n",
" received_entity = self.service.get_entity(table_name, entity['PartitionKey'], entity['RowKey'])\n",
" received_entity.get('id') # 'abc12345'\n",
" received_entity.get('deceased') # False\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def insert_or_replace_entity(self):\n",
" table_name = self._create_table()\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'deceased': False,\n",
" 'birthday': datetime(1991, 10, 4)}\n",
"\n",
" # Basic\n",
" # Inserts if entity does not already exist\n",
" etag = self.service.insert_or_replace_entity(table_name, entity)\n",
"\n",
" # Replaces if entity already exists\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'id': 'abc12345'}\n",
" etag = self.service.insert_or_replace_entity(table_name, entity)\n",
" received_entity = self.service.get_entity(table_name, entity['PartitionKey'], entity['RowKey'])\n",
" received_entity.get('id') # 'abc12345'\n",
" received_entity.get('deceased') # None\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def delete_entity(self):\n",
" table_name = self._create_table()\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe'}\n",
" etag = self.service.insert_entity(table_name, entity)\n",
"\n",
" # Basic\n",
" # Deletes entity\n",
" self.service.delete_entity(table_name, entity['PartitionKey'], entity['RowKey'])\n",
"\n",
" # If match\n",
" # Deletes entity only if etag matches\n",
" entity = {'PartitionKey': 'John',\n",
" 'RowKey': 'Doe',\n",
" 'id': 'abc12345'}\n",
" etag = self.service.insert_entity(table_name, entity)\n",
" self.service.update_entity(table_name, entity, if_match=etag) # Succeeds\n",
" try:\n",
" self.service.delete_entity(table_name, entity['PartitionKey'], entity['RowKey'], if_match=etag) # Throws as update changes etag\n",
" except AzureHttpError:\n",
" pass\n",
"\n",
" self.service.delete_table(table_name)\n",
"\n",
" def list_tables(self):\n",
" table_name1 = self._create_table('table1')\n",
" table_name2 = self._create_table('secondtable')\n",
"\n",
" # Basic\n",
" # Commented out as this will list every table in your account\n",
" # tables = list(self.service.list_tables())\n",
" # for table in tables:\n",
" # print(table.name) # secondtable, table1, all other tables created in the self.service \n",
"\n",
" # Num results\n",
" # Will return in alphabetical order. \n",
" tables = list(self.service.list_tables(num_results=2))\n",
" for table in tables:\n",
" print(table.name) # secondtable, table1, or whichever 2 queues are alphabetically first in your account\n",
"\n",
" self.service.delete_table(table_name1)\n",
" self.service.delete_table(table_name2)\n",
"\n",
" def service_properties(self):\n",
" # Basic\n",
" self.service.set_table_service_properties(logging=Logging(delete=True), \n",
" hour_metrics=Metrics(enabled=True, include_apis=True), \n",
" minute_metrics=Metrics(enabled=True, include_apis=False), \n",
" cors=[CorsRule(allowed_origins=['*'], allowed_methods=['GET'])])\n",
"\n",
" # Wait 30 seconds for settings to propagate\n",
" time.sleep(30)\n",
"\n",
" props = self.service.get_table_service_properties() # props = ServiceProperties() w/ all properties specified above\n",
"\n",
" # Omitted properties will not overwrite what's already on the self.service\n",
" # Empty properties will clear\n",
" self.service.set_table_service_properties(cors=[])\n",
"\n",
" # Wait 30 seconds for settings to propagate\n",
" time.sleep(30)\n",
"\n",
" props = self.service.get_table_service_properties() # props = ServiceProperties() w/ CORS rules cleared\n"
] | [
0.013333333333333334,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0.015625,
0,
0,
0,
0.03076923076923077,
0.015384615384615385,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0.015625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01639344262295082,
0,
0,
0.011363636363636364,
0,
0.017857142857142856,
0,
0,
0,
0.011363636363636364,
0,
0.022222222222222223,
0,
0,
0,
0.010869565217391304,
0,
0.013513513513513514,
0.02040816326530612,
0,
0,
0.007751937984496124,
0,
0.0136986301369863,
0.02097902097902098,
0.020833333333333332,
0.02040816326530612,
0,
0,
0.010309278350515464,
0,
0,
0,
0.0136986301369863,
0.03333333333333333,
0.020833333333333332,
0.02,
0.02040816326530612,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0.00909090909090909,
0.008928571428571428,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0.012658227848101266,
0.013333333333333334,
0,
0,
0.025,
0.02040816326530612,
0.022222222222222223,
0.020833333333333332,
0.022727272727272728,
0.014925373134328358,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0.012048192771084338,
0.01282051282051282,
0,
0,
0.023255813953488372,
0.019230769230769232,
0.020833333333333332,
0.0196078431372549,
0.02127659574468085,
0.014285714285714285,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015384615384615385,
0.030303030303030304,
0.05714285714285714,
0.012987012987012988,
0.02702702702702703,
0,
0,
0.019230769230769232,
0.0273972602739726,
0.027777777777777776,
0.02857142857142857,
0,
0,
0.007751937984496124,
0,
0.019230769230769232,
0.024793388429752067,
0.02857142857142857,
0.014084507042253521,
0.025,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0.024691358024691357,
0.02857142857142857,
0.030303030303030304,
0.012987012987012988,
0.02702702702702703,
0.025,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.02127659574468085,
0.02127659574468085,
0,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0,
0.017391304347826087,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.02127659574468085,
0.015625,
0,
0,
0,
0,
0,
0,
0,
0.0125,
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.02127659574468085,
0.020833333333333332,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.02127659574468085,
0.02127659574468085,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018691588785046728,
0,
0,
0.021739130434782608,
0,
0,
0.017241379310344827,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.0297029702970297,
0.028846153846153848,
0.01818181818181818,
0,
0,
0,
0,
0.016129032258064516,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0.017857142857142856
] | 479 | 0.004648 | false |
#Задача 10. Вариант 10.
#Напишите программу "Генератор персонажей" для игры. Пользователю должно быть предоставлено 30 пунктов, которые можно распределить между четырьмя характеристиками: Сила, Здоровье, Мудрость и Ловкость. Надо сделать так, чтобы пользователь мог не только брать эти пункты из общего "пула", но и возвращать их туда из характеристик, которым он решил присвоить другие значения.
#Donkor.A.H.
#27.05.2016
print ("""
Добро пожаловать в "Игру - Генератор персонажей"
Вы можете распределить 30 очков между 4 характеристиками:
Сила, здоровье, Мудрость и Ловкость. Так же вы можете брать очки, как из общего числа пунктов, так и возвращать эти очки обратно. Удачи!!! И хорошей вам игры!!!
""")
STR=0
HP=0
INT=0
AGL=0
point=30
number=0
print("Если хотите изменить Силу, то напишите - Сила. Если Здоровье, то - Здоровье. Если Мудрость, то - Мудрость. Если Ловкость, то - Ловкость")
while True:
if STR<0 or HP<0 or INT<0 or AGL<0 or point>30:
print("Ошибка")
break
#number=int (inpuit("Напишите слова"))
elif point==0:
print("Вы распределили очки. Их распределение:\nСила:",STR,"\nЗдоровье:",HP,"\nМудрость:",INT,"\nЛовкость:",AGL)
break
print("Ваши очки:\nСила:",STR,"\nЗдоровье:",HP,"\nМудрость:",INT,"\nЛовкость:",AGL,"\nНераспределенные очки:",point)
user_input=input("")
if user_input=="Сила" :
number=int(input("Сколько хотите прибавить(отбавить)?"))
if number <= point :
STR=number
point-=number
else :
print('Слишком много')
elif user_input=="Здоровье":
number=int(input("Сколько хотите прибавить(отбавить)?"))
if number <= point :
HP=number
point-=number
else :
print("Слишком много")
elif user_input=="Мудрость" :
number=int(input("Сколько хотите прибавить(отбавить)?"))
if number <= point :
INT=number
point-=number
else :
print("Слишком много")
elif user_input=="Ловкость":
number=int(input("Сколько хотите прибавить(отбавить)?"))
if number <= point :
AGL=number
point-=number
else :
print("Слишком много")
inpuit ("\nНажмите ENTER, чтобы завершить") | [
"#Задача 10. Вариант 10.\n",
"\n",
"#Напишите программу \"Генератор персонажей\" для игры. Пользователю должно быть предоставлено 30 пунктов, которые можно распределить между четырьмя характеристиками: Сила, Здоровье, Мудрость и Ловкость. Надо сделать так, чтобы пользователь мог не только брать эти пункты из общего \"пула\", но и возвращать их туда из характеристик, которым он решил присвоить другие значения.\n",
"\n",
"#Donkor.A.H.\n",
"#27.05.2016\n",
" \n",
"print (\"\"\"\n",
" \t\t\tДобро пожаловать в \"Игру - Генератор персонажей\"\n",
" \t\tВы можете распределить 30 очков между 4 характеристиками:\n",
" \tСила, здоровье, Мудрость и Ловкость. Так же вы можете брать очки, как из общего числа пунктов, так и возвращать эти очки обратно. Удачи!!! И хорошей вам игры!!!\n",
" \"\"\")\n",
"STR=0\n",
"HP=0\n",
"INT=0\n",
"AGL=0\n",
"point=30\n",
"number=0\n",
"print(\"Если хотите изменить Силу, то напишите - Сила. Если Здоровье, то - Здоровье. Если Мудрость, то - Мудрость. Если Ловкость, то - Ловкость\")\n",
"while True:\n",
" if STR<0 or HP<0 or INT<0 or AGL<0 or point>30:\n",
" print(\"Ошибка\")\n",
" break\n",
" #number=int (inpuit(\"Напишите слова\"))\n",
" elif point==0:\n",
" print(\"Вы распределили очки. Их распределение:\\nСила:\",STR,\"\\nЗдоровье:\",HP,\"\\nМудрость:\",INT,\"\\nЛовкость:\",AGL)\n",
" break\n",
" print(\"Ваши очки:\\nСила:\",STR,\"\\nЗдоровье:\",HP,\"\\nМудрость:\",INT,\"\\nЛовкость:\",AGL,\"\\nНераспределенные очки:\",point)\n",
" user_input=input(\"\")\n",
" if user_input==\"Сила\" :\n",
" number=int(input(\"Сколько хотите прибавить(отбавить)?\"))\n",
" if number <= point :\n",
" STR=number\n",
" point-=number\n",
" else :\n",
" print('Слишком много')\n",
" elif user_input==\"Здоровье\":\n",
" number=int(input(\"Сколько хотите прибавить(отбавить)?\"))\n",
" if number <= point :\n",
" HP=number\n",
" point-=number\n",
" else :\n",
" print(\"Слишком много\")\n",
" elif user_input==\"Мудрость\" :\n",
" number=int(input(\"Сколько хотите прибавить(отбавить)?\"))\n",
" if number <= point :\n",
" INT=number\n",
" point-=number\n",
" else :\n",
" print(\"Слишком много\")\n",
" elif user_input==\"Ловкость\":\n",
" number=int(input(\"Сколько хотите прибавить(отбавить)?\"))\n",
" if number <= point :\n",
" AGL=number\n",
" point-=number\n",
" else :\n",
" print(\"Слишком много\")\n",
"inpuit (\"\\nНажмите ENTER, чтобы завершить\")"
] | [
0.041666666666666664,
0,
0.005361930294906166,
0,
0.07692307692307693,
0.08333333333333333,
0.5,
0.09090909090909091,
0.03773584905660377,
0.03278688524590164,
0.018404907975460124,
0,
0.16666666666666666,
0.2,
0.16666666666666666,
0.16666666666666666,
0.1111111111111111,
0.1111111111111111,
0.00684931506849315,
0,
0.12727272727272726,
0.06666666666666667,
0.05,
0.03773584905660377,
0.09090909090909091,
0.07874015748031496,
0.05,
0.08870967741935484,
0.07142857142857142,
0.0967741935483871,
0.04225352112676056,
0.05714285714285714,
0.05128205128205128,
0.023809523809523808,
0.09523809523809523,
0.045454545454545456,
0.05555555555555555,
0.04225352112676056,
0.05714285714285714,
0.05263157894736842,
0.023809523809523808,
0.09523809523809523,
0.045454545454545456,
0.08108108108108109,
0.04225352112676056,
0.05714285714285714,
0.05128205128205128,
0.023809523809523808,
0.09523809523809523,
0.045454545454545456,
0.05555555555555555,
0.04225352112676056,
0.05714285714285714,
0.05128205128205128,
0.023809523809523808,
0.09523809523809523,
0.045454545454545456,
0.046511627906976744
] | 58 | 0.070262 | false |
# -*- coding: utf-8 -*-
#= IMPORT ======================================================================
from src.channel import rpg_channel
from src.enums import CHANNEL
from src.log import log
#= FUNZIONI ====================================================================
def command_whisper(entity, argument="", behavioured=False):
"""
Comando per parlare sussurrando nella stanza.
"""
# È possibile se il comando è stato deferrato
if not entity:
return False
return rpg_channel(entity, argument, CHANNEL.WHISPER, behavioured=behavioured)
#- Fine Funzione -
def get_syntax_template(entity):
    """
    Return the multi-line syntax help text for the whisper command.

    Returns an empty string (and logs a bug) when entity is not valid.
    """
    if not entity:
        log.bug("entity non è un parametro valido: %r" % entity)
        return ""
    # -------------------------------------------------------------------------
    usage_lines = (
        "whisper <messaggio da sussurrare>\n",
        "whisper a <nome bersaglio> <messaggio da sussurrare al bersaglio>\n",
        "whisper al gruppo <messaggio da sussurrare al gruppo>\n")
    return "".join(usage_lines)
#- Fine Funzione -
| [
"# -*- coding: utf-8 -*-\n",
"\n",
"#= IMPORT ======================================================================\n",
"\n",
"from src.channel import rpg_channel\n",
"from src.enums import CHANNEL\n",
"from src.log import log\n",
"\n",
"\n",
"#= FUNZIONI ====================================================================\n",
"\n",
"def command_whisper(entity, argument=\"\", behavioured=False):\n",
" \"\"\"\n",
" Comando per parlare sussurrando nella stanza.\n",
" \"\"\"\n",
" # È possibile se il comando è stato deferrato\n",
" if not entity:\n",
" return False\n",
"\n",
" return rpg_channel(entity, argument, CHANNEL.WHISPER, behavioured=behavioured)\n",
"#- Fine Funzione -\n",
"\n",
"\n",
"def get_syntax_template(entity):\n",
" if not entity:\n",
" log.bug(\"entity non è un parametro valido: %r\" % entity)\n",
" return \"\"\n",
"\n",
" # -------------------------------------------------------------------------\n",
"\n",
" syntax = \"\"\n",
"\n",
" syntax += \"whisper <messaggio da sussurrare>\\n\"\n",
" syntax += \"whisper a <nome bersaglio> <messaggio da sussurrare al bersaglio>\\n\"\n",
" syntax += \"whisper al gruppo <messaggio da sussurrare al gruppo>\\n\"\n",
"\n",
" return syntax\n",
"#- Fine Funzione -\n"
] | [
0,
0,
0.024691358024691357,
0,
0,
0.03125,
0.03571428571428571,
0,
0,
0.024691358024691357,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0.05263157894736842
] | 38 | 0.006462 | false |
################################################################################
### Copyright © 2012-2013 BlackDragonHunt
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
from PyQt4 import QtGui, QtCore, Qt
from PyQt4.QtGui import QProgressDialog#, QProgressBar, QTextCursor, QImage, QApplication, QShortcut, QKeySequence
# from PyQt4.QtCore import QString
import glob
import logging
import os
import re
import shutil
import tempfile
import threading
import time
# from bitstring import ConstBitStream
import common
from backup import backup_files
from dupe_db import DupesDB
from list_files import list_all_files
from gim_converter import GimConverter, QuantizeType
from model_pak import ModelPak
# Module-level singletons shared by all import/export helpers below.
_CONV = GimConverter()
_DUPE_DB = DupesDB()
# PNG files that are copied verbatim rather than converted to GIM.
SKIP_CONV = ["save_icon0.png", "save_icon0_t.png", "save_new_icon0.png", "save_pic1.png"]
# Filename patterns whose PNG->GIM conversion must use a fixed quantization
# mode instead of QuantizeType.auto (see import_umdimage).
FORCE_QUANTIZE = [
  (re.compile(ur"art_chip_002_\d\d\d.*", re.UNICODE), QuantizeType.index8),
  (re.compile(ur"bgd_\d\d\d.*", re.UNICODE), QuantizeType.index8),
  (re.compile(ur"bustup_\d\d_\d\d.*", re.UNICODE), QuantizeType.index8),
  (re.compile(ur"(cutin|gallery|kotodama|present)_icn_\d\d\d.*", re.UNICODE), QuantizeType.index8),
]
# Minimum delay, in seconds, between progress-dialog refreshes.
MIN_INTERVAL = 0.100
# MODEL_PAK = re.compile(ur"bg_\d\d\d")
_LOGGER_NAME = common.LOGGER_NAME + "." + __name__
_LOGGER = logging.getLogger(_LOGGER_NAME)
################################################################################
### FUNCTIONS
################################################################################
def import_data01(src, dst, convert_png = True, propogate = True, parent = None):
  # Stub: data01 import is not implemented yet; signature mirrors import_umdimage.
  pass
def export_data01(src, dst, convert_gim = True, unique = False, parent = None):
  # Stub: data01 export is not implemented yet; signature mirrors export_umdimage.
  pass
######################################################################
### Importing
######################################################################
def import_umdimage(src, dst, convert_png = True, propogate = True, parent = None):
  """
  Import the files under src into the umdimage directory dst.

  When convert_png is True, .png sources (except those in SKIP_CONV) are
  converted to .gim before copying, using a forced quantization for names
  matching FORCE_QUANTIZE.  When propogate is True, each file is also
  copied over every duplicate recorded in the dupe DB.  Affected files in
  dst are backed up first, and a cancelable progress dialog is shown.
  Note: the parameter name "propogate" (sic) is kept for caller
  compatibility.

  Raises ValueError if src and dst are the same directory.
  """
  src = os.path.abspath(src)
  dst = os.path.abspath(dst)
  if os.path.normcase(src) == os.path.normcase(dst):
    raise ValueError("Cannot import %s. Source and destination directories are the same." % src)
  answer = QtGui.QMessageBox.question(
    parent,
    "Import Directory",
    "Importing directory:\n\n" + src + "\n\n" +
    "into directory:\n\n" + dst + "\n\n" +
    "Any affected files will be backed up. Proceed?",
    buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
    defaultButton = QtGui.QMessageBox.No
  )
  if answer == QtGui.QMessageBox.No:
    return
  progress = QProgressDialog("Finding files...", "Cancel", 0, 1, parent)
  progress.setWindowTitle("Importing...")
  progress.setWindowModality(Qt.Qt.WindowModal)
  progress.setValue(0)
  progress.setAutoClose(False)
  progress.setMinimumDuration(0)
  # Geometry used to keep the progress dialog centered over the parent.
  if parent:
    width = parent.width()
    height = parent.height()
    x = parent.x()
    y = parent.y()
  else:
    width = 1920
    height = 1080
    x = 0
    y = 0
  progress.setMaximum(0)
  progress.setValue(0)
  # The raw list of files we're importing.
  files = []
  # A list of lists, including all dupes of the files being imported, too.
  affected_files = []
  file_count = 0
  dupe_base = "umdimage"
  tmp = tempfile.mkdtemp(prefix = "sdse-")
  seen_groups = []
  count = 0
  last_update = time.time()
  # Pass 1: collect relative source paths and expand each into the full
  # set of destination files (dupes included) it affects.
  for file in list_all_files(src):
    if progress.wasCanceled():
      break
    # Strip our base directory, so we have just a relative file list.
    file = os.path.normpath(os.path.normcase(file[len(src) + 1:]))
    files.append(file)
    count += 1
    if time.time() - last_update > MIN_INTERVAL or count % 25 == 0:
      last_update = time.time()
      progress.setLabelText("Finding files...\n" + file)
      # progress.setValue(count)
      progress.setValue(progress.value() ^ 1)
      # Re-center the dialog.
      progress_w = progress.geometry().width()
      progress_h = progress.geometry().height()
      # NOTE(review): "/" is integer division under Python 2 here — confirm
      # before porting to Python 3.
      new_x = x + ((width - progress_w) / 2)
      new_y = y + ((height - progress_h) / 2)
      progress.move(new_x, new_y)
    affected_files.append([])
    if os.path.splitext(file)[1] == ".png" and convert_png and file not in SKIP_CONV:
      file = os.path.splitext(file)[0] + ".gim"
    if propogate:
      file_group = _DUPE_DB.group_from_file(os.path.join(dupe_base, file))
    else:
      file_group = None
    if file_group in seen_groups:
      continue
    # If there are no dupes, just add this file.
    if file_group == None:
      affected_files[-1].append(file)
      file_count += 1
      continue
    seen_groups.append(file_group)
    for dupe in _DUPE_DB.files_in_group(file_group):
      # Minus the "umdimage" part
      dupe = dupe[len(dupe_base) + 1:]
      affected_files[-1].append(dupe)
      file_count += 1
  progress.setValue(0)
  progress.setMaximum(file_count)
  # Make a backup first.
  backup_dir = None
  count = 0
  for file_set in affected_files:
    if progress.wasCanceled():
      break
    for file in file_set:
      if progress.wasCanceled():
        break
      count += 1
      if time.time() - last_update > MIN_INTERVAL or count % 25 == 0:
        last_update = time.time()
        progress.setLabelText("Backing up...\n" + file)
        progress.setValue(count)
        # Re-center the dialog.
        progress_w = progress.geometry().width()
        progress_h = progress.geometry().height()
        new_x = x + ((width - progress_w) / 2)
        new_y = y + ((height - progress_h) / 2)
        progress.move(new_x, new_y)
      # It's perfectly possible we want to import some files that
      # don't already exist. Such as when importing a directory
      # with added lines.
      if not os.path.isfile(os.path.join(dst, file)):
        continue
      backup_dir = backup_files(dst, [file], suffix = "_IMPORT", backup_dir = backup_dir)
  progress.setValue(0)
  # And now do our importing.
  import_all_new = False
  skip_all_new = False
  count = 0
  for index, src_file in enumerate(files):
    if progress.wasCanceled():
      break
    # Convert PNG sources to GIM in the temp dir before copying them out.
    if os.path.splitext(src_file)[1] == ".png" and convert_png and src_file not in SKIP_CONV:
      tmp_src_file = os.path.join(tmp, os.path.basename(src_file))
      tmp_src_file = os.path.splitext(tmp_src_file)[0] + ".gim"
      quantize = QuantizeType.auto
      for regex, q in FORCE_QUANTIZE:
        if not regex.search(src_file) == None:
          quantize = q
          break
      _CONV.png_to_gim(os.path.join(src, src_file), tmp_src_file, quantize)
      src_file = tmp_src_file
    else:
      src_file = os.path.join(src, src_file)
    for file in affected_files[index]:
      if progress.wasCanceled():
        break
      dst_file = os.path.join(dst, file)
      count += 1
      # if count % 25 == 0:
      if time.time() - last_update > MIN_INTERVAL or count % 25 == 0:
        last_update = time.time()
        progress.setLabelText("Importing...\n" + file)
        progress.setValue(count)
        # Re-center the dialog.
        progress_w = progress.geometry().width()
        progress_h = progress.geometry().height()
        new_x = x + ((width - progress_w) / 2)
        new_y = y + ((height - progress_h) / 2)
        progress.move(new_x, new_y)
      # We may be allowed to import files that don't exist, but we're
      # going to ask them about it anyway.
      if not os.path.isfile(dst_file):
        if skip_all_new:
          continue
        if not import_all_new:
          answer = QtGui.QMessageBox.question(
            parent,
            "File Not Found",
            "File:\n\n" + file + "\n\n" +
            "does not exist in the target directory. Import anyway?",
            buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.YesToAll | QtGui.QMessageBox.No | QtGui.QMessageBox.NoToAll,
            defaultButton = QtGui.QMessageBox.No
          )
          if answer == QtGui.QMessageBox.YesToAll:
            import_all_new = True
            skip_all_new = False
          elif answer == QtGui.QMessageBox.NoToAll:
            skip_all_new = True
            import_all_new = False
            continue
          elif answer == QtGui.QMessageBox.No:
            continue
      basedir = os.path.dirname(dst_file)
      if not os.path.isdir(basedir):
        os.makedirs(basedir)
      shutil.copy2(src_file, dst_file)
  shutil.rmtree(tmp)
  progress.close()
def import_umdimage2(src, dst, convert_png = True, propogate = True, parent = None):
  """
  Import bg_*.pak content from src into the umdimage2 directory dst.

  Each bg_*.pak entry in src may be a regular file (copied over verbatim)
  or a directory of extracted textures, which are inserted into the
  matching bg_*.pak in dst via insert_textures() on a worker thread.
  When convert_png is False, .png textures are skipped.  When propogate
  is True, each texture is also applied to every duplicate recorded in
  the dupe DB.  Affected paks in dst are backed up first, and a
  cancelable progress dialog is shown.

  Raises ValueError if src and dst are the same directory.
  """
  src = os.path.abspath(src)
  dst = os.path.abspath(dst)
  if os.path.normcase(src) == os.path.normcase(dst):
    raise ValueError("Cannot import %s. Source and destination directories are the same." % src)
  answer = QtGui.QMessageBox.question(
    parent,
    "Import Directory",
    "Importing directory:\n\n" + src + "\n\n" +
    "into directory:\n\n" + dst + "\n\n" +
    "Any affected files will be backed up. Proceed?",
    buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
    defaultButton = QtGui.QMessageBox.No
  )
  if answer == QtGui.QMessageBox.No:
    return
  progress = QProgressDialog("Importing...", "Cancel", 0, 0, parent)
  progress.setWindowTitle("Importing...")
  progress.setWindowModality(Qt.Qt.WindowModal)
  progress.setValue(0)
  progress.setAutoClose(False)
  progress.setMinimumDuration(0)
  tmp_dst = tempfile.mkdtemp(prefix = "sdse-")
  backup_dir = None
  for pak in glob.iglob(os.path.join(src, "bg_*.pak")):
    if progress.wasCanceled():
      break
    pak_name = os.path.basename(pak)
    backup_dir = backup_files(dst, [pak_name], suffix = "_IMPORT", backup_dir = backup_dir)
    # If we have a regular file with the bg_*.pak name, then just drop it in.
    if os.path.isfile(pak):
      progress.setLabelText("Importing:\n" + pak_name)
      # Flip the value between 0 and 1 so the busy dialog keeps repainting.
      progress.setValue(progress.value() ^ 1)
      shutil.copy2(pak, os.path.join(dst, pak_name))
    # Otherwise, if it's a directory, insert all the textures we find
    # into the target bg_*.pak file.
    elif os.path.isdir(pak):
      for image in list_all_files(pak):
        if progress.wasCanceled():
          break
        ext = os.path.splitext(image)[1].lower()
        if ext == ".png" and not convert_png:
          continue
        base_name = image[len(src) + 1:]
        dst_files = []
        if propogate:
          dupe_name = os.path.splitext(base_name)[0] + ".gim"
          dupe_name = os.path.join("umdimage2", dupe_name)
          dupe_name = os.path.normpath(os.path.normcase(dupe_name))
          dupes = _DUPE_DB.files_in_same_group(dupe_name)
          if dupes is None:
            dupes = [dupe_name]
          for dupe in dupes:
            dst_file = dupe[10:] # chop off the "umdimage2/"
            dst_file = os.path.splitext(dst_file)[0] + ext # original extension
            dst_file = os.path.join(tmp_dst, dst_file)
            dst_files.append(dst_file)
        else:
          dst_files = [os.path.join(tmp_dst, base_name)]
        for dst_file in dst_files:
          try:
            os.makedirs(os.path.dirname(dst_file))
          except OSError:
            # Directory already exists; a real failure surfaces in copy below.
            pass
          shutil.copy(image, dst_file)
    if progress.wasCanceled():
      break
    progress.setLabelText("Inserting textures into:\n" + pak_name)
    progress.setValue(progress.value() ^ 1)
    pak_dir = os.path.join(tmp_dst, pak_name)
    pak_file = os.path.join(dst, pak_name)
    # If we didn't copy anything over, just move on.
    if not os.path.isdir(pak_dir):
      continue
    thread = threading.Thread(target = insert_textures, args = (pak_dir, pak_file))
    thread.start()
    # isAlive() is the Python 2 spelling; use is_alive() if porting to 3.
    while thread.isAlive():
      thread.join(MIN_INTERVAL)
      progress.setValue(progress.value() ^ 1)
      if progress.wasCanceled():
        progress.setLabelText("Canceling...")
  shutil.rmtree(tmp_dst)
  progress.close()
######################################################################
### Exporting
######################################################################
def export_umdimage(src, dst, convert_gim = True, unique = False, parent = None):
  """
  Export the umdimage directory src into dst.

  .gim files are converted to .png when convert_gim is True; all other
  files are copied as-is.  When unique is True, only one file per dupe-DB
  group is exported.  Progress is shown in a cancelable dialog.

  Raises ValueError if src and dst are the same directory.
  """
  src = os.path.abspath(src)
  dst = os.path.abspath(dst)
  if os.path.normcase(src) == os.path.normcase(dst):
    raise ValueError("Cannot export %s. Source and destination directories are the same." % src)
  answer = QtGui.QMessageBox.question(
    parent,
    "Export Directory",
    "Exporting directory:\n\n" + src + "\n\n" +
    "into directory:\n\n" + dst + "\n\n" +
    "Proceed?",
    buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
    defaultButton = QtGui.QMessageBox.No
  )
  if answer == QtGui.QMessageBox.No:
    return
  progress = QProgressDialog("Exporting...", "Cancel", 0, 0, parent)
  progress.setWindowTitle("Exporting...")
  progress.setWindowModality(Qt.Qt.WindowModal)
  progress.setValue(0)
  progress.setAutoClose(False)
  progress.setMinimumDuration(0)
  # Geometry used to keep the progress dialog centered over the parent.
  if parent:
    width = parent.width()
    height = parent.height()
    x = parent.x()
    y = parent.y()
  else:
    width = 1920
    height = 1080
    x = 0
    y = 0
  seen_groups = []
  count = 0
  last_update = time.time()
  # NOTE(review): 60000 looks like an arbitrary upper bound, since the
  # file count isn't known up front — confirm.
  progress.setMaximum(60000)
  for filename in list_all_files(src):
    if progress.wasCanceled():
      break
    count += 1
    if time.time() - last_update > MIN_INTERVAL or count % 25 == 0:
      last_update = time.time()
      progress.setLabelText("Exporting...\n" + filename)
      progress.setValue(count)
      # Re-center the dialog.
      progress_w = progress.geometry().width()
      progress_h = progress.geometry().height()
      new_x = x + ((width - progress_w) / 2)
      new_y = y + ((height - progress_h) / 2)
      progress.move(new_x, new_y)
    base_name = filename[len(src) + 1:]
    if unique:
      dupe_name = os.path.join("umdimage", base_name)
      dupe_name = os.path.normpath(os.path.normcase(dupe_name))
      group = _DUPE_DB.group_from_file(dupe_name)
      if group in seen_groups:
        continue
      if group is not None:
        seen_groups.append(group)
    dst_file = os.path.join(dst, base_name)
    dst_dir = os.path.dirname(dst_file)
    ext = os.path.splitext(dst_file)[1].lower()
    try:
      os.makedirs(dst_dir)
    except OSError:
      # Directory already exists; a real failure surfaces in the copy below.
      pass
    if ext == ".gim" and convert_gim:
      dst_file = os.path.splitext(dst_file)[0] + ".png"
      _CONV.gim_to_png(filename, dst_file)
    else:
      shutil.copy2(filename, dst_file)
  progress.close()
def export_umdimage2(src, dst, convert_gim = True, unique = False, parent = None):
  """
  Export every bg_*.pak archive under src into dst.

  Each pak is extracted on a worker thread via extract_model_pak (with
  GIM->PNG conversion when convert_gim is True).  When unique is True,
  paks are extracted to a temp directory first and only one file per
  dupe-DB group is copied into dst.  Progress is shown in a cancelable
  dialog.

  Raises ValueError if src and dst are the same directory.
  """
  src = os.path.abspath(src)
  dst = os.path.abspath(dst)
  if os.path.normcase(src) == os.path.normcase(dst):
    raise ValueError("Cannot export %s. Source and destination directories are the same." % src)
  answer = QtGui.QMessageBox.question(
    parent,
    "Export Directory",
    "Exporting directory:\n\n" + src + "\n\n" +
    "into directory:\n\n" + dst + "\n\n" +
    "Proceed?",
    buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
    defaultButton = QtGui.QMessageBox.No
  )
  if answer == QtGui.QMessageBox.No:
    return
  progress = QProgressDialog("Exporting...", "Cancel", 0, 0, parent)
  progress.setWindowTitle("Exporting...")
  progress.setWindowModality(Qt.Qt.WindowModal)
  progress.setValue(0)
  progress.setAutoClose(False)
  progress.setMinimumDuration(0)
  if unique:
    tmp_dst = tempfile.mkdtemp(prefix = "sdse-")
  else:
    tmp_dst = dst
  seen_groups = []
  for pak in glob.iglob(os.path.join(src, "bg_*.pak")):
    if progress.wasCanceled():
      break
    pak_name = os.path.basename(pak)
    out_dir = os.path.join(tmp_dst, pak_name)
    progress.setLabelText("Extracting:\n" + pak)
    thread = threading.Thread(target = extract_model_pak, args = (pak, out_dir, convert_gim))
    thread.start()
    # isAlive() is the Python 2 spelling; use is_alive() if porting to 3.
    while thread.isAlive():
      thread.join(MIN_INTERVAL)
      # Flip the value between 0 and 1 so the busy dialog keeps repainting.
      progress.setValue(progress.value() ^ 1)
      if progress.wasCanceled():
        progress.setLabelText("Canceling...")
    if progress.wasCanceled():
      break
    if unique:
      for img in list_all_files(out_dir):
        img_base = img[len(tmp_dst) + 1:]
        dupe_name = os.path.splitext(img_base)[0] + ".gim"
        dupe_name = os.path.join("umdimage2", dupe_name)
        dupe_name = os.path.normpath(os.path.normcase(dupe_name))
        group = _DUPE_DB.group_from_file(dupe_name)
        if group in seen_groups:
          continue
        if group is not None:
          seen_groups.append(group)
        dst_file = os.path.join(dst, img_base)
        dst_dir = os.path.dirname(dst_file)
        try:
          os.makedirs(dst_dir)
        except OSError:
          # Directory already exists; a real failure surfaces in the copy below.
          pass
        shutil.copy2(img, dst_file)
      shutil.rmtree(out_dir)
  if unique:
    shutil.rmtree(tmp_dst)
  progress.close()
######################################################################
### Models/textures
######################################################################
def extract_model_pak(filename, out_dir, to_png):
  # Unpack the given bg_*.pak archive into out_dir, optionally converting
  # textures to PNG.
  ModelPak(filename = filename).extract(out_dir, to_png)
def insert_textures(pak_dir, filename):
  """
  Insert extracted textures from pak_dir back into the pak at filename.

  pak_dir holds one subdirectory per GMO model (named after the GMO),
  each containing textures named <gim_id>.gim or <gim_id>.png.  Entries
  that don't match this layout are logged and skipped.  The pak is saved
  back to filename at the end.
  """
  pak = ModelPak(filename = filename)
  for gmo_name in os.listdir(pak_dir):
    full_path = os.path.join(pak_dir, gmo_name)
    if not os.path.isdir(full_path):
      _LOGGER.warning("Not a directory of textures. Skipped importing %s to %s" % (full_path, filename))
      continue
    gmo_id = pak.id_from_name(gmo_name)
    if gmo_id is None:
      _LOGGER.warning("GMO %s does not exist in %s" % (gmo_name, filename))
      continue
    gmo = pak.get_gmo(gmo_id)
    if gmo is None:
      _LOGGER.warning("Failed to retrieve GMO %s from %s" % (gmo_name, filename))
      continue
    for img in os.listdir(full_path):
      name, ext = os.path.splitext(img)
      ext = ext.lower()
      if ext == ".gim":
        is_png = False
      elif ext == ".png":
        is_png = True
      else:
        _LOGGER.warning("Did not insert %s into %s" % (img, gmo_name))
        continue
      # Texture files are named after the numeric GIM ID they replace.
      # NOTE(review): a non-numeric stem raises ValueError here — presumably
      # extract_model_pak always writes numeric names; confirm.
      gim_id = int(name)
      if is_png:
        gmo.replace_png_file(gim_id, os.path.join(full_path, img))
      else:
        gmo.replace_gim_file(gim_id, os.path.join(full_path, img))
    pak.replace_gmo(gmo_id, gmo)
  pak.save(filename)
if __name__ == "__main__":
  # Ad-hoc manual test harness: uncomment/edit the calls below to exercise
  # a specific import/export path.  Not part of the editor's normal flow.
  import sys
  app = QtGui.QApplication(sys.argv)
  handler = logging.StreamHandler(sys.stdout)
  # logging.getLogger(common.LOGGER_NAME).addHandler(handler)
  # export_umdimage2("Y:/Danganronpa/Danganronpa_BEST/umdimage2", "wip/umdimage3", convert_gim = True, unique = True)
  # export_umdimage("Y:/Danganronpa/Danganronpa_BEST/umdimage", "wip/umdimage-out", convert_gim = True, unique = True)
  # import_umdimage2("Y:/Danganronpa/Danganronpa_BEST/image-editing/umdimage2-edited-png", "wip/umdimage2-orig")
  # import_umdimage2("wip/umdimage2-edited-png", "wip/umdimage2-orig", convert_png = False)
  # export_umdimage2("wip/umdimage2-orig", "wip/umdimage2-xxx", convert_gim = True, unique = True)
  # import_umdimage("wip/umdimage-import", "wip/umdimage-test")
  import_umdimage("wip/umdimage-import", "wip/umdimage-test2", propogate = True, convert_png = True)
  # import_umdimage("wip/umdimage-import", "wip/umdimage-test3", propogate = False)
  # extract_model_pak("wip/test/bg_042.pak", "wip/test")
  # import_model_pak("wip/test/bg_042-eng", "wip/test/bg_042.pak")
  # extract_model_pak("wip/test/bg_042.pak", "wip/test")
### EOF ### | [
"################################################################################\n",
"### Copyright © 2012-2013 BlackDragonHunt\n",
"### \n",
"### This file is part of the Super Duper Script Editor.\n",
"### \n",
"### The Super Duper Script Editor is free software: you can redistribute it\n",
"### and/or modify it under the terms of the GNU General Public License as\n",
"### published by the Free Software Foundation, either version 3 of the License,\n",
"### or (at your option) any later version.\n",
"### \n",
"### The Super Duper Script Editor is distributed in the hope that it will be\n",
"### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"### GNU General Public License for more details.\n",
"### \n",
"### You should have received a copy of the GNU General Public License\n",
"### along with the Super Duper Script Editor.\n",
"### If not, see <http://www.gnu.org/licenses/>.\n",
"################################################################################\n",
"\n",
"from PyQt4 import QtGui, QtCore, Qt\n",
"from PyQt4.QtGui import QProgressDialog#, QProgressBar, QTextCursor, QImage, QApplication, QShortcut, QKeySequence\n",
"# from PyQt4.QtCore import QString\n",
"\n",
"import glob\n",
"import logging\n",
"import os\n",
"import re\n",
"import shutil\n",
"import tempfile\n",
"import threading\n",
"import time\n",
"\n",
"# from bitstring import ConstBitStream\n",
"\n",
"import common\n",
"\n",
"from backup import backup_files\n",
"from dupe_db import DupesDB\n",
"from list_files import list_all_files\n",
"from gim_converter import GimConverter, QuantizeType\n",
"from model_pak import ModelPak\n",
"\n",
"_CONV = GimConverter()\n",
"_DUPE_DB = DupesDB()\n",
"\n",
"SKIP_CONV = [\"save_icon0.png\", \"save_icon0_t.png\", \"save_new_icon0.png\", \"save_pic1.png\"]\n",
"\n",
"FORCE_QUANTIZE = [\n",
" (re.compile(ur\"art_chip_002_\\d\\d\\d.*\", re.UNICODE), QuantizeType.index8),\n",
" (re.compile(ur\"bgd_\\d\\d\\d.*\", re.UNICODE), QuantizeType.index8),\n",
" (re.compile(ur\"bustup_\\d\\d_\\d\\d.*\", re.UNICODE), QuantizeType.index8),\n",
" (re.compile(ur\"(cutin|gallery|kotodama|present)_icn_\\d\\d\\d.*\", re.UNICODE), QuantizeType.index8),\n",
"]\n",
"\n",
"MIN_INTERVAL = 0.100\n",
"\n",
"# MODEL_PAK = re.compile(ur\"bg_\\d\\d\\d\")\n",
"\n",
"_LOGGER_NAME = common.LOGGER_NAME + \".\" + __name__\n",
"_LOGGER = logging.getLogger(_LOGGER_NAME)\n",
"\n",
"################################################################################\n",
"### FUNCTIONS\n",
"################################################################################\n",
"def import_data01(src, dst, convert_png = True, propogate = True, parent = None):\n",
" pass\n",
"\n",
"def export_data01(src, dst, convert_gim = True, unique = False, parent = None):\n",
" pass\n",
"\n",
"######################################################################\n",
"### Importing\n",
"######################################################################\n",
"def import_umdimage(src, dst, convert_png = True, propogate = True, parent = None):\n",
" src = os.path.abspath(src)\n",
" dst = os.path.abspath(dst)\n",
" if os.path.normcase(src) == os.path.normcase(dst):\n",
" raise ValueError(\"Cannot import %s. Source and destination directories are the same.\" % src)\n",
" \n",
" answer = QtGui.QMessageBox.question(\n",
" parent,\n",
" \"Import Directory\",\n",
" \"Importing directory:\\n\\n\" + src + \"\\n\\n\" +\n",
" \"into directory:\\n\\n\" + dst + \"\\n\\n\" +\n",
" \"Any affected files will be backed up. Proceed?\",\n",
" buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n",
" defaultButton = QtGui.QMessageBox.No\n",
" )\n",
" \n",
" if answer == QtGui.QMessageBox.No:\n",
" return\n",
" \n",
" progress = QProgressDialog(\"Finding files...\", \"Cancel\", 0, 1, parent)\n",
" progress.setWindowTitle(\"Importing...\")\n",
" progress.setWindowModality(Qt.Qt.WindowModal)\n",
" progress.setValue(0)\n",
" progress.setAutoClose(False)\n",
" progress.setMinimumDuration(0)\n",
" \n",
" if parent:\n",
" width = parent.width()\n",
" height = parent.height()\n",
" x = parent.x()\n",
" y = parent.y()\n",
" else:\n",
" width = 1920\n",
" height = 1080\n",
" x = 0\n",
" y = 0\n",
" \n",
" progress.setMaximum(0)\n",
" progress.setValue(0)\n",
" \n",
" # The raw list of files we're importing.\n",
" files = []\n",
" \n",
" # A list of lists, including all dupes of the files being imported, too.\n",
" affected_files = []\n",
" file_count = 0\n",
" \n",
" dupe_base = \"umdimage\"\n",
" tmp = tempfile.mkdtemp(prefix = \"sdse-\")\n",
" \n",
" seen_groups = []\n",
" \n",
" count = 0\n",
" last_update = time.time()\n",
" \n",
" for file in list_all_files(src):\n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" # Strip our base directory, so we have just a relative file list.\n",
" file = os.path.normpath(os.path.normcase(file[len(src) + 1:]))\n",
" files.append(file)\n",
" \n",
" count += 1\n",
" if time.time() - last_update > MIN_INTERVAL or count % 25 == 0:\n",
" last_update = time.time()\n",
" progress.setLabelText(\"Finding files...\\n\" + file)\n",
" # progress.setValue(count)\n",
" progress.setValue(progress.value() ^ 1)\n",
" \n",
" # Re-center the dialog.\n",
" progress_w = progress.geometry().width()\n",
" progress_h = progress.geometry().height()\n",
" \n",
" new_x = x + ((width - progress_w) / 2)\n",
" new_y = y + ((height - progress_h) / 2)\n",
" \n",
" progress.move(new_x, new_y)\n",
" \n",
" affected_files.append([])\n",
" \n",
" if os.path.splitext(file)[1] == \".png\" and convert_png and file not in SKIP_CONV:\n",
" file = os.path.splitext(file)[0] + \".gim\"\n",
" \n",
" if propogate:\n",
" file_group = _DUPE_DB.group_from_file(os.path.join(dupe_base, file))\n",
" else:\n",
" file_group = None\n",
" \n",
" if file_group in seen_groups:\n",
" continue\n",
" \n",
" # If there are no dupes, just add this file.\n",
" if file_group == None:\n",
" affected_files[-1].append(file)\n",
" file_count += 1\n",
" continue\n",
" \n",
" seen_groups.append(file_group)\n",
" for dupe in _DUPE_DB.files_in_group(file_group):\n",
" # Minus the \"umdimage\" part\n",
" dupe = dupe[len(dupe_base) + 1:]\n",
" affected_files[-1].append(dupe)\n",
" file_count += 1\n",
" \n",
" progress.setValue(0)\n",
" progress.setMaximum(file_count)\n",
" \n",
" # Make a backup first.\n",
" backup_dir = None\n",
" count = 0\n",
" for file_set in affected_files:\n",
" if progress.wasCanceled():\n",
" break\n",
" for file in file_set:\n",
" if progress.wasCanceled():\n",
" break\n",
" count += 1\n",
" if time.time() - last_update > MIN_INTERVAL or count % 25 == 0:\n",
" last_update = time.time()\n",
" progress.setLabelText(\"Backing up...\\n\" + file)\n",
" progress.setValue(count)\n",
" \n",
" # Re-center the dialog.\n",
" progress_w = progress.geometry().width()\n",
" progress_h = progress.geometry().height()\n",
" \n",
" new_x = x + ((width - progress_w) / 2)\n",
" new_y = y + ((height - progress_h) / 2)\n",
" \n",
" progress.move(new_x, new_y)\n",
" \n",
" # It's perfectly possible we want to import some files that\n",
" # don't already exist. Such as when importing a directory\n",
" # with added lines.\n",
" if not os.path.isfile(os.path.join(dst, file)):\n",
" continue\n",
" \n",
" backup_dir = backup_files(dst, [file], suffix = \"_IMPORT\", backup_dir = backup_dir)\n",
" \n",
" progress.setValue(0)\n",
" \n",
" # And now do our importing.\n",
" import_all_new = False\n",
" skip_all_new = False\n",
" count = 0\n",
" for index, src_file in enumerate(files):\n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" if os.path.splitext(src_file)[1] == \".png\" and convert_png and src_file not in SKIP_CONV:\n",
" tmp_src_file = os.path.join(tmp, os.path.basename(src_file))\n",
" tmp_src_file = os.path.splitext(tmp_src_file)[0] + \".gim\"\n",
" quantize = QuantizeType.auto\n",
" for regex, q in FORCE_QUANTIZE:\n",
" if not regex.search(src_file) == None:\n",
" quantize = q\n",
" break\n",
" _CONV.png_to_gim(os.path.join(src, src_file), tmp_src_file, quantize)\n",
" src_file = tmp_src_file\n",
" \n",
" else:\n",
" src_file = os.path.join(src, src_file)\n",
" \n",
" for file in affected_files[index]:\n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" dst_file = os.path.join(dst, file)\n",
" \n",
" count += 1\n",
" # if count % 25 == 0:\n",
" if time.time() - last_update > MIN_INTERVAL or count % 25 == 0:\n",
" last_update = time.time()\n",
" progress.setLabelText(\"Importing...\\n\" + file)\n",
" progress.setValue(count)\n",
" \n",
" # Re-center the dialog.\n",
" progress_w = progress.geometry().width()\n",
" progress_h = progress.geometry().height()\n",
" \n",
" new_x = x + ((width - progress_w) / 2)\n",
" new_y = y + ((height - progress_h) / 2)\n",
" \n",
" progress.move(new_x, new_y)\n",
" \n",
" # We may be allowed to import files that don't exist, but we're\n",
" # going to ask them about it anyway.\n",
" if not os.path.isfile(dst_file):\n",
" if skip_all_new:\n",
" continue\n",
" \n",
" if not import_all_new:\n",
" answer = QtGui.QMessageBox.question(\n",
" parent,\n",
" \"File Not Found\",\n",
" \"File:\\n\\n\" + file + \"\\n\\n\" +\n",
" \"does not exist in the target directory. Import anyway?\",\n",
" buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.YesToAll | QtGui.QMessageBox.No | QtGui.QMessageBox.NoToAll,\n",
" defaultButton = QtGui.QMessageBox.No\n",
" )\n",
" \n",
" if answer == QtGui.QMessageBox.YesToAll:\n",
" import_all_new = True\n",
" skip_all_new = False\n",
" elif answer == QtGui.QMessageBox.NoToAll:\n",
" skip_all_new = True\n",
" import_all_new = False\n",
" continue\n",
" elif answer == QtGui.QMessageBox.No:\n",
" continue\n",
" \n",
" basedir = os.path.dirname(dst_file)\n",
" if not os.path.isdir(basedir):\n",
" os.makedirs(basedir)\n",
" \n",
" shutil.copy2(src_file, dst_file)\n",
" \n",
" shutil.rmtree(tmp)\n",
" progress.close()\n",
"\n",
"def import_umdimage2(src, dst, convert_png = True, propogate = True, parent = None):\n",
" src = os.path.abspath(src)\n",
" dst = os.path.abspath(dst)\n",
" if os.path.normcase(src) == os.path.normcase(dst):\n",
" raise ValueError(\"Cannot import %s. Source and destination directories are the same.\" % src)\n",
" \n",
" answer = QtGui.QMessageBox.question(\n",
" parent,\n",
" \"Import Directory\",\n",
" \"Importing directory:\\n\\n\" + src + \"\\n\\n\" +\n",
" \"into directory:\\n\\n\" + dst + \"\\n\\n\" +\n",
" \"Any affected files will be backed up. Proceed?\",\n",
" buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n",
" defaultButton = QtGui.QMessageBox.No\n",
" )\n",
" \n",
" if answer == QtGui.QMessageBox.No:\n",
" return\n",
" \n",
" progress = QProgressDialog(\"Importing...\", \"Cancel\", 0, 0, parent)\n",
" progress.setWindowTitle(\"Importing...\")\n",
" progress.setWindowModality(Qt.Qt.WindowModal)\n",
" progress.setValue(0)\n",
" progress.setAutoClose(False)\n",
" progress.setMinimumDuration(0)\n",
" \n",
" tmp_dst = tempfile.mkdtemp(prefix = \"sdse-\")\n",
" backup_dir = None\n",
" \n",
" for pak in glob.iglob(os.path.join(src, \"bg_*.pak\")):\n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" pak_name = os.path.basename(pak)\n",
" backup_dir = backup_files(dst, [pak_name], suffix = \"_IMPORT\", backup_dir = backup_dir)\n",
" \n",
" # If we have a regular file with the bg_*.pak name, then just drop it in.\n",
" if os.path.isfile(pak):\n",
" progress.setLabelText(\"Importing:\\n\" + pak_name)\n",
" progress.setValue(progress.value() ^ 1)\n",
" shutil.copy2(pak, os.path.join(dst, pak_name))\n",
" \n",
" # Otherwise, if it's a directory, insert all the textures we find\n",
" # into the target bg_*.pak file.\n",
" elif os.path.isdir(pak):\n",
" for image in list_all_files(pak):\n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" ext = os.path.splitext(image)[1].lower()\n",
" if ext == \".png\" and not convert_png:\n",
" continue\n",
" \n",
" base_name = image[len(src) + 1:]\n",
" dst_files = []\n",
" \n",
" if propogate:\n",
" dupe_name = os.path.splitext(base_name)[0] + \".gim\"\n",
" dupe_name = os.path.join(\"umdimage2\", dupe_name)\n",
" dupe_name = os.path.normpath(os.path.normcase(dupe_name))\n",
" \n",
" dupes = _DUPE_DB.files_in_same_group(dupe_name)\n",
" \n",
" if dupes == None:\n",
" dupes = [dupe_name]\n",
" \n",
" for dupe in dupes:\n",
" dst_file = dupe[10:] # chop off the \"umdimage2/\"\n",
" dst_file = os.path.splitext(dst_file)[0] + ext # original extension\n",
" dst_file = os.path.join(tmp_dst, dst_file)\n",
" dst_files.append(dst_file)\n",
" \n",
" else:\n",
" dst_files = [os.path.join(tmp_dst, base_name)]\n",
" \n",
" for dst_file in dst_files:\n",
" try:\n",
" os.makedirs(os.path.dirname(dst_file))\n",
" except:\n",
" pass\n",
" shutil.copy(image, dst_file)\n",
" \n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" progress.setLabelText(\"Inserting textures into:\\n\" + pak_name)\n",
" progress.setValue(progress.value() ^ 1)\n",
" \n",
" pak_dir = os.path.join(tmp_dst, pak_name)\n",
" pak_file = os.path.join(dst, pak_name)\n",
" \n",
" # If we didn't copy anything over, just move on.\n",
" if not os.path.isdir(pak_dir):\n",
" continue\n",
" \n",
" thread = threading.Thread(target = insert_textures, args = (pak_dir, pak_file))\n",
" thread.start()\n",
" \n",
" while thread.isAlive():\n",
" thread.join(MIN_INTERVAL)\n",
" progress.setValue(progress.value() ^ 1)\n",
" \n",
" if progress.wasCanceled():\n",
" progress.setLabelText(\"Canceling...\")\n",
" \n",
" shutil.rmtree(tmp_dst)\n",
" progress.close()\n",
"\n",
"######################################################################\n",
"### Exporting\n",
"######################################################################\n",
"def export_umdimage(src, dst, convert_gim = True, unique = False, parent = None):\n",
" src = os.path.abspath(src)\n",
" dst = os.path.abspath(dst)\n",
" if os.path.normcase(src) == os.path.normcase(dst):\n",
" raise ValueError(\"Cannot export %s. Source and destination directories are the same.\" % src)\n",
" \n",
" answer = QtGui.QMessageBox.question(\n",
" parent,\n",
" \"Export Directory\",\n",
" \"Exporting directory:\\n\\n\" + src + \"\\n\\n\" +\n",
" \"into directory:\\n\\n\" + dst + \"\\n\\n\" +\n",
" \"Proceed?\",\n",
" buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n",
" defaultButton = QtGui.QMessageBox.No\n",
" )\n",
" \n",
" if answer == QtGui.QMessageBox.No:\n",
" return\n",
" \n",
" progress = QProgressDialog(\"Exporting...\", \"Cancel\", 0, 0, parent)\n",
" progress.setWindowTitle(\"Exporting...\")\n",
" progress.setWindowModality(Qt.Qt.WindowModal)\n",
" progress.setValue(0)\n",
" progress.setAutoClose(False)\n",
" progress.setMinimumDuration(0)\n",
" \n",
" if parent:\n",
" width = parent.width()\n",
" height = parent.height()\n",
" x = parent.x()\n",
" y = parent.y()\n",
" else:\n",
" width = 1920\n",
" height = 1080\n",
" x = 0\n",
" y = 0\n",
" \n",
" seen_groups = []\n",
" \n",
" count = 0\n",
" last_update = time.time()\n",
" progress.setMaximum(60000)\n",
" \n",
" for filename in list_all_files(src):\n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" count += 1\n",
" if time.time() - last_update > MIN_INTERVAL or count % 25 == 0:\n",
" last_update = time.time()\n",
" progress.setLabelText(\"Exporting...\\n\" + filename)\n",
" progress.setValue(count)\n",
" \n",
" # Re-center the dialog.\n",
" progress_w = progress.geometry().width()\n",
" progress_h = progress.geometry().height()\n",
" \n",
" new_x = x + ((width - progress_w) / 2)\n",
" new_y = y + ((height - progress_h) / 2)\n",
" \n",
" progress.move(new_x, new_y)\n",
" \n",
" base_name = filename[len(src) + 1:]\n",
" if unique:\n",
" dupe_name = os.path.join(\"umdimage\", base_name)\n",
" dupe_name = os.path.normpath(os.path.normcase(dupe_name))\n",
" \n",
" group = _DUPE_DB.group_from_file(dupe_name)\n",
" \n",
" if group in seen_groups:\n",
" continue\n",
" \n",
" if not group == None:\n",
" seen_groups.append(group)\n",
" \n",
" dst_file = os.path.join(dst, base_name)\n",
" dst_dir = os.path.dirname(dst_file)\n",
" ext = os.path.splitext(dst_file)[1].lower()\n",
" \n",
" try:\n",
" os.makedirs(dst_dir)\n",
" except:\n",
" pass\n",
" \n",
" if ext == \".gim\" and convert_gim:\n",
" dst_file = os.path.splitext(dst_file)[0] + \".png\"\n",
" _CONV.gim_to_png(filename, dst_file)\n",
" else:\n",
" shutil.copy2(filename, dst_file)\n",
" \n",
" progress.close()\n",
"\n",
"def export_umdimage2(src, dst, convert_gim = True, unique = False, parent = None):\n",
" src = os.path.abspath(src)\n",
" dst = os.path.abspath(dst)\n",
" if os.path.normcase(src) == os.path.normcase(dst):\n",
" raise ValueError(\"Cannot export %s. Source and destination directories are the same.\" % src)\n",
" \n",
" answer = QtGui.QMessageBox.question(\n",
" parent,\n",
" \"Export Directory\",\n",
" \"Exporting directory:\\n\\n\" + src + \"\\n\\n\" +\n",
" \"into directory:\\n\\n\" + dst + \"\\n\\n\" +\n",
" \"Proceed?\",\n",
" buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n",
" defaultButton = QtGui.QMessageBox.No\n",
" )\n",
" \n",
" if answer == QtGui.QMessageBox.No:\n",
" return\n",
" \n",
" progress = QProgressDialog(\"Exporting...\", \"Cancel\", 0, 0, parent)\n",
" progress.setWindowTitle(\"Exporting...\")\n",
" progress.setWindowModality(Qt.Qt.WindowModal)\n",
" progress.setValue(0)\n",
" progress.setAutoClose(False)\n",
" progress.setMinimumDuration(0)\n",
" \n",
" if unique:\n",
" tmp_dst = tempfile.mkdtemp(prefix = \"sdse-\")\n",
" else:\n",
" tmp_dst = dst\n",
" \n",
" seen_groups = []\n",
" \n",
" for pak in glob.iglob(os.path.join(src, \"bg_*.pak\")):\n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" pak_name = os.path.basename(pak)\n",
" out_dir = os.path.join(tmp_dst, pak_name)\n",
" \n",
" progress.setLabelText(\"Extracting:\\n\" + pak)\n",
" \n",
" thread = threading.Thread(target = extract_model_pak, args = (pak, out_dir, convert_gim))\n",
" thread.start()\n",
" \n",
" while thread.isAlive():\n",
" thread.join(MIN_INTERVAL)\n",
" progress.setValue(progress.value() ^ 1)\n",
" \n",
" if progress.wasCanceled():\n",
" progress.setLabelText(\"Canceling...\")\n",
" \n",
" if progress.wasCanceled():\n",
" break\n",
" \n",
" if unique:\n",
" for img in list_all_files(out_dir):\n",
" img_base = img[len(tmp_dst) + 1:]\n",
" dupe_name = os.path.splitext(img_base)[0] + \".gim\"\n",
" dupe_name = os.path.join(\"umdimage2\", dupe_name)\n",
" dupe_name = os.path.normpath(os.path.normcase(dupe_name))\n",
" \n",
" group = _DUPE_DB.group_from_file(dupe_name)\n",
" \n",
" if group in seen_groups:\n",
" continue\n",
" \n",
" if not group == None:\n",
" seen_groups.append(group)\n",
" \n",
" dst_file = os.path.join(dst, img_base)\n",
" dst_dir = os.path.dirname(dst_file)\n",
" \n",
" try:\n",
" os.makedirs(dst_dir)\n",
" except:\n",
" pass\n",
" \n",
" shutil.copy2(img, dst_file)\n",
" \n",
" shutil.rmtree(out_dir)\n",
" \n",
" if unique:\n",
" shutil.rmtree(tmp_dst)\n",
" \n",
" progress.close()\n",
"\n",
"######################################################################\n",
"### Models/textures\n",
"######################################################################\n",
"def extract_model_pak(filename, out_dir, to_png):\n",
" pak = ModelPak(filename = filename)\n",
" pak.extract(out_dir, to_png)\n",
"\n",
"def insert_textures(pak_dir, filename):\n",
" \n",
" pak = ModelPak(filename = filename)\n",
" \n",
" for gmo_name in os.listdir(pak_dir):\n",
" full_path = os.path.join(pak_dir, gmo_name)\n",
" if not os.path.isdir(full_path):\n",
" _LOGGER.warning(\"Not a directory of textures. Skipped importing %s to %s\" % (full_path, filename))\n",
" continue\n",
" \n",
" gmo_id = pak.id_from_name(gmo_name)\n",
" if gmo_id == None:\n",
" _LOGGER.warning(\"GMO %s does not exist in %s\" % (gmo_name, filename))\n",
" continue\n",
" \n",
" gmo = pak.get_gmo(gmo_id)\n",
" if gmo == None:\n",
" _LOGGER.warning(\"Failed to retrieve GMO %s from %s\" % (gmo_name, filename))\n",
" continue\n",
" \n",
" for img in os.listdir(os.path.join(pak_dir, gmo_name)):\n",
" name, ext = os.path.splitext(img)\n",
" \n",
" if ext.lower() == \".gim\":\n",
" is_png = False\n",
" elif ext.lower() == \".png\":\n",
" is_png = True\n",
" else:\n",
" _LOGGER.warning(\"Did not insert %s into %s\" % (img, gmo_name))\n",
" continue\n",
" \n",
" gim_id = int(name)\n",
" if is_png:\n",
" gmo.replace_png_file(gim_id, os.path.join(pak_dir, gmo_name, img))\n",
" else:\n",
" gmo.replace_gim_file(gim_id, os.path.join(pak_dir, gmo_name, img))\n",
" \n",
" pak.replace_gmo(gmo_id, gmo)\n",
" \n",
" pak.save(filename)\n",
"\n",
"if __name__ == \"__main__\":\n",
" import sys\n",
" app = QtGui.QApplication(sys.argv)\n",
" \n",
" handler = logging.StreamHandler(sys.stdout)\n",
" # logging.getLogger(common.LOGGER_NAME).addHandler(handler)\n",
" \n",
" # export_umdimage2(\"Y:/Danganronpa/Danganronpa_BEST/umdimage2\", \"wip/umdimage3\", convert_gim = True, unique = True)\n",
" # export_umdimage(\"Y:/Danganronpa/Danganronpa_BEST/umdimage\", \"wip/umdimage-out\", convert_gim = True, unique = True)\n",
" # import_umdimage2(\"Y:/Danganronpa/Danganronpa_BEST/image-editing/umdimage2-edited-png\", \"wip/umdimage2-orig\")\n",
" # import_umdimage2(\"wip/umdimage2-edited-png\", \"wip/umdimage2-orig\", convert_png = False)\n",
" # export_umdimage2(\"wip/umdimage2-orig\", \"wip/umdimage2-xxx\", convert_gim = True, unique = True)\n",
" # import_umdimage(\"wip/umdimage-import\", \"wip/umdimage-test\")\n",
" import_umdimage(\"wip/umdimage-import\", \"wip/umdimage-test2\", propogate = True, convert_png = True)\n",
" # import_umdimage(\"wip/umdimage-import\", \"wip/umdimage-test3\", propogate = False)\n",
" \n",
" # extract_model_pak(\"wip/test/bg_042.pak\", \"wip/test\")\n",
" # import_model_pak(\"wip/test/bg_042-eng\", \"wip/test/bg_042.pak\")\n",
" # extract_model_pak(\"wip/test/bg_042.pak\", \"wip/test\")\n",
"\n",
"### EOF ###"
] | [
0.012345679012345678,
0.023809523809523808,
0.2,
0.017857142857142856,
0.2,
0.013157894736842105,
0.013513513513513514,
0.0125,
0.023255813953488372,
0.2,
0.012987012987012988,
0.013333333333333334,
0.015151515151515152,
0.02040816326530612,
0.2,
0.014285714285714285,
0.021739130434782608,
0.020833333333333332,
0.012345679012345678,
0,
0,
0.02608695652173913,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0.045454545454545456,
0,
0.011111111111111112,
0,
0,
0.04,
0.04,
0.05,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.07142857142857142,
0.012345679012345678,
0.0975609756097561,
0.14285714285714285,
0,
0.0875,
0.14285714285714285,
0,
0,
0.07142857142857142,
0,
0.09523809523809523,
0.034482758620689655,
0.034482758620689655,
0.018867924528301886,
0.010309278350515464,
0.2,
0.02564102564102564,
0,
0,
0,
0,
0,
0.03333333333333333,
0.04878048780487805,
0,
0.3333333333333333,
0.02702702702702703,
0,
0.3333333333333333,
0.0136986301369863,
0.023809523809523808,
0.020833333333333332,
0.043478260869565216,
0.03225806451612903,
0.030303030303030304,
0.3333333333333333,
0.07692307692307693,
0,
0,
0,
0,
0.125,
0.05263157894736842,
0.05263157894736842,
0.0625,
0.0625,
0.3333333333333333,
0.04,
0.043478260869565216,
0.3333333333333333,
0.023255813953488372,
0.07692307692307693,
0.3333333333333333,
0.013333333333333334,
0.045454545454545456,
0.058823529411764705,
0.3333333333333333,
0.04,
0.08163265306122448,
0.3333333333333333,
0.05263157894736842,
0.3333333333333333,
0.08333333333333333,
0.03571428571428571,
0.3333333333333333,
0.02857142857142857,
0,
0.08333333333333333,
0.2,
0,
0,
0,
0.2,
0,
0,
0.03125,
0.017543859649122806,
0.030303030303030304,
0.021739130434782608,
0.14285714285714285,
0.03333333333333333,
0.02127659574468085,
0.020833333333333332,
0.14285714285714285,
0.022222222222222223,
0.021739130434782608,
0.14285714285714285,
0.029411764705882353,
0.2,
0,
0.2,
0.011627906976744186,
0.020833333333333332,
0.2,
0,
0.013333333333333334,
0,
0.041666666666666664,
0.2,
0,
0.06666666666666667,
0.2,
0,
0.037037037037037035,
0.02631578947368421,
0.045454545454545456,
0.06666666666666667,
0.2,
0,
0,
0.029411764705882353,
0.02564102564102564,
0.02631578947368421,
0.045454545454545456,
0.3333333333333333,
0.043478260869565216,
0.029411764705882353,
0.3333333333333333,
0.04,
0.05,
0.08333333333333333,
0.029411764705882353,
0,
0.08333333333333333,
0,
0.030303030303030304,
0,
0.058823529411764705,
0.014285714285714285,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0.14285714285714285,
0.015151515151515152,
0.015625,
0.038461538461538464,
0.018518518518518517,
0,
0.1111111111111111,
0.06666666666666667,
0.3333333333333333,
0.043478260869565216,
0.3333333333333333,
0.03333333333333333,
0.04,
0.043478260869565216,
0.08333333333333333,
0.023255813953488372,
0,
0.08333333333333333,
0.2,
0.010638297872340425,
0.014925373134328358,
0.015625,
0.02857142857142857,
0.02631578947368421,
0.02127659574468085,
0.043478260869565216,
0.0625,
0.013157894736842105,
0.03333333333333333,
0.2,
0,
0.022222222222222223,
0.2,
0,
0.030303030303030304,
0,
0.14285714285714285,
0.024390243902439025,
0.14285714285714285,
0.058823529411764705,
0.03571428571428571,
0.014285714285714285,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0.14285714285714285,
0.014285714285714285,
0.023255813953488372,
0.02564102564102564,
0,
0.05263157894736842,
0.1111111111111111,
0,
0.02127659574468085,
0,
0,
0,
0,
0.024,
0.04081632653061224,
0,
0.09090909090909091,
0.0196078431372549,
0,
0,
0.019230769230769232,
0,
0,
0,
0.02127659574468085,
0,
0.14285714285714285,
0.023809523809523808,
0.02702702702702703,
0,
0.14285714285714285,
0.02564102564102564,
0.3333333333333333,
0.047619047619047616,
0.05263157894736842,
0,
0.09411764705882353,
0.034482758620689655,
0.034482758620689655,
0.018867924528301886,
0.010309278350515464,
0.2,
0.02564102564102564,
0,
0,
0,
0,
0,
0.03333333333333333,
0.04878048780487805,
0,
0.3333333333333333,
0.02702702702702703,
0,
0.3333333333333333,
0.014492753623188406,
0.023809523809523808,
0.020833333333333332,
0.043478260869565216,
0.03225806451612903,
0.030303030303030304,
0.3333333333333333,
0.0784313725490196,
0.09523809523809523,
0.3333333333333333,
0.017857142857142856,
0,
0.08333333333333333,
0.2,
0.025,
0.06451612903225806,
0.2,
0,
0,
0.01818181818181818,
0.021739130434782608,
0.018867924528301886,
0.2,
0,
0,
0,
0.025,
0,
0.0625,
0.1111111111111111,
0,
0,
0.05263157894736842,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0.016129032258064516,
0.01694915254237288,
0.014705882352941176,
0.1111111111111111,
0.017241379310344827,
0.09090909090909091,
0.07142857142857142,
0,
0.09090909090909091,
0.034482758620689655,
0.01639344262295082,
0.0125,
0,
0,
0.1111111111111111,
0,
0.017543859649122806,
0.1111111111111111,
0,
0.06666666666666667,
0,
0.1111111111111111,
0,
0.02564102564102564,
0.14285714285714285,
0.030303030303030304,
0,
0.2,
0.014492753623188406,
0.021739130434782608,
0.14285714285714285,
0.04,
0.043478260869565216,
0.14285714285714285,
0.01818181818181818,
0.02702702702702703,
0,
0.14285714285714285,
0.06976744186046512,
0.047619047619047616,
0.14285714285714285,
0.03333333333333333,
0,
0,
0.1111111111111111,
0,
0.020833333333333332,
0.3333333333333333,
0.04,
0.05263157894736842,
0,
0,
0.07142857142857142,
0,
0.0975609756097561,
0.034482758620689655,
0.034482758620689655,
0.018867924528301886,
0.010309278350515464,
0.2,
0.02564102564102564,
0,
0,
0,
0,
0,
0.03333333333333333,
0.04878048780487805,
0,
0.3333333333333333,
0.02702702702702703,
0,
0.3333333333333333,
0.014492753623188406,
0.023809523809523808,
0.020833333333333332,
0.043478260869565216,
0.03225806451612903,
0.030303030303030304,
0.3333333333333333,
0.07692307692307693,
0,
0,
0,
0,
0.125,
0.05263157894736842,
0.05263157894736842,
0.0625,
0.0625,
0.3333333333333333,
0.05263157894736842,
0.3333333333333333,
0.08333333333333333,
0.03571428571428571,
0.034482758620689655,
0.3333333333333333,
0.02564102564102564,
0,
0.08333333333333333,
0.2,
0,
0,
0.03125,
0.017543859649122806,
0.03225806451612903,
0.14285714285714285,
0.03333333333333333,
0.02127659574468085,
0.020833333333333332,
0.14285714285714285,
0.022222222222222223,
0.021739130434782608,
0.14285714285714285,
0.029411764705882353,
0.2,
0,
0,
0.018518518518518517,
0.015625,
0.14285714285714285,
0.02,
0.14285714285714285,
0.03225806451612903,
0,
0.14285714285714285,
0.07142857142857142,
0,
0.2,
0,
0.024390243902439025,
0.018867924528301886,
0.2,
0,
0.037037037037037035,
0.08333333333333333,
0.09090909090909091,
0.2,
0,
0.017857142857142856,
0.023255813953488372,
0,
0.02564102564102564,
0.3333333333333333,
0.05263157894736842,
0,
0.0963855421686747,
0.034482758620689655,
0.034482758620689655,
0.018867924528301886,
0.010309278350515464,
0.2,
0.02564102564102564,
0,
0,
0,
0,
0,
0.03333333333333333,
0.04878048780487805,
0,
0.3333333333333333,
0.02702702702702703,
0,
0.3333333333333333,
0.014492753623188406,
0.023809523809523808,
0.020833333333333332,
0.043478260869565216,
0.03225806451612903,
0.030303030303030304,
0.3333333333333333,
0.07692307692307693,
0.04081632653061224,
0.125,
0,
0.3333333333333333,
0.05263157894736842,
0.3333333333333333,
0.017857142857142856,
0,
0.08333333333333333,
0.2,
0,
0.02127659574468085,
0.3333333333333333,
0,
0.2,
0.05319148936170213,
0,
0.2,
0,
0.03125,
0.021739130434782608,
0.14285714285714285,
0.030303030303030304,
0,
0.2,
0,
0.08333333333333333,
0.3333333333333333,
0,
0.023809523809523808,
0.023255813953488372,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0.05263157894736842,
0.1111111111111111,
0.03333333333333333,
0.027777777777777776,
0.1111111111111111,
0,
0.022222222222222223,
0.1111111111111111,
0,
0.03225806451612903,
0.0625,
0.06666666666666667,
0.1111111111111111,
0,
0.14285714285714285,
0.034482758620689655,
0.3333333333333333,
0.07692307692307693,
0,
0.3333333333333333,
0.05263157894736842,
0,
0,
0.05,
0,
0.02,
0.07894736842105263,
0.03225806451612903,
0,
0.025,
0.3333333333333333,
0.07894736842105263,
0.3333333333333333,
0.02564102564102564,
0,
0,
0.01904761904761905,
0.06666666666666667,
0.3333333333333333,
0,
0.043478260869565216,
0.013157894736842105,
0.06666666666666667,
0.2,
0,
0.05,
0.024390243902439025,
0.06666666666666667,
0.2,
0,
0.025,
0.14285714285714285,
0.03125,
0,
0.029411764705882353,
0,
0.08333333333333333,
0,
0,
0.14285714285714285,
0.04,
0.058823529411764705,
0,
0.08333333333333333,
0,
0.2,
0,
0.3333333333333333,
0.047619047619047616,
0,
0.037037037037037035,
0.07692307692307693,
0.02702702702702703,
0.3333333333333333,
0.021739130434782608,
0.016129032258064516,
0.3333333333333333,
0.01694915254237288,
0.01680672268907563,
0.017699115044247787,
0.021739130434782608,
0.020202020202020204,
0.015625,
0.0594059405940594,
0.023809523809523808,
0.3333333333333333,
0.017543859649122806,
0.014925373134328358,
0.017543859649122806,
0,
0.18181818181818182
] | 654 | 0.061249 | false |
# ---------------------------------------------------------------------------
# Name: IPP_EuclideanDistance2.py
#
# Purpose: Creates the Statistical Search Area around the IPP using Ring Model
# (25%, 50%, 75% and 95%) based on historical data related to Lost Person
# Behavior. Specific subject category is obtained from the Subject
# Information. IPP Distances are provided by Robert Koester (dbs Productions -
# Lost Person Behavior) and are not included in this copyright.
#
# Author: Don Ferguson
#
# Created: 01/25/2012
# Copyright: (c) Don Ferguson 2012
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The GNU General Public License can be found at
# <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
# Import arcpy module (ArcGIS site package; script is written for Python 2 /
# the classic arcpy.mapping API).
import arcpy
import string
# Bind to the map document the tool is run from and take its first data frame.
mxd = arcpy.mapping.MapDocument("CURRENT")
df=arcpy.mapping.ListDataFrames(mxd,"*")[0]
# Script arguments (all arrive as text from the geoprocessing tool dialog).
SubNum = arcpy.GetParameterAsText(0) # Subject number
IPP = arcpy.GetParameterAsText(1) # Which initial planning point to use: PLS or LKP
UserSelect = arcpy.GetParameterAsText(2) # "User Defined Values" or use the subject-category table
bufferUnit = arcpy.GetParameterAsText(3) # Desired distance units (e.g. miles / kilometers)
IPPDists = arcpy.GetParameterAsText(4) # Optional - comma-separated user-entered ring distances
#arcpy.env.workspace = workspc
# Overwrite pre-existing files
arcpy.env.overwriteOutput = True
# Output feature class for the statistical rings.
# NOTE(review): the backslash is left unescaped; "\S" is not a recognized escape
# so the literal survives, but a raw string (r"...") would be safer.
IPPBuffer = "Planning\StatisticalArea"
SubNum = int(SubNum)
# Feature class holding per-incident environment attributes.
fc1="Incident_Info"
# Read the incident environment (eco-region, terrain, population density) from
# the Incident_Info table; fall back to conservative defaults if anything fails.
try:
    rows = arcpy.SearchCursor(fc1)
    row = rows.next()
    while row:
        # Field names must match the Incident_Info schema exactly.
        # If multiple rows exist, the values from the last row win.
        EcoReg = row.getValue("Eco_Region")
        Terrain = row.getValue("Terrain")
        PopDen = row.getValue("Pop_Den")
        row = rows.next()
    del rows
    del row
except:
    # NOTE(review): bare except deliberately swallows any cursor/schema error
    # and substitutes defaults, warning the user instead of failing the tool.
    EcoReg = "Temperate"
    Terrain = "Mountainous"
    PopDen = "Wilderness"
    arcpy.AddWarning("No Incident Information provided. Default values used.")
    arcpy.AddWarning("Eco_Region = Temperate; Terrain = Mountainous; Population Density = Wilderness.")
    arcpy.AddWarning("If inappropriate...Remove Statistical Search Area Layer, \nprovide Incident Information and re-run\n")
# Look up the subject's category for the requested subject number.
fc2 = "Subject_Information"
where = '"Subject_Number"= %d' % (SubNum)
rows = arcpy.SearchCursor(fc2, where)
row = rows.next()
while row:
    # Field name must match the Subject_Information schema; last matching
    # row wins if several share the subject number.
    Subject_Category = row.getValue("Category")
    row = rows.next()
del rows
del row
del where
# Choose the ring distances. Either the user supplied explicit values, or we
# look them up from Lost Person Behavior statistics (25/50/75/95th percentile
# distances from the IPP) keyed by subject category, eco-region and terrain.
# Table distances are in statute miles; `mult` converts to km when requested.
if UserSelect=='User Defined Values':
    # User-entered, comma-separated distances.
    # NOTE(review): mult stays 1.0 here, so user values are assumed to be in
    # the selected buffer units already - confirm against the tool doc.
    Dist = IPPDists.split(',')
    Distances=map(float,Dist)   # Python 2: map() returns a list here
    Distances.sort()
    mult = 1.0
else:
    if bufferUnit =='kilometers':
        mult = 1.6093472   # miles -> kilometers
    else:
        mult = 1.0
#        bufferUnit = "miles"
    arcpy.AddMessage("Subject Category is " + Subject_Category)
    # Statistical distance table (miles). Values courtesy of Robert Koester
    # (dbs Productions, "Lost Person Behavior"); see file header.
    if Subject_Category == "Abduction":
        Distances = [0.2,1.5,12.0]
    elif Subject_Category == "Aircraft":
        Distances = [0.4,0.9,3.7,10.4]
    elif Subject_Category == "Angler":
        if EcoReg == "Temperate":
            if Terrain == "Mountainous":
                Distances = [0.2,0.9,3.4,6.1]
            else:
                Distances = [0.5,1.0,3.4,6.1]
        elif EcoReg == "Dry":
            Distances = [2.0,6.0,6.5,8.0]
        else:
            Distances = [0.5,1.0,3.4,6.1]
    elif Subject_Category == "All Terrain Vehicle":
        Distances = [1.0,2.0,3.5,5.0]
    elif Subject_Category == "Autistic":
        if EcoReg == "Urban":
            Distances = [0.2,0.6,2.4,5.0]
        else:
            Distances = [0.4,1.0,2.3,9.5]
    elif Subject_Category == "Camper":
        if Terrain == "Mountainous":
            if EcoReg == "Dry":
                Distances = [0.4,1.0,2.6,20.3]
            else:
                Distances = [0.1,1.4,1.9,24.7]
        else:
            Distances = [0.1,0.7,2.0,8.0]
    elif Subject_Category == "Child (1-3)":
        if EcoReg == "Dry":
            Distances = [0.4,0.8,2.4,5.6]
        elif EcoReg == "Urban":
            Distances = [0.1,0.3,0.5,0.7]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.1,0.2,0.6,2.0]
        else:
            Distances = [0.1,0.2,0.6,2.0]
    elif Subject_Category == "Child (4-6)":
        if EcoReg == "Dry":
            Distances = [0.4,1.2,2.0,5.1]
        elif EcoReg == "Urban":
            Distances = [0.06,0.3,0.6,2.1]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.1,0.5,0.9,2.3]
        else:
            Distances = [0.1,0.4,0.9,4.1]
    elif Subject_Category == "Child (7-9)":
        if EcoReg == "Dry":
            Distances = [0.3,0.8,2.0,4.5]
        elif EcoReg == "Urban":
            Distances = [0.1,0.3,0.9,3.2]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.5,1.0,2.0,7.0]
        else:
            Distances = [0.1,0.5,1.3,5.0]
    elif Subject_Category == "Child (10-12)":
        if EcoReg == "Dry":
            Distances = [0.5,1.3,4.5,10.0]
        elif EcoReg == "Urban":
            Distances = [0.2,0.9,1.8,3.6]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.5,1.0,2.0,5.6]
        else:
            Distances = [0.3,1.0,3.0,6.2]
    elif Subject_Category == "Child (13-15)":
        if EcoReg == "Dry":
            Distances = [1.5,2.0,3.0,7.4]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.5,1.3,3.0,13.3]
        else:
            Distances = [0.4,0.8,2.0,6.2]
    elif Subject_Category == "Climber":
        Distances = [0.1,1.0,2.0,9.2]
    elif Subject_Category == "Dementia":
        if EcoReg == "Dry" and Terrain == "Mountainous":
            Distances = [0.6,1.2,1.9,3.8]
        elif EcoReg == "Dry" and Terrain == "Flat":
            Distances = [0.3,1.0,2.2,7.3]
        elif EcoReg == "Urban":
            Distances = [0.2,0.7,2.0,7.8]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.2,0.5,1.2,5.1]
        else:
            Distances = [0.2,0.6,1.5,7.9]
    elif Subject_Category == "Despondent":
        if EcoReg == "Dry" and Terrain == "Mountainous":
            Distances = [0.5,1.0,2.1,11.1]
        elif EcoReg == "Dry" and Terrain == "Flat":
            Distances = [0.3,1.2,2.3,12.8]
        elif EcoReg == "Urban":
            Distances = [0.06,0.3,0.9,8.1]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.2,0.7,2.0,13.3]
        else:
            Distances = [0.2,0.5,1.4,10.7]
    elif Subject_Category == "Gatherer":
        if EcoReg == "Dry":
            Distances = [1.0,1.6,3.6,6.9]
        else:
            Distances = [0.9,2.0,4.0,8.0]
    elif Subject_Category == "Hiker":
        if EcoReg == "Dry" and Terrain == "Mountainous":
            Distances = [1.0,2.0,4.0,11.9]
        elif EcoReg == "Dry" and Terrain == "Flat":
            Distances = [0.8,1.3,4.1,8.1]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.7,1.9,3.6,11.3]
        else:
            Distances = [0.4,1.1,2.0,6.1]
    elif Subject_Category == "Horseback Rider":
        Distances = [0.2,2.0,5.0,12.2]
    elif Subject_Category == "Hunter":
        if EcoReg == "Dry" and Terrain == "Mountainous":
            Distances = [1.3,3.0,5.0,13.8]
        elif EcoReg == "Dry" and Terrain == "Flat":
            Distances = [1.0,1.9,4.0,7.0]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.6,1.3,3.0,10.7]
        else:
            Distances = [0.4,1.0,1.9,8.5]
    elif Subject_Category == "Mental Illness":
        if EcoReg == "Urban":
            Distances = [0.2,0.4,0.9,7.7]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.4,1.4,5.1,9.0]
        else:
            Distances = [0.5,0.6,1.4,5.0]
    elif Subject_Category == "Mental Retardation":
        if EcoReg == "Dry":
            Distances = [0.7,2.5,5.4,38.9]
        elif EcoReg == "Urban":
            Distances = [0.2,0.5,2.3,6.14]
        elif EcoReg == "Temperate" and Terrain == "Mountainous":
            Distances = [0.4,1.0,2.0,7.0]
        else:
            Distances = [0.2,0.6,1.3,7.3]
    elif Subject_Category == "Mountain Biker":
        if EcoReg == "Dry":
            Distances = [1.7,4.0,8.2,18.1]
        else:
            Distances = [1.9,2.5,7.0,15.5]
    elif Subject_Category == "Other (Extreme Sport)":
        Distances = [0.3,1.6,3.5,8.3]
    elif Subject_Category == "Runner":
        Distances = [0.9,1.6,2.1,3.6]
    elif Subject_Category == "Skier-Alpine":
        Distances = [0.7,1.7,3.0,9.4]
    elif Subject_Category == "Skier-Nordic":
        if EcoReg == "Dry":
            Distances = [1.2,2.7,4.0,12.1]
        else:
            Distances = [1.0,2.2,4.0,12.2]
    elif Subject_Category == "Snowboarder":
        Distances = [1.0,2.0,3.8,9.5]
    elif Subject_Category == "Snowmobiler":
        if EcoReg == "Dry":
            Distances = [1.0,3.0,8.7,18.9]
        elif EcoReg == "Temperate" and Terrain == "Flat":
            Distances = [0.8,2.9,25.5,59.7]
        else:
            Distances = [2.0,4.0,6.9,10.0]
    elif Subject_Category == "Substance Abuse":
        Distances = [0.3,0.7,2.6,6.0]
    else:
        # Unknown category: fall back to the generic (temperate hiker) profile.
        Distances = [0.4,1.1,2.0,6.1]
# Select the planning point (PLS or LKP) for this subject and prepare the
# multi-ring buffer inputs: output field names/aliases, area expressions,
# and the unit-scaled ring distances.
fc3 = "Planning Point"
where1 = '"Subject_Number" = ' + str(SubNum)
where2 = ' AND "IPPType" = ' + "'" + IPP + "'"
where = where1 + where2
arcpy.SelectLayerByAttribute_management(fc3, "NEW_SELECTION", where)
arcpy.AddMessage("Buffer IPP around the " + IPP )
dissolve_option = "ALL"
# k tracks whether at least one matching planning point exists (0 = none).
k=0
rows = arcpy.SearchCursor(fc3, where)
for row in rows:
    ##row = rows.next()
    k=1
# Area field and expression depend on the chosen units.
if bufferUnit =='kilometers':
    fieldName3 = "Area_SqKm"
    fieldAlias3 = "Area (sq km)"
    expression3 = "round(!shape.area@squarekilometers!,2)"
    pDistIPP = '"IPPDist"'
else:
    fieldName3 = "Area_SqMi"
    fieldAlias3 = "Area (sq miles)"
    expression3 = "round(!shape.area@squaremiles!,2)"
    pDistIPP = '"IPPDist"'
# NOTE(review): pDistIPP is assigned in both branches but never used below.
# Ring labels written into the Description field (percent of subjects found
# within each ring).
perct = ['25%', '50%', '75%', '95%']
inFeatures = IPPBuffer
fieldName1 = "Descrip"
fieldName2 = "Area_Ac"
fieldName4 = "Sub_Cat"
fieldAlias1 = "Description"
fieldAlias2 = "Area (Acres)"
fieldAlias4 = "Subject Category"
expression2 = "int(!shape.area@acres!)"
# Scale each table distance into the requested units, rounded to 2 decimals.
pDist=[]
for x in Distances:
    pDist.append(round(x * mult,2))
arcpy.AddMessage("Units = " + bufferUnit)
arcpy.AddMessage(pDist)
# Run the multi-ring buffer once per selected planning point, add the
# description/area/category fields, label each ring, and insert the result
# into the map with the standard symbology.
# NOTE(review): this loop reuses `row` and `rows` left over from the earlier
# `for row in rows:` pass over the same cursor; arcpy's old-style cursor has
# already been consumed at this point, so the loop/termination behavior here
# depends on cursor semantics - verify against the arcpy version in use.
while row:
    # Build all rings in one call; "DistFrmIPP" records each ring's distance.
    arcpy.MultipleRingBuffer_analysis(fc3, IPPBuffer, pDist, bufferUnit, "DistFrmIPP", dissolve_option, "FULL")
    row = rows.next()
del rows
del row
del where
arcpy.AddMessage('Completed multi-ring buffer')
# Add the output attribute fields (description, acres, unit-area, category).
arcpy.AddField_management(inFeatures, fieldName1, "TEXT", "", "", "25",
                          fieldAlias1, "NULLABLE", "","PrtRange")
arcpy.AddField_management(inFeatures, fieldName2, "DOUBLE", "", "", "",
                          fieldAlias2, "NULLABLE")
arcpy.AddField_management(inFeatures, fieldName3, "DOUBLE", "", "", "",
                          fieldAlias3, "NULLABLE")
arcpy.AddField_management(inFeatures, fieldName4, "TEXT", "", "", "25",
                          fieldAlias4, "NULLABLE", "","PrtRange")
arcpy.AddMessage('Completed AddFields')
# Compute the area fields from the ring geometry.
arcpy.CalculateField_management(IPPBuffer, fieldName2, expression2,
                                "PYTHON")
arcpy.CalculateField_management(IPPBuffer, fieldName3, expression3,
                                "PYTHON")
# Label each ring with its percentile and the subject category.
# Assumes the update cursor returns rings in the same order as `perct`.
rows = arcpy.UpdateCursor(IPPBuffer)
arcpy.AddMessage('Completed update cursor')
row = rows.next()
k=0
while row:
    row.setValue(fieldName1, perct[k])
    row.setValue(fieldName4, Subject_Category)
    arcpy.AddMessage('Completed setValues')
    rows.updateRow(row)
    k=k+1
    row = rows.next()
del rows
del row
##del where
arcpy.AddMessage('get current map document')
# get the map document
mxd = arcpy.mapping.MapDocument("CURRENT")
arcpy.AddMessage('get data frame')
# get the data frame
df = arcpy.mapping.ListDataFrames(mxd,"*")[0]
# create a new layer
arcpy.AddMessage('Insert IPPBuffer')
insertLayer = arcpy.mapping.Layer(IPPBuffer)
#Reference layer
arcpy.AddMessage('Grab reference layer')
refLayer = arcpy.mapping.ListLayers(mxd, "Hasty_Points", df)[0]
# add the layer to the map at the bottom of the TOC in data frame 0
arcpy.mapping.InsertLayer(df, refLayer, insertLayer,"BEFORE")
arcpy.AddMessage("8 Segments_Group\StatisticalArea")
tryLayer = "8 Segments_Group\StatisticalArea"
# Best-effort symbology: apply the standard layer file if it is installed.
try:
    # Set layer that output symbology will be based on
    symbologyLayer = "C:\MapSAR_Ex\Tools\Layers Files - Local\Layer Groups\StatisticalArea.lyr"
    # Apply the symbology from the symbology layer to the input layer
    arcpy.ApplySymbologyFromLayer_management (tryLayer, symbologyLayer)
except:
    pass
# k stayed 0 only if the planning-point selection matched nothing.
if k == 0:
    arcpy.AddMessage("There was no " + IPP + " defined")
| [
"# ---------------------------------------------------------------------------\n",
"# Name: IPP_EuclideanDistance2.py\n",
"#\n",
"# Purpose: Creates the Statistical Search Area around the IPP using Ring Model\n",
"# (25%, 50%, 75% and 95%) based on historical data related to Lost Person\n",
"# Behavior. Specific subject category is obtained from the Subject\n",
"# Information. IPP Distances are provided by Robert Koester (dbs Productions -\n",
"# Lost Person Behvaior) and are not included in this copyright.\n",
"#\n",
"# Author: Don Ferguson\n",
"#\n",
"# Created: 01/25/2012\n",
"# Copyright: (c) Don Ferguson 2012\n",
"# Licence:\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# The GNU General Public License can be found at\n",
"# <http://www.gnu.org/licenses/>.\n",
"#-------------------------------------------------------------------------------\n",
"# Import arcpy module\n",
"\n",
"import arcpy\n",
"import string\n",
"\n",
"mxd = arcpy.mapping.MapDocument(\"CURRENT\")\n",
"df=arcpy.mapping.ListDataFrames(mxd,\"*\")[0]\n",
"\n",
"# Script arguments\n",
"SubNum = arcpy.GetParameterAsText(0) # Get the subject number\n",
"IPP = arcpy.GetParameterAsText(1) # Determine to use PLS or LKP\n",
"UserSelect = arcpy.GetParameterAsText(2) # Subejct Category or User Defined Values\n",
"bufferUnit = arcpy.GetParameterAsText(3) # Desired units\n",
"IPPDists = arcpy.GetParameterAsText(4) # Optional - User entered distancesDetermine to use PLS or LKP\n",
"\n",
"#arcpy.env.workspace = workspc\n",
"\n",
"# Overwrite pre-existing files\n",
"arcpy.env.overwriteOutput = True\n",
"IPPBuffer = \"Planning\\StatisticalArea\"\n",
"\n",
"SubNum = int(SubNum)\n",
"fc1=\"Incident_Info\"\n",
"try:\n",
" rows = arcpy.SearchCursor(fc1)\n",
" row = rows.next()\n",
"\n",
" while row:\n",
" # you need to insert correct field names in your getvalue function\n",
" EcoReg = row.getValue(\"Eco_Region\")\n",
" Terrain = row.getValue(\"Terrain\")\n",
" PopDen = row.getValue(\"Pop_Den\")\n",
" row = rows.next()\n",
" del rows\n",
" del row\n",
"\n",
"except:\n",
" EcoReg = \"Temperate\"\n",
" Terrain = \"Mountainous\"\n",
" PopDen = \"Wilderness\"\n",
" arcpy.AddWarning(\"No Incident Information provided. Default values used.\")\n",
" arcpy.AddWarning(\"Eco_Region = Temperate; Terrain = Mountainous; Population Density = Wilderness.\")\n",
" arcpy.AddWarning(\"If inappropriate...Remove Statistical Search Area Layer, \\nprovide Incident Information and re-run\\n\")\n",
"\n",
"\n",
"\n",
"fc2 = \"Subject_Information\"\n",
"where = '\"Subject_Number\"= %d' % (SubNum)\n",
"rows = arcpy.SearchCursor(fc2, where)\n",
"row = rows.next()\n",
"\n",
"while row:\n",
" # you need to insert correct field names in your getvalue function\n",
" Subject_Category = row.getValue(\"Category\")\n",
" row = rows.next()\n",
"del rows\n",
"del row\n",
"del where\n",
"\n",
"if UserSelect=='User Defined Values':\n",
" Dist = IPPDists.split(',')\n",
" Distances=map(float,Dist)\n",
" Distances.sort()\n",
" mult = 1.0\n",
"else:\n",
" if bufferUnit =='kilometers':\n",
" mult = 1.6093472\n",
" else:\n",
" mult = 1.0\n",
"\n",
"# bufferUnit = \"miles\"\n",
" arcpy.AddMessage(\"Subject Category is \" + Subject_Category)\n",
"\n",
" if Subject_Category == \"Abduction\":\n",
" Distances = [0.2,1.5,12.0]\n",
"\n",
" elif Subject_Category == \"Aircraft\":\n",
" Distances = [0.4,0.9,3.7,10.4]\n",
"\n",
" elif Subject_Category == \"Angler\":\n",
" if EcoReg == \"Temperate\":\n",
" if Terrain == \"Mountainous\":\n",
" Distances = [0.2,0.9,3.4,6.1]\n",
" else:\n",
" Distances = [0.5,1.0,3.4,6.1]\n",
" elif EcoReg == \"Dry\":\n",
" Distances = [2.0,6.0,6.5,8.0]\n",
" else:\n",
" Distances = [0.5,1.0,3.4,6.1]\n",
"\n",
" elif Subject_Category == \"All Terrain Vehicle\":\n",
" Distances = [1.0,2.0,3.5,5.0]\n",
"\n",
" elif Subject_Category == \"Autistic\":\n",
" if EcoReg == \"Urban\":\n",
" Distances = [0.2,0.6,2.4,5.0]\n",
" else:\n",
" Distances = [0.4,1.0,2.3,9.5]\n",
"\n",
" elif Subject_Category == \"Camper\":\n",
" if Terrain == \"Mountainous\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [0.4,1.0,2.6,20.3]\n",
" else:\n",
" Distances = [0.1,1.4,1.9,24.7]\n",
" else:\n",
" Distances = [0.1,0.7,2.0,8.0]\n",
"\n",
" elif Subject_Category == \"Child (1-3)\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [0.4,0.8,2.4,5.6]\n",
" elif EcoReg == \"Urban\":\n",
" Distances = [0.1,0.3,0.5,0.7]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.1,0.2,0.6,2.0]\n",
" else:\n",
" Distances = [0.1,0.2,0.6,2.0]\n",
"\n",
" elif Subject_Category == \"Child (4-6)\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [0.4,1.2,2.0,5.1]\n",
" elif EcoReg == \"Urban\":\n",
" Distances = [0.06,0.3,0.6,2.1]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.1,0.5,0.9,2.3]\n",
" else:\n",
" Distances = [0.1,0.4,0.9,4.1]\n",
"\n",
" elif Subject_Category == \"Child (7-9)\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [0.3,0.8,2.0,4.5]\n",
" elif EcoReg == \"Urban\":\n",
" Distances = [0.1,0.3,0.9,3.2]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.5,1.0,2.0,7.0]\n",
" else:\n",
" Distances = [0.1,0.5,1.3,5.0]\n",
"\n",
" elif Subject_Category == \"Child (10-12)\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [0.5,1.3,4.5,10.0]\n",
" elif EcoReg == \"Urban\":\n",
" Distances = [0.2,0.9,1.8,3.6]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.5,1.0,2.0,5.6]\n",
" else:\n",
" Distances = [0.3,1.0,3.0,6.2]\n",
"\n",
" elif Subject_Category == \"Child (13-15)\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [1.5,2.0,3.0,7.4]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.5,1.3,3.0,13.3]\n",
" else:\n",
" Distances = [0.4,0.8,2.0,6.2]\n",
"\n",
" elif Subject_Category == \"Climber\":\n",
" Distances = [0.1,1.0,2.0,9.2]\n",
"\n",
" elif Subject_Category == \"Dementia\":\n",
" if EcoReg == \"Dry\" and Terrain == \"Mountainous\":\n",
" Distances = [0.6,1.2,1.9,3.8]\n",
" elif EcoReg == \"Dry\" and Terrain == \"Flat\":\n",
" Distances = [0.3,1.0,2.2,7.3]\n",
" elif EcoReg == \"Urban\":\n",
" Distances = [0.2,0.7,2.0,7.8]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.2,0.5,1.2,5.1]\n",
" else:\n",
" Distances = [0.2,0.6,1.5,7.9]\n",
"\n",
" elif Subject_Category == \"Despondent\":\n",
" if EcoReg == \"Dry\" and Terrain == \"Mountainous\":\n",
" Distances = [0.5,1.0,2.1,11.1]\n",
" elif EcoReg == \"Dry\" and Terrain == \"Flat\":\n",
" Distances = [0.3,1.2,2.3,12.8]\n",
" elif EcoReg == \"Urban\":\n",
" Distances = [0.06,0.3,0.9,8.1]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.2,0.7,2.0,13.3]\n",
" else:\n",
" Distances = [0.2,0.5,1.4,10.7]\n",
"\n",
" elif Subject_Category == \"Gatherer\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [1.0,1.6,3.6,6.9]\n",
" else:\n",
" Distances = [0.9,2.0,4.0,8.0]\n",
"\n",
" elif Subject_Category == \"Hiker\":\n",
" if EcoReg == \"Dry\" and Terrain == \"Mountainous\":\n",
" Distances = [1.0,2.0,4.0,11.9]\n",
" elif EcoReg == \"Dry\" and Terrain == \"Flat\":\n",
" Distances = [0.8,1.3,4.1,8.1]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.7,1.9,3.6,11.3]\n",
" else:\n",
" Distances = [0.4,1.1,2.0,6.1]\n",
"\n",
" elif Subject_Category == \"Horseback Rider\":\n",
" Distances = [0.2,2.0,5.0,12.2]\n",
"\n",
" elif Subject_Category == \"Hunter\":\n",
" if EcoReg == \"Dry\" and Terrain == \"Mountainous\":\n",
" Distances = [1.3,3.0,5.0,13.8]\n",
" elif EcoReg == \"Dry\" and Terrain == \"Flat\":\n",
" Distances = [1.0,1.9,4.0,7.0]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.6,1.3,3.0,10.7]\n",
" else:\n",
" Distances = [0.4,1.0,1.9,8.5]\n",
"\n",
" elif Subject_Category == \"Mental Illness\":\n",
" if EcoReg == \"Urban\":\n",
" Distances = [0.2,0.4,0.9,7.7]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.4,1.4,5.1,9.0]\n",
" else:\n",
" Distances = [0.5,0.6,1.4,5.0]\n",
"\n",
" elif Subject_Category == \"Mental Retardation\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [0.7,2.5,5.4,38.9]\n",
" elif EcoReg == \"Urban\":\n",
" Distances = [0.2,0.5,2.3,6.14]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Mountainous\":\n",
" Distances = [0.4,1.0,2.0,7.0]\n",
" else:\n",
" Distances = [0.2,0.6,1.3,7.3]\n",
"\n",
" elif Subject_Category == \"Mountain Biker\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [1.7,4.0,8.2,18.1]\n",
" else:\n",
" Distances = [1.9,2.5,7.0,15.5]\n",
"\n",
" elif Subject_Category == \"Other (Extreme Sport)\":\n",
" Distances = [0.3,1.6,3.5,8.3]\n",
"\n",
" elif Subject_Category == \"Runner\":\n",
" Distances = [0.9,1.6,2.1,3.6]\n",
"\n",
" elif Subject_Category == \"Skier-Alpine\":\n",
" Distances = [0.7,1.7,3.0,9.4]\n",
"\n",
" elif Subject_Category == \"Skier-Nordic\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [1.2,2.7,4.0,12.1]\n",
" else:\n",
" Distances = [1.0,2.2,4.0,12.2]\n",
"\n",
" elif Subject_Category == \"Snowboarder\":\n",
" Distances = [1.0,2.0,3.8,9.5]\n",
"\n",
" elif Subject_Category == \"Snowmobiler\":\n",
" if EcoReg == \"Dry\":\n",
" Distances = [1.0,3.0,8.7,18.9]\n",
" elif EcoReg == \"Temperate\" and Terrain == \"Flat\":\n",
" Distances = [0.8,2.9,25.5,59.7]\n",
" else:\n",
" Distances = [2.0,4.0,6.9,10.0]\n",
"\n",
" elif Subject_Category == \"Substance Abuse\":\n",
" Distances = [0.3,0.7,2.6,6.0]\n",
"\n",
" else:\n",
" Distances = [0.4,1.1,2.0,6.1]\n",
"\n",
"# Buffer areas of impact around major roads\n",
"fc3 = \"Planning Point\"\n",
"\n",
"where1 = '\"Subject_Number\" = ' + str(SubNum)\n",
"where2 = ' AND \"IPPType\" = ' + \"'\" + IPP + \"'\"\n",
"where = where1 + where2\n",
"\n",
"arcpy.SelectLayerByAttribute_management(fc3, \"NEW_SELECTION\", where)\n",
"arcpy.AddMessage(\"Buffer IPP around the \" + IPP )\n",
"\n",
"dissolve_option = \"ALL\"\n",
"\n",
"k=0\n",
"\n",
"rows = arcpy.SearchCursor(fc3, where)\n",
"for row in rows:\n",
"##row = rows.next()\n",
" k=1\n",
" if bufferUnit =='kilometers':\n",
" fieldName3 = \"Area_SqKm\"\n",
" fieldAlias3 = \"Area (sq km)\"\n",
" expression3 = \"round(!shape.area@squarekilometers!,2)\"\n",
" pDistIPP = '\"IPPDist\"'\n",
"\n",
" else:\n",
" fieldName3 = \"Area_SqMi\"\n",
" fieldAlias3 = \"Area (sq miles)\"\n",
" expression3 = \"round(!shape.area@squaremiles!,2)\"\n",
" pDistIPP = '\"IPPDist\"'\n",
"\n",
" perct = ['25%', '50%', '75%', '95%']\n",
" inFeatures = IPPBuffer\n",
" fieldName1 = \"Descrip\"\n",
" fieldName2 = \"Area_Ac\"\n",
" fieldName4 = \"Sub_Cat\"\n",
"\n",
" fieldAlias1 = \"Description\"\n",
" fieldAlias2 = \"Area (Acres)\"\n",
" fieldAlias4 = \"Subject Category\"\n",
"\n",
" expression2 = \"int(!shape.area@acres!)\"\n",
"\n",
" pDist=[]\n",
" for x in Distances:\n",
" pDist.append(round(x * mult,2))\n",
"\n",
" arcpy.AddMessage(\"Units = \" + bufferUnit)\n",
" arcpy.AddMessage(pDist)\n",
"\n",
" while row:\n",
" # you need to insert correct field names in your getvalue function\n",
" arcpy.MultipleRingBuffer_analysis(fc3, IPPBuffer, pDist, bufferUnit, \"DistFrmIPP\", dissolve_option, \"FULL\")\n",
" row = rows.next()\n",
"\n",
" del rows\n",
" del row\n",
" del where\n",
"\n",
" arcpy.AddMessage('Completed multi-ring buffer')\n",
"\n",
" arcpy.AddField_management(inFeatures, fieldName1, \"TEXT\", \"\", \"\", \"25\",\n",
" fieldAlias1, \"NULLABLE\", \"\",\"PrtRange\")\n",
" arcpy.AddField_management(inFeatures, fieldName2, \"DOUBLE\", \"\", \"\", \"\",\n",
" fieldAlias2, \"NULLABLE\")\n",
" arcpy.AddField_management(inFeatures, fieldName3, \"DOUBLE\", \"\", \"\", \"\",\n",
" fieldAlias3, \"NULLABLE\")\n",
" arcpy.AddField_management(inFeatures, fieldName4, \"TEXT\", \"\", \"\", \"25\",\n",
" fieldAlias4, \"NULLABLE\", \"\",\"PrtRange\")\n",
"\n",
" arcpy.AddMessage('Completed AddFields')\n",
"\n",
" arcpy.CalculateField_management(IPPBuffer, fieldName2, expression2,\n",
" \"PYTHON\")\n",
" arcpy.CalculateField_management(IPPBuffer, fieldName3, expression3,\n",
" \"PYTHON\")\n",
"\n",
" rows = arcpy.UpdateCursor(IPPBuffer)\n",
" arcpy.AddMessage('Completed update cursor')\n",
" row = rows.next()\n",
"\n",
" k=0\n",
" while row:\n",
" # you need to insert correct field names in your getvalue function\n",
" row.setValue(fieldName1, perct[k])\n",
" row.setValue(fieldName4, Subject_Category)\n",
" arcpy.AddMessage('Completed setValues')\n",
" rows.updateRow(row)\n",
" k=k+1\n",
" row = rows.next()\n",
"\n",
" del rows\n",
" del row\n",
" ##del where\n",
"\n",
" arcpy.AddMessage('get current map document')\n",
" # get the map document\n",
" mxd = arcpy.mapping.MapDocument(\"CURRENT\")\n",
"\n",
" arcpy.AddMessage('get data frame')\n",
" # get the data frame\n",
" df = arcpy.mapping.ListDataFrames(mxd,\"*\")[0]\n",
"\n",
" # create a new layer\n",
" arcpy.AddMessage('Insert IPPBuffer')\n",
" insertLayer = arcpy.mapping.Layer(IPPBuffer)\n",
" #Reference layer\n",
" arcpy.AddMessage('Grab reference layer')\n",
" refLayer = arcpy.mapping.ListLayers(mxd, \"Hasty_Points\", df)[0]\n",
" # add the layer to the map at the bottom of the TOC in data frame 0\n",
"\n",
" arcpy.mapping.InsertLayer(df, refLayer, insertLayer,\"BEFORE\")\n",
" arcpy.AddMessage(\"8 Segments_Group\\StatisticalArea\")\n",
" tryLayer = \"8 Segments_Group\\StatisticalArea\"\n",
" try:\n",
" # Set layer that output symbology will be based on\n",
" symbologyLayer = \"C:\\MapSAR_Ex\\Tools\\Layers Files - Local\\Layer Groups\\StatisticalArea.lyr\"\n",
"\n",
" # Apply the symbology from the symbology layer to the input layer\n",
" arcpy.ApplySymbologyFromLayer_management (tryLayer, symbologyLayer)\n",
" except:\n",
" pass\n",
"\n",
"if k == 0:\n",
" arcpy.AddMessage(\"There was no \" + IPP + \" defined\")\n",
"\n",
"\n",
"\n"
] | [
0,
0,
0,
0.012345679012345678,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0.011904761904761904,
0.017543859649122806,
0.009708737864077669,
0,
0.03225806451612903,
0,
0,
0,
0.02564102564102564,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.125,
0,
0,
0,
0,
0.009615384615384616,
0.008,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0.06666666666666667,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0,
0,
0.05714285714285714,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0.06521739130434782,
0,
0.06521739130434782,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0,
0.07894736842105263,
0,
0,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0,
0,
0,
0.06382978723404255,
0,
0.06382978723404255,
0,
0.07142857142857142,
0,
0,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0.08695652173913043,
0,
0.07142857142857142,
0,
0,
0,
0.07142857142857142,
0,
0.06976744186046512,
0,
0.08695652173913043,
0,
0.07142857142857142,
0,
0,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0.08695652173913043,
0,
0.07142857142857142,
0,
0,
0,
0.06976744186046512,
0,
0.07142857142857142,
0,
0.08695652173913043,
0,
0.07142857142857142,
0,
0,
0,
0.07142857142857142,
0,
0.0851063829787234,
0,
0.07142857142857142,
0,
0,
0.07894736842105263,
0,
0,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0.08695652173913043,
0,
0.07142857142857142,
0,
0,
0,
0.06976744186046512,
0,
0.06976744186046512,
0,
0.06976744186046512,
0,
0.0851063829787234,
0,
0.06976744186046512,
0,
0,
0,
0.07142857142857142,
0,
0.07142857142857142,
0,
0,
0,
0.06976744186046512,
0,
0.07142857142857142,
0,
0.0851063829787234,
0,
0.07142857142857142,
0,
0,
0.07692307692307693,
0,
0,
0,
0.06976744186046512,
0,
0.07142857142857142,
0,
0.0851063829787234,
0,
0.07142857142857142,
0,
0,
0,
0.07142857142857142,
0,
0.08695652173913043,
0,
0.07142857142857142,
0,
0,
0,
0.06976744186046512,
0,
0.06976744186046512,
0,
0.08695652173913043,
0,
0.07142857142857142,
0,
0,
0,
0.06976744186046512,
0,
0.06976744186046512,
0,
0,
0.07894736842105263,
0,
0,
0.07894736842105263,
0,
0,
0.07894736842105263,
0,
0,
0,
0.06976744186046512,
0,
0.06976744186046512,
0,
0,
0.07894736842105263,
0,
0,
0,
0.06976744186046512,
0,
0.08333333333333333,
0,
0.06976744186046512,
0,
0,
0.07894736842105263,
0,
0,
0.07894736842105263,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0.25,
0,
0,
0,
0.1,
0.125,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0.025,
0,
0,
0,
0,
0,
0,
0.008620689655172414,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0,
0.02,
0,
0.02,
0,
0,
0,
0,
0,
0.125,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0.015151515151515152,
0.017543859649122806,
0.02,
0,
0,
0.06,
0,
0,
0.013157894736842105,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
1
] | 422 | 0.01937 | false |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import logging
import sys
from requests import RequestException
_LOGGER = logging.getLogger(__name__)
def raise_with_traceback(exception, message="", *args, **kwargs):
    """Raise *exception*, carrying the traceback of the currently handled error.

    :param Exception exception: Error type to be raised.
    :param str message: Message to include with error, empty by default.
    :param args: Any additional args to be included with exception.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    # Fold the original error's type and text into the new message for context.
    full_message = "{}, {}: {}".format(message, exc_type.__name__, exc_value)
    err = exception(full_message, *args, **kwargs)
    try:
        # Python 3: attach the captured traceback directly.
        raise err.with_traceback(exc_traceback)
    except AttributeError:
        # Python 2 fallback: with_traceback() does not exist there.
        err.__traceback__ = exc_traceback
        raise err
class ClientException(Exception):
    """Root of the Client Runtime exception hierarchy.

    :param str message: Human-readable description of the failure.
    :param Exception inner_exception: Optional originating error, kept for
     later inspection.
    """

    def __init__(self, message, inner_exception=None, *args, **kwargs):
        _LOGGER.debug(message)  # trace every client-side failure
        self.inner_exception = inner_exception
        super(ClientException, self).__init__(message, *args, **kwargs)
class SerializationError(ClientException):
    """Raised when a request payload cannot be serialized."""
class DeserializationError(ClientException):
    """Raised when a response payload cannot be deserialized."""
class TokenExpiredError(ClientException):
    """Raised when the request fails because the OAuth token has expired."""
class ValidationError(ClientException):
    """Raised when a request parameter fails client-side validation.

    :param str rule: Name of the validation rule that was violated.
    :param str target: Name of the offending parameter.
    :param value: Constraint value the parameter failed to meet.
    """

    # Reason templates keyed by rule name; each is formatted with the
    # constraint value when the error message is built.
    messages = {
        "min_length": "must have length greater than {!r}.",
        "max_length": "must have length less than {!r}.",
        "minimum": "must be greater than {!r}.",
        "maximum": "must be less than {!r}.",
        "minimum_ex": "must be equal to or greater than {!r}.",
        "maximum_ex": "must be equal to or less than {!r}.",
        "min_items": "must contain at least {!r} items.",
        "max_items": "must contain at most {!r} items.",
        "pattern": "must conform to the following pattern: {!r}.",
        "unique": "must contain only unique items.",
        "multiple": "must be a multiple of {!r}.",
        "required": "can not be None."
    }

    def __init__(self, rule, target, value, *args, **kwargs):
        self.rule = rule
        self.target = target
        # Unknown rules fall back to a generic reason (which has no
        # placeholder, so .format(value) is a no-op on it).
        reason = self.messages.get(
            rule, "failed to meet validation requirement.")
        full_message = "Parameter {!r} ".format(target) + reason.format(value)
        super(ValidationError, self).__init__(full_message, *args, **kwargs)
class ClientRequestError(ClientException):
    """Raised when the client request could not be completed."""
class AuthenticationError(ClientException):
    """Raised when the client request fails to authenticate."""
class HttpOperationError(ClientException):
    """Raised when the server reports a server-specified HTTP operation error.

    Attempts to deserialize the response body into a service-specific error
    object; falls back to information derived from the HTTP status otherwise.

    :param Deserializer deserialize: Deserializer with data on custom
     error objects.
    :param requests.Response response: Server response
    :param str resp_type: Objects type to deserialize response.
    :param args: Additional args to pass to exception object.
    """

    def __str__(self):
        return str(self.message)

    def __init__(self, deserialize, response,
                 resp_type=None, *args, **kwargs):
        self.error = None
        self.message = None
        self.response = response

        # First choice: a typed error object parsed from the response body.
        if resp_type:
            try:
                self.error = deserialize(resp_type, response)
                if self.error is None:
                    self.error = deserialize.dependencies[resp_type]()
                self.message = self.error.message
            except (DeserializationError, AttributeError, KeyError):
                # Body was not a recognizable error payload - fall through.
                pass

        # Fallback: derive error/message from the HTTP status itself.
        if not self.error or not self.message:
            try:
                response.raise_for_status()
            except RequestException as req_err:
                self.error = self.error or req_err
                if not self.message:
                    template = "Operation returned an invalid status code {!r}"
                    self.message = template.format(response.reason)
            else:
                # Status was nominally OK, yet we were asked to raise.
                self.error = self.error or response
                if not self.message:
                    self.message = "Unknown error"

        super(HttpOperationError, self).__init__(
            self.message, self.error, *args, **kwargs)
| [
"# --------------------------------------------------------------------------\n",
"#\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n",
"#\n",
"# The MIT License (MIT)\n",
"#\n",
"# Permission is hereby granted, free of charge, to any person obtaining a copy\n",
"# of this software and associated documentation files (the \"\"Software\"\"), to\n",
"# deal in the Software without restriction, including without limitation the\n",
"# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n",
"# sell copies of the Software, and to permit persons to whom the Software is\n",
"# furnished to do so, subject to the following conditions:\n",
"#\n",
"# The above copyright notice and this permission notice shall be included in\n",
"# all copies or substantial portions of the Software.\n",
"#\n",
"# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n",
"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n",
"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n",
"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n",
"# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n",
"# IN THE SOFTWARE.\n",
"#\n",
"# --------------------------------------------------------------------------\n",
"\n",
"import logging\n",
"import sys\n",
"\n",
"from requests import RequestException\n",
"\n",
"\n",
"_LOGGER = logging.getLogger(__name__)\n",
"\n",
"\n",
"def raise_with_traceback(exception, message=\"\", *args, **kwargs):\n",
" \"\"\"Raise exception with a specified traceback.\n",
"\n",
" :param Exception exception: Error type to be raised.\n",
" :param str message: Message to include with error, empty by default.\n",
" :param args: Any additional args to be included with exception.\n",
" \"\"\"\n",
" exc_type, exc_value, exc_traceback = sys.exc_info()\n",
" exc_msg = \"{}, {}: {}\".format(message, exc_type.__name__, exc_value)\n",
" error = exception(exc_msg, *args, **kwargs)\n",
" try:\n",
" raise error.with_traceback(exc_traceback)\n",
" except AttributeError:\n",
" error.__traceback__ = exc_traceback\n",
" raise error\n",
"\n",
"\n",
"class ClientException(Exception):\n",
" \"\"\"Base exception for all Client Runtime exceptions.\"\"\"\n",
"\n",
" def __init__(self, message, inner_exception=None, *args, **kwargs):\n",
" self.inner_exception = inner_exception\n",
" _LOGGER.debug(message)\n",
" super(ClientException, self).__init__(message, *args, **kwargs)\n",
"\n",
"\n",
"class SerializationError(ClientException):\n",
" \"\"\"Error raised during request serialization.\"\"\"\n",
" pass\n",
"\n",
"\n",
"class DeserializationError(ClientException):\n",
" \"\"\"Error raised during response deserialization.\"\"\"\n",
" pass\n",
"\n",
"\n",
"class TokenExpiredError(ClientException):\n",
" \"\"\"OAuth token expired, request failed.\"\"\"\n",
" pass\n",
"\n",
"\n",
"class ValidationError(ClientException):\n",
" \"\"\"Request parameter validation failed.\"\"\"\n",
"\n",
" messages = {\n",
" \"min_length\": \"must have length greater than {!r}.\",\n",
" \"max_length\": \"must have length less than {!r}.\",\n",
" \"minimum\": \"must be greater than {!r}.\",\n",
" \"maximum\": \"must be less than {!r}.\",\n",
" \"minimum_ex\": \"must be equal to or greater than {!r}.\",\n",
" \"maximum_ex\": \"must be equal to or less than {!r}.\",\n",
" \"min_items\": \"must contain at least {!r} items.\",\n",
" \"max_items\": \"must contain at most {!r} items.\",\n",
" \"pattern\": \"must conform to the following pattern: {!r}.\",\n",
" \"unique\": \"must contain only unique items.\",\n",
" \"multiple\": \"must be a multiple of {!r}.\",\n",
" \"required\": \"can not be None.\"\n",
" }\n",
"\n",
" def __init__(self, rule, target, value, *args, **kwargs):\n",
" self.rule = rule\n",
" self.target = target\n",
" message = \"Parameter {!r} \".format(target)\n",
" reason = self.messages.get(\n",
" rule, \"failed to meet validation requirement.\")\n",
" message += reason.format(value)\n",
" super(ValidationError, self).__init__(message, *args, **kwargs)\n",
"\n",
"\n",
"class ClientRequestError(ClientException):\n",
" \"\"\"Client request failed.\"\"\"\n",
" pass\n",
"\n",
"\n",
"class AuthenticationError(ClientException):\n",
" \"\"\"Client request failed to authentication.\"\"\"\n",
" pass\n",
"\n",
"\n",
"class HttpOperationError(ClientException):\n",
" \"\"\"Client request failed due to server-specificed HTTP operation error.\n",
" Attempts to deserialize response into specific error object.\n",
"\n",
" :param Deserializer deserialize: Deserializer with data on custom\n",
" error objects.\n",
" :param requests.Response response: Server response\n",
" :param str resp_type: Objects type to deserialize response.\n",
" :param args: Additional args to pass to exception object.\n",
" \"\"\"\n",
"\n",
" def __str__(self):\n",
" return str(self.message)\n",
"\n",
" def __init__(self, deserialize, response,\n",
" resp_type=None, *args, **kwargs):\n",
" self.error = None\n",
" self.message = None\n",
" self.response = response\n",
" try:\n",
" if resp_type:\n",
" self.error = deserialize(resp_type, response)\n",
" if self.error is None:\n",
" self.error = deserialize.dependencies[resp_type]()\n",
" self.message = self.error.message\n",
" except (DeserializationError, AttributeError, KeyError):\n",
" pass\n",
"\n",
" if not self.error or not self.message:\n",
" try:\n",
" response.raise_for_status()\n",
" except RequestException as err:\n",
" if not self.error:\n",
" self.error = err\n",
"\n",
" if not self.message:\n",
" msg = \"Operation returned an invalid status code {!r}\"\n",
" self.message = msg.format(response.reason)\n",
" else:\n",
" if not self.error:\n",
" self.error = response\n",
"\n",
" if not self.message:\n",
" self.message = \"Unknown error\"\n",
"\n",
" super(HttpOperationError, self).__init__(\n",
" self.message, self.error, *args, **kwargs)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 161 | 0 | false |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for launching run functions in computing clusters.
During the submit process, this file is copied to the appropriate run dir.
When the job is launched in the cluster, this module is the first thing that
is run inside the docker container.
"""
import os
import pickle
import sys
# PYTHONPATH should have been set so that the run_dir/src is in it
import dnnlib
def main():
if not len(sys.argv) >= 4:
raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!")
run_dir = str(sys.argv[1])
task_name = str(sys.argv[2])
host_name = str(sys.argv[3])
submit_config_path = os.path.join(run_dir, "submit_config.pkl")
# SubmitConfig should have been pickled to the run dir
if not os.path.exists(submit_config_path):
raise RuntimeError("SubmitConfig pickle file does not exist!")
submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb"))
dnnlib.submission.submit.set_user_name_override(submit_config.user_name)
submit_config.task_name = task_name
submit_config.host_name = host_name
dnnlib.submission.submit.run_wrapper(submit_config)
if __name__ == "__main__":
main()
| [
"# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n",
"#\n",
"# This work is licensed under the Creative Commons Attribution-NonCommercial\n",
"# 4.0 International License. To view a copy of this license, visit\n",
"# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n",
"# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n",
"\n",
"\"\"\"Helper for launching run functions in computing clusters.\n",
"\n",
"During the submit process, this file is copied to the appropriate run dir.\n",
"When the job is launched in the cluster, this module is the first thing that\n",
"is run inside the docker container.\n",
"\"\"\"\n",
"\n",
"import os\n",
"import pickle\n",
"import sys\n",
"\n",
"# PYTHONPATH should have been set so that the run_dir/src is in it\n",
"import dnnlib\n",
"\n",
"def main():\n",
" if not len(sys.argv) >= 4:\n",
" raise RuntimeError(\"This script needs three arguments: run_dir, task_name and host_name!\")\n",
"\n",
" run_dir = str(sys.argv[1])\n",
" task_name = str(sys.argv[2])\n",
" host_name = str(sys.argv[3])\n",
"\n",
" submit_config_path = os.path.join(run_dir, \"submit_config.pkl\")\n",
"\n",
" # SubmitConfig should have been pickled to the run dir\n",
" if not os.path.exists(submit_config_path):\n",
" raise RuntimeError(\"SubmitConfig pickle file does not exist!\")\n",
"\n",
" submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, \"rb\"))\n",
" dnnlib.submission.submit.set_user_name_override(submit_config.user_name)\n",
"\n",
" submit_config.task_name = task_name\n",
" submit_config.host_name = host_name\n",
"\n",
" dnnlib.submission.submit.run_wrapper(submit_config)\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0
] | 45 | 0.003161 | false |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import datetime
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import xlsxwriter
import conopy.util as util
class LinksMenu(QMenu):
sections = None
win = None
view = None
def __init__(self, win, parent=None):
super().__init__(parent)
self.win = win
if not win:
#print("No focused window")
return
self.view = util.focusItemView(self.win)
if not self.view:
#print("No focused item view")
return
index = self.view.currentIndex()
if not index.isValid():
return
self.row = index.row()
model = self.view.model()
#self.headers = [ str(model.headerData(col, Qt.Horizontal)).upper() for col in range(model.columnCount()) ]
self.headers = []
for col in range(model.columnCount()):
d = model.headerData(col, Qt.Horizontal, Qt.EditRole)
if d is None:
d = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
self.headers.append(str(d).upper())
self.roles = win.fieldRoles if 'fieldRoles' in dir(win) else {} # { role: fieldName }
self.roles = { str(r).upper():str(self.roles[r]).upper() for r in self.roles }
#print('headers',self.headers)
#print('roles',self.roles)
iniFile = util.nearFile('.','data/links.ini')
ini = QSettings(iniFile, QSettings.IniFormat)
ini.setIniCodec("utf-8")
ini.beginGroup('Links')
self.sections = ini.value('Sections')
ini.endGroup()
if self.sections is None:
return
if type(self.sections) != type([]):
self.sections = [self.sections]
#print(self.sections)
rhset = set(self.headers).union(set(self.roles))
for s in self.sections:
ini.beginGroup(s)
t = ini.value('Title')
if not t:
t = s
params = ini.value("Params")
if params is None:
params = []
if type(params) != type([]):
params = [params]
exeIni = ini.value("Ini")
ini.endGroup()
upar = [ p.upper() for p in params]
#print('sect',s,'params',upar)
if not set(upar).issubset(rhset):
#print('not added')
continue
a = self.addAction(t)
a.params = params
a.exeIni = util.nearFile(iniFile,exeIni)
a.iniFile = iniFile
a.section = s
a.win = win
#print('added')
self.triggered.connect(self.exeAction)
def isValid(self):
return self.win and self.view and self.sections
def exeAction(self, a):
model = self.view.model()
#print(2, a.params, a.exeIni)
values = {}
for p in a.params:
par = str(p).upper()
if not par in self.headers:
if par in self.roles:
par = self.roles[par]
try:
col = self.headers.index(par)
values[p] = model.index(self.row, col).data(Qt.DisplayRole)
except:
#print(str(sys.exc_info()[1]))
#print(a.params)
return
#print(3, values)
w = util.mainWindow.runIni(a.exeIni)
w.clearParamValues()
for v in values:
w.setParamValue(v, values[v])
def showMenu(win):
menu = LinksMenu(win)
if menu.isValid():
menu.exec(QCursor.pos())
| [
"#!/usr/bin/python3\n",
"# -*- coding: utf-8 -*-\n",
"\n",
"import sys\n",
"import datetime\n",
"from PyQt5.QtCore import *\n",
"from PyQt5.QtWidgets import *\n",
"from PyQt5.QtGui import *\n",
"import xlsxwriter\n",
"import conopy.util as util\n",
"\n",
"class LinksMenu(QMenu):\n",
" sections = None\n",
" win = None\n",
" view = None\n",
" def __init__(self, win, parent=None):\n",
" super().__init__(parent)\n",
" self.win = win\n",
" if not win:\n",
" #print(\"No focused window\")\n",
" return\n",
" self.view = util.focusItemView(self.win)\n",
" if not self.view:\n",
" #print(\"No focused item view\")\n",
" return\n",
" index = self.view.currentIndex()\n",
" if not index.isValid():\n",
" return\n",
" self.row = index.row()\n",
" model = self.view.model()\n",
" #self.headers = [ str(model.headerData(col, Qt.Horizontal)).upper() for col in range(model.columnCount()) ]\n",
" self.headers = []\n",
" for col in range(model.columnCount()):\n",
" d = model.headerData(col, Qt.Horizontal, Qt.EditRole)\n",
" if d is None:\n",
" d = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)\n",
" self.headers.append(str(d).upper())\n",
" self.roles = win.fieldRoles if 'fieldRoles' in dir(win) else {} # { role: fieldName }\n",
" self.roles = { str(r).upper():str(self.roles[r]).upper() for r in self.roles }\n",
" #print('headers',self.headers)\n",
" #print('roles',self.roles)\n",
" iniFile = util.nearFile('.','data/links.ini')\n",
" ini = QSettings(iniFile, QSettings.IniFormat)\n",
" ini.setIniCodec(\"utf-8\")\n",
" ini.beginGroup('Links')\n",
" self.sections = ini.value('Sections')\n",
" ini.endGroup()\n",
" if self.sections is None:\n",
" return\n",
" if type(self.sections) != type([]):\n",
" self.sections = [self.sections]\n",
" #print(self.sections)\n",
" rhset = set(self.headers).union(set(self.roles))\n",
" for s in self.sections:\n",
" ini.beginGroup(s)\n",
" t = ini.value('Title')\n",
" if not t:\n",
" t = s\n",
" params = ini.value(\"Params\")\n",
" if params is None:\n",
" params = []\n",
" if type(params) != type([]):\n",
" params = [params]\n",
" exeIni = ini.value(\"Ini\")\n",
" ini.endGroup()\n",
" upar = [ p.upper() for p in params]\n",
" #print('sect',s,'params',upar)\n",
" if not set(upar).issubset(rhset):\n",
" #print('not added')\n",
" continue\n",
" a = self.addAction(t)\n",
" a.params = params\n",
" a.exeIni = util.nearFile(iniFile,exeIni)\n",
" a.iniFile = iniFile\n",
" a.section = s\n",
" a.win = win\n",
" #print('added')\n",
" self.triggered.connect(self.exeAction)\n",
"\n",
" def isValid(self):\n",
" return self.win and self.view and self.sections\n",
"\n",
" def exeAction(self, a):\n",
" model = self.view.model()\n",
" #print(2, a.params, a.exeIni)\n",
" values = {}\n",
" for p in a.params:\n",
" par = str(p).upper()\n",
" if not par in self.headers:\n",
" if par in self.roles:\n",
" par = self.roles[par]\n",
" try:\n",
" col = self.headers.index(par)\n",
" values[p] = model.index(self.row, col).data(Qt.DisplayRole)\n",
" except:\n",
" #print(str(sys.exc_info()[1]))\n",
" #print(a.params)\n",
" return\n",
"\n",
" #print(3, values)\n",
" w = util.mainWindow.runIni(a.exeIni)\n",
" w.clearParamValues()\n",
" for v in values:\n",
" w.setParamValue(v, values[v]) \n",
" \n",
"\n",
"def showMenu(win):\n",
" menu = LinksMenu(win)\n",
" if menu.isValid():\n",
" menu.exec(QCursor.pos())\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0.05263157894736842,
0.07142857142857142,
0.06666666666666667,
0.04878048780487805,
0.03225806451612903,
0.047619047619047616,
0.05555555555555555,
0.05405405405405406,
0.0625,
0.02127659574468085,
0.041666666666666664,
0.05,
0.0625,
0.02564102564102564,
0.03333333333333333,
0.0625,
0.034482758620689655,
0.03125,
0.02631578947368421,
0.041666666666666664,
0.022222222222222223,
0.015873015873015872,
0.043478260869565216,
0,
0.022222222222222223,
0.021505376344086023,
0.058823529411764705,
0.05405405405405406,
0.06060606060606061,
0.038461538461538464,
0.019230769230769232,
0.03225806451612903,
0.03333333333333333,
0.022727272727272728,
0.047619047619047616,
0.03125,
0.0625,
0.047619047619047616,
0.024390243902439025,
0.07142857142857142,
0.01818181818181818,
0.03333333333333333,
0.037037037037037035,
0.03125,
0.05263157894736842,
0,
0.02631578947368421,
0.03571428571428571,
0,
0.05263157894736842,
0,
0.02857142857142857,
0.041666666666666664,
0.06521739130434782,
0.05,
0.023255813953488372,
0.03125,
0,
0.03225806451612903,
0.037037037037037035,
0.04,
0.034482758620689655,
0.043478260869565216,
0.047619047619047616,
0.08,
0.022222222222222223,
0,
0.045454545454545456,
0.018518518518518517,
0,
0.037037037037037035,
0.03125,
0.05555555555555555,
0.05555555555555555,
0.04,
0.03333333333333333,
0.05405405405405406,
0,
0.02702702702702703,
0.07142857142857142,
0,
0,
0.11764705882352941,
0.023255813953488372,
0.034482758620689655,
0,
0,
0.08333333333333333,
0.023255813953488372,
0.037037037037037035,
0.043478260869565216,
0.047619047619047616,
0.25,
0,
0,
0.04,
0.045454545454545456,
0.03225806451612903
] | 110 | 0.034587 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Alphas import *
from Alphas.ConstantAlphaModel import ConstantAlphaModel
from Selection.EmaCrossUniverseSelectionModel import EmaCrossUniverseSelectionModel
from Portfolio.EqualWeightingPortfolioConstructionModel import EqualWeightingPortfolioConstructionModel
from datetime import timedelta
### <summary>
### Framework algorithm that uses the EmaCrossUniverseSelectionModel to
### select the universe based on a moving average cross.
### </summary>
class EmaCrossUniverseSelectionFrameworkAlgorithm(QCAlgorithmFramework):
'''Framework algorithm that uses the EmaCrossUniverseSelectionModel to select the universe based on a moving average cross.'''
def Initialize(self):
self.SetStartDate(2013,1,1)
self.SetEndDate(2015,1,1)
self.SetCash(100000)
fastPeriod = 100
slowPeriod = 300
count = 10
self.UniverseSettings.Leverage = 2.0
self.UniverseSettings.Resolution = Resolution.Daily
self.SetUniverseSelection(EmaCrossUniverseSelectionModel(fastPeriod, slowPeriod, count))
self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(1), None, None))
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel()) | [
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"AddReference(\"QuantConnect.Algorithm.Framework\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Orders import *\n",
"from QuantConnect.Algorithm import *\n",
"from QuantConnect.Algorithm.Framework import *\n",
"from QuantConnect.Algorithm.Framework.Alphas import *\n",
"from Alphas.ConstantAlphaModel import ConstantAlphaModel\n",
"from Selection.EmaCrossUniverseSelectionModel import EmaCrossUniverseSelectionModel\n",
"from Portfolio.EqualWeightingPortfolioConstructionModel import EqualWeightingPortfolioConstructionModel\n",
"from datetime import timedelta\n",
"\n",
"### <summary>\n",
"### Framework algorithm that uses the EmaCrossUniverseSelectionModel to\n",
"### select the universe based on a moving average cross.\n",
"### </summary>\n",
"class EmaCrossUniverseSelectionFrameworkAlgorithm(QCAlgorithmFramework):\n",
" '''Framework algorithm that uses the EmaCrossUniverseSelectionModel to select the universe based on a moving average cross.'''\n",
"\n",
" def Initialize(self):\n",
" \n",
" self.SetStartDate(2013,1,1)\n",
" self.SetEndDate(2015,1,1)\n",
" self.SetCash(100000)\n",
"\n",
" fastPeriod = 100\n",
" slowPeriod = 300\n",
" count = 10\n",
"\n",
" self.UniverseSettings.Leverage = 2.0\n",
" self.UniverseSettings.Resolution = Resolution.Daily\n",
"\n",
" self.SetUniverseSelection(EmaCrossUniverseSelectionModel(fastPeriod, slowPeriod, count))\n",
" self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(1), None, None))\n",
" self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.029411764705882353,
0.02702702702702703,
0.02127659574468085,
0.018518518518518517,
0.017543859649122806,
0.023809523809523808,
0.019230769230769232,
0.03225806451612903,
0,
0.07142857142857142,
0.013888888888888888,
0.017543859649122806,
0.06666666666666667,
0.0136986301369863,
0.007633587786259542,
0,
0,
0.5,
0.05555555555555555,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0.009259259259259259,
0.024691358024691357
] | 53 | 0.021648 | false |
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
"""
read audio stream from audio file
"""
import os
import struct
import xbmcvfs
class UnknownFormat(Exception):pass
class FormatError(Exception):pass
class AudioFile(object):
f = None
audioStart = 0
def AudioFile(self):
self.f = None
self.audioStart = 0
def Open(self,file):
self.audioStart = 0
self.f = xbmcvfs.File(file)
ext = os.path.splitext(file)[1].lower()
if ext == '.mp3': self.AnalyzeMp3()
elif ext == '.ogg': self.AnalyzeOgg()
elif ext == '.wma': self.AnalyzeWma()
#elif ext == '.flac': self.AnalyzeFlac()
elif ext == '.flac': pass
elif ext == '.ape': pass
elif ext == '.wav': pass
else: # not supported format
self.f.close()
self.f = None
raise UnknownFormat
def Close(self):
self.f.close()
self.f = None
def ReadAudioStream(self, len, offset=0):
self.f.seek(self.audioStart+offset, 0)
return self.f.read(len)
def AnalyzeMp3(self):
# Searching ID3v2 tag
while True:
buf = self.f.read(3)
# no tell() in xbmcvfs yet, but seek() can return position
if len(buf) < 3 or self.f.seek(0,1) > 50000:
# ID tag is not found
self.f.seek(0,0)
self.audioStart = 0
return
if buf == 'ID3':
self.f.seek(3,1) # skip version/flag
# ID length (synchsafe integer)
tl = struct.unpack('4b', self.f.read(4))
taglen = (tl[0]<<21)|(tl[1]<<14)|(tl[2]<<7)|tl[3]
self.f.seek(taglen,1)
break
self.f.seek(-2,1)
# Searching MPEG SOF
while True:
buf = self.f.read(1)
if len(buf) < 1 or self.f.seek(0,1) > 1000000:
raise FormatError
if buf == '\xff':
rbit = struct.unpack('B',self.f.read(1))[0] >> 5
if rbit == 7: # 11 1's in total
self.f.seek(-2,1)
self.audioStart = self.f.seek(0,1)
return
def AnalyzeOgg(self):
# Parse page (OggS)
while True:
buf = self.f.read(27) # header
if len(buf) < 27 or self.f.seek(0,1) > 50000:
# parse error
raise FormatError
if buf[0:4] != 'OggS':
# not supported page format
raise UnknownFormat
numseg = struct.unpack('B', buf[26])[0]
#print "#seg: %d" % numseg
segtbl = struct.unpack('%dB'%numseg, self.f.read(numseg)) # segment table
for seglen in segtbl:
buf = self.f.read(7) # segment header
#print "segLen(%s): %d" % (buf[1:7],seglen)
if buf == "\x05vorbis":
self.f.seek(-7,1) # rollback
self.audioStart = self.f.seek(0,1)
return
self.f.seek(seglen-7,1) # skip to next segment
def AnalyzeWma(self):
# Searching GUID
while True:
buf = self.f.read(16)
if len(buf) < 16 or self.f.seek(0,1) > 50000:
raise FormatError
guid = buf.encode("hex");
if guid == "3626b2758e66cf11a6d900aa0062ce6c":
# ASF_Data_Object
self.f.seek(-16,1) # rollback
self.audioStart = self.f.seek(0,1)
return
else:
objlen = struct.unpack('<Q', self.f.read(8))[0]
self.f.seek(objlen-24,1) # jump to next object
def AnalyzeFlac(self):
if self.f.read(4) != 'fLaC':
raise UnknownFormat
# Searching GUID
while True:
buf = self.f.read(4)
if len(buf) < 16 or self.f.seek(0,1) > 50000:
# not found
raise FormatError
metalen = buf[1] | (buf[2]<<8) | (buf[3]<<16);
self.f.seek(metalen,1) # skip this metadata block
if buf[0] & 0x80:
# it was the last metadata block
self.audioStart = self.f.seek(0,1)
return
| [
"# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-\n",
"\"\"\"\n",
"read audio stream from audio file\n",
"\"\"\"\n",
"\n",
"import os\n",
"import struct\n",
"import xbmcvfs\n",
"\n",
"class UnknownFormat(Exception):pass\n",
"class FormatError(Exception):pass\n",
"\n",
"class AudioFile(object):\n",
" f = None\n",
" audioStart = 0\n",
"\n",
" def AudioFile(self):\n",
" self.f = None\n",
" self.audioStart = 0\n",
"\n",
" def Open(self,file):\n",
" self.audioStart = 0\n",
" self.f = xbmcvfs.File(file)\n",
" ext = os.path.splitext(file)[1].lower()\n",
" if ext == '.mp3': self.AnalyzeMp3()\n",
" elif ext == '.ogg': self.AnalyzeOgg()\n",
" elif ext == '.wma': self.AnalyzeWma()\n",
" #elif ext == '.flac': self.AnalyzeFlac()\n",
" elif ext == '.flac': pass\n",
" elif ext == '.ape': pass\n",
" elif ext == '.wav': pass\n",
" else:\t# not supported format\n",
" self.f.close()\n",
" self.f = None\n",
" raise UnknownFormat\n",
"\n",
" def Close(self):\n",
" self.f.close()\n",
" self.f = None\n",
"\n",
" def ReadAudioStream(self, len, offset=0):\n",
" self.f.seek(self.audioStart+offset, 0)\n",
" return self.f.read(len)\n",
"\n",
" def AnalyzeMp3(self):\n",
" # Searching ID3v2 tag\n",
" while True:\n",
" buf = self.f.read(3)\n",
" # no tell() in xbmcvfs yet, but seek() can return position\n",
" if len(buf) < 3 or self.f.seek(0,1) > 50000:\n",
" # ID tag is not found\n",
" self.f.seek(0,0)\n",
" self.audioStart = 0\n",
" return\n",
" if buf == 'ID3':\n",
" self.f.seek(3,1) # skip version/flag\n",
" # ID length (synchsafe integer)\n",
" tl = struct.unpack('4b', self.f.read(4))\n",
" taglen = (tl[0]<<21)|(tl[1]<<14)|(tl[2]<<7)|tl[3]\n",
" self.f.seek(taglen,1)\n",
" break\n",
" self.f.seek(-2,1)\n",
" # Searching MPEG SOF\n",
" while True:\n",
" buf = self.f.read(1)\n",
" if len(buf) < 1 or self.f.seek(0,1) > 1000000:\n",
" raise FormatError\n",
" if buf == '\\xff':\n",
" rbit = struct.unpack('B',self.f.read(1))[0] >> 5\n",
" if rbit == 7: # 11 1's in total\n",
" self.f.seek(-2,1)\n",
" self.audioStart = self.f.seek(0,1)\n",
" return\n",
"\n",
" def AnalyzeOgg(self):\n",
" # Parse page (OggS)\n",
" while True:\n",
" buf = self.f.read(27) # header \n",
" if len(buf) < 27 or self.f.seek(0,1) > 50000:\n",
" # parse error\n",
" raise FormatError\n",
" if buf[0:4] != 'OggS':\n",
" # not supported page format\n",
" raise UnknownFormat\n",
" numseg = struct.unpack('B', buf[26])[0]\n",
" #print \"#seg: %d\" % numseg\n",
"\n",
" segtbl = struct.unpack('%dB'%numseg, self.f.read(numseg)) # segment table\n",
" for seglen in segtbl:\n",
" buf = self.f.read(7) # segment header \n",
" #print \"segLen(%s): %d\" % (buf[1:7],seglen)\n",
" if buf == \"\\x05vorbis\":\n",
" self.f.seek(-7,1) # rollback\n",
" self.audioStart = self.f.seek(0,1)\n",
" return\n",
" self.f.seek(seglen-7,1)\t# skip to next segment\n",
"\n",
" def AnalyzeWma(self):\n",
" # Searching GUID\n",
" while True:\n",
" buf = self.f.read(16)\n",
" if len(buf) < 16 or self.f.seek(0,1) > 50000:\n",
" raise FormatError\n",
" guid = buf.encode(\"hex\");\n",
" if guid == \"3626b2758e66cf11a6d900aa0062ce6c\":\n",
" # ASF_Data_Object\n",
" self.f.seek(-16,1) # rollback\n",
" self.audioStart = self.f.seek(0,1)\n",
" return\n",
" else:\n",
" objlen = struct.unpack('<Q', self.f.read(8))[0]\n",
" self.f.seek(objlen-24,1) # jump to next object\n",
"\n",
" def AnalyzeFlac(self):\n",
" if self.f.read(4) != 'fLaC':\n",
" raise UnknownFormat\n",
" # Searching GUID\n",
" while True:\n",
" buf = self.f.read(4)\n",
" if len(buf) < 16 or self.f.seek(0,1) > 50000:\n",
" # not found\n",
" raise FormatError\n",
" metalen = buf[1] | (buf[2]<<8) | (buf[3]<<16);\n",
" self.f.seek(metalen,1) # skip this metadata block\n",
" if buf[0] & 0x80:\n",
" # it was the last metadata block\n",
" self.audioStart = self.f.seek(0,1)\n",
" return\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0.058823529411764705,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0.0425531914893617,
0.02127659574468085,
0.02127659574468085,
0.02,
0.029411764705882353,
0.030303030303030304,
0.030303030303030304,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0,
0.030303030303030304,
0,
0,
0,
0.017543859649122806,
0,
0,
0.09090909090909091,
0.02631578947368421,
0,
0.03333333333333333,
0,
0,
0,
0.01694915254237288,
0,
0,
0.015384615384615385,
0,
0.02631578947368421,
0.01818181818181818,
0,
0,
0,
0,
0,
0.02127659574468085,
0.017241379310344827,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0.02247191011235955,
0,
0.017241379310344827,
0.016666666666666666,
0,
0.0196078431372549,
0.01818181818181818,
0,
0.031746031746031744,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0.02631578947368421,
0,
0,
0.02,
0.0196078431372549,
0,
0,
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0,
0.05084745762711865,
0.015625,
0,
0,
0.0196078431372549,
0
] | 128 | 0.008817 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System.Core")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import Universe
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from datetime import date, timedelta
### <summary>
### Regression algorithm used to test a fine and coarse selection methods returning Universe.Unchanged
### </summary>
class UniverseUnchangedRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.UniverseSettings.Resolution = Resolution.Daily
self.SetStartDate(2014,3,25)
self.SetEndDate(2014,4,7)
self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(days = 1), 0.025, None))
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
self.AddUniverse(self.CoarseSelectionFunction, self.FineSelectionFunction)
self.numberOfSymbolsFine = 2
def CoarseSelectionFunction(self, coarse):
# the first and second selection
if self.Time.date() <= date(2014, 3, 26):
tickers = [ "AAPL", "AIG", "IBM" ]
return [ Symbol.Create(x, SecurityType.Equity, Market.USA) for x in tickers ]
# will skip fine selection
return Universe.Unchanged
def FineSelectionFunction(self, fine):
if self.Time.date() == date(2014, 3, 25):
sortedByPeRatio = sorted(fine, key=lambda x: x.ValuationRatios.PERatio, reverse=True)
return [ x.Symbol for x in sortedByPeRatio[:self.numberOfSymbolsFine] ]
# the second selection will return unchanged, in the following fine selection will be skipped
return Universe.Unchanged
# assert security changes, throw if called more than once
def OnSecuritiesChanged(self, changes):
addedSymbols = [ x.Symbol for x in changes.AddedSecurities ]
if (len(changes.AddedSecurities) != 2
or self.Time.date() != date(2014, 3, 25)
or Symbol.Create("AAPL", SecurityType.Equity, Market.USA) not in addedSymbols
or Symbol.Create("IBM", SecurityType.Equity, Market.USA) not in addedSymbols):
raise ValueError("Unexpected security changes")
self.Log(f"OnSecuritiesChanged({self.Time}):: {changes}")
| [
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System.Core\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Algorithm import QCAlgorithm\n",
"from QuantConnect.Data.UniverseSelection import Universe\n",
"from QuantConnect.Algorithm.Framework.Alphas import *\n",
"from QuantConnect.Algorithm.Framework.Portfolio import *\n",
"from datetime import date, timedelta\n",
"\n",
"### <summary>\n",
"### Regression algorithm used to test a fine and coarse selection methods returning Universe.Unchanged\n",
"### </summary>\n",
"class UniverseUnchangedRegressionAlgorithm(QCAlgorithm):\n",
"\n",
" def Initialize(self):\n",
" self.UniverseSettings.Resolution = Resolution.Daily\n",
" self.SetStartDate(2014,3,25)\n",
" self.SetEndDate(2014,4,7)\n",
"\n",
" self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(days = 1), 0.025, None))\n",
" self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())\n",
"\n",
" self.AddUniverse(self.CoarseSelectionFunction, self.FineSelectionFunction)\n",
"\n",
" self.numberOfSymbolsFine = 2\n",
"\n",
" def CoarseSelectionFunction(self, coarse):\n",
" # the first and second selection\n",
" if self.Time.date() <= date(2014, 3, 26):\n",
" tickers = [ \"AAPL\", \"AIG\", \"IBM\" ]\n",
" return [ Symbol.Create(x, SecurityType.Equity, Market.USA) for x in tickers ]\n",
"\n",
" # will skip fine selection\n",
" return Universe.Unchanged\n",
"\n",
" def FineSelectionFunction(self, fine):\n",
" if self.Time.date() == date(2014, 3, 25):\n",
" sortedByPeRatio = sorted(fine, key=lambda x: x.ValuationRatios.PERatio, reverse=True)\n",
" return [ x.Symbol for x in sortedByPeRatio[:self.numberOfSymbolsFine] ]\n",
"\n",
" # the second selection will return unchanged, in the following fine selection will be skipped\n",
" return Universe.Unchanged\n",
"\n",
" # assert security changes, throw if called more than once\n",
" def OnSecuritiesChanged(self, changes):\n",
" addedSymbols = [ x.Symbol for x in changes.AddedSecurities ]\n",
" if (len(changes.AddedSecurities) != 2\n",
" or self.Time.date() != date(2014, 3, 25)\n",
" or Symbol.Create(\"AAPL\", SecurityType.Equity, Market.USA) not in addedSymbols\n",
" or Symbol.Create(\"IBM\", SecurityType.Equity, Market.USA) not in addedSymbols):\n",
" raise ValueError(\"Unexpected security changes\")\n",
" self.Log(f\"OnSecuritiesChanged({self.Time}):: {changes}\")\n"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.02127659574468085,
0.017543859649122806,
0.018518518518518517,
0.017543859649122806,
0.02702702702702703,
0,
0.07142857142857142,
0.019417475728155338,
0.06666666666666667,
0.017543859649122806,
0,
0,
0,
0.05405405405405406,
0.058823529411764705,
0,
0.02586206896551724,
0.012195121951219513,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0.0425531914893617,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0.03571428571428571,
0,
0.00980392156862745,
0,
0,
0,
0,
0.028985507246376812,
0,
0,
0.011111111111111112,
0.02197802197802198,
0,
0
] | 69 | 0.010759 | false |
import json, urllib
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout, authenticate, login as auth_login
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django_openid_auth.models import UserOpenID
from django_openid_auth.views import parse_openid_response
from itertools import chain
from elo.models import UserProfile
from ladder.models import Game, Match, Rank, Ladder, Challenge
def index(request):
game_list = Game.objects.all()
match_list = Match.objects.all().order_by('-date_complete')
rank_list = Rank.objects.all()
ladder_list = Ladder.objects.all()
try:
challenger_list = Challenge.objects.filter(match__challenger = request.user, accepted=False)
except Exception, e:
challenger_list = []
try:
challengee_list = Challenge.objects.filter(match__challengee = request.user, accepted=False)
except Exception, e:
challengee_list = []
your_challenges = list(chain(challenger_list, challengee_list))
return render_to_response('home.html', {'game_list':game_list, 'match_list':match_list, 'rank_list':rank_list, 'ladder_list':ladder_list, 'your_challenges':your_challenges}, context_instance=RequestContext(request))
def login(request):
data = {}
data['response'] = request.GET["openid.claimed_id"]
data['id'] = request.GET["openid.claimed_id"][36:]
data['url'] = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key="+settings.STEAM_API_KEY+"&steamids="+data['id']
claim = data['response']
# Get public info
data['info'] = json.loads(urllib.urlopen(data["url"]).read())["response"]["players"][0]
handle = data['info']['personaname']
steamid = data['info']['steamid']
url = data['info']['profileurl']
avatar = data['info']['avatar']
avatarM = data['info']['avatarmedium']
avatarL = data['info']['avatarfull']
try:
primarygroup = data['info']['primaryclanid']
except KeyError:
primarygroup = ""
try:
realname = data['info']['realname']
except KeyError:
realname = ""
# Find the user
try:
useroid = UserOpenID.objects.get(claimed_id=claim)
# Get them by steamid, then get the User instance associated with this
# profile.
userP = UserProfile.objects.get(steamid=steamid)
user = User.objects.get(id=userP.user_id)
# New user
except UserOpenID.DoesNotExist:
# Slugify their current display name, this will be used for internal control panels only.
slugName = "{0}-{1}".format(slugify(handle), steamid[len(steamid)-3:len(steamid)])
user = User.objects.create_user(username=slugName, email='', password='!')
user.save()
useroid = UserOpenID(user=user, claimed_id=claim, display_id=claim)
useroid.save()
try:
up = UserProfile.objects.get(user_id=user.id)
except UserProfile.DoesNotExist:
up = UserProfile(user_id=user.id)
up.save()
# User exists, fill out profile, which is auto-filled with blanks atm.
up.handle=handle
up.steamid=steamid
up.url=url
up.avatar=avatar
up.avatarM=avatarM
up.avatarL=avatarL
up.primarygroup=primarygroup
up.realname=realname
up.save()
# Stole these lines from inside the openid_auth files. idk why now
# PROB. IMPORTANT THO
openid_response = parse_openid_response(request)
user = authenticate(openid_response=openid_response)
auth_login(request, user)
return HttpResponseRedirect('/')
def logout_view(request):
logout(request)
messages.success(request, "You have been signed out!")
return HttpResponseRedirect(reverse('index')) | [
"import json, urllib\r\n",
"\r\n",
"from django.conf import settings\r\n",
"from django.contrib import messages\r\n",
"from django.contrib.auth import logout, authenticate, login as auth_login\r\n",
"from django.contrib.auth.models import User\r\n",
"from django.core.urlresolvers import reverse\r\n",
"from django.http import HttpResponse, HttpResponseRedirect\r\n",
"from django.shortcuts import render_to_response\r\n",
"from django.template import RequestContext\r\n",
"from django.template.defaultfilters import slugify\r\n",
"\r\n",
"from django_openid_auth.models import UserOpenID\r\n",
"from django_openid_auth.views import parse_openid_response\r\n",
"\r\n",
"from itertools import chain\r\n",
"\r\n",
"from elo.models import UserProfile\r\n",
"from ladder.models import Game, Match, Rank, Ladder, Challenge\r\n",
"\r\n",
"def index(request):\r\n",
"\r\n",
" game_list = Game.objects.all()\r\n",
"\r\n",
" match_list = Match.objects.all().order_by('-date_complete')\r\n",
"\r\n",
" rank_list = Rank.objects.all()\r\n",
"\r\n",
" ladder_list = Ladder.objects.all()\r\n",
"\r\n",
" try:\r\n",
" challenger_list = Challenge.objects.filter(match__challenger = request.user, accepted=False)\r\n",
" except Exception, e:\r\n",
" challenger_list = []\r\n",
" try:\r\n",
" challengee_list = Challenge.objects.filter(match__challengee = request.user, accepted=False)\r\n",
" except Exception, e:\r\n",
" challengee_list = []\r\n",
"\r\n",
" your_challenges = list(chain(challenger_list, challengee_list))\r\n",
"\r\n",
" return render_to_response('home.html', {'game_list':game_list, 'match_list':match_list, 'rank_list':rank_list, 'ladder_list':ladder_list, 'your_challenges':your_challenges}, context_instance=RequestContext(request))\r\n",
"\r\n",
"def login(request):\r\n",
" data = {}\r\n",
" data['response'] = request.GET[\"openid.claimed_id\"]\r\n",
" data['id'] = request.GET[\"openid.claimed_id\"][36:]\r\n",
" data['url'] = \"http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key=\"+settings.STEAM_API_KEY+\"&steamids=\"+data['id']\r\n",
"\r\n",
" claim = data['response']\r\n",
"\r\n",
" # Get public info\r\n",
" data['info'] = json.loads(urllib.urlopen(data[\"url\"]).read())[\"response\"][\"players\"][0]\r\n",
"\r\n",
" handle = data['info']['personaname']\r\n",
" steamid = data['info']['steamid']\r\n",
" url = data['info']['profileurl']\r\n",
" avatar = data['info']['avatar']\r\n",
" avatarM = data['info']['avatarmedium']\r\n",
" avatarL = data['info']['avatarfull']\r\n",
" try:\r\n",
" primarygroup = data['info']['primaryclanid']\r\n",
" except KeyError:\r\n",
" primarygroup = \"\"\r\n",
" try:\r\n",
" realname = data['info']['realname']\r\n",
" except KeyError:\r\n",
" realname = \"\"\r\n",
"\r\n",
" # Find the user\r\n",
" try:\r\n",
" useroid = UserOpenID.objects.get(claimed_id=claim)\r\n",
"\r\n",
" # Get them by steamid, then get the User instance associated with this\r\n",
" # profile.\r\n",
" userP = UserProfile.objects.get(steamid=steamid)\r\n",
" user = User.objects.get(id=userP.user_id)\r\n",
"\r\n",
" # New user\r\n",
" except UserOpenID.DoesNotExist:\r\n",
" # Slugify their current display name, this will be used for internal control panels only.\r\n",
" slugName = \"{0}-{1}\".format(slugify(handle), steamid[len(steamid)-3:len(steamid)])\r\n",
" user = User.objects.create_user(username=slugName, email='', password='!')\r\n",
" user.save()\r\n",
" useroid = UserOpenID(user=user, claimed_id=claim, display_id=claim)\r\n",
" useroid.save()\r\n",
" try:\r\n",
" up = UserProfile.objects.get(user_id=user.id)\r\n",
" except UserProfile.DoesNotExist:\r\n",
" up = UserProfile(user_id=user.id)\r\n",
" up.save()\r\n",
"\r\n",
" # User exists, fill out profile, which is auto-filled with blanks atm.\r\n",
" up.handle=handle\r\n",
" up.steamid=steamid\r\n",
" up.url=url\r\n",
" up.avatar=avatar\r\n",
" up.avatarM=avatarM\r\n",
" up.avatarL=avatarL\r\n",
" up.primarygroup=primarygroup\r\n",
" up.realname=realname\r\n",
"\r\n",
" up.save()\r\n",
"\r\n",
" # Stole these lines from inside the openid_auth files. idk why now\r\n",
" # PROB. IMPORTANT THO\r\n",
" openid_response = parse_openid_response(request)\r\n",
" user = authenticate(openid_response=openid_response)\r\n",
"\r\n",
" auth_login(request, user)\r\n",
"\r\n",
" return HttpResponseRedirect('/')\r\n",
"\r\n",
"def logout_view(request):\r\n",
" logout(request)\r\n",
" messages.success(request, \"You have been signed out!\")\r\n",
" return HttpResponseRedirect(reverse('index'))"
] | [
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0.027149321266968326,
0,
0.047619047619047616,
0,
0,
0,
0.007246376811594203,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0.010869565217391304,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0.041666666666666664,
0.0625,
0.045454545454545456,
0.041666666666666664,
0.041666666666666664,
0.029411764705882353,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0.02040816326530612
] | 117 | 0.005841 | false |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem419.py
#
# Look and say sequence
# =====================
# Published on Saturday, 16th March 2013, 10:00 pm
#
# The look and say sequence goes 1, 11, 21, 1211, 111221, 312211, 13112221,
# 1113213211, ... The sequence starts with 1 and all other members are
# obtained by describing the previous member in terms of consecutive digits.
# It helps to do this out loud: 1 is 'one one' 11 11 is 'two ones' 21 21
# is 'one two and one one' 1211 1211 is 'one one, one two and two ones'
# 111221 111221 is 'three ones, two twos and one one' 312211 ... Define
# A(n), B(n) and C(n) as the number of ones, twos and threes in the n'th
# element of the sequence respectively. One can verify that A(40) = 31254,
# B(40) = 20259 and C(40) = 11625. Find A(n), B(n) and C(n) for n = 1012.
# Give your answer modulo 230 and separate your values for A, B and C by a
# comma. E.g. for n = 40 the answer would be 31254,20259,11625
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
| [
"# -*- coding: utf-8 -*-\n",
"# ProjectEuler/src/python/problem419.py\n",
"#\n",
"# Look and say sequence\n",
"# =====================\n",
"# Published on Saturday, 16th March 2013, 10:00 pm\n",
"#\n",
"# The look and say sequence goes 1, 11, 21, 1211, 111221, 312211, 13112221,\n",
"# 1113213211, ... The sequence starts with 1 and all other members are\n",
"# obtained by describing the previous member in terms of consecutive digits.\n",
"# It helps to do this out loud: 1 is 'one one' 11 11 is 'two ones' 21 21\n",
"# is 'one two and one one' 1211 1211 is 'one one, one two and two ones'\n",
"# 111221 111221 is 'three ones, two twos and one one' 312211 ... Define\n",
"# A(n), B(n) and C(n) as the number of ones, twos and threes in the n'th\n",
"# element of the sequence respectively. One can verify that A(40) = 31254,\n",
"# B(40) = 20259 and C(40) = 11625. Find A(n), B(n) and C(n) for n = 1012.\n",
"# Give your answer modulo 230 and separate your values for A, B and C by a\n",
"# comma. E.g. for n = 40 the answer would be 31254,20259,11625\n",
"\n",
"import projecteuler as pe\n",
"\n",
"def main():\n",
" pass\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0.037037037037037035,
0
] | 26 | 0.00463 | false |
#--------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
import json
import requests
import datetime
from enum import Enum
import unittest
try:
from unittest import mock
except ImportError:
import mock
from msrest.pipeline import (
ClientHTTPAdapter,
ClientPipelineHook,
ClientRequest,
ClientRawResponse)
from msrest import Configuration
class TestPipelineHooks(unittest.TestCase):
def event_hook(event):
def event_wrapper(func):
def execute_hook(self, *args, **kwargs):
return self.adp._client_hooks[event](func, self, *args, **kwargs)
return execute_hook
return event_wrapper
@event_hook('request')
def mock_send(*args, **kwargs):
return "Not a real response"
@event_hook('response')
def mock_response(*args, **kwargs):
resp = mock.MagicMock(result=200, headers={"a":1, "b":False})
return resp
def setUp(self):
self.cfg = mock.create_autospec(Configuration)
self.cfg.log_name = "test_log"
self.adp = ClientHTTPAdapter(self.cfg)
self.adp.send = self.mock_send
self.adp.build_response = self.mock_response
return super(TestPipelineHooks, self).setUp()
def test_adding_hook(self):
self.assertTrue('request' in self.adp._client_hooks)
self.assertTrue('response' in self.adp._client_hooks)
with self.assertRaises(TypeError):
self.adp.add_hook('request', None)
with self.assertRaises(TypeError):
self.adp.add_hook('response', 'NotCallable')
with self.assertRaises(KeyError):
self.adp.add_hook('Something', lambda a:True)
def hook(*args, **kwargs):
pass
self.adp.add_hook('request', hook)
self.assertTrue(hook in self.adp._client_hooks['request'].precalls)
self.assertFalse(hook in self.adp._client_hooks['request'].postcalls)
def hook2(*args, **kwargs):
pass
self.adp.add_hook('response', hook2, precall=False)
self.assertFalse(hook2 in self.adp._client_hooks['response'].precalls)
self.assertTrue(hook2 in self.adp._client_hooks['response'].postcalls)
def test_pre_event_callback(self):
class TestEvent(Exception):
pass
def hook(*args, **kwargs):
raise TestEvent("Entered hook function")
self.adp.add_hook('request', hook)
with self.assertRaises(TestEvent):
self.adp.send("request_obj")
def test_overwrite_event_hook(self):
resp = self.adp.send("request_obj")
self.assertEqual(resp, "Not a real response")
def hook(*args, **kwargs):
self.assertEqual(args[1], "request_obj")
return None
self.adp.add_hook('request', hook, precall=False, overwrite=True)
resp = self.adp.send("request_obj")
self.assertIsNone(resp)
def test_post_event_callback(self):
def hook(*args, **kwargs):
self.assertTrue('result' in kwargs)
self.assertEqual(kwargs['result'].result, 200)
return kwargs['result']
self.adp.add_hook('response', hook, precall=False)
resp = self.adp.build_response('request_obj')
self.assertEqual(resp.result, 200)
def test_alter_response_callback(self):
def hook(*args, **kwargs):
kwargs['result'].headers['a'] = "Changed!"
return kwargs['result']
self.adp.add_hook('response', hook, precall=False)
resp = self.adp.build_response('request_obj')
self.assertEqual(resp.headers['a'], "Changed!")
self.assertEqual(resp.headers['b'], False)
class TestClientRequest(unittest.TestCase):
def test_request_headers(self):
request = ClientRequest()
request.add_header("a", 1)
request.add_headers({'b':2, 'c':3})
self.assertEqual(request.headers, {'a':1, 'b':2, 'c':3})
def test_request_data(self):
request = ClientRequest()
data = "Lots of dataaaa"
request.add_content(data)
self.assertEqual(request.data, json.dumps(data))
self.assertEqual(request.headers.get('Content-Length'), 17)
class TestClientResponse(unittest.TestCase):
class Colors(Enum):
red = 'red'
blue = 'blue'
def test_raw_response(self):
response = mock.create_autospec(requests.Response)
response.headers = {}
response.headers["my-test"] = '1999-12-31T23:59:59-23:59'
response.headers["colour"] = "red"
raw = ClientRawResponse([], response)
raw.add_headers({'my-test': 'iso-8601',
'another_header': 'str',
'colour': TestClientResponse.Colors})
self.assertIsInstance(raw.headers['my-test'], datetime.datetime)
if __name__ == '__main__':
unittest.main()
| [
"#--------------------------------------------------------------------------\n",
"#\n",
"# Copyright (c) Microsoft Corporation. All rights reserved. \n",
"#\n",
"# The MIT License (MIT)\n",
"#\n",
"# Permission is hereby granted, free of charge, to any person obtaining a copy\n",
"# of this software and associated documentation files (the \"\"Software\"\"), to deal\n",
"# in the Software without restriction, including without limitation the rights\n",
"# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n",
"# copies of the Software, and to permit persons to whom the Software is\n",
"# furnished to do so, subject to the following conditions:\n",
"#\n",
"# The above copyright notice and this permission notice shall be included in\n",
"# all copies or substantial portions of the Software.\n",
"#\n",
"# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n",
"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n",
"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n",
"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n",
"# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n",
"# THE SOFTWARE.\n",
"#\n",
"#--------------------------------------------------------------------------\n",
"\n",
"import json\n",
"import requests\n",
"import datetime\n",
"from enum import Enum\n",
"import unittest\n",
"try:\n",
" from unittest import mock\n",
"except ImportError:\n",
" import mock\n",
"\n",
"from msrest.pipeline import (\n",
" ClientHTTPAdapter,\n",
" ClientPipelineHook,\n",
" ClientRequest,\n",
" ClientRawResponse)\n",
"\n",
"from msrest import Configuration\n",
"\n",
"\n",
"class TestPipelineHooks(unittest.TestCase):\n",
"\n",
" def event_hook(event):\n",
" def event_wrapper(func):\n",
" def execute_hook(self, *args, **kwargs):\n",
" return self.adp._client_hooks[event](func, self, *args, **kwargs)\n",
" return execute_hook\n",
" return event_wrapper\n",
"\n",
" @event_hook('request')\n",
" def mock_send(*args, **kwargs):\n",
" return \"Not a real response\"\n",
"\n",
" @event_hook('response')\n",
" def mock_response(*args, **kwargs):\n",
" resp = mock.MagicMock(result=200, headers={\"a\":1, \"b\":False})\n",
" return resp\n",
"\n",
" def setUp(self):\n",
"\n",
" self.cfg = mock.create_autospec(Configuration)\n",
" self.cfg.log_name = \"test_log\"\n",
" self.adp = ClientHTTPAdapter(self.cfg)\n",
" self.adp.send = self.mock_send\n",
" self.adp.build_response = self.mock_response\n",
"\n",
" return super(TestPipelineHooks, self).setUp()\n",
" \n",
" def test_adding_hook(self):\n",
"\n",
" self.assertTrue('request' in self.adp._client_hooks)\n",
" self.assertTrue('response' in self.adp._client_hooks)\n",
"\n",
" with self.assertRaises(TypeError):\n",
" self.adp.add_hook('request', None)\n",
"\n",
" with self.assertRaises(TypeError):\n",
" self.adp.add_hook('response', 'NotCallable')\n",
"\n",
" with self.assertRaises(KeyError):\n",
" self.adp.add_hook('Something', lambda a:True)\n",
"\n",
" def hook(*args, **kwargs):\n",
" pass\n",
"\n",
" self.adp.add_hook('request', hook)\n",
" self.assertTrue(hook in self.adp._client_hooks['request'].precalls)\n",
" self.assertFalse(hook in self.adp._client_hooks['request'].postcalls)\n",
"\n",
" def hook2(*args, **kwargs):\n",
" pass\n",
"\n",
" self.adp.add_hook('response', hook2, precall=False)\n",
" self.assertFalse(hook2 in self.adp._client_hooks['response'].precalls)\n",
" self.assertTrue(hook2 in self.adp._client_hooks['response'].postcalls)\n",
"\n",
" def test_pre_event_callback(self):\n",
"\n",
" class TestEvent(Exception):\n",
" pass\n",
"\n",
" def hook(*args, **kwargs):\n",
" raise TestEvent(\"Entered hook function\")\n",
"\n",
" self.adp.add_hook('request', hook)\n",
"\n",
" with self.assertRaises(TestEvent):\n",
" self.adp.send(\"request_obj\")\n",
"\n",
" def test_overwrite_event_hook(self):\n",
"\n",
" resp = self.adp.send(\"request_obj\")\n",
" self.assertEqual(resp, \"Not a real response\")\n",
"\n",
" def hook(*args, **kwargs):\n",
" self.assertEqual(args[1], \"request_obj\")\n",
" return None\n",
"\n",
" self.adp.add_hook('request', hook, precall=False, overwrite=True)\n",
" resp = self.adp.send(\"request_obj\")\n",
" self.assertIsNone(resp)\n",
"\n",
" def test_post_event_callback(self):\n",
"\n",
" def hook(*args, **kwargs):\n",
" self.assertTrue('result' in kwargs)\n",
" self.assertEqual(kwargs['result'].result, 200)\n",
" return kwargs['result']\n",
"\n",
" self.adp.add_hook('response', hook, precall=False)\n",
" resp = self.adp.build_response('request_obj')\n",
" self.assertEqual(resp.result, 200)\n",
"\n",
" def test_alter_response_callback(self):\n",
" \n",
" def hook(*args, **kwargs):\n",
" kwargs['result'].headers['a'] = \"Changed!\"\n",
" return kwargs['result']\n",
"\n",
" self.adp.add_hook('response', hook, precall=False)\n",
" resp = self.adp.build_response('request_obj')\n",
" self.assertEqual(resp.headers['a'], \"Changed!\")\n",
" self.assertEqual(resp.headers['b'], False)\n",
"\n",
"\n",
"class TestClientRequest(unittest.TestCase):\n",
"\n",
" def test_request_headers(self):\n",
"\n",
" request = ClientRequest()\n",
" request.add_header(\"a\", 1)\n",
" request.add_headers({'b':2, 'c':3})\n",
"\n",
" self.assertEqual(request.headers, {'a':1, 'b':2, 'c':3})\n",
"\n",
" def test_request_data(self):\n",
"\n",
" request = ClientRequest()\n",
" data = \"Lots of dataaaa\"\n",
" request.add_content(data)\n",
"\n",
" self.assertEqual(request.data, json.dumps(data))\n",
" self.assertEqual(request.headers.get('Content-Length'), 17)\n",
"\n",
"class TestClientResponse(unittest.TestCase):\n",
"\n",
" class Colors(Enum):\n",
" red = 'red'\n",
" blue = 'blue'\n",
"\n",
" def test_raw_response(self):\n",
"\n",
" response = mock.create_autospec(requests.Response)\n",
" response.headers = {}\n",
" response.headers[\"my-test\"] = '1999-12-31T23:59:59-23:59'\n",
" response.headers[\"colour\"] = \"red\"\n",
"\n",
" raw = ClientRawResponse([], response)\n",
"\n",
" raw.add_headers({'my-test': 'iso-8601',\n",
" 'another_header': 'str',\n",
" 'colour': TestClientResponse.Colors})\n",
" self.assertIsInstance(raw.headers['my-test'], datetime.datetime)\n",
"\n",
"if __name__ == '__main__':\n",
" unittest.main()\n"
] | [
0.013157894736842105,
0,
0.01639344262295082,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0.046153846153846156,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0
] | 191 | 0.00301 | false |
# The MIT License (MIT)
#
# Copyright (c) 2015 Lucas Koegel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from piko import Piko
from datetime import datetime
import csv
import time
import sys
import os.path
import logging, logging.handlers
if __name__ == '__main__':
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
fh = logging.handlers.RotatingFileHandler('pikoToCSV.log', maxBytes=1024*1024*512, backupCount=2)
fh.setLevel(logging.INFO)
format = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
ch.setFormatter(format)
logger.addHandler(ch)
logger.addHandler(fh)
logging.info('Started')
p = Piko(host='http://192.168.178.123')
FIELDNAMES = ['zeit', 'ost', 'west']
INTERVAL = 30 # seconds
while(True):
try:
string1Current = p.get_string1_current()
string2Current = p.get_string2_current()
string1Voltage = p.get_string1_voltage()
string2Voltage = p.get_string2_voltage()
if (string1Current < 0 or string2Current < 0 or string1Voltage < 0 or string2Voltage < 0):
# Piko is off
logging.info('Piko is off, going to sleep 10 minutes.')
time.sleep(600)
continue
# Calculate power
string1 = round(string1Current * string1Voltage)
string2 = round(string2Current * string2Voltage)
today = datetime.now()
fileName = 'piko-' + today.strftime('%d-%m-%Y') + '.csv'
if (not os.path.isfile(fileName)):
# New File -> write Headers
logging.info('Creating new file... ' + fileName)
with open(fileName, 'w') as newFile:
newFileWriter = csv.DictWriter(newFile, fieldnames=FIELDNAMES)
newFileWriter.writeheader()
with open(fileName, 'a') as csvfile:
# Existing file -> write piko values
logging.info('Writing to file ... ' + fileName)
writer = csv.DictWriter(csvfile, fieldnames=FIELDNAMES)
writer.writerow({'zeit': today.strftime('%X'), 'ost': string1, 'west': string2})
except: # catch *all* exceptions
logging.info("Error:", sys.exc_info()[0])
# Sleep
time.sleep(INTERVAL) | [
"# The MIT License (MIT)\n",
"#\n",
"# Copyright (c) 2015 Lucas Koegel\n",
"#\n",
"# Permission is hereby granted, free of charge, to any person obtaining a copy\n",
"# of this software and associated documentation files (the \"Software\"), to deal\n",
"# in the Software without restriction, including without limitation the rights\n",
"# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n",
"# copies of the Software, and to permit persons to whom the Software is\n",
"# furnished to do so, subject to the following conditions:\n",
"#\n",
"# The above copyright notice and this permission notice shall be included in all\n",
"# copies or substantial portions of the Software.\n",
"#\n",
"# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n",
"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n",
"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n",
"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n",
"# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n",
"# SOFTWARE.\n",
"\n",
"from piko import Piko\n",
"\n",
"from datetime import datetime\n",
"\n",
"import csv\n",
"import time\n",
"import sys\n",
"import os.path\n",
"import logging, logging.handlers\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" logger = logging.getLogger()\n",
" logger.setLevel(logging.INFO)\n",
" ch = logging.StreamHandler()\n",
" ch.setLevel(logging.INFO)\n",
" fh = logging.handlers.RotatingFileHandler('pikoToCSV.log', maxBytes=1024*1024*512, backupCount=2)\n",
" fh.setLevel(logging.INFO)\n",
" format = logging.Formatter(\"%(asctime)s %(levelname)s: %(message)s\")\n",
" ch.setFormatter(format)\n",
" logger.addHandler(ch)\n",
" logger.addHandler(fh)\n",
"\n",
" logging.info('Started')\n",
" p = Piko(host='http://192.168.178.123')\n",
" FIELDNAMES = ['zeit', 'ost', 'west']\n",
" INTERVAL = 30 # seconds\n",
"\n",
" while(True):\n",
" try:\n",
" string1Current = p.get_string1_current()\n",
" string2Current = p.get_string2_current()\n",
" string1Voltage = p.get_string1_voltage()\n",
" string2Voltage = p.get_string2_voltage()\n",
"\n",
" if (string1Current < 0 or string2Current < 0 or string1Voltage < 0 or string2Voltage < 0):\n",
" # Piko is off\n",
" logging.info('Piko is off, going to sleep 10 minutes.')\n",
" time.sleep(600)\n",
" continue\n",
"\n",
" # Calculate power\n",
" string1 = round(string1Current * string1Voltage)\n",
" string2 = round(string2Current * string2Voltage)\n",
"\t\t\t\n",
" today = datetime.now()\n",
" fileName = 'piko-' + today.strftime('%d-%m-%Y') + '.csv'\n",
"\n",
" if (not os.path.isfile(fileName)):\n",
" # New File -> write Headers\n",
" logging.info('Creating new file... ' + fileName)\n",
" with open(fileName, 'w') as newFile:\n",
" newFileWriter = csv.DictWriter(newFile, fieldnames=FIELDNAMES)\n",
" newFileWriter.writeheader()\n",
"\n",
" with open(fileName, 'a') as csvfile:\n",
" # Existing file -> write piko values\n",
" logging.info('Writing to file ... ' + fileName)\n",
" writer = csv.DictWriter(csvfile, fieldnames=FIELDNAMES)\n",
" writer.writerow({'zeit': today.strftime('%X'), 'ost': string1, 'west': string2})\n",
"\n",
" except: # catch *all* exceptions\n",
" logging.info(\"Error:\", sys.exc_info()[0])\n",
"\n",
" # Sleep\n",
" time.sleep(INTERVAL)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0.75,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0.04878048780487805,
0,
0,
0,
0.03571428571428571
] | 88 | 0.011174 | false |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '1.1.5'
class AzureException(Exception):
pass
class AzureHttpError(AzureException):
def __init__(self, message, status_code):
super(AzureHttpError, self).__init__(message)
self.status_code = status_code
def __new__(cls, message, status_code, *args, **kwargs):
if cls is AzureHttpError:
if status_code == 404:
cls = AzureMissingResourceHttpError
elif status_code == 409:
cls = AzureConflictHttpError
return AzureException.__new__(cls, message, status_code, *args, **kwargs)
class AzureConflictHttpError(AzureHttpError):
def __init__(self, message, status_code):
super(AzureConflictHttpError, self).__init__(message, status_code)
class AzureMissingResourceHttpError(AzureHttpError):
def __init__(self, message, status_code):
super(AzureMissingResourceHttpError, self).__init__(message, status_code)
| [
"#-------------------------------------------------------------------------\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n",
"# Licensed under the MIT License. See License.txt in the project root for\n",
"# license information.\n",
"#--------------------------------------------------------------------------\n",
"\n",
"__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'\n",
"__version__ = '1.1.5'\n",
"\n",
"\n",
"class AzureException(Exception):\n",
" pass\n",
"\n",
"\n",
"class AzureHttpError(AzureException):\n",
" def __init__(self, message, status_code):\n",
" super(AzureHttpError, self).__init__(message)\n",
" self.status_code = status_code\n",
"\n",
" def __new__(cls, message, status_code, *args, **kwargs):\n",
" if cls is AzureHttpError:\n",
" if status_code == 404:\n",
" cls = AzureMissingResourceHttpError\n",
" elif status_code == 409:\n",
" cls = AzureConflictHttpError\n",
" return AzureException.__new__(cls, message, status_code, *args, **kwargs)\n",
"\n",
"\n",
"class AzureConflictHttpError(AzureHttpError):\n",
" def __init__(self, message, status_code):\n",
" super(AzureConflictHttpError, self).__init__(message, status_code)\n",
"\n",
"\n",
"class AzureMissingResourceHttpError(AzureHttpError):\n",
" def __init__(self, message, status_code):\n",
" super(AzureMissingResourceHttpError, self).__init__(message, status_code)\n"
] | [
0.013333333333333334,
0,
0,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513
] | 36 | 0.001413 | false |
from .ir import *
from .irvisitor import IRTransformer
from .symbol import Symbol
class TupleTransformer(IRTransformer):
    """Lowers tuple (immutable ARRAY) assignments into element-wise MOVEs.

    Destinations that are also read by later sources are routed through
    fresh temporaries so values are not clobbered mid-unpack.
    """

    def __init__(self):
        super().__init__()

    def process(self, scope):
        super().process(scope)

    def visit_EXPR(self, ir):
        ir.exp = self.visit(ir.exp)
        self.new_stms.append(ir)

    def visit_CJUMP(self, ir):
        ir.exp = self.visit(ir.exp)
        self.new_stms.append(ir)

    def visit_MCJUMP(self, ir):
        for i in range(len(ir.conds)):
            ir.conds[i] = self.visit(ir.conds[i])
        self.new_stms.append(ir)

    def visit_JUMP(self, ir):
        self.new_stms.append(ir)

    def visit_RET(self, ir):
        ir.exp = self.visit(ir.exp)
        self.new_stms.append(ir)

    def _can_direct_unpack(self, lhs, rhs):
        # In-order assignment is safe only if no destination symbol is read
        # by a *later* source expression (it would see the new value).
        assert len(lhs) == len(rhs)

        def is_contain(ir, irs):
            if not ir.is_a([TEMP, ATTR]):
                return False
            sym = ir.symbol()
            return sym in [ir.symbol() for ir in irs if ir.is_a([TEMP, ATTR])]

        for i, l in enumerate(lhs):
            if is_contain(l, rhs[i + 1:]):
                return False
        return True

    def _unpack(self, lhs, rhs):
        # One MOVE per destination/source pair.
        assert len(lhs) == len(rhs)
        return [MOVE(dst, src) for dst, src in zip(lhs, rhs)]

    def _make_temp_syms(self, items):
        # Fresh temporary symbols, one per destination variable.
        assert all([item.is_a([TEMP, ATTR]) for item in items])
        return [self.scope.add_temp('{}_{}'.format(Symbol.temp_prefix, item.symbol().name)) for item in items]

    def _make_temps(self, syms, ctx):
        return [TEMP(sym, ctx) for sym in syms]

    def _make_mrefs(self, var, length):
        # Element reads var[0] .. var[length-1].
        return [MREF(var.clone(), CONST(i), Ctx.LOAD) for i in range(length)]

    def visit_MOVE(self, ir):
        if ir.dst.is_a(ARRAY):
            assert not ir.dst.is_mutable
            if ir.src.is_a(ARRAY) and not ir.src.is_mutable:
                # Tuple-to-tuple assignment: unpack directly when safe,
                # otherwise stage through temporaries.
                if self._can_direct_unpack(ir.dst.items, ir.src.items):
                    mvs = self._unpack(ir.dst.items, ir.src.items)
                else:
                    tempsyms = self._make_temp_syms(ir.dst.items)
                    mvs = self._unpack(self._make_temps(tempsyms, Ctx.STORE), ir.src.items)
                    mvs.extend(self._unpack(ir.dst.items, self._make_temps(tempsyms, Ctx.LOAD)))
                for mv in mvs:
                    mv.loc = ir.loc
                    self.new_stms.append(mv)
                return
            elif ir.src.is_a([TEMP, ATTR]) and ir.src.symbol().typ.is_tuple():
                # Tuple variable on the right: unpack via element reads.
                mvs = self._unpack(ir.dst.items, self._make_mrefs(ir.src, len(ir.dst.items)))
                for mv in mvs:
                    mv.loc = ir.loc
                    self.new_stms.append(mv)
                return
            elif ir.src.is_a(CALL) and self.scope.is_testbench():
                # Fixed typo in error message: 'suquence' -> 'sequence'.
                raise NotImplementedError('Return of sequence type value is not implemented')
        else:
            ir.src = self.visit(ir.src)
            ir.dst = self.visit(ir.dst)
        self.new_stms.append(ir)
| [
"from .ir import *\n",
"from .irvisitor import IRTransformer\n",
"from .symbol import Symbol\n",
"\n",
"\n",
"class TupleTransformer(IRTransformer):\n",
" def __init__(self):\n",
" super().__init__()\n",
"\n",
" def process(self, scope):\n",
" super().process(scope)\n",
"\n",
" def visit_EXPR(self, ir):\n",
" ir.exp = self.visit(ir.exp)\n",
" self.new_stms.append(ir)\n",
"\n",
" def visit_CJUMP(self, ir):\n",
" ir.exp = self.visit(ir.exp)\n",
" self.new_stms.append(ir)\n",
"\n",
" def visit_MCJUMP(self, ir):\n",
" for i in range(len(ir.conds)):\n",
" ir.conds[i] = self.visit(ir.conds[i])\n",
" self.new_stms.append(ir)\n",
"\n",
" def visit_JUMP(self, ir):\n",
" self.new_stms.append(ir)\n",
"\n",
" def visit_RET(self, ir):\n",
" ir.exp = self.visit(ir.exp)\n",
" self.new_stms.append(ir)\n",
"\n",
" def _can_direct_unpack(self, lhs, rhs):\n",
" assert len(lhs) == len(rhs)\n",
"\n",
" def is_contain(ir, irs):\n",
" if not ir.is_a([TEMP, ATTR]):\n",
" return False\n",
" sym = ir.symbol()\n",
" return sym in [ir.symbol() for ir in irs if ir.is_a([TEMP, ATTR])]\n",
"\n",
" for i, l in enumerate(lhs):\n",
" if is_contain(l, rhs[i + 1:]):\n",
" return False\n",
" return True\n",
"\n",
" def _unpack(self, lhs, rhs):\n",
" assert len(lhs) == len(rhs)\n",
" return [MOVE(dst, src) for dst, src in zip(lhs, rhs)]\n",
"\n",
" def _make_temp_syms(self, items):\n",
" assert all([item.is_a([TEMP, ATTR]) for item in items])\n",
" return [self.scope.add_temp('{}_{}'.format(Symbol.temp_prefix, item.symbol().name)) for item in items]\n",
"\n",
" def _make_temps(self, syms, ctx):\n",
" return [TEMP(sym, ctx) for sym in syms]\n",
"\n",
" def _make_mrefs(self, var, length):\n",
" return [MREF(var.clone(), CONST(i), Ctx.LOAD) for i in range(length)]\n",
"\n",
" def visit_MOVE(self, ir):\n",
" if ir.dst.is_a(ARRAY):\n",
" assert not ir.dst.is_mutable\n",
" if ir.src.is_a(ARRAY) and not ir.src.is_mutable:\n",
" if self._can_direct_unpack(ir.dst.items, ir.src.items):\n",
" mvs = self._unpack(ir.dst.items, ir.src.items)\n",
" else:\n",
" tempsyms = self._make_temp_syms(ir.dst.items)\n",
" mvs = self._unpack(self._make_temps(tempsyms, Ctx.STORE), ir.src.items)\n",
" mvs.extend(self._unpack(ir.dst.items, self._make_temps(tempsyms, Ctx.LOAD)))\n",
" for mv in mvs:\n",
" mv.loc = ir.loc\n",
" self.new_stms.append(mv)\n",
" return\n",
" elif ir.src.is_a([TEMP, ATTR]) and ir.src.symbol().typ.is_tuple():\n",
" mvs = self._unpack(ir.dst.items, self._make_mrefs(ir.src, len(ir.dst.items)))\n",
" for mv in mvs:\n",
" mv.loc = ir.loc\n",
" self.new_stms.append(mv)\n",
" return\n",
" elif ir.src.is_a(CALL) and self.scope.is_testbench():\n",
" raise NotImplementedError('Return of suquence type value is not implemented')\n",
" else:\n",
" ir.src = self.visit(ir.src)\n",
" ir.dst = self.visit(ir.dst)\n",
" self.new_stms.append(ir)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.010309278350515464,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0
] | 86 | 0.000598 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import clr
clr.AddReference("System")
clr.AddReference("QuantConnect.Algorithm")
clr.AddReference("QuantConnect.Indicators")
clr.AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Indicators import *
import decimal as d
class MovingAverageCrossAlgorithm(QCAlgorithm):
    '''In this example we look at the canonical 15/30 day moving average cross. This algorithm
    will go long when the 15 crosses above the 30 and will liquidate when the 15 crosses
    back below the 30.'''

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''

        # Leading-zero literals (01) are octal syntax in Python 2 and a
        # SyntaxError in Python 3; plain 1 is equivalent and portable.
        self.SetStartDate(2009, 1, 1)    #Set Start Date
        self.SetEndDate(2015, 1, 1)      #Set End Date
        self.SetCash(100000)             #Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        self.AddEquity("SPY")

        # create a 15 day exponential moving average
        self.fast = self.EMA("SPY", 15, Resolution.Daily)

        # create a 30 day exponential moving average
        self.slow = self.EMA("SPY", 30, Resolution.Daily)

        # timestamp of the last trade decision; used to act at most once per day
        self.previous = None

    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''
        # a couple things to notice in this method:
        # 1. We never need to 'update' our indicators with the data, the engine takes care of this for us
        # 2. We can use indicators directly in math expressions
        # 3. We can easily plot many indicators at the same time

        # wait for our slow ema to fully initialize
        if not self.slow.IsReady:
            return

        # only once per day
        if self.previous is not None and self.previous.date() == self.Time.date():
            return

        # define a small tolerance on our checks to avoid bouncing
        tolerance = 0.00015

        holdings = self.Portfolio["SPY"].Quantity

        # we only want to go long if we're currently short or flat
        if holdings <= 0:
            # if the fast is greater than the slow, we'll go long
            if self.fast.Current.Value > self.slow.Current.Value * d.Decimal(1 + tolerance):
                self.Log("BUY >> {0}".format(self.Securities["SPY"].Price))
                self.SetHoldings("SPY", 1.0)

        # we only want to liquidate if we're currently long
        # if the fast is less than the slow we'll liquidate our long
        if holdings > 0 and self.fast.Current.Value < self.slow.Current.Value:
            self.Log("SELL >> {0}".format(self.Securities["SPY"].Price))
            self.Liquidate("SPY")

        self.previous = self.Time
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"# \n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); \n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"# \n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"import clr\n",
"clr.AddReference(\"System\")\n",
"clr.AddReference(\"QuantConnect.Algorithm\")\n",
"clr.AddReference(\"QuantConnect.Indicators\")\n",
"clr.AddReference(\"QuantConnect.Common\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Algorithm import *\n",
"from QuantConnect.Indicators import *\n",
"import decimal as d \n",
"\n",
"\n",
"class MovingAverageCrossAlgorithm(QCAlgorithm):\n",
" '''In this example we look at the canonical 15/30 day moving average cross. This algorithm\n",
" will go long when the 15 crosses above the 30 and will liquidate when the 15 crosses\n",
" back below the 30.'''\n",
" \n",
" def Initialize(self):\n",
" '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''\n",
" \n",
" self.SetStartDate(2009, 01, 01) #Set Start Date\n",
" self.SetEndDate(2015, 01, 01) #Set End Date\n",
" self.SetCash(100000) #Set Strategy Cash\n",
" # Find more symbols here: http://quantconnect.com/data\n",
" self.AddEquity(\"SPY\")\n",
" \n",
" # create a 15 day exponential moving average\n",
" self.fast = self.EMA(\"SPY\", 15, Resolution.Daily);\n",
"\n",
" # create a 30 day exponential moving average\n",
" self.slow = self.EMA(\"SPY\", 30, Resolution.Daily);\n",
"\n",
" self.previous = None\n",
"\n",
" \n",
" def OnData(self, data):\n",
" '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''\n",
" # a couple things to notice in this method:\n",
" # 1. We never need to 'update' our indicators with the data, the engine takes care of this for us\n",
" # 2. We can use indicators directly in math expressions\n",
" # 3. We can easily plot many indicators at the same time \n",
"\n",
" # wait for our slow ema to fully initialize\n",
" if not self.slow.IsReady:\n",
" return \n",
"\n",
" # only once per day\n",
" if self.previous is not None and self.previous.date() == self.Time.date():\n",
" return\n",
"\n",
" # define a small tolerance on our checks to avoid bouncing\n",
" tolerance = 0.00015;\n",
" \n",
" holdings = self.Portfolio[\"SPY\"].Quantity\n",
"\n",
" # we only want to go long if we're currently short or flat\n",
" if holdings <= 0:\n",
" # if the fast is greater than the slow, we'll go long\n",
" if self.fast.Current.Value > self.slow.Current.Value * d.Decimal(1 + tolerance):\n",
" self.Log(\"BUY >> {0}\".format(self.Securities[\"SPY\"].Price))\n",
" self.SetHoldings(\"SPY\", 1.0)\n",
" \n",
" # we only want to liquidate if we're currently long\n",
" # if the fast is less than the slow we'll liquidate our long\n",
" if holdings > 0 and self.fast.Current.Value < self.slow.Current.Value:\n",
" self.Log(\"SELL >> {0}\".format(self.Securities[\"SPY\"].Price))\n",
" self.Liquidate(\"SPY\") \n",
"\n",
" self.previous = self.Time"
] | [
0,
0.012345679012345678,
0.3333333333333333,
0.014925373134328358,
0,
0.011764705882352941,
0.3333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.02702702702702703,
0.02631578947368421,
0.09523809523809523,
0,
0,
0,
0.010526315789473684,
0.011235955056179775,
0,
0.2,
0,
0.006578947368421052,
0.1111111111111111,
0.017543859649122806,
0.01818181818181818,
0.016666666666666666,
0,
0,
0.1111111111111111,
0,
0.01694915254237288,
0,
0,
0.01694915254237288,
0,
0,
0,
0.1111111111111111,
0.03571428571428571,
0.00847457627118644,
0,
0.009345794392523364,
0,
0.014285714285714285,
0,
0,
0,
0.043478260869565216,
0,
0,
0.012048192771084338,
0,
0,
0,
0.034482758620689655,
0.1111111111111111,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0.07142857142857142,
0,
0,
0,
0,
0.027777777777777776,
0,
0.030303030303030304
] | 83 | 0.024049 | false |
import urllib2
import json
import sys
import os
from HTMLParser import HTMLParser
#Created by: @Elite_Soba
#Usage: openrec.py VIDEOID
#Usage: openrec.py VIDEOID STARTTIME ENDTIME
#Choosing a STARTTIME < 0 is equivalent to setting STARTTIME to 0
#Choosing an ENDTIME > video length is equivalent to setting ENDTIME to video length
#Choosing STARTTIME < ENDTIME will get you nothing
#Choosing specific times is difficult because it only works in 10 second chunks
#So it may dl an extra 10s or miss 10s.
def hmsToSec(hms):
	"""Convert a colon-separated time string ("HH:MM:SS" or "MM:SS") to seconds."""
	# Horner-style accumulation: each colon-separated field shifts the
	# running total up by one base-60 place.
	seconds = 0
	for piece in hms.split(":"):
		seconds = seconds * 60 + int(piece)
	return seconds
def tupleListToDict(list):
	"""Convert a list of (key, value) pairs into a dict.

	Later pairs overwrite earlier ones with the same key (same as dict()).
	"""
	# The dict constructor accepts an iterable of 2-tuples directly; the
	# original hand-rolled loop also shadowed the dict/tuple builtins.
	return dict(list)
class getDataParser(HTMLParser):
	"""HTML parser that scrapes the video source URL from an openrec page.

	Captures the data-file attribute of <div class="js-data__player" ...>
	and stores it on self.vidSource.
	"""
	def handle_starttag(self, tag, attrs):
		if tag != "div":
			return
		attributes = tupleListToDict(attrs)
		is_player = attributes.get("class") == "js-data__player"
		if is_player and "data-file" in attributes:
			self.vidSource = attributes["data-file"]
def cutBandwidth(info):
	"""Extract the integer BANDWIDTH= value from an HLS #EXT-X-STREAM-INF line.

	Returns 0 when no bandwidth attribute is present or it is not numeric.
	"""
	marker = "BANDWIDTH="
	if marker not in info:
		return 0
	bw = info[info.find(marker) + len(marker):]
	# BANDWIDTH may be the last attribute on the line. Only cut at a comma
	# when one actually follows; the original bw[:bw.find(",")] with
	# find() == -1 silently dropped the final digit.
	comma = bw.find(",")
	if comma != -1:
		bw = bw[:comma]
	try:
		return int(bw)
	except ValueError:
		# Best-effort behaviour preserved: warn and fall back to 0.
		# (Narrowed from a bare except, which also swallowed e.g. KeyboardInterrupt.)
		print("Error getting bandwidth")
		return 0
def main(argv):
	"""Download an openrec.tv video (or a time slice of it) to a .ts file.

	argv: [VIDEOID] or [VIDEOID, STARTTIME, ENDTIME]; prompts interactively
	for anything missing. Times are HH:MM:SS or MM:SS strings.
	"""
	video = ""
	if len(argv) == 0:
		video = raw_input("Enter Openrec Video ID: ")
	else:
		video = argv[0]
	
	# start/end stay False for a full download; otherwise they hold seconds.
	start = False
	end = False
	
	if len(argv) == 3:
		start = hmsToSec(argv[1])
		end = hmsToSec(argv[2])
	else:
		response = raw_input("Would you like to download a specific part (Y/N)? ")
		if response.lower()[0] == "y":
			start = raw_input("Enter Start Time (HH:MM:SS): ")
			end = raw_input("Enter End Time (HH:MM:SS): ")
			start = hmsToSec(start)
			end = hmsToSec(end)

	# Fetch the video page and scrape the master-playlist URL from it.
	try:
		x = urllib2.urlopen("https://www.openrec.tv/movie/" + video).read()
	except:
		print "Error getting video. Please confirm video ID"
		return
	parser = getDataParser()
	parser.feed(x)
	url = "/".join(parser.vidSource.split("/")[:-1]) + "/"
	# Subsequent playlist/segment requests are made over plain http.
	header = url.replace("https", "http")
	print parser.vidSource.replace("https", "http")
	try:
		x = urllib2.urlopen(parser.vidSource.replace("https", "http")).read()
	except:
		print "Error getting video. Please let me know which video it was"
		return
	# Map each advertised bandwidth to its variant playlist name.
	qualities = {}
	lines = x.split("\n")
	for i in range(len(lines)-1):
		line = lines[i]
		if line and line[0] == "#":
			if "EXT-X-STREAM-INF" in line:
				bw = cutBandwidth(line)
				playlist = lines[i+1]
				qualities[bw] = playlist
	
	print "Bandwidth options are: " + str(", ".join([str(x) for x in sorted(qualities)]))
	# Always pick the highest-bandwidth (best quality) variant.
	print "Choosing Bandwidth option: " + str(sorted(qualities)[-1])
	footer = qualities[sorted(qualities)[-1]]#"2000kbps.m3u8"
	playlist = ""
	print header + footer
	try:
		playlist = urllib2.urlopen(header + footer)
	except:
		print "Error getting video. Please let me know which video it was"
		return
	
	# Walk the variant playlist, accumulating #EXTINF segment durations so
	# the start/end time range can be applied per segment (~10s chunks).
	list = playlist.read()
	parts = list.split("\n")
	segments = []
	time = 0
	for part in parts:
		if "#EXTINF" in part:
			time = time + float(part[8:-1])
		if len(part) != 0 and part[0] != "#":
			if start or end:
				#Get only parts of video between start and end
				if time >= start and time <= end:
					segments.append(part)
			else:
				#Get whole video
				segments.append(part)
	
	# Pick an output filename that doesn't clobber an existing download.
	filename = video + ".ts"
	i = 1
	while os.path.exists(filename) and os.path.isfile(filename):
		filename = video + "-" + str(i) + ".ts"
		i = i + 1
	vid = open(filename, "wb")
	
	header = "/".join((header + footer).split("/")[:-1]) + "/"
	
	# Download and concatenate the segments; MPEG-TS chunks can simply be
	# appended to one another, so no remuxing is needed.
	print "Downloading " + str(len(segments)) + " parts. This could take a while."
	progress = 0
	for segment in segments:
		part = urllib2.urlopen(header + segment).read()
		vid.write(part)
		progress = progress + 1
		if progress % 10 == 0:
			print "Downloaded " + str(progress) + " parts out of " + str(len(segments))
	
	print "Download succeeded"
	vid.close()
# Script entry point: forward command-line arguments (video ID and optional
# start/end times) to main().
if __name__ == "__main__":
	main(sys.argv[1:])
"import urllib2\n",
"import json\n",
"import sys\n",
"import os\n",
"from HTMLParser import HTMLParser\n",
"\n",
"#Created by: @Elite_Soba\n",
"#Usage: openrec.py VIDEOID\n",
"#Usage: openrec.py VIDEOID STARTTIME ENDTIME\n",
"#Choosing a STARTTIME < 0 is equivalent to setting STARTTIME to 0\n",
"#Choosing an ENDTIME > video length is equivalent to setting ENDTIME to video length\n",
"#Choosing STARTTIME < ENDTIME will get you nothing\n",
"#Choosing specific times is difficult because it only works in 10 second chunks\n",
"#So it may dl an extra 10s or miss 10s.\n",
"\n",
"def hmsToSec(hms):\n",
"\t#Works for HH:MM:SS and MM:SS\n",
"\tsplit = hms.split(\":\")\n",
"\trate = 1\n",
"\ttotal = 0\n",
"\tfor part in reversed(split):\n",
"\t\ttotal = total + rate * int(part)\n",
"\t\trate = rate * 60\n",
"\treturn total\n",
"\n",
"def tupleListToDict(list):\n",
"\tdict = {}\n",
"\tfor tuple in list:\n",
"\t\tdict[tuple[0]] = tuple[1]\n",
"\treturn dict\n",
"\n",
"class getDataParser(HTMLParser):\n",
"\tdef handle_starttag(self, tag, attrs):\n",
"\t\tif tag == \"div\":\n",
"\t\t\tattributes = tupleListToDict(attrs)\n",
"\t\t\tif \"class\" in attributes and attributes[\"class\"] == \"js-data__player\":\n",
"\t\t\t\tif \"data-file\" in attributes:\n",
"\t\t\t\t\tself.vidSource = attributes[\"data-file\"]\n",
"\n",
"def cutBandwidth(info):\n",
"\tif not \"BANDWIDTH\" in info:\n",
"\t\treturn 0\n",
"\tbw = info[info.find(\"BANDWIDTH=\")+10:]\n",
"\tbw = bw[:bw.find(\",\")]\n",
"\tout = 0\n",
"\ttry:\n",
"\t\tout = int(bw)\n",
"\texcept:\n",
"\t\tprint \"Error getting bandwidth\"\n",
"\treturn out\n",
"\t\n",
"def main(argv):\n",
"\tvideo = \"\"\n",
"\tif len(argv) == 0:\n",
"\t\tvideo = raw_input(\"Enter Openrec Video ID: \")\n",
"\telse:\n",
"\t\tvideo = argv[0]\n",
"\t\n",
"\tstart = False\n",
"\tend = False\n",
"\t\n",
"\tif len(argv) == 3:\n",
"\t\tstart = hmsToSec(argv[1])\n",
"\t\tend = hmsToSec(argv[2])\n",
"\telse:\n",
"\t\tresponse = raw_input(\"Would you like to download a specific part (Y/N)? \")\n",
"\t\tif response.lower()[0] == \"y\":\n",
"\t\t\tstart = raw_input(\"Enter Start Time (HH:MM:SS): \")\n",
"\t\t\tend = raw_input(\"Enter End Time (HH:MM:SS): \")\n",
"\t\t\tstart = hmsToSec(start)\n",
"\t\t\tend = hmsToSec(end)\n",
"\n",
"\ttry:\n",
"\t\tx = urllib2.urlopen(\"https://www.openrec.tv/movie/\" + video).read()\n",
"\texcept:\n",
"\t\tprint \"Error getting video. Please confirm video ID\"\n",
"\t\treturn\n",
"\tparser = getDataParser()\n",
"\tparser.feed(x)\n",
"\turl = \"/\".join(parser.vidSource.split(\"/\")[:-1]) + \"/\"\n",
"\theader = url.replace(\"https\", \"http\")\n",
"\tprint parser.vidSource.replace(\"https\", \"http\")\n",
"\ttry:\n",
"\t\tx = urllib2.urlopen(parser.vidSource.replace(\"https\", \"http\")).read()\n",
"\texcept:\n",
"\t\tprint \"Error getting video. Please let me know which video it was\"\n",
"\t\treturn\n",
"\tqualities = {}\n",
"\tlines = x.split(\"\\n\")\n",
"\tfor i in range(len(lines)-1):\n",
"\t\tline = lines[i]\n",
"\t\tif line and line[0] == \"#\":\n",
"\t\t\tif \"EXT-X-STREAM-INF\" in line:\n",
"\t\t\t\tbw = cutBandwidth(line)\n",
"\t\t\t\tplaylist = lines[i+1]\n",
"\t\t\t\tqualities[bw] = playlist\n",
"\t\n",
"\tprint \"Bandwidth options are: \" + str(\", \".join([str(x) for x in sorted(qualities)]))\n",
"\tprint \"Choosing Bandwidth option: \" + str(sorted(qualities)[-1])\n",
"\tfooter = qualities[sorted(qualities)[-1]]#\"2000kbps.m3u8\"\n",
"\tplaylist = \"\"\n",
"\tprint header + footer\n",
"\ttry:\n",
"\t\tplaylist = urllib2.urlopen(header + footer)\n",
"\texcept:\n",
"\t\tprint \"Error getting video. Please let me know which video it was\"\n",
"\t\treturn\n",
"\t\n",
"\tlist = playlist.read()\n",
"\tparts = list.split(\"\\n\")\n",
"\tsegments = []\n",
"\ttime = 0\n",
"\tfor part in parts:\n",
"\t\tif \"#EXTINF\" in part:\n",
"\t\t\ttime = time + float(part[8:-1])\n",
"\t\tif len(part) != 0 and part[0] != \"#\":\n",
"\t\t\tif start or end:\n",
"\t\t\t\t#Get only parts of video between start and end\n",
"\t\t\t\tif time >= start and time <= end:\n",
"\t\t\t\t\tsegments.append(part)\n",
"\t\t\telse:\n",
"\t\t\t\t#Get whole video\n",
"\t\t\t\tsegments.append(part)\n",
"\t\n",
"\tfilename = video + \".ts\"\n",
"\ti = 1\n",
"\twhile os.path.exists(filename) and os.path.isfile(filename):\n",
"\t\tfilename = video + \"-\" + str(i) + \".ts\"\n",
"\t\ti = i + 1\n",
"\tvid = open(filename, \"wb\")\n",
"\t\n",
"\theader = \"/\".join((header + footer).split(\"/\")[:-1]) + \"/\"\n",
"\t\n",
"\tprint \"Downloading \" + str(len(segments)) + \" parts. This could take a while.\"\n",
"\tprogress = 0\n",
"\tfor segment in segments:\n",
"\t\tpart = urllib2.urlopen(header + segment).read()\n",
"\t\tvid.write(part)\n",
"\t\tprogress = progress + 1\n",
"\t\tif progress % 10 == 0:\n",
"\t\t\tprint \"Downloaded \" + str(progress) + \" parts out of \" + str(len(segments))\n",
"\t\n",
"\tprint \"Download succeeded\"\n",
"\tvid.close()\n",
"\n",
"if __name__ == \"__main__\":\n",
"\tmain(sys.argv[1:])"
] | [
0,
0,
0,
0,
0,
0,
0.04,
0.037037037037037035,
0.022222222222222223,
0.015151515151515152,
0.023529411764705882,
0.0196078431372549,
0.0125,
0.025,
0,
0.05263157894736842,
0.06451612903225806,
0.041666666666666664,
0.1,
0.09090909090909091,
0.03333333333333333,
0.02857142857142857,
0.05263157894736842,
0.07142857142857142,
0,
0.037037037037037035,
0.09090909090909091,
0.05,
0.03571428571428571,
0.07692307692307693,
0,
0.030303030303030304,
0.025,
0.05263157894736842,
0.02564102564102564,
0.013513513513513514,
0.029411764705882353,
0.021739130434782608,
0,
0.041666666666666664,
0.06896551724137931,
0.09090909090909091,
0.025,
0.041666666666666664,
0.1111111111111111,
0.16666666666666666,
0.0625,
0.2222222222222222,
0.029411764705882353,
0.08333333333333333,
1,
0.0625,
0.08333333333333333,
0.05,
0.020833333333333332,
0.14285714285714285,
0.05555555555555555,
1,
0.06666666666666667,
0.07692307692307693,
1,
0.05,
0.03571428571428571,
0.038461538461538464,
0.14285714285714285,
0.012987012987012988,
0.030303030303030304,
0.018518518518518517,
0.02,
0.037037037037037035,
0.043478260869565216,
0,
0.16666666666666666,
0.014285714285714285,
0.2222222222222222,
0.01818181818181818,
0.1111111111111111,
0.038461538461538464,
0.0625,
0.017857142857142856,
0.02564102564102564,
0.02040816326530612,
0.16666666666666666,
0.013888888888888888,
0.2222222222222222,
0.014492753623188406,
0.1111111111111111,
0.0625,
0.043478260869565216,
0.03225806451612903,
0.05555555555555555,
0.03333333333333333,
0.029411764705882353,
0.03571428571428571,
0.038461538461538464,
0.034482758620689655,
1,
0.022988505747126436,
0.015151515151515152,
0.05084745762711865,
0.06666666666666667,
0.043478260869565216,
0.16666666666666666,
0.021739130434782608,
0.2222222222222222,
0.014492753623188406,
0.1111111111111111,
1,
0.041666666666666664,
0.038461538461538464,
0.06666666666666667,
0.1,
0.05,
0.041666666666666664,
0.02857142857142857,
0.025,
0.05,
0.0392156862745098,
0.02631578947368421,
0.037037037037037035,
0.1111111111111111,
0.09523809523809523,
0.038461538461538464,
1,
0.038461538461538464,
0.14285714285714285,
0.016129032258064516,
0.023809523809523808,
0.08333333333333333,
0.03571428571428571,
1,
0.016666666666666666,
1,
0.0125,
0.07142857142857142,
0.038461538461538464,
0.02,
0.05555555555555555,
0.038461538461538464,
0.04,
0.012658227848101266,
1,
0.03571428571428571,
0.07692307692307693,
0,
0.037037037037037035,
0.10526315789473684
] | 147 | 0.109984 | false |
import functools
from concurrent.futures import ThreadPoolExecutor
import arrow
import discord
from sigma.plugins.core_functions.details.user_data_fill import generate_member_data
def clean_guild_icon(icon_url):
    """Return a .png version of a guild icon URL, or a placeholder when unset."""
    if not icon_url:
        return 'https://i.imgur.com/QnYSlld.png'
    # Replace the extension: drop the last dot-separated segment, add .png.
    base = '.'.join(icon_url.split('.')[:-1])
    return base + '.png'
def count_members(members):
    """Tally a member list by kind; returns (users, bots)."""
    # Single pass, bucketing on the truthiness of member.bot.
    tallies = {True: 0, False: 0}
    for member in members:
        tallies[bool(member.bot)] += 1
    return tallies[False], tallies[True]
def count_channels(channels):
    """Tally a guild's channels by kind; returns (text, voice, categories)."""
    counts = [0, 0, 0]  # text, voice, categories
    kinds = (discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel)
    for channel in channels:
        # First matching kind wins, mirroring the original if/elif chain.
        for slot, kind in enumerate(kinds):
            if isinstance(channel, kind):
                counts[slot] += 1
                break
    return counts[0], counts[1], counts[2]
async def server_data_fill(ev):
    """Rebuild the ServerDetails collection from every guild the bot is in.

    Drops the existing collection, then, shard by shard, gathers per-guild
    stats (member/channel counts, owner, creation time, roles) and inserts
    them via a worker thread so the event loop is not blocked.

    :param ev: event context exposing .log, .db and .bot
    """
    ev.log.info('Filling server details...')
    # Small pool: inserts are pushed off-loop via run_in_executor below.
    threads = ThreadPoolExecutor(2)
    start_stamp = arrow.utcnow().float_timestamp
    srv_coll = ev.db[ev.db.db_cfg.database].ServerDetails
    # Full rebuild: discard all previous details before refilling.
    srv_coll.drop()
    for x in range(0, ev.bot.shard_count):
        shard_start = arrow.utcnow().float_timestamp
        server_list = []
        for guild in ev.bot.guilds:
            if guild.shard_id == x:
                users, bots = count_members(guild.members)
                text_channels, voice_channels, categories = count_channels(guild.channels)
                srv_data = {
                    'Name': guild.name,
                    'ServerID': guild.id,
                    'Icon': clean_guild_icon(guild.icon_url),
                    'Owner': await generate_member_data(guild.owner),
                    'Population': {
                        'Users': users,
                        'Bots': bots,
                        'Total': users + bots
                    },
                    'Channels': {
                        'Text': text_channels,
                        'Voice': voice_channels,
                        'Categories': categories
                    },
                    'Created': {
                        'Timestamp': {
                            'Float': arrow.get(guild.created_at).float_timestamp,
                            'Integer': arrow.get(guild.created_at).timestamp,
                        },
                        # NOTE(review): in arrow format tokens, 'MM' is the
                        # month and minutes are 'mm' — 'HH:MM:SS' likely
                        # renders the month in the minutes slot; confirm
                        # before relying on the stored text.
                        'Text': arrow.get(guild.created_at).format('DD. MMM. YYYY HH:MM:SS'),
                    },
                    'Roles': len(guild.roles)
                }
                server_list.append(srv_data)
        # Insert this shard's batch without blocking the event loop.
        task = functools.partial(srv_coll.insert, server_list)
        await ev.bot.loop.run_in_executor(threads, task)
        shard_end = arrow.utcnow().float_timestamp
        shard_diff = round(shard_end - shard_start, 3)
        ev.log.info(f'Filled Shard #{x} Servers in {shard_diff}s.')
    end_stamp = arrow.utcnow().float_timestamp
    diff = round(end_stamp - start_stamp, 3)
    ev.log.info(f'Server detail filler finished in {diff}s')
| [
"import functools\n",
"from concurrent.futures import ThreadPoolExecutor\n",
"\n",
"import arrow\n",
"import discord\n",
"\n",
"from sigma.plugins.core_functions.details.user_data_fill import generate_member_data\n",
"\n",
"\n",
"def clean_guild_icon(icon_url):\n",
" if icon_url:\n",
" icon_url = '.'.join(icon_url.split('.')[:-1]) + '.png'\n",
" else:\n",
" icon_url = 'https://i.imgur.com/QnYSlld.png'\n",
" return icon_url\n",
"\n",
"\n",
"def count_members(members):\n",
" users = 0\n",
" bots = 0\n",
" for member in members:\n",
" if member.bot:\n",
" bots += 1\n",
" else:\n",
" users += 1\n",
" return users, bots\n",
"\n",
"\n",
"def count_channels(channels):\n",
" text = 0\n",
" voice = 0\n",
" categories = 0\n",
" for channel in channels:\n",
" if isinstance(channel, discord.TextChannel):\n",
" text += 1\n",
" elif isinstance(channel, discord.VoiceChannel):\n",
" voice += 1\n",
" elif isinstance(channel, discord.CategoryChannel):\n",
" categories += 1\n",
" return text, voice, categories\n",
"\n",
"\n",
"async def server_data_fill(ev):\n",
" ev.log.info('Filling server details...')\n",
" threads = ThreadPoolExecutor(2)\n",
" start_stamp = arrow.utcnow().float_timestamp\n",
" srv_coll = ev.db[ev.db.db_cfg.database].ServerDetails\n",
" srv_coll.drop()\n",
" for x in range(0, ev.bot.shard_count):\n",
" shard_start = arrow.utcnow().float_timestamp\n",
" server_list = []\n",
" for guild in ev.bot.guilds:\n",
" if guild.shard_id == x:\n",
" users, bots = count_members(guild.members)\n",
" text_channels, voice_channels, categories = count_channels(guild.channels)\n",
" srv_data = {\n",
" 'Name': guild.name,\n",
" 'ServerID': guild.id,\n",
" 'Icon': clean_guild_icon(guild.icon_url),\n",
" 'Owner': await generate_member_data(guild.owner),\n",
" 'Population': {\n",
" 'Users': users,\n",
" 'Bots': bots,\n",
" 'Total': users + bots\n",
" },\n",
" 'Channels': {\n",
" 'Text': text_channels,\n",
" 'Voice': voice_channels,\n",
" 'Categories': categories\n",
" },\n",
" 'Created': {\n",
" 'Timestamp': {\n",
" 'Float': arrow.get(guild.created_at).float_timestamp,\n",
" 'Integer': arrow.get(guild.created_at).timestamp,\n",
" },\n",
" 'Text': arrow.get(guild.created_at).format('DD. MMM. YYYY HH:MM:SS'),\n",
" },\n",
" 'Roles': len(guild.roles)\n",
" }\n",
" server_list.append(srv_data)\n",
" task = functools.partial(srv_coll.insert, server_list)\n",
" await ev.bot.loop.run_in_executor(threads, task)\n",
" shard_end = arrow.utcnow().float_timestamp\n",
" shard_diff = round(shard_end - shard_start, 3)\n",
" ev.log.info(f'Filled Shard #{x} Servers in {shard_diff}s.')\n",
" end_stamp = arrow.utcnow().float_timestamp\n",
" diff = round(end_stamp - start_stamp, 3)\n",
" ev.log.info(f'Server detail filler finished in {diff}s')\n"
] | [
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 88 | 0.000518 | false |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Compilers for JSpeech Grammar Format (JSGF) and the CMU Pocket Sphinx speech
recognition engine.
"""
import jsgf
import jsgf.ext
from dragonfly import List, DictList
import dragonfly.grammar.elements as elements_
from ..base import CompilerBase, CompilerError
# noinspection PyUnusedLocal
class JSGFCompiler(CompilerBase):
    """
    Dragonfly compiler for JSpeech Grammar Format (JSGF)

    This class translates dragonfly elements, rules and grammars into
    JSGF expansions, rules and grammars respectively.

    Element dispatch is table-driven via ``self.element_compilers`` (set up
    by ``CompilerBase``).  The ``*args`` threaded through the ``compile_*``
    methods carry positional context: when compilation starts from
    ``compile_grammar`` this is ``(jsgf_grammar, unknown_words_set)``.
    """

    # Grammar class instantiated by compile_grammar(); subclasses may
    # substitute another pyjsgf Grammar subclass here.
    GrammarClass = jsgf.Grammar

    @staticmethod
    def get_reference_name(o):
        """Return a JSGF-safe reference name for *o*.

        :param o: object with a ``name`` attribute (rule/grammar/list) or a
            plain string.
        :returns: the name with spaces replaced by underscores.
        """
        # Return a non-nil name string.
        if hasattr(o, "name"):
            if not o.name:
                # Unnamed object: fall back on its class name.
                name = o.__class__.__name__
            else:
                name = o.name
        else:
            # Assume the object is a string.
            name = o

        # JSGF and Pocket Sphinx don't allow spaces in names, but dragonfly
        # does. Work around this by changing any spaces to underscores.
        return name.replace(" ", "_")

    # ----------------------------------------------------------------------
    # Methods for compiling grammars and rules.

    def compile_grammar(self, grammar, *args, **kwargs):
        """
        Take a dragonfly grammar and translate it into a JSGF grammar object
        with methods for compiling the grammar and matching speech.

        :param grammar: dragonfly Grammar object to compile
        :param args: unused by this method (accepted for interface parity)
        :param kwargs: unused by this method
        :return: compiled ``jsgf.Grammar``, or ``None`` for empty grammars
        """
        self._log.debug("%s: Compiling grammar %s." % (self, grammar.name))

        # Create a new JSGF Grammar object.
        unknown_words = set()
        result = self.GrammarClass(name=self.get_reference_name(grammar))

        # Compile each dragonfly rule and add it to the new grammar.
        # The result grammar and the unknown-words set are threaded through
        # all compile_* calls positionally.
        for rule in grammar.rules:
            result.add_rule(self.compile_rule(rule, result, unknown_words))

        # Also compile and add any dragonfly Lists.
        for lst in grammar.lists:
            result.add_rule(self.compile_list(lst, result, unknown_words))

        # Log a warning about unknown words if necessary.
        if unknown_words:
            self._log.warning("Grammar '%s' used words not found in the "
                              "pronunciation dictionary: %s", result.name,
                              ", ".join(sorted(unknown_words)))

        # Return None for empty grammars.
        if not result.rules:
            return

        return result

    def compile_rule(self, rule, *args, **kwargs):
        """Translate one dragonfly rule into a ``jsgf.Rule``."""
        return jsgf.Rule(
            name=self.get_reference_name(rule),
            visible=rule.exported,
            expansion=self.compile_element(rule.element, *args, **kwargs)
        )

    # ----------------------------------------------------------------------
    # Methods for compiling dragonfly lists and dictionary lists.
    # These have no equivalent in JSGF, so hidden/private rules are used
    # instead.

    def compile_list(self, lst, *args, **kwargs):
        """Translate a dragonfly List or DictList into a ``jsgf.HiddenRule``
        of literal alternatives (DictList items come from its sorted keys).

        :raises CompilerError: if *lst* is neither a List nor a DictList.
        """
        if isinstance(lst, List):
            literal_list = [elements_.Literal(item) for item in lst]
        elif isinstance(lst, DictList):
            keys = list(lst.keys())
            keys.sort()
            literal_list = [elements_.Literal(key) for key in keys]
        else:
            raise CompilerError("Cannot compile dragonfly List %s"
                                % lst)

        return jsgf.HiddenRule(
            self.get_reference_name(lst),
            self.compile_element(elements_.Alternative(literal_list), *args,
                                 **kwargs)
        )

    def recompile_list(self, lst, jsgf_grammar):
        # Used from the GrammarWrapper class to get an updated list and any
        # unknown words.  Returns (compiled_hidden_rule, unknown_words_set).
        unknown_words = set()
        return (self.compile_list(lst, jsgf_grammar, unknown_words),
                unknown_words)

    # ----------------------------------------------------------------------
    # Methods for compiling elements.

    def compile_element(self, element, *args, **kwargs):
        """Dispatch *element* to the compiler registered for its type.

        :raises NotImplementedError: if no registered type matches.
        """
        # Look for a compiler method to handle the given element.
        for element_type, compiler in self.element_compilers:
            if isinstance(element, element_type):
                return compiler(self, element, *args, **kwargs)

        # Didn't find a compiler method for this element type.
        raise NotImplementedError("Compiler %s not implemented"
                                  " for element type %s."
                                  % (self, element))

    def _compile_repetition(self, element, *args, **kwargs):
        # Compile the first element only; pyjsgf doesn't support limits on
        # repetition (yet).
        children = element.children
        if len(children) > 1:
            self._log.debug("Ignoring limits of repetition element %s."
                            % element)
        compiled_child = self.compile_element(children[0], *args, **kwargs)
        return jsgf.Repeat(compiled_child)

    def _compile_sequence(self, element, *args, **kwargs):
        # Compile Repetition elements separately.
        if isinstance(element, elements_.Repetition):
            return self._compile_repetition(element, *args, **kwargs)

        children = element.children
        if len(children) > 1:
            return jsgf.Sequence(*[
                self.compile_element(c, *args, **kwargs) for c in children
            ])
        elif len(children) == 1:
            # Skip redundant (1 child) sequences.
            return self.compile_element(children[0], *args, **kwargs)
        else:
            # Compile an Empty element for empty sequences.
            return self.compile_element(elements_.Empty(), *args, **kwargs)

    def _compile_alternative(self, element, *args, **kwargs):
        children = element.children
        if len(children) > 1:
            return jsgf.AlternativeSet(*[
                self.compile_element(c, *args, **kwargs) for c in children
            ])
        elif len(children) == 1:
            # Skip redundant (1 child) alternatives.
            return self.compile_element(children[0], *args, **kwargs)
        else:
            # Compile an Empty element for empty alternatives.
            return self.compile_element(elements_.Empty(), *args, **kwargs)

    def _compile_optional(self, element, *args, **kwargs):
        child = self.compile_element(element.children[0], *args, **kwargs)
        return jsgf.OptionalGrouping(child)

    def _compile_literal(self, element, *args, **kwargs):
        # Words of the literal are joined into a single JSGF literal.
        return jsgf.Literal(" ".join(element.words))

    def _compile_rule_ref(self, element, *args, **kwargs):
        # Mirror get_reference_name(): spaces are not allowed in JSGF names.
        name = element.rule.name.replace(" ", "_")
        return jsgf.NamedRuleRef(name)

    def _compile_list_ref(self, element, *args, **kwargs):
        # Lists compile to hidden rules, so a list ref is a rule ref.
        name = element.list.name.replace(" ", "_")
        return jsgf.NamedRuleRef(name)

    def _compile_empty(self, element, *args, **kwargs):
        # JSGF <NULL>: matches nothing, always succeeds.
        return jsgf.NullRef()

    def _compile_impossible(self, element, *args, **kwargs):
        # JSGF <VOID>: can never be spoken/matched.
        return jsgf.VoidRef()

    def _compile_dictation(self, element, *args, **kwargs):
        # JSGF has no equivalent for dictation elements. Instead compile and
        # return an Impossible element that allows dictation to be used,
        # but not matched.
        return self.compile_element(
            elements_.Impossible(), *args, **kwargs
        )
class PatchedRepeat(jsgf.Repeat):
    """
    Repeat class patched to compile JSGF repeats as
    "expansion [expansion]*" to avoid a bug in Pocket Sphinx with the
    repeat operator.
    """
    def compile(self, ignore_tags=False):
        """Return the patched JSGF text for this repeat expansion.

        :param ignore_tags: if True, the JSGF tag is omitted from the output.
        """
        # The base implementation is invoked but its result is discarded;
        # presumably it is needed for internal bookkeeping/validation in
        # pyjsgf -- NOTE(review): it is also called without ignore_tags;
        # confirm against the pyjsgf Repeat.compile contract before changing.
        super(PatchedRepeat, self).compile()
        compiled = self.child.compile(ignore_tags)
        if self.tag and not ignore_tags:
            # "(x)[x]*tag": child once, optional extra repeats, then the tag.
            return "(%s)[%s]*%s" % (compiled, compiled, self.tag)
        else:
            return "(%s)[%s]*" % (compiled, compiled)
class SphinxJSGFCompiler(JSGFCompiler):
    """
    JSGF compiler sub-class used by the CMU Pocket Sphinx backend.
    """

    def __init__(self, engine):
        JSGFCompiler.__init__(self)
        self.engine = engine

        # Unknown words are replaced with a highly unlikely phrase; a
        # NullRef is used instead if that phrase is itself out of the
        # engine's vocabulary.
        literals_by_language = {"en": "impossible " * 20}
        self.impossible_literal = literals_by_language.get(engine.language, "")

    # ----------------------------------------------------------------------
    # Methods for compiling elements.

    def _compile_repetition(self, element, *args, **kwargs):
        # pyjsgf cannot express repetition limits yet, so only the first
        # child element is compiled.
        if len(element.children) > 1:
            self._log.debug("Ignoring limits of repetition element %s."
                            % element)

        # Use PatchedRepeat to work around the Pocket Sphinx repeat bug.
        first_child = element.children[0]
        return PatchedRepeat(self.compile_element(first_child, *args, **kwargs))

    def _compile_literal(self, element, *args, **kwargs):
        # Literals become word sequences; out-of-vocabulary words compile to
        # the special impossible rule and are recorded as unknown.
        parts = []
        for word in element.words:
            if not self.engine.check_valid_word(word):
                parts.append(self.compile_element(
                    elements_.Impossible(), *args, **kwargs
                ))
                # Save the unknown word (args[1] is the unknown-words set).
                args[1].add(word)
            else:
                parts.append(jsgf.Literal(word))
        return jsgf.Sequence(*parts)

    def _compile_impossible(self, element, *args, **kwargs):
        # VoidRefs would disable entire rules/grammars, so reference a
        # private <_impossible> rule instead, creating it on first use.
        grammar = args[0]
        if "_impossible" not in grammar.rule_names:
            # The impossible literal is only usable if it is non-empty and
            # every one of its words is in the engine vocabulary.
            words = set(self.impossible_literal.split())
            usable = bool(words) and all(
                self.engine.check_valid_word(w) for w in words
            )
            if usable:
                expansion = jsgf.Literal(self.impossible_literal)
            else:
                # Fallback on a NullRef. There are some problems with using
                # these, but they get the job done for simple rules.
                expansion = jsgf.NullRef()
            grammar.add_rule(jsgf.Rule(
                name="_impossible", visible=False, expansion=expansion
            ))

        return jsgf.NamedRuleRef("_impossible")

    # TODO Change this to allow dictation elements to work.
    def _compile_dictation(self, element, *args, **kwargs):
        # Dictation cannot be represented; compile an Impossible element.
        return self.compile_element(elements_.Impossible(), *args, **kwargs)
| [
"#\n",
"# This file is part of Dragonfly.\n",
"# (c) Copyright 2007, 2008 by Christo Butcher\n",
"# Licensed under the LGPL.\n",
"#\n",
"# Dragonfly is free software: you can redistribute it and/or modify it\n",
"# under the terms of the GNU Lesser General Public License as published\n",
"# by the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# Dragonfly is distributed in the hope that it will be useful, but\n",
"# WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n",
"# Lesser General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU Lesser General Public\n",
"# License along with Dragonfly. If not, see\n",
"# <http://www.gnu.org/licenses/>.\n",
"#\n",
"\n",
"\"\"\"\n",
"Compilers for JSpeech Grammar Format (JSGF) and the CMU Pocket Sphinx speech\n",
"recognition engine.\n",
"\"\"\"\n",
"\n",
"import jsgf\n",
"import jsgf.ext\n",
"from dragonfly import List, DictList\n",
"import dragonfly.grammar.elements as elements_\n",
"\n",
"from ..base import CompilerBase, CompilerError\n",
"\n",
"# noinspection PyUnusedLocal\n",
"\n",
"\n",
"class JSGFCompiler(CompilerBase):\n",
" \"\"\"\n",
" Dragonfly compiler for JSpeech Grammar Format (JSGF)\n",
"\n",
" This class translates dragonfly elements, rules and grammars into\n",
" JSGF expansions, rules and grammars respectively.\n",
" \"\"\"\n",
"\n",
" GrammarClass = jsgf.Grammar\n",
"\n",
" @staticmethod\n",
" def get_reference_name(o):\n",
" # Return a non-nil name string.\n",
" if hasattr(o, \"name\"):\n",
" if not o.name:\n",
" name = o.__class__.__name__\n",
" else:\n",
" name = o.name\n",
" else:\n",
" # Assume the object is a string.\n",
" name = o\n",
"\n",
" # JSGF and Pocket Sphinx don't allow spaces in names, but dragonfly\n",
" # does. Work around this by changing any spaces to underscores.\n",
" return name.replace(\" \", \"_\")\n",
"\n",
" # ----------------------------------------------------------------------\n",
" # Methods for compiling grammars and rules.\n",
"\n",
" def compile_grammar(self, grammar, *args, **kwargs):\n",
" \"\"\"\n",
" Take a dragonfly grammar and translate it into a JSGF grammar object\n",
" with methods for compiling the grammar and matching speech.\n",
"\n",
" :param grammar:\n",
" :param args:\n",
" :param kwargs:\n",
" :return:\n",
" \"\"\"\n",
" self._log.debug(\"%s: Compiling grammar %s.\" % (self, grammar.name))\n",
"\n",
" # Create a new JSGF Grammar object.\n",
" unknown_words = set()\n",
" result = self.GrammarClass(name=self.get_reference_name(grammar))\n",
"\n",
" # Compile each dragonfly rule and add it to the new grammar.\n",
" for rule in grammar.rules:\n",
" result.add_rule(self.compile_rule(rule, result, unknown_words))\n",
"\n",
" # Also compile and add any dragonfly Lists.\n",
" for lst in grammar.lists:\n",
" result.add_rule(self.compile_list(lst, result, unknown_words))\n",
"\n",
" # Log a warning about unknown words if necessary.\n",
" if unknown_words:\n",
" self._log.warning(\"Grammar '%s' used words not found in the \"\n",
" \"pronunciation dictionary: %s\", result.name,\n",
" \", \".join(sorted(unknown_words)))\n",
"\n",
" # Return None for empty grammars.\n",
" if not result.rules:\n",
" return\n",
"\n",
" return result\n",
"\n",
" def compile_rule(self, rule, *args, **kwargs):\n",
" return jsgf.Rule(\n",
" name=self.get_reference_name(rule),\n",
" visible=rule.exported,\n",
" expansion=self.compile_element(rule.element, *args, **kwargs)\n",
" )\n",
"\n",
" # ----------------------------------------------------------------------\n",
" # Methods for compiling dragonfly lists and dictionary lists.\n",
" # These have no equivalent in JSGF, so hidden/private rules are used\n",
" # instead.\n",
"\n",
" def compile_list(self, lst, *args, **kwargs):\n",
" if isinstance(lst, List):\n",
" literal_list = [elements_.Literal(item) for item in lst]\n",
" elif isinstance(lst, DictList):\n",
" keys = list(lst.keys())\n",
" keys.sort()\n",
" literal_list = [elements_.Literal(key) for key in keys]\n",
" else:\n",
" raise CompilerError(\"Cannot compile dragonfly List %s\"\n",
" % lst)\n",
"\n",
" return jsgf.HiddenRule(\n",
" self.get_reference_name(lst),\n",
" self.compile_element(elements_.Alternative(literal_list), *args,\n",
" **kwargs)\n",
" )\n",
"\n",
" def recompile_list(self, lst, jsgf_grammar):\n",
" # Used from the GrammarWrapper class to get an updated list and any\n",
" # unknown words.\n",
" unknown_words = set()\n",
" return (self.compile_list(lst, jsgf_grammar, unknown_words),\n",
" unknown_words)\n",
"\n",
" # ----------------------------------------------------------------------\n",
" # Methods for compiling elements.\n",
"\n",
" def compile_element(self, element, *args, **kwargs):\n",
" # Look for a compiler method to handle the given element.\n",
" for element_type, compiler in self.element_compilers:\n",
" if isinstance(element, element_type):\n",
" return compiler(self, element, *args, **kwargs)\n",
"\n",
" # Didn't find a compiler method for this element type.\n",
" raise NotImplementedError(\"Compiler %s not implemented\"\n",
" \" for element type %s.\"\n",
" % (self, element))\n",
"\n",
" def _compile_repetition(self, element, *args, **kwargs):\n",
" # Compile the first element only; pyjsgf doesn't support limits on\n",
" # repetition (yet).\n",
" children = element.children\n",
" if len(children) > 1:\n",
" self._log.debug(\"Ignoring limits of repetition element %s.\"\n",
" % element)\n",
" compiled_child = self.compile_element(children[0], *args, **kwargs)\n",
" return jsgf.Repeat(compiled_child)\n",
"\n",
" def _compile_sequence(self, element, *args, **kwargs):\n",
" # Compile Repetition elements separately.\n",
" if isinstance(element, elements_.Repetition):\n",
" return self._compile_repetition(element, *args, **kwargs)\n",
"\n",
" children = element.children\n",
" if len(children) > 1:\n",
" return jsgf.Sequence(*[\n",
" self.compile_element(c, *args, **kwargs) for c in children\n",
" ])\n",
" elif len(children) == 1:\n",
" # Skip redundant (1 child) sequences.\n",
" return self.compile_element(children[0], *args, **kwargs)\n",
" else:\n",
" # Compile an Empty element for empty sequences.\n",
" return self.compile_element(elements_.Empty(), *args, **kwargs)\n",
"\n",
" def _compile_alternative(self, element, *args, **kwargs):\n",
" children = element.children\n",
" if len(children) > 1:\n",
" return jsgf.AlternativeSet(*[\n",
" self.compile_element(c, *args, **kwargs) for c in children\n",
" ])\n",
" elif len(children) == 1:\n",
" # Skip redundant (1 child) alternatives.\n",
" return self.compile_element(children[0], *args, **kwargs)\n",
" else:\n",
" # Compile an Empty element for empty alternatives.\n",
" return self.compile_element(elements_.Empty(), *args, **kwargs)\n",
"\n",
" def _compile_optional(self, element, *args, **kwargs):\n",
" child = self.compile_element(element.children[0], *args, **kwargs)\n",
" return jsgf.OptionalGrouping(child)\n",
"\n",
" def _compile_literal(self, element, *args, **kwargs):\n",
" return jsgf.Literal(\" \".join(element.words))\n",
"\n",
" def _compile_rule_ref(self, element, *args, **kwargs):\n",
" name = element.rule.name.replace(\" \", \"_\")\n",
" return jsgf.NamedRuleRef(name)\n",
"\n",
" def _compile_list_ref(self, element, *args, **kwargs):\n",
" name = element.list.name.replace(\" \", \"_\")\n",
" return jsgf.NamedRuleRef(name)\n",
"\n",
" def _compile_empty(self, element, *args, **kwargs):\n",
" return jsgf.NullRef()\n",
"\n",
" def _compile_impossible(self, element, *args, **kwargs):\n",
" return jsgf.VoidRef()\n",
"\n",
" def _compile_dictation(self, element, *args, **kwargs):\n",
" # JSGF has no equivalent for dictation elements. Instead compile and\n",
" # return an Impossible element that allows dictation to be used,\n",
" # but not matched.\n",
" return self.compile_element(\n",
" elements_.Impossible(), *args, **kwargs\n",
" )\n",
"\n",
"\n",
"class PatchedRepeat(jsgf.Repeat):\n",
" \"\"\"\n",
" Repeat class patched to compile JSGF repeats as\n",
" \"expansion [expansion]*\" to avoid a bug in Pocket Sphinx with the\n",
" repeat operator.\n",
" \"\"\"\n",
" def compile(self, ignore_tags=False):\n",
" super(PatchedRepeat, self).compile()\n",
" compiled = self.child.compile(ignore_tags)\n",
" if self.tag and not ignore_tags:\n",
" return \"(%s)[%s]*%s\" % (compiled, compiled, self.tag)\n",
" else:\n",
" return \"(%s)[%s]*\" % (compiled, compiled)\n",
"\n",
"\n",
"class SphinxJSGFCompiler(JSGFCompiler):\n",
" \"\"\"\n",
" JSGF compiler sub-class used by the CMU Pocket Sphinx backend.\n",
" \"\"\"\n",
"\n",
" def __init__(self, engine):\n",
" JSGFCompiler.__init__(self)\n",
" self.engine = engine\n",
"\n",
" # Use a very unlikely phrase to replace unknown words. NullRefs are\n",
" # used instead if words aren't in the vocabulary.\n",
" self.impossible_literal = {\n",
" \"en\": \"impossible \" * 20,\n",
" }.get(engine.language, \"\")\n",
"\n",
" # ----------------------------------------------------------------------\n",
" # Methods for compiling elements.\n",
"\n",
" def _compile_repetition(self, element, *args, **kwargs):\n",
" # Compile the first element only; pyjsgf doesn't support limits on\n",
" # repetition (yet).\n",
" children = element.children\n",
" if len(children) > 1:\n",
" self._log.debug(\"Ignoring limits of repetition element %s.\"\n",
" % element)\n",
"\n",
" # Return a PatchedRepeat instead of a normal Repeat expansion.\n",
" compiled_child = self.compile_element(children[0], *args, **kwargs)\n",
" return PatchedRepeat(compiled_child)\n",
"\n",
" def _compile_literal(self, element, *args, **kwargs):\n",
" # Build literals as sequences and use <NULL> for unknown words.\n",
" children = []\n",
" for word in element.words:\n",
" if self.engine.check_valid_word(word):\n",
" children.append(jsgf.Literal(word))\n",
" else:\n",
" children.append(self.compile_element(\n",
" elements_.Impossible(), *args, **kwargs\n",
" ))\n",
"\n",
" # Save the unknown word.\n",
" args[1].add(word)\n",
"\n",
" return jsgf.Sequence(*children)\n",
"\n",
" def _compile_impossible(self, element, *args, **kwargs):\n",
" # Override this to avoid VoidRefs disabling entire rules/grammars.\n",
" # Use a special <_impossible> private rule instead. Only add the\n",
" # special rule if it isn't in the result grammar.\n",
" grammar = args[0]\n",
" if \"_impossible\" not in grammar.rule_names:\n",
" # Check that the impossible literal contains only valid words.\n",
" words = set(self.impossible_literal.split())\n",
" valid_literal = bool(words)\n",
" for word in words:\n",
" if not valid_literal:\n",
" break\n",
" if not self.engine.check_valid_word(word):\n",
" valid_literal = False\n",
"\n",
" if valid_literal:\n",
" expansion = jsgf.Literal(self.impossible_literal)\n",
" else:\n",
" # Fallback on a NullRef. There are some problems with using\n",
" # these, but they get the job done for simple rules.\n",
" expansion = jsgf.NullRef()\n",
" grammar.add_rule(jsgf.Rule(\n",
" name=\"_impossible\", visible=False, expansion=expansion\n",
" ))\n",
"\n",
" return jsgf.NamedRuleRef(\"_impossible\")\n",
"\n",
" # TODO Change this to allow dictation elements to work.\n",
" def _compile_dictation(self, element, *args, **kwargs):\n",
" return self.compile_element(\n",
" elements_.Impossible(), *args, **kwargs\n",
" )\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 313 | 0 | false |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Taifxx
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########## CTVS:
### Import modules ...
from ext import *
BGPROCESS = True    # Passed as CProgress(..., bg=BGPROCESS) in TVS.check_new_eps; presumably selects a background (non-modal) progress dialog -- confirm against ext.CProgress.
##### TVS Object ...
class TVS:
    def __init__(self, def_file_name, def_file_path=Empty, Import=False):
        """Create a TV-show container bound to a pack file.

        :param def_file_name: name of the pack file to read/write
        :param def_file_path: library folder holding the pack file
        :param Import: when True, read and unpack the file immediately
        """
        ## Pack file separators ...
        # Separator tokens of the tvs.pack serialization format (list item,
        # source, folder source, episode, part and version separators).
        self._sepLST    = TAG_PAR_TVSPACK_LSEP
        self._sepSRC    = TAG_PAR_TVSPACK_SSEP + NewLine
        self._sepFRC    = TAG_PAR_TVSPACK_FSEP + NewLine
        self._sepEPS    = TAG_PAR_TVSPACK_ESEP + NewLine
        self._sepPRT    = TAG_PAR_TVSPACK_PSEP + NewLine
        self._sepVER    = TAG_PAR_TVSPACK_VERSEP + NewLine
        self._PACK_VERS = TAG_PAR_TVSPACK_VERSION

        ## Define TVS ...
        self._define(def_file_name, def_file_path, Import)
def _define(self, def_file_name, def_file_path, Import):
## Set default param ...
self.clear()
self._file_name = def_file_name
self.lib_path = def_file_path
self.lib_name = DOS.getdir(def_file_path) if def_file_path and Import else Empty
self.lib_defname = DOS.getdir(def_file_path) if def_file_path else Empty
## Import data on initialize ...
if Import : self.dimport()
    ### Clear TV Show ...
    def clear(self):
        """Reset every attribute to its pristine (empty) state."""
        # Locations and raw pack text.
        self.lib_path = Empty
        self.packed_data = Empty
        self._file_name = Empty
        # Show content.
        self._episodes = []
        self._sources = []
        self._folsources = []
        self._rawlist = []
        # Numbering state: seq counter, pending source-number increment and
        # the season-decrease lock set by update_season().
        self.seq = 0
        self._sn_add = 1
        self._inc_lock = False
### Inside append ...
def _append(self, template, mark, var, appdict):
if template not in [itm[mark] for itm in var]:
var.append(appdict)
return True
return False
### Append ...
def append_episode(self, original_name, new_name, link, src_id, season=Empty):
if self._append(original_name, 'original_name', self._episodes, {'original_name':original_name, 'new_name':new_name, 'link':link, 'src_id':src_id, 'season':season}):
for src in self._sources:
if src['src_id'] == src_id and not self._inc_lock : src['src_numb'] += self._sn_add; self._sn_add = 1; break
if self.seq : self.seq += 1
def update_season(self, src_link, src_season, src_numb, src_name):
for src in self._sources :
if src_link == src['src_link']:
if inte(src_season) >= inte(src['src_season']):
src['src_season'] = src_season
src['src_numb'] = src_numb
src['src_name'] = src_name
else:
self._inc_lock = True
def append_fsource(self, fsrc_name, fsrc_link, fsrc_inum, fsrc_upd=True):
self._append(fsrc_link, 'fsrc_link', self._folsources, {'fsrc_name':getUniqname(fsrc_name, [itm['fsrc_name'] for itm in self._folsources]), 'fsrc_link':fsrc_link, 'fsrc_inum':fsrc_inum, 'fsrc_upd':fsrc_upd})
def append_source(self, src_name, src_link, src_season=Empty, src_upd=True, src_folmode=False, src_numb=0):
src_id = self.get_src_id(src_link)
if not self._append(src_link, 'src_link', self._sources, {'src_name':getUniqname(src_name, [itm['src_name'] for itm in self._sources]), 'src_link':src_link, 'src_id':src_id, 'src_upd':src_upd, 'src_season':src_season, 'src_numb':src_numb, 'src_folmode':src_folmode}):
#self.update_season(src_link, src_season, src_numb, getUniqname(src_name, [itm['src_name'] for itm in self._sources]))
self.update_season(src_link, src_season, src_numb, src_name)
return src_id
    def incSeq(self):
        # Advance the running episode sequence counter by one.
        self.seq += 1
    def incSN(self):
        # Grow the pending source-number increment consumed (and reset to 1)
        # by the next successful append_episode().
        self._sn_add += 1
### Exclude ...
def _exclude(self, value, mark, var, skipvalue=Empty, skipmark=Empty):
return [itm for itm in var if value != itm[mark] or (skipvalue and itm[skipmark] != skipvalue)]
    def exclude_source(self, src_id):
        # Drop the source entry with this id (its episodes are untouched).
        self._sources = self._exclude(src_id, 'src_id', self._sources)
    def exclude_episodes(self, src_id, season=Empty):
        # Drop episodes of src_id; when a season is given, other seasons of
        # the same source are kept.
        self._episodes = self._exclude(src_id, 'src_id', self._episodes, season, 'season')
def exclude_source_data(self, src_link, season=Empty):
src_id = self.get_src_id(src_link)
self.exclude_episodes(src_id, season=season)
return src_id
    def exclude_folsource(self, frc_link):
        # Drop the folder-source entry with this link.
        self._folsources = self._exclude(frc_link, 'fsrc_link', self._folsources)
def remove_episode(self, src_id, eps_name):
for eps in self._episodes:
if eps['src_id'] == src_id and eps['new_name'] == eps_name : self._episodes.remove(eps)
### Get ...
def get_multiseason_list(self, src_link):
src_id = self.get_src_id(src_link)
seasons = []
for eps in self._episodes:
if eps['src_id'] == src_id and eps['season'] not in seasons : seasons.append(eps['season'])
return seasons
def get_eps_names_and_links(self):
return {eps['new_name']: eps['link'] for eps in self._episodes}
def get_eps_names_and_links_forsrc(self, src_link):
src_id = self.get_src_id(src_link)
return [eps['new_name'] for eps in self._episodes if eps['src_id'] == src_id], [eps['link'] for eps in self._episodes if eps['src_id'] == src_id]
def get_names_and_links(self):
return [src['src_name'] for src in self._sources], [src['src_link'] for src in self._sources], \
[frc['fsrc_name'] for frc in self._folsources], [frc['fsrc_link'] for frc in self._folsources]
def get_src_id(self, src_link):
for src in self._sources :
if src_link == src['src_link'] : return src['src_id']
cidx = 1
seq = [src['src_id'] for src in self._sources]
for idx in range(len(seq)+1)[1:] :
if cidx not in seq : return cidx
cidx = idx+1
return cidx
def get_src_name_by_link(self, link):
for itm in self._sources:
if itm['src_link'] == link : return itm['src_name']
return Empty
def get_eps_name_by_link(self, link):
for itm in self._episodes:
if itm['link'] == link : return itm['new_name']
return Empty
    def get_direct(self):
        # Expose the internal lists directly -- callers (e.g. join_tvs)
        # receive the live objects, not copies.
        return (self._episodes, self._folsources, self._sources)
    def get_eps_count(self):
        # Total number of stored episodes across all sources.
        return len(self._episodes)
def get_upd(self):
updListS = [src['src_name'] for src in self._sources if src['src_upd']]
updListF = [frc['fsrc_name'] for frc in self._folsources if frc['fsrc_upd']]
return updListF, updListS
def get_frc_names_and_links(self):
return ([frc['fsrc_name'] for frc in self._folsources], [frc['fsrc_link'] for frc in self._folsources])
def get_scr_numb_and_season(self, link):
for itm in self._sources:
if itm['src_link'] == link : return itm['src_season'], itm['src_numb']
return Empty, 0
def get_scr_numb_season_mode(self, link):
for itm in self._sources:
if itm['src_link'] == link : return itm['src_season'], itm['src_numb'], itm['src_folmode']
return Empty, 0, False
def get_raw_link_list(self):
return [itm[0] for itm in self._rawlist]
def get_raw_eps(self):
return [itm[1] for itm in self._rawlist]
### Add target TVS to current TVS ...
def join_tvs(self, TVS):
srcId = dict()
epsExt, frcExt, srcExt = TVS.get_direct()
for src in srcExt:
scrOldId = src['src_id']
scrNewId = self.append_source(src['src_name'], src['src_link'], src['src_season'], src['src_upd'])
srcId.update({scrOldId: scrNewId})
for frc in frcExt:
self.append_fsource(frc['fsrc_name'], frc['fsrc_link'], frc['fsrc_inum'], frc['fsrc_upd'])
for eps in epsExt:
self.append_episode(eps['original_name'], eps['new_name'], eps['link'], srcId[eps['src_id']])
### Rename ...
def rensource(self, srcOldName, srcNewName):
for src in self._sources:
if src['src_name'] == srcOldName : src['src_name'] = srcNewName
def renfsource(self, frcOldName, frcNewName):
for frc in self._folsources:
if frc['fsrc_name'] == frcOldName : frc['fsrc_name'] = frcNewName
def ren_eps(self, src_id, oldname, newname):
for itm in self._episodes:
if itm['src_id'] == src_id and itm['new_name'] == oldname : itm['new_name'] = newname; break
### Set updateble flags ...
def set_upd(self, fcrNames, scrNames):
for src in self._sources:
src['src_upd'] = True if src['src_name'] in scrNames else False
for frc in self._folsources:
frc['fsrc_upd'] = True if frc['fsrc_name'] in fcrNames else False
def reset_inum(self, frcLink, frcInum):
for frc in self._folsources:
if frc['fsrc_link'] == frcLink : frc['fsrc_inum'] = frcInum; break
### Import and export tvs.pack
def dimport(self):
self.packed_data = DOS.file(self._file_name, self.lib_path, fType=FRead)
if self.packed_data == -1: self.packed_data = Empty
self.packed_data = self.packed_data.replace(CR, Empty)
self._unpack_by_version()
    def dexport(self):
        """Serialize current state and write it back to the pack file."""
        self._pack()
        # A completed export releases the season-decrease lock set by
        # update_season().
        self._inc_lock = False
        DOS.file(self._file_name, self.lib_path, self.packed_data, FWrite)
### Pack and unpack TV Show data ...
def _pack(self):
lst = [self._sepLST.join([itm['src_name'], itm['src_link'], str(itm['src_id']), str(itm['src_upd']), itm['src_season'], str(itm['src_numb']), str(itm['src_folmode'])]) for itm in self._sources]
src = self._sepSRC.join(lst)
lst = [self._sepLST.join([itm['fsrc_name'], itm['fsrc_link'], str(itm['fsrc_inum']), str(itm['fsrc_upd'])]) for itm in self._folsources]
frc = self._sepFRC.join(lst)
lst = [self._sepLST.join([itm['original_name'], itm['new_name'], itm['link'], str(itm['src_id']), str(itm['season'])]) for itm in self._episodes]
eps = self._sepEPS.join(lst)
self.packed_data = self._sepVER.join([self._PACK_VERS, self._sepPRT.join([src, frc, eps, str(self.seq)])])
### Unpack by version ...
def _unpack_by_version (self):
try:
pVers, pData = (self.packed_data.split(self._sepVER))
if pVers == '10013' : self._unpack10013(pData)
if pVers == '10015' : self._unpack10015(pData)
except:
self._unpack()
    ### Unpacker versions ...
    def _unpack10015(self, pData):
        """Parse a version-10015 payload (episodes carry a season field)."""
        if not pData: return
        self.packed_data = pData
        # Payload layout: sources | folder sources | episodes | seq counter.
        src, frc, eps, seq = (self.packed_data.split(self._sepPRT))
        # Each record is one _sepLST-joined field list per entry.
        if src: self._sources = [{'src_name':itm1, 'src_link':itm2, 'src_id':int(itm3), 'src_upd':sbool(itm4), 'src_season':itm5, 'src_numb':int(itm6), 'src_folmode':sbool(itm7)} for itm in src.split(self._sepSRC) for itm1, itm2, itm3, itm4, itm5, itm6, itm7 in [itm.split(self._sepLST)]]
        if frc: self._folsources = [{'fsrc_name':itm1, 'fsrc_link':itm2, 'fsrc_inum':int(itm3), 'fsrc_upd':sbool(itm4)} for itm in frc.split(self._sepFRC) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]
        if eps: self._episodes = [{'original_name':itm1, 'new_name':itm2, 'link':itm3, 'src_id':int(itm4), 'season':itm5} for itm in eps.split(self._sepEPS) for itm1, itm2, itm3, itm4, itm5 in [itm.split(self._sepLST)]]
        self.seq = int(seq)
        # Raw text is no longer needed once parsed.
        self.packed_data = Empty
    def _unpack10013(self, pData):
        """Parse a version-10013 payload (episodes have no season field --
        season defaults to Empty)."""
        if not pData: return
        self.packed_data = pData
        # Payload layout: sources | folder sources | episodes | seq counter.
        src, frc, eps, seq = (self.packed_data.split(self._sepPRT))
        if src: self._sources = [{'src_name':itm1, 'src_link':itm2, 'src_id':int(itm3), 'src_upd':sbool(itm4), 'src_season':itm5, 'src_numb':int(itm6), 'src_folmode':sbool(itm7)} for itm in src.split(self._sepSRC) for itm1, itm2, itm3, itm4, itm5, itm6, itm7 in [itm.split(self._sepLST)]]
        if frc: self._folsources = [{'fsrc_name':itm1, 'fsrc_link':itm2, 'fsrc_inum':int(itm3), 'fsrc_upd':sbool(itm4)} for itm in frc.split(self._sepFRC) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]
        if eps: self._episodes = [{'original_name':itm1, 'new_name':itm2, 'link':itm3, 'src_id':int(itm4), 'season':Empty} for itm in eps.split(self._sepEPS) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]
        self.seq = int(seq)
        self.packed_data = Empty
    def _unpack(self):
        """Parse a legacy (unversioned) payload: sources have no folmode
        field (defaults to False) and episodes have no season field."""
        if not self.packed_data: return
        # Payload layout: sources | folder sources | episodes | seq counter.
        src, frc, eps, seq = (self.packed_data.split(self._sepPRT))
        if src: self._sources = [{'src_name':itm1, 'src_link':itm2, 'src_id':int(itm3), 'src_upd':sbool(itm4), 'src_season':itm5, 'src_numb':int(itm6), 'src_folmode':False} for itm in src.split(self._sepSRC) for itm1, itm2, itm3, itm4, itm5, itm6 in [itm.split(self._sepLST)]]
        if frc: self._folsources = [{'fsrc_name':itm1, 'fsrc_link':itm2, 'fsrc_inum':int(itm3), 'fsrc_upd':sbool(itm4)} for itm in frc.split(self._sepFRC) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]
        if eps: self._episodes = [{'original_name':itm1, 'new_name':itm2, 'link':itm3, 'src_id':int(itm4), 'season':Empty} for itm in eps.split(self._sepEPS) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]
        self.seq = int(seq)
        self.packed_data = Empty
    ### Get source list with new episodes ...
    def check_new_eps(self, message=Empty, globp=None, globmsg=Empty):
        """Return (src_names, src_links, frc_names, frc_links) of updatable
        sources whose on-disk item count exceeds the locally known count.

        :param message: caption for the progress dialog (own-dialog mode)
        :param globp: optional shared CProgress to step instead of opening
            a new dialog
        :param globmsg: caption override used with a shared progress
        """
        if globp is None :
            # Own progress dialog: one unit per source checked.
            progress = CProgress(len(self._sources)+len(self._folsources), bg=BGPROCESS)
            progress.show(message)
            stepv = 1
        else :
            # Shared progress: convert our share into percentage steps.
            progress = globp
            slen = len(self._sources)+len(self._folsources)
            stepv = 100.0 / slen if slen else 100
        # Refresh the raw scan list before counting (defined past this view).
        self.os_getraw()
        srcListNames = []
        srcListLinks = []
        rawlinklist = self.get_raw_link_list()
        for src in self._sources:
            progress.step(src['src_name'] if not globmsg else globmsg, stepv)
            if not src['src_upd']: continue
            if src['src_link'] in rawlinklist:
                # Raw-scanned source: count both DOS.listdir tuple members
                # (presumably dirs + files -- confirm ext.DOS.listdir) and
                # compare with raw entries recorded for this link.
                ld = DOS.listdir(src['src_link'])
                srcItmNum = len(ld[0] + ld[1])
                locEpsNum = len([itm[1] for itm in self._rawlist if itm[0] == src['src_link']])
            else:
                # Plain source: compare the on-disk file count with the
                # episodes imported for this source id.
                srcItmNum = len(DOS.listdir(src['src_link'])[1])
                locEpsNum = len([eps['original_name'] for eps in self._episodes if eps['src_id'] == src['src_id']])
            if srcItmNum > locEpsNum and srcItmNum != 0:
                srcListNames.append(src['src_name'])
                srcListLinks.append(src['src_link'])
        frcListNames = []
        frcListLinks = []
        for frc in self._folsources:
            progress.step(frc['fsrc_name'] if not globmsg else globmsg, stepv)
            if not frc['fsrc_upd']: continue
            # Folder sources track their own last-seen item count.
            frcItmNum = len(DOS.listdir(frc['fsrc_link'])[1])
            folNum = frc['fsrc_inum']
            if frcItmNum > folNum and frcItmNum != 0:
                frcListNames.append(frc['fsrc_name'])
                frcListLinks.append(frc['fsrc_link'])
        # Only dispose the dialog we created ourselves.
        if globp is None : del progress
        return srcListNames, srcListLinks, frcListNames, frcListLinks
### OS ...
def os_clear(self):
DOS.remove(self.lib_path, False)
def os_delete(self):
DOS.remove(self.lib_path)
def os_rename(self, newName):
self.lib_name = newName
newPathName = DOS.join(DOS.gettail(self.lib_path), newName)
DOS.rename(self.lib_path, newPathName)
self.lib_path = newPathName
def os_exclude_src(self, link, dexport=True, season=Empty, remove_src=True):
src_id = self.get_src_id(link)
for eps in self._episodes:
if season and season != eps['season'] : continue
if eps['src_id'] == src_id : DOS.delf(DOS.join(self.lib_path, eps['new_name']+STRM))
self.exclude_source_data(link, season=season)
if remove_src : self.exclude_source(src_id)
if dexport : self.dexport()
def os_exclude_src_rest(self, src_link, prefix):
self.os_clear()
self.exclude_source(self.exclude_source_data(src_link))
self.os_create(prefix)
def os_create(self, prefix, overwrite=False):
DOS.mkdirs(self.lib_path)
lEpisodes = self.get_eps_names_and_links()
for eps in lEpisodes: self._os_create_strm(eps, self.lib_path, lEpisodes[eps], overwrite, prefix)
self.dexport()
def _os_create_strm(self, fName, fPath, Link, Overwrite, prefix):
svLink = prefix % (DOS.join(DOS.getdir(fPath), fName + STRM)) + Link if prefix else Link
DOS.file(fName + STRM, fPath, svLink, fRew = Overwrite)
def os_addraw(self, link, itmlist):
rawepslist = [itm[1] for itm in self._rawlist]
for itm in itmlist :
if itm not in rawepslist : self._rawlist.append([link, itm])
lined = []
for itm in self._rawlist : lined.append(itm[0] + self._sepLST + itm[1])
rawdata = self._sepEPS.join(lined)
DOS.file(TAG_PAR_TVSRAWFILE, self.lib_path, rawdata, fRew = True)
del rawepslist, lined
def os_getraw (self):
unpraw = DOS.file(TAG_PAR_TVSRAWFILE, self.lib_path, fType=FRead)
if unpraw == -1 : return
lined = unpraw.split(self._sepEPS)
self._rawlist = []
for itm in lined : self._rawlist.append(itm.split(self._sepLST))
def os_rename_eps(self, src_id, newname, oldname, prefix):
#DOS.delf(DOS.join(self.lib_path, oldname) + STRM)
#self._os_create_strm(newname, self.lib_path, link, True, prefix)
DOS.rename(DOS.join(self.lib_path, oldname) + STRM, DOS.join(self.lib_path, newname) + STRM)
self.ren_eps(src_id, oldname, newname)
self.dexport()
def os_remove_eps(self, src_id, eps_name):
DOS.delf(DOS.join(self.lib_path, eps_name) + STRM)
self.remove_episode(src_id, eps_name)
self.dexport()
class CLinkTable:
def __init__(self, fName, fPath, load=True):
self._sepLST = TAG_PAR_TVSPACK_LSEP
self._sepSRC = TAG_PAR_TVSPACK_SSEP + NewLine
self._unp_table = Empty
self._table = []
self._file_name = fName
self._file_path = fPath
if load : self._load_table()
def _load_table(self):
self._unp_table = DOS.file(self._file_name, self._file_path, fType=FRead)
if self._unp_table == -1: self._unp_table = Empty
self._unp_table = self._unp_table.replace(CR, Empty)
self._unpack()
def _unpack(self):
self._table = []
if self._unp_table: self._table = [{'stl_path':itm1, 'stl_link':itm2} for itm in self._unp_table.split(self._sepSRC) for itm1, itm2 in [itm.split(self._sepLST)]]
self._unp_table = Empty
def _pack(self):
src = [self._sepLST.join([itm['stl_path'], itm['stl_link']]) for itm in self._table]
self._unp_table = self._sepSRC.join(src)
def _save_table(self):
self._pack()
DOS.file(self._file_name, self._file_path, self._unp_table, fRew = True)
def find(self, link):
if not link : return Empty
for itm in self._table:
if itm['stl_link'] == link: return itm['stl_path']
return Empty
def add(self, path, link, save=True):
self._add(path, path, False)
self._add(path, link, True)
def _add(self, path, link, save):
if link not in [itm['stl_link'] for itm in self._table]:
self._table.append({'stl_path': path, 'stl_link': link})
if save: self._save_table()
def remove(self, link, save=True):
for itm in self._table:
if itm['stl_link'] == link:
self._table.remove(itm)
if save: self._save_table()
break
def exclude(self, path, save=True):
self._table = [itm for itm in self._table if itm['stl_path'] != path]
if save: self._save_table()
def chpath(self, oldPath, newPath, save=True):
self._chlink(oldPath, newPath, False)
self._chpath(oldPath, newPath, True)
def _chpath(self, oldPath, newPath, save=True):
for itm in self._table:
if itm['stl_path'] == oldPath: itm['stl_path'] = newPath
if save: self._save_table()
def _chlink(self, oldLink, newLink, save=True):
for itm in self._table:
if itm['stl_link'] == oldLink: itm['stl_link'] = newLink
if save: self._save_table()
def save(self):
self._save_table()
def load(self):
self._load_table()
| [
"# -*- coding: utf-8 -*-\n",
"#\n",
"# Copyright (C) 2016 Taifxx\n",
"#\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"#\n",
"########## CTVS:\n",
"\n",
"### Import modules ...\n",
"from ext import *\n",
"\n",
"\n",
"BGPROCESS = True\n",
"\n",
"##### TVS Object ...\n",
"class TVS:\n",
" \n",
" def __init__(self, def_file_name, def_file_path=Empty, Import=False):\n",
" ## Pack file separators ...\n",
" self._sepLST = TAG_PAR_TVSPACK_LSEP\n",
" self._sepSRC = TAG_PAR_TVSPACK_SSEP + NewLine\n",
" self._sepFRC = TAG_PAR_TVSPACK_FSEP + NewLine\n",
" self._sepEPS = TAG_PAR_TVSPACK_ESEP + NewLine\n",
" self._sepPRT = TAG_PAR_TVSPACK_PSEP + NewLine\n",
" \n",
" self._sepVER = TAG_PAR_TVSPACK_VERSEP + NewLine\n",
" self._PACK_VERS = TAG_PAR_TVSPACK_VERSION\n",
" \n",
" ## Define TVS ...\n",
" self._define(def_file_name, def_file_path, Import)\n",
" \n",
" \n",
" def _define(self, def_file_name, def_file_path, Import):\n",
" ## Set default param ...\n",
" self.clear()\n",
" self._file_name = def_file_name\n",
" self.lib_path = def_file_path\n",
" self.lib_name = DOS.getdir(def_file_path) if def_file_path and Import else Empty \n",
" self.lib_defname = DOS.getdir(def_file_path) if def_file_path else Empty\n",
" \n",
" ## Import data on initialize ...\n",
" if Import : self.dimport()\n",
" \n",
" ### Clear TV Show ...\n",
" def clear(self):\n",
" self.lib_path = Empty\n",
" self.packed_data = Empty\n",
" self._file_name = Empty\n",
" self._episodes = []\n",
" self._sources = []\n",
" self._folsources = []\n",
" self._rawlist = []\n",
" self.seq = 0\n",
" self._sn_add = 1\n",
" self._inc_lock = False\n",
" \n",
" ### Inside append ... \n",
" def _append(self, template, mark, var, appdict):\n",
" if template not in [itm[mark] for itm in var]:\n",
" var.append(appdict)\n",
" return True\n",
" return False\n",
"\n",
" ### Append ... \n",
" def append_episode(self, original_name, new_name, link, src_id, season=Empty):\n",
" if self._append(original_name, 'original_name', self._episodes, {'original_name':original_name, 'new_name':new_name, 'link':link, 'src_id':src_id, 'season':season}):\n",
" for src in self._sources:\n",
" if src['src_id'] == src_id and not self._inc_lock : src['src_numb'] += self._sn_add; self._sn_add = 1; break\n",
" if self.seq : self.seq += 1\n",
" \n",
" def update_season(self, src_link, src_season, src_numb, src_name):\n",
" for src in self._sources : \n",
" if src_link == src['src_link']: \n",
" if inte(src_season) >= inte(src['src_season']):\n",
" src['src_season'] = src_season \n",
" src['src_numb'] = src_numb \n",
" src['src_name'] = src_name\n",
" else:\n",
" self._inc_lock = True\n",
" \n",
" def append_fsource(self, fsrc_name, fsrc_link, fsrc_inum, fsrc_upd=True):\n",
" self._append(fsrc_link, 'fsrc_link', self._folsources, {'fsrc_name':getUniqname(fsrc_name, [itm['fsrc_name'] for itm in self._folsources]), 'fsrc_link':fsrc_link, 'fsrc_inum':fsrc_inum, 'fsrc_upd':fsrc_upd})\n",
" \n",
" def append_source(self, src_name, src_link, src_season=Empty, src_upd=True, src_folmode=False, src_numb=0):\n",
" src_id = self.get_src_id(src_link)\n",
" if not self._append(src_link, 'src_link', self._sources, {'src_name':getUniqname(src_name, [itm['src_name'] for itm in self._sources]), 'src_link':src_link, 'src_id':src_id, 'src_upd':src_upd, 'src_season':src_season, 'src_numb':src_numb, 'src_folmode':src_folmode}):\n",
" #self.update_season(src_link, src_season, src_numb, getUniqname(src_name, [itm['src_name'] for itm in self._sources]))\n",
" self.update_season(src_link, src_season, src_numb, src_name)\n",
" return src_id \n",
" \n",
" def incSeq(self):\n",
" self.seq += 1\n",
" \n",
" def incSN(self):\n",
" self._sn_add += 1\n",
" \n",
" ### Exclude ...\n",
" def _exclude(self, value, mark, var, skipvalue=Empty, skipmark=Empty): \n",
" return [itm for itm in var if value != itm[mark] or (skipvalue and itm[skipmark] != skipvalue)]\n",
" \n",
" def exclude_source(self, src_id):\n",
" self._sources = self._exclude(src_id, 'src_id', self._sources)\n",
" \n",
" def exclude_episodes(self, src_id, season=Empty):\n",
" self._episodes = self._exclude(src_id, 'src_id', self._episodes, season, 'season')\n",
" \n",
" def exclude_source_data(self, src_link, season=Empty):\n",
" src_id = self.get_src_id(src_link)\n",
" self.exclude_episodes(src_id, season=season)\n",
" return src_id\n",
" \n",
" def exclude_folsource(self, frc_link):\n",
" self._folsources = self._exclude(frc_link, 'fsrc_link', self._folsources)\n",
" \n",
" def remove_episode(self, src_id, eps_name):\n",
" for eps in self._episodes:\n",
" if eps['src_id'] == src_id and eps['new_name'] == eps_name : self._episodes.remove(eps) \n",
" \n",
" ### Get ... \n",
" def get_multiseason_list(self, src_link):\n",
" src_id = self.get_src_id(src_link)\n",
" seasons = []\n",
" for eps in self._episodes:\n",
" if eps['src_id'] == src_id and eps['season'] not in seasons : seasons.append(eps['season'])\n",
" return seasons \n",
" \n",
" def get_eps_names_and_links(self):\n",
" return {eps['new_name']: eps['link'] for eps in self._episodes}\n",
" \n",
" def get_eps_names_and_links_forsrc(self, src_link):\n",
" src_id = self.get_src_id(src_link)\n",
" return [eps['new_name'] for eps in self._episodes if eps['src_id'] == src_id], [eps['link'] for eps in self._episodes if eps['src_id'] == src_id] \n",
" \n",
" def get_names_and_links(self):\n",
" return [src['src_name'] for src in self._sources], [src['src_link'] for src in self._sources], \\\n",
" [frc['fsrc_name'] for frc in self._folsources], [frc['fsrc_link'] for frc in self._folsources]\n",
" \n",
" def get_src_id(self, src_link):\n",
" for src in self._sources : \n",
" if src_link == src['src_link'] : return src['src_id']\n",
" \n",
" cidx = 1 \n",
" seq = [src['src_id'] for src in self._sources]\n",
" for idx in range(len(seq)+1)[1:] : \n",
" if cidx not in seq : return cidx\n",
" cidx = idx+1\n",
" return cidx\n",
" \n",
" def get_src_name_by_link(self, link):\n",
" for itm in self._sources:\n",
" if itm['src_link'] == link : return itm['src_name']\n",
" return Empty\n",
" \n",
" def get_eps_name_by_link(self, link):\n",
" for itm in self._episodes:\n",
" if itm['link'] == link : return itm['new_name']\n",
" return Empty \n",
" \n",
" def get_direct(self):\n",
" return (self._episodes, self._folsources, self._sources)\n",
" \n",
" def get_eps_count(self):\n",
" return len(self._episodes)\n",
" \n",
" def get_upd(self):\n",
" updListS = [src['src_name'] for src in self._sources if src['src_upd']]\n",
" updListF = [frc['fsrc_name'] for frc in self._folsources if frc['fsrc_upd']]\n",
" return updListF, updListS\n",
" \n",
" def get_frc_names_and_links(self):\n",
" return ([frc['fsrc_name'] for frc in self._folsources], [frc['fsrc_link'] for frc in self._folsources])\n",
" \n",
" def get_scr_numb_and_season(self, link):\n",
" for itm in self._sources:\n",
" if itm['src_link'] == link : return itm['src_season'], itm['src_numb']\n",
" return Empty, 0\n",
" \n",
" def get_scr_numb_season_mode(self, link):\n",
" for itm in self._sources:\n",
" if itm['src_link'] == link : return itm['src_season'], itm['src_numb'], itm['src_folmode'] \n",
" return Empty, 0, False\n",
" \n",
" def get_raw_link_list(self):\n",
" return [itm[0] for itm in self._rawlist]\n",
" \n",
" def get_raw_eps(self):\n",
" return [itm[1] for itm in self._rawlist]\n",
" \n",
" ### Add target TVS to current TVS ...\n",
" def join_tvs(self, TVS):\n",
" srcId = dict()\n",
" epsExt, frcExt, srcExt = TVS.get_direct()\n",
" for src in srcExt:\n",
" scrOldId = src['src_id']\n",
" scrNewId = self.append_source(src['src_name'], src['src_link'], src['src_season'], src['src_upd'])\n",
" srcId.update({scrOldId: scrNewId})\n",
" \n",
" for frc in frcExt:\n",
" self.append_fsource(frc['fsrc_name'], frc['fsrc_link'], frc['fsrc_inum'], frc['fsrc_upd'])\n",
" \n",
" for eps in epsExt:\n",
" self.append_episode(eps['original_name'], eps['new_name'], eps['link'], srcId[eps['src_id']])\n",
" \n",
" ### Rename ... \n",
" def rensource(self, srcOldName, srcNewName):\n",
" for src in self._sources:\n",
" if src['src_name'] == srcOldName : src['src_name'] = srcNewName \n",
" \n",
" def renfsource(self, frcOldName, frcNewName):\n",
" for frc in self._folsources:\n",
" if frc['fsrc_name'] == frcOldName : frc['fsrc_name'] = frcNewName\n",
" \n",
" def ren_eps(self, src_id, oldname, newname):\n",
" for itm in self._episodes:\n",
" if itm['src_id'] == src_id and itm['new_name'] == oldname : itm['new_name'] = newname; break \n",
" \n",
" ### Set updateble flags ...\n",
" def set_upd(self, fcrNames, scrNames):\n",
" for src in self._sources:\n",
" src['src_upd'] = True if src['src_name'] in scrNames else False\n",
" \n",
" for frc in self._folsources:\n",
" frc['fsrc_upd'] = True if frc['fsrc_name'] in fcrNames else False \n",
" \n",
" def reset_inum(self, frcLink, frcInum):\n",
" for frc in self._folsources:\n",
" if frc['fsrc_link'] == frcLink : frc['fsrc_inum'] = frcInum; break \n",
" \n",
" ### Import and export tvs.pack \n",
" def dimport(self):\n",
" self.packed_data = DOS.file(self._file_name, self.lib_path, fType=FRead) \n",
" if self.packed_data == -1: self.packed_data = Empty\n",
" self.packed_data = self.packed_data.replace(CR, Empty)\n",
" self._unpack_by_version()\n",
" \n",
" def dexport(self):\n",
" self._pack()\n",
" self._inc_lock = False\n",
" DOS.file(self._file_name, self.lib_path, self.packed_data, FWrite)\n",
" \n",
" ### Pack and unpack TV Show data ... \n",
" def _pack(self):\n",
" lst = [self._sepLST.join([itm['src_name'], itm['src_link'], str(itm['src_id']), str(itm['src_upd']), itm['src_season'], str(itm['src_numb']), str(itm['src_folmode'])]) for itm in self._sources]\n",
" src = self._sepSRC.join(lst)\n",
" \n",
" lst = [self._sepLST.join([itm['fsrc_name'], itm['fsrc_link'], str(itm['fsrc_inum']), str(itm['fsrc_upd'])]) for itm in self._folsources]\n",
" frc = self._sepFRC.join(lst)\n",
" \n",
" lst = [self._sepLST.join([itm['original_name'], itm['new_name'], itm['link'], str(itm['src_id']), str(itm['season'])]) for itm in self._episodes]\n",
" eps = self._sepEPS.join(lst)\n",
" \n",
" self.packed_data = self._sepVER.join([self._PACK_VERS, self._sepPRT.join([src, frc, eps, str(self.seq)])]) \n",
" \n",
" ### Unpack by version ...\n",
" def _unpack_by_version (self):\n",
" try:\n",
" pVers, pData = (self.packed_data.split(self._sepVER))\n",
" if pVers == '10013' : self._unpack10013(pData) \n",
" if pVers == '10015' : self._unpack10015(pData)\n",
" except: \n",
" self._unpack() \n",
"\n",
" ### Unpacker versions ...\n",
" def _unpack10015(self, pData):\n",
" if not pData: return\n",
" self.packed_data = pData\n",
" src, frc, eps, seq = (self.packed_data.split(self._sepPRT)) \n",
" if src: self._sources = [{'src_name':itm1, 'src_link':itm2, 'src_id':int(itm3), 'src_upd':sbool(itm4), 'src_season':itm5, 'src_numb':int(itm6), 'src_folmode':sbool(itm7)} for itm in src.split(self._sepSRC) for itm1, itm2, itm3, itm4, itm5, itm6, itm7 in [itm.split(self._sepLST)]]\n",
" if frc: self._folsources = [{'fsrc_name':itm1, 'fsrc_link':itm2, 'fsrc_inum':int(itm3), 'fsrc_upd':sbool(itm4)} for itm in frc.split(self._sepFRC) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]\n",
" if eps: self._episodes = [{'original_name':itm1, 'new_name':itm2, 'link':itm3, 'src_id':int(itm4), 'season':itm5} for itm in eps.split(self._sepEPS) for itm1, itm2, itm3, itm4, itm5 in [itm.split(self._sepLST)]]\n",
" self.seq = int(seq)\n",
" self.packed_data = Empty\n",
" \n",
" def _unpack10013(self, pData):\n",
" if not pData: return\n",
" self.packed_data = pData\n",
" src, frc, eps, seq = (self.packed_data.split(self._sepPRT)) \n",
" if src: self._sources = [{'src_name':itm1, 'src_link':itm2, 'src_id':int(itm3), 'src_upd':sbool(itm4), 'src_season':itm5, 'src_numb':int(itm6), 'src_folmode':sbool(itm7)} for itm in src.split(self._sepSRC) for itm1, itm2, itm3, itm4, itm5, itm6, itm7 in [itm.split(self._sepLST)]]\n",
" if frc: self._folsources = [{'fsrc_name':itm1, 'fsrc_link':itm2, 'fsrc_inum':int(itm3), 'fsrc_upd':sbool(itm4)} for itm in frc.split(self._sepFRC) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]\n",
" if eps: self._episodes = [{'original_name':itm1, 'new_name':itm2, 'link':itm3, 'src_id':int(itm4), 'season':Empty} for itm in eps.split(self._sepEPS) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]\n",
" self.seq = int(seq)\n",
" self.packed_data = Empty\n",
" \n",
" def _unpack(self):\n",
" if not self.packed_data: return\n",
" src, frc, eps, seq = (self.packed_data.split(self._sepPRT)) \n",
" if src: self._sources = [{'src_name':itm1, 'src_link':itm2, 'src_id':int(itm3), 'src_upd':sbool(itm4), 'src_season':itm5, 'src_numb':int(itm6), 'src_folmode':False} for itm in src.split(self._sepSRC) for itm1, itm2, itm3, itm4, itm5, itm6 in [itm.split(self._sepLST)]]\n",
" if frc: self._folsources = [{'fsrc_name':itm1, 'fsrc_link':itm2, 'fsrc_inum':int(itm3), 'fsrc_upd':sbool(itm4)} for itm in frc.split(self._sepFRC) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]\n",
" if eps: self._episodes = [{'original_name':itm1, 'new_name':itm2, 'link':itm3, 'src_id':int(itm4), 'season':Empty} for itm in eps.split(self._sepEPS) for itm1, itm2, itm3, itm4 in [itm.split(self._sepLST)]]\n",
" self.seq = int(seq)\n",
" self.packed_data = Empty\n",
" \n",
" ### Get source list with new episodes ...\n",
" def check_new_eps(self, message=Empty, globp=None, globmsg=Empty):\n",
" if globp is None : \n",
" progress = CProgress(len(self._sources)+len(self._folsources), bg=BGPROCESS)\n",
" progress.show(message)\n",
" stepv = 1\n",
" else : \n",
" progress = globp \n",
" slen = len(self._sources)+len(self._folsources)\n",
" stepv = 100.0 / slen if slen else 100\n",
" \n",
" self.os_getraw()\n",
" \n",
" srcListNames = []\n",
" srcListLinks = []\n",
" rawlinklist = self.get_raw_link_list()\n",
" for src in self._sources:\n",
" progress.step(src['src_name'] if not globmsg else globmsg, stepv)\n",
" if not src['src_upd']: continue\n",
" if src['src_link'] in rawlinklist: \n",
" ld = DOS.listdir(src['src_link'])\n",
" srcItmNum = len(ld[0] + ld[1])\n",
" locEpsNum = len([itm[1] for itm in self._rawlist if itm[0] == src['src_link']])\n",
" else:\n",
" srcItmNum = len(DOS.listdir(src['src_link'])[1])\n",
" locEpsNum = len([eps['original_name'] for eps in self._episodes if eps['src_id'] == src['src_id']])\n",
" \n",
" if srcItmNum > locEpsNum and srcItmNum != 0: \n",
" srcListNames.append(src['src_name'])\n",
" srcListLinks.append(src['src_link'])\n",
" \n",
" frcListNames = []\n",
" frcListLinks = []\n",
" for frc in self._folsources:\n",
" progress.step(frc['fsrc_name'] if not globmsg else globmsg, stepv)\n",
" if not frc['fsrc_upd']: continue\n",
" frcItmNum = len(DOS.listdir(frc['fsrc_link'])[1])\n",
" folNum = frc['fsrc_inum']\n",
" if frcItmNum > folNum and frcItmNum != 0: \n",
" frcListNames.append(frc['fsrc_name'])\n",
" frcListLinks.append(frc['fsrc_link'])\n",
" \n",
" if globp is None : del progress\n",
" \n",
" return srcListNames, srcListLinks, frcListNames, frcListLinks \n",
" \n",
" \n",
" ### OS ...\n",
" def os_clear(self):\n",
" DOS.remove(self.lib_path, False)\n",
" \n",
" def os_delete(self):\n",
" DOS.remove(self.lib_path)\n",
" \n",
" def os_rename(self, newName):\n",
" self.lib_name = newName\n",
" newPathName = DOS.join(DOS.gettail(self.lib_path), newName)\n",
" DOS.rename(self.lib_path, newPathName)\n",
" self.lib_path = newPathName\n",
" \n",
" def os_exclude_src(self, link, dexport=True, season=Empty, remove_src=True):\n",
" src_id = self.get_src_id(link)\n",
" for eps in self._episodes:\n",
" if season and season != eps['season'] : continue \n",
" if eps['src_id'] == src_id : DOS.delf(DOS.join(self.lib_path, eps['new_name']+STRM)) \n",
" self.exclude_source_data(link, season=season)\n",
" if remove_src : self.exclude_source(src_id)\n",
" if dexport : self.dexport()\n",
" \n",
" def os_exclude_src_rest(self, src_link, prefix):\n",
" self.os_clear()\n",
" self.exclude_source(self.exclude_source_data(src_link))\n",
" self.os_create(prefix)\n",
" \n",
" def os_create(self, prefix, overwrite=False):\n",
" DOS.mkdirs(self.lib_path)\n",
" lEpisodes = self.get_eps_names_and_links()\n",
" for eps in lEpisodes: self._os_create_strm(eps, self.lib_path, lEpisodes[eps], overwrite, prefix)\n",
" self.dexport()\n",
" \n",
" def _os_create_strm(self, fName, fPath, Link, Overwrite, prefix):\n",
" svLink = prefix % (DOS.join(DOS.getdir(fPath), fName + STRM)) + Link if prefix else Link \n",
" DOS.file(fName + STRM, fPath, svLink, fRew = Overwrite)\n",
" \n",
" def os_addraw(self, link, itmlist):\n",
" rawepslist = [itm[1] for itm in self._rawlist] \n",
" for itm in itmlist : \n",
" if itm not in rawepslist : self._rawlist.append([link, itm])\n",
" \n",
" lined = []\n",
" for itm in self._rawlist : lined.append(itm[0] + self._sepLST + itm[1])\n",
" rawdata = self._sepEPS.join(lined)\n",
" DOS.file(TAG_PAR_TVSRAWFILE, self.lib_path, rawdata, fRew = True)\n",
" del rawepslist, lined \n",
" \n",
" def os_getraw (self):\n",
" unpraw = DOS.file(TAG_PAR_TVSRAWFILE, self.lib_path, fType=FRead)\n",
" if unpraw == -1 : return \n",
" lined = unpraw.split(self._sepEPS)\n",
" self._rawlist = []\n",
" for itm in lined : self._rawlist.append(itm.split(self._sepLST)) \n",
" \n",
" def os_rename_eps(self, src_id, newname, oldname, prefix):\n",
" #DOS.delf(DOS.join(self.lib_path, oldname) + STRM)\n",
" #self._os_create_strm(newname, self.lib_path, link, True, prefix)\n",
" DOS.rename(DOS.join(self.lib_path, oldname) + STRM, DOS.join(self.lib_path, newname) + STRM)\n",
" self.ren_eps(src_id, oldname, newname)\n",
" self.dexport()\n",
" \n",
" def os_remove_eps(self, src_id, eps_name):\n",
" DOS.delf(DOS.join(self.lib_path, eps_name) + STRM)\n",
" self.remove_episode(src_id, eps_name)\n",
" self.dexport() \n",
" \n",
"\n",
"class CLinkTable:\n",
" \n",
" def __init__(self, fName, fPath, load=True):\n",
" self._sepLST = TAG_PAR_TVSPACK_LSEP\n",
" self._sepSRC = TAG_PAR_TVSPACK_SSEP + NewLine\n",
" \n",
" self._unp_table = Empty\n",
" self._table = []\n",
" \n",
" self._file_name = fName\n",
" self._file_path = fPath\n",
" \n",
" if load : self._load_table()\n",
" \n",
" def _load_table(self):\n",
" self._unp_table = DOS.file(self._file_name, self._file_path, fType=FRead)\n",
" if self._unp_table == -1: self._unp_table = Empty\n",
" self._unp_table = self._unp_table.replace(CR, Empty)\n",
" self._unpack()\n",
" \n",
" def _unpack(self):\n",
" self._table = []\n",
" if self._unp_table: self._table = [{'stl_path':itm1, 'stl_link':itm2} for itm in self._unp_table.split(self._sepSRC) for itm1, itm2 in [itm.split(self._sepLST)]]\n",
" self._unp_table = Empty\n",
" \n",
" def _pack(self):\n",
" src = [self._sepLST.join([itm['stl_path'], itm['stl_link']]) for itm in self._table]\n",
" self._unp_table = self._sepSRC.join(src)\n",
" \n",
" def _save_table(self):\n",
" self._pack()\n",
" DOS.file(self._file_name, self._file_path, self._unp_table, fRew = True)\n",
" \n",
" def find(self, link):\n",
" if not link : return Empty\n",
" for itm in self._table:\n",
" if itm['stl_link'] == link: return itm['stl_path']\n",
" return Empty\n",
" \n",
" def add(self, path, link, save=True):\n",
" self._add(path, path, False)\n",
" self._add(path, link, True)\n",
" \n",
" def _add(self, path, link, save):\n",
" if link not in [itm['stl_link'] for itm in self._table]:\n",
" self._table.append({'stl_path': path, 'stl_link': link})\n",
" if save: self._save_table()\n",
" \n",
" def remove(self, link, save=True):\n",
" for itm in self._table:\n",
" if itm['stl_link'] == link: \n",
" self._table.remove(itm)\n",
" if save: self._save_table()\n",
" break \n",
"\n",
" def exclude(self, path, save=True):\n",
" self._table = [itm for itm in self._table if itm['stl_path'] != path]\n",
" if save: self._save_table()\n",
" \n",
" def chpath(self, oldPath, newPath, save=True):\n",
" self._chlink(oldPath, newPath, False)\n",
" self._chpath(oldPath, newPath, True)\n",
" \n",
" def _chpath(self, oldPath, newPath, save=True):\n",
" for itm in self._table:\n",
" if itm['stl_path'] == oldPath: itm['stl_path'] = newPath\n",
" if save: self._save_table()\n",
" \n",
" def _chlink(self, oldLink, newLink, save=True):\n",
" for itm in self._table:\n",
" if itm['stl_link'] == oldLink: itm['stl_link'] = newLink\n",
" if save: self._save_table()\n",
" \n",
" def save(self):\n",
" self._save_table()\n",
" \n",
" def load(self):\n",
" self._load_table()\n",
" \n",
" "
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0.047619047619047616,
0.09090909090909091,
0.125,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0.038461538461538464,
0,
0.1111111111111111,
0.2,
0.01639344262295082,
0.030303030303030304,
0,
0.024390243902439025,
0.024390243902439025,
0.03225806451612903,
0.012345679012345678,
0.1111111111111111,
0.024390243902439025,
0.05714285714285714,
0.1111111111111111,
0.038461538461538464,
0,
0.030303030303030304,
0,
0.030303030303030304,
0.03333333333333333,
0.03333333333333333,
0,
0.03333333333333333,
0.034482758620689655,
0.034482758620689655,
0.030303030303030304,
0.2,
0.07407407407407407,
0,
0,
0,
0,
0,
0,
0.06451612903225806,
0.012048192771084338,
0.034482758620689655,
0,
0.04,
0.05,
0.07692307692307693,
0,
0.05555555555555555,
0.022222222222222223,
0,
0.018518518518518517,
0.04,
0.02040816326530612,
0,
0,
0.06666666666666667,
0,
0.023148148148148147,
0.2,
0.008928571428571428,
0,
0.028985507246376812,
0.015267175572519083,
0,
0.043478260869565216,
0.2,
0,
0,
0.2,
0,
0,
0.2,
0.05,
0.012987012987012988,
0.009615384615384616,
0.2,
0,
0,
0.2,
0,
0.01098901098901099,
0.2,
0,
0,
0,
0,
0.2,
0,
0.012195121951219513,
0.2,
0,
0,
0.05825242718446602,
0.2,
0.1111111111111111,
0,
0,
0,
0,
0.028846153846153848,
0.037037037037037035,
0.14285714285714285,
0,
0,
0.2,
0,
0,
0.012903225806451613,
0.1111111111111111,
0,
0.009523809523809525,
0.00909090909090909,
0.1111111111111111,
0,
0.05555555555555555,
0.030303030303030304,
0.1111111111111111,
0.047619047619047616,
0.017857142857142856,
0.045454545454545456,
0.044444444444444446,
0,
0,
0.09090909090909091,
0,
0,
0.03125,
0,
0.1111111111111111,
0,
0,
0.03333333333333333,
0.045454545454545456,
0.2,
0,
0,
0.2,
0,
0,
0.2,
0,
0,
0.011764705882352941,
0,
0.2,
0,
0.008928571428571428,
0.2,
0,
0,
0.03614457831325301,
0,
0.2,
0,
0,
0.038461538461538464,
0,
0.1111111111111111,
0,
0,
0.2,
0,
0,
0.2,
0.023809523809523808,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0.1111111111111111,
0,
0.009708737864077669,
0.07692307692307693,
0,
0.009433962264150943,
0.2,
0.1,
0,
0,
0.0379746835443038,
0.2,
0,
0,
0.02564102564102564,
0.2,
0,
0,
0.046296296296296294,
0.2,
0.03125,
0,
0,
0,
0.07692307692307693,
0,
0.012658227848101266,
0.2,
0,
0,
0.04819277108433735,
0.2,
0.05555555555555555,
0,
0.024390243902439025,
0.016666666666666666,
0,
0,
0.2,
0,
0,
0,
0,
0.2,
0.044444444444444446,
0,
0.0049504950495049506,
0.02631578947368421,
0.1111111111111111,
0.006896551724137931,
0.02631578947368421,
0.1111111111111111,
0.006493506493506494,
0.02631578947368421,
0.1111111111111111,
0.017241379310344827,
0.2,
0.03333333333333333,
0.02857142857142857,
0,
0,
0.05,
0.03389830508474576,
0.11764705882352941,
0.03125,
0,
0.03333333333333333,
0,
0.034482758620689655,
0,
0.02666666666666667,
0.03424657534246575,
0.02830188679245283,
0.036036036036036036,
0.022727272727272728,
0,
0.2,
0,
0.034482758620689655,
0,
0.02666666666666667,
0.03424657534246575,
0.02830188679245283,
0.03686635944700461,
0.022727272727272728,
0,
0.1111111111111111,
0,
0.025,
0.02666666666666667,
0.03571428571428571,
0.02830188679245283,
0.03686635944700461,
0.022727272727272728,
0,
0.2,
0.021739130434782608,
0,
0.07142857142857142,
0.011235955056179775,
0,
0,
0.10714285714285714,
0.03333333333333333,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0.037037037037037035,
0.037037037037037035,
0.02040816326530612,
0,
0,
0.022727272727272728,
0.020833333333333332,
0,
0,
0.010416666666666666,
0,
0,
0.008620689655172414,
0.058823529411764705,
0.017241379310344827,
0,
0,
0.1111111111111111,
0.037037037037037035,
0.037037037037037035,
0,
0,
0.022222222222222223,
0,
0.024390243902439025,
0.01818181818181818,
0,
0,
0.1111111111111111,
0.05,
0.058823529411764705,
0.013888888888888888,
0.1,
0.2,
0.13333333333333333,
0.041666666666666664,
0,
0.2,
0,
0,
0.2,
0,
0,
0,
0,
0,
0.2,
0.012345679012345678,
0,
0,
0.047619047619047616,
0.0392156862745098,
0,
0.038461538461538464,
0.05555555555555555,
0.2,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.018867924528301886,
0,
0.1111111111111111,
0,
0.019417475728155338,
0.03125,
0.1111111111111111,
0,
0.017857142857142856,
0.06666666666666667,
0.0273972602739726,
0.1111111111111111,
0.047619047619047616,
0.025,
0,
0.02702702702702703,
0.03225806451612903,
0.1111111111111111,
0.038461538461538464,
0,
0.08823529411764706,
0.022727272727272728,
0,
0.04054054054054054,
0.1111111111111111,
0,
0.01694915254237288,
0.013513513513513514,
0.009900990099009901,
0,
0,
0.1111111111111111,
0,
0,
0,
0.04,
0.1111111111111111,
0,
0,
0.2,
0,
0,
0,
0.1111111111111111,
0,
0.034482758620689655,
0.1111111111111111,
0,
0,
0.1111111111111111,
0.05405405405405406,
0.2,
0,
0.012195121951219513,
0.017241379310344827,
0,
0,
0.2,
0,
0,
0.023529411764705882,
0,
0.2,
0,
0.010752688172043012,
0,
0.2,
0,
0,
0.037037037037037035,
0.1111111111111111,
0,
0.05714285714285714,
0,
0.015873015873015872,
0,
0.2,
0,
0,
0,
0.2,
0,
0,
0,
0.025,
0.2,
0,
0,
0.024390243902439025,
0,
0.022727272727272728,
0.038461538461538464,
0,
0,
0,
0.027777777777777776,
0.2,
0,
0,
0,
0.2,
0,
0,
0.014492753623188406,
0.027777777777777776,
0.2,
0,
0,
0.014492753623188406,
0.027777777777777776,
0.2,
0,
0,
0.2,
0,
0,
0.2,
0.5
] | 498 | 0.041654 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")

from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Execution import *
from QuantConnect.Algorithm.Framework.Risk import *
from QuantConnect.Algorithm.Framework.Selection import *
from QuantConnect.Util import PythonUtil

import numpy as np
import pandas as pd
from scipy.optimize import minimize

from Alphas.HistoricalReturnsAlphaModel import *
from Portfolio.MeanVarianceOptimizationPortfolioConstructionModel import *
### <summary>
### Mean Variance Optimization algorithm
### Uses the HistoricalReturnsAlphaModel and the MeanVarianceOptimizationPortfolioConstructionModel
### to create an algorithm that rebalances the portfolio according to modern portfolio theory
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class MeanVarianceOptimizationAlgorithm(QCAlgorithmFramework):
    '''Mean Variance Optimization algorithm.

    Uses the HistoricalReturnsAlphaModel and the
    MeanVarianceOptimizationPortfolioConstructionModel to rebalance the
    portfolio according to modern portfolio theory (maximum Sharpe ratio).

    NOTE(review): this class uses np, pd and minimize, which were not
    imported anywhere in the file; they must be imported at module level
    (numpy, pandas, scipy.optimize.minimize).
    '''

    def Initialize(self):
        '''Set dates, cash, the universe and the framework models.'''
        # Set requested data resolution
        self.UniverseSettings.Resolution = Resolution.Minute

        self.SetStartDate(2013, 10, 7)    # Set Start Date
        self.SetEndDate(2013, 10, 11)     # Set End Date
        self.SetCash(100000)              # Set Strategy Cash

        self.symbols = [Symbol.Create(x, SecurityType.Equity, Market.USA)
                        for x in ['AIG', 'BAC', 'IBM', 'SPY']]

        # Per-security weight bounds; negative minimum allows shorting.
        self.minimum_weight = -1
        self.maximum_weight = 1

        # set algorithm framework models
        self.SetUniverseSelection(CoarseFundamentalUniverseSelectionModel(self.coarseSelector))
        self.SetAlpha(HistoricalReturnsAlphaModel(resolution = Resolution.Daily))
        self.SetPortfolioConstruction(MeanVarianceOptimizationPortfolioConstructionModel(optimization_method = self.maximum_sharpe_ratio))
        self.SetExecution(ImmediateExecutionModel())
        self.SetRiskManagement(NullRiskManagementModel())

    def coarseSelector(self, coarse):
        '''Universe selector: keeps all symbols, but drops SPY (the 4th) after the 8th.'''
        last = 3 if self.Time.day > 8 else len(self.symbols)
        return self.symbols[0:last]

    def OnOrderEvent(self, orderEvent):
        '''Log each filled order for debugging.'''
        if orderEvent.Status == OrderStatus.Filled:
            self.Debug(orderEvent.ToString())

    def maximum_sharpe_ratio(self, returns):
        '''Maximum Sharpe Ratio optimization method.

        returns -- pandas DataFrame of historical returns, one column per security.
        Returns (scipy OptimizeResult, weights as a pandas Series indexed by security).
        '''
        # Objective function: minimize the negative Sharpe ratio.
        fun = lambda weights: -self.sharpe_ratio(returns, weights)

        # Constraint #1: weights sum to one; they may be negative (shorting).
        constraints = [{'type': 'eq', 'fun': lambda w: np.sum(w) - 1}]

        size = returns.columns.size
        x0 = np.array(size * [1. / size])   # initial guess: equal weights
        bounds = tuple((self.minimum_weight, self.maximum_weight) for x in range(size))

        opt = minimize(fun,                       # Objective function
                       x0,                        # Initial guess
                       method='SLSQP',            # Sequential Least SQuares Programming
                       bounds = bounds,           # Bounds for variables
                       constraints = constraints) # Constraints definition

        weights = pd.Series(opt['x'], index = returns.columns)
        self.Log('{}:\n\r{}'.format(self.Time, weights))
        return opt, weights

    def sharpe_ratio(self, returns, weights):
        '''Expected portfolio return divided by portfolio volatility.'''
        annual_return = np.dot(np.matrix(returns.mean()), np.matrix(weights).T).item()
        annual_volatility = np.sqrt(np.dot(weights.T, np.dot(returns.cov(), weights)))
        return annual_return / annual_volatility
| [
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Orders import *\n",
"from QuantConnect.Algorithm import *\n",
"from QuantConnect.Algorithm.Framework import *\n",
"from QuantConnect.Algorithm.Framework.Execution import *\n",
"from QuantConnect.Algorithm.Framework.Risk import *\n",
"from QuantConnect.Algorithm.Framework.Selection import *\n",
"from Alphas.HistoricalReturnsAlphaModel import *\n",
"from Portfolio.MeanVarianceOptimizationPortfolioConstructionModel import *\n",
"from QuantConnect.Util import PythonUtil\n",
"\n",
"### <summary>\n",
"### Mean Variance Optimization algorithm\n",
"### Uses the HistoricalReturnsAlphaModel and the MeanVarianceOptimizationPortfolioConstructionModel\n",
"### to create an algorithm that rebalances the portfolio according to modern portfolio theory\n",
"### </summary>\n",
"### <meta name=\"tag\" content=\"using data\" />\n",
"### <meta name=\"tag\" content=\"using quantconnect\" />\n",
"### <meta name=\"tag\" content=\"trading and orders\" />\n",
"class MeanVarianceOptimizationAlgorithm(QCAlgorithmFramework):\n",
" '''Mean Variance Optimization algorithm.'''\n",
"\n",
" def Initialize(self):\n",
"\n",
" # Set requested data resolution\n",
" self.UniverseSettings.Resolution = Resolution.Minute\n",
"\n",
" self.SetStartDate(2013,10,7) #Set Start Date\n",
" self.SetEndDate(2013,10,11) #Set End Date\n",
" self.SetCash(100000) #Set Strategy Cash\n",
"\n",
" self.symbols = [ Symbol.Create(x, SecurityType.Equity, Market.USA) for x in [ 'AIG', 'BAC', 'IBM', 'SPY' ] ]\n",
"\n",
" self.minimum_weight = -1\n",
" self.maximum_weight = 1\n",
"\n",
" # set algorithm framework models\n",
" self.SetUniverseSelection(CoarseFundamentalUniverseSelectionModel(self.coarseSelector))\n",
" self.SetAlpha(HistoricalReturnsAlphaModel(resolution = Resolution.Daily))\n",
" self.SetPortfolioConstruction(MeanVarianceOptimizationPortfolioConstructionModel(optimization_method = self.maximum_sharpe_ratio))\n",
" self.SetExecution(ImmediateExecutionModel())\n",
" self.SetRiskManagement(NullRiskManagementModel())\n",
"\n",
" def coarseSelector(self, coarse):\n",
" # Drops SPY after the 8th\n",
" last = 3 if self.Time.day > 8 else len(self.symbols)\n",
"\n",
" return self.symbols[0:last]\n",
"\n",
" def OnOrderEvent(self, orderEvent):\n",
" if orderEvent.Status == OrderStatus.Filled:\n",
" self.Debug(orderEvent.ToString())\n",
"\n",
" def maximum_sharpe_ratio(self, returns):\n",
" '''Maximum Sharpe Ratio optimization method'''\n",
"\n",
" # Objective function\n",
" fun = lambda weights: -self.sharpe_ratio(returns, weights)\n",
"\n",
" # Constraint #1: The weights can be negative, which means investors can short a security.\n",
" constraints = [{'type': 'eq', 'fun': lambda w: np.sum(w) - 1}]\n",
"\n",
" size = returns.columns.size\n",
" x0 = np.array(size * [1. / size])\n",
" bounds = tuple((self.minimum_weight, self.maximum_weight) for x in range(size))\n",
"\n",
" opt = minimize(fun, # Objective function\n",
" x0, # Initial guess\n",
" method='SLSQP', # Optimization method: Sequential Least SQuares Programming\n",
" bounds = bounds, # Bounds for variables \n",
" constraints = constraints) # Constraints definition\n",
"\n",
" weights = pd.Series(opt['x'], index = returns.columns)\n",
" self.Log('{}:\\n\\r{}'.format(self.Time, weights))\n",
"\n",
" return opt, weights\n",
"\n",
" def sharpe_ratio(self, returns, weights):\n",
" annual_return = np.dot(np.matrix(returns.mean()), np.matrix(weights).T).item()\n",
" annual_volatility = np.sqrt(np.dot(weights.T, np.dot(returns.cov(), weights)))\n",
" return annual_return/annual_volatility\n"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.029411764705882353,
0.02702702702702703,
0.02127659574468085,
0.017543859649122806,
0.019230769230769232,
0.017543859649122806,
0.02040816326530612,
0.013333333333333334,
0.024390243902439025,
0,
0.07142857142857142,
0.024390243902439025,
0.02,
0.02127659574468085,
0.06666666666666667,
0.022222222222222223,
0.018867924528301886,
0.018867924528301886,
0.015873015873015872,
0,
0,
0,
0,
0,
0,
0,
0.05454545454545454,
0.05660377358490566,
0.017241379310344827,
0,
0.042735042735042736,
0,
0,
0,
0,
0,
0.010416666666666666,
0.036585365853658534,
0.02158273381294964,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0,
0.01020408163265306,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0.008849557522123894,
0.039473684210526314,
0.025974025974025976,
0,
0.031746031746031744,
0,
0,
0,
0,
0,
0.011494252873563218,
0.011494252873563218,
0
] | 100 | 0.009838 | false |
# -*- coding: utf-8 -*-
"""
@author: Jörg Encke, Wilfried Hortschitz, Matthias Kahr, Veronika Schrenk
This class is able to connect to the Stanford Lock-In Amplifier SR830,
regardless what Gpib-Adress the Amplifier is set.
All major functionalities have been implemented in public methods.
Literature - References:
[1]
MODEL SR830 DSP Lock-In Amplifier - Manual
by Stanford Research Systems
Revision 2.5 (10/2011)
http://www.thinksrs.com/downloads/PDFs/Manuals/SR830m.pdf
"""
import time
import imp
import sys
import warnings
import subprocess
import numpy as np
DEBUG = False
DEVICE_NAME = "Stanford_Research_Systems,SR830"
class liaSR830():
def __lin_search_logic(self):
    """
    Scan GPIB addresses 1..30 on interface 0 (Linux, linux-gpib bindings)
    until a device answers; meant to be called from __init__.

    Returns True when a device was found (self.inst is then set), False
    otherwise, including when the Gpib python bindings are missing.

    NOTE(review): success only means no GpibError was raised while
    writing '*idn?'; the identification string itself is not read here —
    confirm with check_instr().
    """
    try:
        f, filename, descr = imp.find_module('Gpib')
        Gpib_package = imp.load_module('Gpib', f, filename, descr)
        f, filename, descr = imp.find_module('gpib')
        gpib_package = imp.load_module('gpib', f, filename, descr)
        gpib_available = True
    except ImportError:
        gpib_available = False
        print('Gpib is not available')
    if gpib_available:
        print("searching for correct gpib-address...")
        for x in range(1, 31):
            try:
                self.inst = Gpib_package.Gpib(0, x)
                self.inst.clear();
                self.inst.write('*idn?')
                time.sleep(0.8)
                print("Stanford_Research_System, SR830 on gpib-address " + str(x) + " detected!")
                return True
                # FIX: removed an unreachable `break` that followed the return
            except gpib_package.GpibError as e:  # FIX: was py2-only `except X, e` syntax
                print(str(x) + " ...")
                continue
    return False
def __check_if_GPIB_USB_B_Adapter_linux(self):
    """
    Internal helper (Linux only).

    Looks for a National Instruments GPIB-USB-B adapter (as opposed to
    the GPIB-USB-HS, which needs no firmware upload) in the `lsusb`
    output and, when found, starts `fxload` to upload its firmware.
    """
    lsusb_output = subprocess.check_output('lsusb')
    matching_line = None
    for line in lsusb_output.split('\n'):
        if 'GPIB-USB-B' in line:
            matching_line = line
            break
    if matching_line is None:
        return
    # lsusb lines have fixed columns: "Bus NNN Device NNN: ID ..."
    bus_number = matching_line[4:7]
    device_number = matching_line[15:18]
    subprocess.Popen('sudo fxload -D /dev/bus/usb/' + str(bus_number) + '/' + str(device_number) +
                     ' -I /lib/firmware/ni_usb_gpib/niusbb_firmware.hex -s /lib/firmware/ni_usb_gpib/niusbb_loader.hex',
                     shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def __init__(self):
    '''
    Automatically search for pre-set gpib-address from connected instrument.
    Check if the connected Intrument is compatible to this driver by
    using check_instr-function.

    Linux: optionally uploads GPIB-USB-B firmware, then scans addresses
    via linux-gpib. Windows: scans GPIB0::1..30 via pyvisa.
    '''
    found = False
    if sys.platform.startswith('lin'):
        # Linux path: firmware upload (if needed), then address scan.
        self.__check_if_GPIB_USB_B_Adapter_linux();
        found = self.__lin_search_logic();
        if not found:
            #print("Run \'sudo gpib_config\'")
            # Best-effort: (re)load the gpib configuration and ask the
            # user to retry; output of the subprocess is discarded.
            subprocess.Popen('sudo gpib_config', shell=True, stdin = subprocess.PIPE,
                             stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE)
            print("Gpib-address could not be detected!")
            print("Press F5...")
    elif sys.platform.startswith('win'):
        # Windows path: load pyvisa dynamically so the module still
        # imports when visa is absent.
        try:
            f, filename, descr = imp.find_module('visa')
            visa_package = imp.load_module('visa', f, filename, descr)
            visa_available = True
        except ImportError:
            visa_available = False
            print('Visa is not available')
        if visa_available:
            rm = visa_package.ResourceManager()
            print("searching for correct gpib-address...")
            for x in range(1, 31):
                # record=True captures pyvisa warnings instead of printing;
                # a warning means nothing answered at this address.
                with warnings.catch_warnings(record = True) as w:
                    self.visa_instr = rm.open_resource('GPIB0::' + str(x) + '::INSTR')
                    if len(w):
                        print(str(x) + " ...")
                        continue
                    else:
                        print(str(x) + " ...")
                        if(self.check_instr()):
                            print "Stanford_Research_System, SR830 on gpib-address " + str(x) + " detected!"
                            found = True
                            break
        if not found:
            print("Gpib-address could not be detected!")
def check_instr(self):
    '''
    Check if the connected Intrument is compatible to this driver by
    comparing the *IDN? answer to the string "Stanford_Research_Systems,SR830".

    Returns True when the instrument identifies as an SR830, else False.
    '''
    if sys.platform.startswith('lin'):
        self.inst.clear();
        self.inst.write('*idn?')
        time.sleep(0.2)  # give the instrument time to answer
        ident = self.inst.read(100)
        self.inst.clear();
    elif sys.platform.startswith('win'):
        self.visa_instr.clear();
        try:
            ident = self.visa_instr.query("*IDN?")
            time.sleep(3)
        except:
            # Any visa error is treated as "not our instrument".
            ident = ""
        self.visa_instr.clear();
    if DEVICE_NAME in ident:
        return True
    else:
        if DEBUG: print "DEBUG: Instrument "+ ident + " seems not to be Stanford_Research_Systems, SR830"
        return False
def correct_phaseshift(self, phase):
    """
    Unwrap +/-360 degree jumps in a recorded phase trace.

    Whenever two consecutive samples flip sign while both magnitudes
    exceed 100 degrees (i.e. the phase wrapped around +/-180), the
    following samples are shifted by +/-360 degrees until the next such
    flip, producing a continuous trace.

    phase -- sequence of phase values in degrees
    Returns a new list with the corrected values; the input is not modified.
    """
    threshold = 100
    is_negative = lambda value: value < 0.
    previous_negative = is_negative(phase[0])
    previous_value = phase[0]
    offset_steps = 0
    corrected = []
    for value in phase:
        adjusted = value
        flipped = is_negative(value) is not previous_negative
        if flipped and abs(previous_value) > threshold and abs(value) > threshold:
            # Toggle the wrap offset on every detected jump.
            offset_steps = 0 if offset_steps else (-1 if previous_negative else 1)
        if offset_steps:
            adjusted = value + offset_steps * 360
        previous_value = value
        previous_negative = is_negative(value)
        corrected.append(adjusted)
    return corrected
def __GetSomething(self, cmdString):
    """
    Internal function. The cmdString will be send to the instrument
    to get a response.
    (cmdString can be for example SENS?, FREQ?,... most likely something
    with a question mark.)

    Returns the instrument's raw response string.
    """
    if sys.platform.startswith('win'):
        self.visa_instr.clear();
        resp = self.visa_instr.query(cmdString)
    elif sys.platform.startswith('lin'):
        self.inst.clear();
        self.inst.write(cmdString)
        resp = self.inst.read(100)  # read at most 100 bytes
        self.inst.clear();
    if DEBUG:
        print("command: " + cmdString + "; resp: " + str(resp))
    return resp
def __SetSomething(self, cmdString, setValue):
    """
    Internal function. The cmdString will be send to the instrument.
    Use setValue to set specific Values on the instrument
    (setValue can for example be the value of PHAS or FREQ,
    when the cmdString contains "PHAS" or "FREQ").

    No response is read; returns None.
    """
    if sys.platform.startswith('win'):
        self.visa_instr.write(cmdString + ' ' + str(setValue))
    elif sys.platform.startswith('lin'):
        self.inst.clear();
        self.inst.write(cmdString + ' ' + str(setValue))
        time.sleep(0.2)  # give the instrument time to process the command
        self.inst.clear();
    if DEBUG:
        print("command: " + cmdString + ' ' + str(setValue))
def ConvertiToTimeconstant(self, i):
    """
    Convert the integer index used by the OFLT/OFLT? commands into the
    actual time constant in seconds (e.g. 4 -> 1e-3 s)[1].

    Raises an Exception when i is not a valid index (0..19).
    """
    options = {0: 10e-6, 1: 30e-6, 2: 100e-6, 3: 300e-6,
               4: 1e-3, 5: 3e-3, 6: 10e-3, 7: 30e-3,
               8: 100e-3, 9: 300e-3, 10: 1, 11: 3,
               12: 10, 13: 30, 14: 100, 15: 300,
               16: 1000, 17: 3000, 18: 10000, 19: 30000}
    try:
        return options[i]
    except (KeyError, TypeError):  # FIX: was a bare `except:`; TypeError keeps unhashable i handled
        raise Exception("ConvertiToTimeconstant: parameter i contains an invalid value")
def ConvertTimeconstantToi(self, timeconstant):
    """
    Convert a time constant in seconds into the integer index used by
    the OFLT command (e.g. 1e-3 s -> 4)[1].

    Raises an Exception when timeconstant is not one of the discrete
    values supported by the instrument.
    """
    options = {10e-6: 0, 30e-6: 1, 100e-6: 2, 300e-6: 3,
               1e-3: 4, 3e-3: 5, 10e-3: 6, 30e-3: 7,
               100e-3: 8, 300e-3: 9, 1: 10, 3: 11,
               10: 12, 30: 13, 100: 14, 300: 15,
               1000: 16, 3000: 17, 10000: 18, 30000: 19}
    try:
        return options[timeconstant]
    except (KeyError, TypeError):  # FIX: was a bare `except:`
        raise Exception("ConvertTimeconstantToi: parameter timeconstant contains an invalid value")
# by HoWil#############
def __SensitivityToVolt(self, n_In):
    """
    Internal method, called from SetSensitivityLIA().

    Convert a sensitivity index n_In (0..26, as used by SENS) into the
    corresponding full-scale input voltage in volts, following the
    SR830's 2-5-10 sequence: 0 -> 2e-9 V, 1 -> 5e-9 V, 2 -> 10e-9 V,
    ..., 26 -> 1 V.
    """
    m_In = n_In + 1;
    # FIX: force true division. Under Python 2, (m_In % 3) / 3 was
    # integer division, so round(10**0) == 1 for every index and the
    # 2-5-10 leading digit was always wrong.
    voltValue = round(10**((m_In % 3) / 3.0)) * (10**-9 * 10**np.floor(m_In / 3.0));
    return voltValue
def SetSensitivityLIA(self, timeconstant = None):
    """
    Automatically sets the best Sensitivity.
    When the timeconstant is None the timeconstant set on the device
    is being used. Attention: If this pre-set timeconstant is large, this could take awhile!
    When the timecontant is not None, the timeconstant on the device is set to this timeconstant,
    before the SetSensitivityLIA-Logic starts.

    Auto-range loop: steps the sensitivity index up while the measured R
    overloads the current range, and down while R is below 30% of the
    range, waiting ~3 time constants after each change. Returns the
    full-scale voltage of the finally selected range.
    """
    #Configure property value(s).
    #set(obj, 'sens', 22.0);
    bKorrekterBereich = 0;   # 0 = range not yet correct, 1 = done
    Frequenz = self.getF();
    T = 1/Frequenz           # one reference period, used in settle waits
    while bKorrekterBereich == 0:
        if timeconstant == None:
            # Use the time constant currently set on the device.
            i = self.GetTimeConst();
            timeconstant = self.ConvertiToTimeconstant(i)
            time.sleep(3 * timeconstant + T);
        else:
            # Program the requested time constant first.
            i = self.ConvertTimeconstantToi(timeconstant);
            self.SetTimeConst(i)
            time.sleep(3 * timeconstant + T);
        # end
        # Query property value(s).
        iSensitivityLIA = self.getSens(); # get the set sensitivity
        R = self.getR();
        #print " R = %f" %R
        #print " iSensitivityLIA = %i" %iSensitivityLIA
        vValue = self.__SensitivityToVolt(iSensitivityLIA);#!!!
        #print " voltValue = %f" %voltValue
        if R > vValue:
            # Overload: make the range one step less sensitive (cap at 26).
            iSensitivityLIA = iSensitivityLIA + 1;
            if iSensitivityLIA > 26 :
                iSensitivityLIA = 26;
            # end;
            # Configure property value(s).
            self.SetSens(iSensitivityLIA);
            bKorrekterBereich = 0;
            time.sleep(3 * timeconstant + 0.2 * T)
        else:
            #R = self.getR();
            #vValue = self.__SensitivityToVolt(iSensitivityLIA);#!!!
            if DEBUG: print str(vValue)
            if R < 0.3 * vValue:
                # Signal small for this range: one step more sensitive (floor at 0).
                iSensitivityLIA = iSensitivityLIA - 1;
                if iSensitivityLIA < 0:
                    iSensitivityLIA = 0;
                # end;
                if DEBUG: print("iSensitivityLIA: " + str(iSensitivityLIA))
                self.SetSens(iSensitivityLIA);
                bKorrekterBereich = 0;
                time.sleep(3 * timeconstant + 0.2 * T)
            else:
                # R fits the range: done.
                bKorrekterBereich = 1;
                if DEBUG: print str(vValue)
    return vValue
    # end
    # end
    # end
    # end # function SetSensitivityLIA
def SendString(self, CmdString):
    """
    Send the command string CmdString verbatim to the instrument;
    no response is read.
    """
    if DEBUG:
        print("send string: " + CmdString)
    platform = sys.platform
    if platform.startswith('lin'):
        self.inst.write(CmdString)
    elif platform.startswith('win'):
        self.visa_instr.write(CmdString)
def getR(self):
    """
    Query the magnitude R via OUTP?3 and return it as float[1].
    [additional information: other OUTP channels are X (1), Y (2), θ (4)]
    """
    raw = self.__GetSomething('OUTP?3')
    if DEBUG:
        print("R: " + raw)
    return float(raw)
def getPhi(self):
    """
    Query the phase θ via OUTP?4 and return it as float[1].
    [additional information: other OUTP channels are X (1), Y (2), R (3)]
    """
    raw = self.__GetSomething('OUTP?4')
    if DEBUG:
        print("Phi: " + raw)
    return float(raw)
def getSens(self):
    """
    Query the sensitivity index via SENS? and return it as float
    (duplicate of GetSens, which returns int)[1].
    Index mapping (2-5-10 sequence): i=0 ≙ 2 nV/fA up to i=26 ≙ 1 V/μA.
    """
    raw = self.__GetSomething('SENS?')
    if DEBUG:
        print("Sens: " + raw)
    return float(raw)
def getF(self):
    """
    Query the reference frequency in Hz via FREQ? (internal or external
    reference mode) and return it as float; duplicate of GetRefFreq[1].
    """
    raw = self.__GetSomething('FREQ?')
    if DEBUG:
        print("F: " + raw)
    return float(raw)
####################
#Instrument status
def SerialPollDiagnostic(self):
    """
    Read the serial poll status byte (*STB?) and print a diagnostic for
    every error condition flagged in it; queries the corresponding
    sub-status registers (ERRS?, LIAS?, *ESR?, *SRE?) as needed[1].

    Returns the raw serial poll byte as int.
    """
    resp = self.__GetSomething('*STB?')
    SPB = int(resp)  # SPB ... serial poll byte
    ok = SPB & 1 | SPB & 2 | (not (SPB & 64))  # ... no command in progress
    if (not ok):
        SPBbit0 = SPB & 1    # FIX: was `SPB & 0` (always 0); bit0 = no data is being acquired
        SPBbit1 = SPB & 2    # no command execution in progress
        SPBbit2 = SPB & 4    # unmasked bit in error status byte set
        SPBbit3 = SPB & 8    # unmasked bit in LIA status byte set
        SPBbit4 = SPB & 16   # the interface output buffer is not empty
        SPBbit5 = SPB & 32   # unmasked bit in standard status byte set
        SPBbit6 = SPB & 64   # SRQ has occurred
        SPBbit7 = SPB & 128  # not in use
        if SPBbit2:
            print('unmasked bit in error status byte set')
            ERRSbyte = self.__GetSomething('ERRS?')
            print('error-status byte: ' + str(ERRSbyte))
        if SPBbit3:
            print('unmasked bit in LIA status byte set')
            LIASbyte = self.__GetSomething('LIAS?')
            print('LIA-status byte: ' + str(LIASbyte))
        if SPBbit4:
            self.SendString('REST')  # try a reset; not sure that this will help
        if SPBbit5:
            ESRbyte = self.__GetSomething('*ESR?')
            print('standard event-status byte: ' + str(ESRbyte))
        if SPBbit6:
            SPEbyte = self.__GetSomething('*SRE?')
            print('SRQ occurred SP enable register value ' + str(SPEbyte))
    return SPB
#reference settings
def SetRefRms(self, rms):
    """
    Set the sine output amplitude via SLVL. rms is a voltage in volts;
    the instrument rounds it to 0.002 V and limits it to the range
    0.004 <= rms <= 5.000[1].
    """
    return self.__SetSomething('SLVL', rms)
def GetRefRms(self):
    """Query the sine output amplitude (SLVL?) and return it in volts[1]."""
    return float(self.__GetSomething('SLVL?'))
def SetRefFreq(self, f):
    """
    Set the internal oscillator frequency via FREQ (allowed only with
    internal reference). f is in Hz; the instrument rounds it to 5
    digits or 0.0001 Hz and limits it to 0.001 <= f <= 102000; with a
    harmonic n > 1 the limit is n*f <= 102 kHz[1].
    """
    return self.__SetSomething('FREQ', str(f))
def GetRefFreq(self):
    """
    Query the reference frequency in Hz (internal or external mode);
    duplicate of getF[1].
    """
    return float(self.__GetSomething('Freq?'))
def SetRefPhas(self, phase):
    """
    Set the reference phase shift via PHAS. phase is in degrees, rounded
    by the instrument to 0.01°; allowed range -360.00 <= phase <= 729.99,
    wrapped at ±180° (e.g. 541 -> -179)[1].
    """
    return self.__SetSomething('PHAS', str(phase))
def GetRefPhas(self):
    """Query the reference phase shift (PHAS?) in degrees[1]."""
    return float(self.__GetSomething('PHAS?'))
def SetRefMode(self, refmod):
    """
    Select the reference source via FMOD: 0 = external, 1 = internal[1].
    """
    if refmod not in (0, 1):
        raise Exception("SetRefMode: parameter refmode can only be set to 0 (=external) or 1(=internal)")
    return self.__SetSomething('FMOD', str(refmod))
def __checkFractionalDigits(self, i, exception_text):
    """
    Internal helper: raise Exception(exception_text) when i has a
    non-zero fractional part. i may be a number or a numeric string;
    values like 3 or 3.0 pass, 3.5 raises.
    """
    import decimal
    if "." in str(i):
        d = decimal.Decimal(i).as_tuple()
        # number of digits in front of the decimal point
        preDecimalPlaces = len(d.digits) + d.exponent
        try:
            # everything after the decimal point, parsed as int
            fractionalDigits = int(str(i)[(preDecimalPlaces + 1):])
        except:
            # non-numeric tail (or malformed input) counts as invalid
            raise Exception(exception_text)
        if fractionalDigits != 0:
            raise Exception(exception_text)
def GetRefMode(self):
    """Query the reference source (FMOD?): 0 = external, 1 = internal[1]."""
    return int(self.__GetSomething('FMOD?'))
def SetRefHarm(self, harm):
    """
    Set the detection harmonic via HARM. harm is an integer 1..19999;
    the instrument additionally clips it so that harm * f <= 102 kHz[1].
    Raises an Exception for non-integer or unconvertible values.
    """
    exception_text = "SetRefHarm: parameter harm has to be int or long from 1 to 19999"
    self.__checkFractionalDigits(harm, exception_text)
    try:
        harm = int(harm)
    except:
        raise Exception(exception_text)
    if not isinstance(harm, (int, long)):
        raise Exception(exception_text)
    return self.__SetSomething('HARM', str(harm))
def GetRefHarm(self):
    """Query the detection harmonic (HARM?)[1]."""
    return int(self.__GetSomething('HARM?'))
#input and filter
def SetInputConfig(self, iconf):
    """
    Set the input configuration via ISRC: 0 = A, 1 = A-B, 2 = I (1 MΩ),
    3 = I (100 MΩ). Changing the current gain does not change the
    instrument sensitivity; sensitivities above 10 nA require the 1 MΩ
    gain, 20 nA..1 μA select it automatically[1].
    """
    if iconf not in (0, 1, 2, 3):
        raise Exception("SetInputConfig: parameter iconf can only be set to value from 0 to 3\nA (iconf=0), A-B (iconf=1), I (1 MΩ) (iconf=2) or I (100 MΩ) (iconf=3)")
    return self.__SetSomething('ISRC', str(iconf))
def GetInputConfig(self):
    """
    Query the input configuration (ISRC?):
    0 ≙ A, 1 ≙ A-B, 2 ≙ I(1 MΩ), 3 ≙ I(100 MΩ)[1].
    """
    return int(self.__GetSomething('ISRC?'))
def SetGNDConfig(self, gndconf):
    """
    Set the input shield grounding via IGND: 0 = Float, 1 = Ground[1].
    """
    if gndconf in (0, 1):
        self.__SetSomething('IGND', gndconf)
    else:
        raise Exception("SetGNDConfig: parameter gndconf can only be 0(≙Float) or 1(≙Ground)")
def GetGNDConfig(self):
    """Query the input shield grounding (IGND?): 0 ≙ Float, 1 ≙ Ground[1]."""
    return int(self.__GetSomething('IGND?'))
def SetInputCoupling(self, icoup):
    """
    Set the input coupling via ICPL: 0 = AC, 1 = DC[1].
    """
    if icoup not in (0, 1):
        raise Exception("SetInputCoupling: parameter icoup can only be 0(≙AC) or 1(≙DC)")
    return self.__SetSomething('ICPL', icoup)
def GetInputCoupling(self):
    """Query the input coupling (ICPL?): 0 ≙ AC, 1 ≙ DC[1]."""
    return int(self.__GetSomething('ICPL?'))
def SetLineNotch(self, linotch):
    """
    Set the input line notch filter via ILIN: 0 = out/no filters,
    1 = line notch in, 2 = 2x line notch in, 3 = both notch filters in[1].
    """
    if linotch not in (0, 1, 2, 3):
        raise Exception("SetLineNotch: parameter linotch can only be set to 0(≙Out or no filters), 1(≙Line notch in), 2(≙2xLine notch in) or 3(≙Both notch filters in)")
    self.__SetSomething('ILIN', str(linotch))
def GetLineNotch(self):
    """Query the input line notch filter status (ILIN?)[1]."""
    return int(self.__GetSomething('ILIN?'))
def SetSens(self, i):
    """
    Set the sensitivity via SENS. i is an index 0..26 following the
    2-5-10 sequence from 2 nV/fA (i=0) up to 1 V/μA (i=26)[1].
    Raises an Exception for non-integer or out-of-range values.
    """
    exception_text = (
        "SetSens: parameter i can only be set to int or long values from 0 to 26\n"
        "i=0(≙2 nV/fA), i=1(≙5 nV/fA), i=2(≙10 nV/fA), i=3(≙20 nV/fA), i=4(≙50 nV/fA), i=5(≙100 nV/fA), i=6(≙200 nV/fA), "
        "i=7(≙500 nV/fA), i=8(≙1 μV/pA), i=9(≙2 μV/pA), i=10(≙5 μV/pA), i=11(≙10 μV/pA), i=12(≙20 μV/pA), i=13(≙50 μV/pA), "
        "i=14(≙100 μV/pA), i=15(≙200 μV/pA), i=16(≙500 μV/pA), i=17(≙1 mV/nA), i=18(≙2 mV/nA), i=19(≙5 mV/nA), i=20(≙10 mV/nA), "
        "i=21(≙20 mV/nA), i=22(≙50 mV/nA), i=23(≙100 mV/nA), i=24(≙200 mV/nA), i=25(≙500 mV/nA), i=26(≙1 V/μA)")
    self.__checkFractionalDigits(i, exception_text)
    try:
        i = int(i)
    except:
        raise Exception(exception_text)
    if i < 0 or i > 26 or not isinstance(i, (int, long)):
        raise Exception(exception_text)
    self.__SetSomething('SENS', i)
def GetSens(self):
    """
    Query the sensitivity index via SENS? and return it as int
    (duplicate of getSens, which returns float)[1].
    Index mapping (2-5-10 sequence): 0 ≙ 2 nV/fA up to 26 ≙ 1 V/μA.
    """
    return int(self.__GetSomething('SENS?'))
def SetReserve(self, reserve):
    """
    Set the reserve mode via RMOD: 0 = High Reserve, 1 = Normal,
    2 = Low Noise (minimum); see the [Reserve] key description in the
    manual for the actual reserve per sensitivity[1].
    """
    if reserve not in (0, 1, 2):
        raise Exception("SetReserve: parameter reserve can only be set to the values 0(≙High Reserve), 1(≙Normal) or 2(≙Low Noise)")
    self.__SetSomething('RMOD', str(reserve))
def GetReserve(self):
    """Query the reserve mode (RMOD?): 0 ≙ High Reserve, 1 ≙ Normal, 2 ≙ Low Noise[1]."""
    return int(self.__GetSomething('RMOD?'))
def SetTimeConst(self, i):
    """
    Set the time constant via OFLT. i is an index 0..19 (1-3-10 sequence
    from 10 μs to 30 ks); use ConvertTimeconstantToi to map seconds to
    the index. Time constants greater than 30 s may NOT be set when the
    detection frequency exceeds 200 Hz, and values below the minimum
    allowed (slope/reserve dependent) are clipped by the instrument[1].
    """
    exception_text = (
        "SetTimeConst: parameter i can only be set to values from 0 to 19\n"
        "i=0(≙10 μs), i=1(≙30 μs), i=2(≙100 μs), i=3(≙300 μs), i=4(≙1 ms), i=5(≙3 ms), i=6(≙10 ms), "
        "i=7(≙30 ms), i=8(≙100 ms), i=9(≙300 ms), i=10(≙1 s), i=11(≙3 s), i=12(≙10 s), i=13(≙30 s), "
        "i=14(≙100 s), i=15(≙300 s), i=16(≙1 ks), i=17(≙3 ks), i=18(≙10 ks), i=19(≙30 ks)")
    self.__checkFractionalDigits(i, exception_text)
    try:
        i = int(i)
    except:
        raise Exception(exception_text)
    if i < 0 or i > 19 or not isinstance(i, (int, long)):
        raise Exception(exception_text)
    self.__SetSomething('OFLT', i)
def GetTimeConst(self):
    """
    Query the time constant index (OFLT?); use ConvertiToTimeconstant
    to translate the returned index into seconds[1].
    """
    return int(self.__GetSomething('OFLT?'))
def SetSlope(self,slope):
"""
The OFSL i command setsthe low pass filter slope. The
parameter slope selects 6 dB/oct (slope=0), 12 dB/oct (slope=1), 18 dB/oct (slope=2) or
24 dB/oct (slope=3)[1].
"""
exception_text = "SetSlope: parameter slope can only be set to the values 0(≙6 dB/oct), 1(≙12 dB/oct), 2(≙18 dB/oct) or 3(≙24 dB/oct)."
self.__checkFractionalDigits(slope, exception_text);
try:
slope = int(slope)
except:
raise Exception(exception_text)
if slope < 0 or slope > 3 or not(isinstance( slope, ( int, long ) )):
raise Exception(exception_text)
self.__SetSomething('OFSL', slope)
def GetSlope(self):
"""
The OFSL? command queries the low pass filter slope[1].
slope=0(≙6 dB/oct), slope=1(≙12 dB/oct), slope=2(≙18 dB/oct) or
slope=3(≙24 dB/oct)
"""
slope = self.__GetSomething('OFSL?')
return int(slope)
def SetSyncFilter(self, sync):
"""
The SYNC i command sets the synchronous filter status. The
parameter i selects Off (i=0) or synchronous filtering below 200 Hz (i=1).
Synchronous filtering is turned on only if the detection frequency (refer-
ence x harmonic number) is less than 200 Hz[1].
"""
exception_text = "SetSyncFilter: parameter sync can only be set to 0(≙Off) or 1(≙synchronous filtering below 200 Hz)."
self.__checkFractionalDigits(sync, exception_text);
try:
sync = int(sync)
except:
raise Exception(exception_text)
if sync < 0 or sync > 1 or not(isinstance( sync, ( int, long ) )):
raise Exception(exception_text)
self.__SetSomething('SYNC', sync)
def GetSyncFilter(self):
"""
The SYNC? command queries the synchronous filter status[1].
sync=0(≙Off) or sync=1(≙synchronous filtering below 200 Hz).
"""
sync = self.__GetSomething('SYNC?')
return int(sync)
def SetDisplay(self, channel, j, ratio=0):
"""
The DDEF i, j, k command selects the CH1 and CH2 displays. The parameter
channel selects CH1 (channel=1) or CH2 (channel=2) and is required.
This command sets channel i to parameter j with ratio k as listed below.
CH1 (i=1) 4 CH2 (i=2)
j display j display
0 X 0 Y
1 R 1 θ
2 X Noise 2 Y Noise
3 Aux In 1 3 Aux In 3
4 Aux In 2 4 Aux In 4
k ratio k ratio
0 none 0 none
1 Aux In 1 1 Aux In 3
2 Aux In 2 2 Aux In 4
[1]
"""
ch = str(channel)
k = str(j)
rat = str(ratio)
Cmd = 'DDEF'+ ch + ',' + k + ',' + rat
self.SendString(Cmd)
return
def GetDisplay(self, channel = 1):
"""
The DDEF? i command queries the display and ratio of display i. The
returned string contains both j and k separated by a comma. For exam-
ple, if the DDEF? 1 command returns "1,0" then the CH1 display is R
with no ratio[1].
"""
resp = self.__GetSomething('DDEF? ' + str(channel));
[j,ratio] = resp.rsplit(',')
return [j,ratio]
def SetInterface(self, GPIB = True, RS232 =False):
"""
The OUTX i command sets the output interface to RS232 (i=0) or GPIB(i=1).
The OUTX i command should be sent before any query com-
mands to direct the responses to the interface in use[1].
"""
if GPIB:
Cmd = 'OUTX 1'#sets te output interface to GPIB
else:
Cmd = 'OUTX 0'#sets the output interface to RS232
self.SendString(Cmd)
return
def GetInterface(self, GPIB = False, RS232 =False):
"""
The OUTX? command queries the interface[1].
Interface=0(≙RS232) or Interface=1(≙GPIB).
"""
Ifc = self.__GetSomething('OUTX?')
if int(Ifc) == 1 :
Interface = 'GPIB'
else:
Interface = 'RS232'
return int(Ifc), Interface
def SetDisableRemoteLockoutState(self, On = True):
"""
In general, every GPIB interface command will put the SR830 into the
REMOTE state with the front panel deactivated. To defeat this feature,
use the OVRM 1 command to overide the GPIB remote. In this mode, the
front panel is not locked out when the unit is in the REMOTE state. The
OVRM 0 command returns the unit to normal remote operation[1].
"""
if On:
Cmd = 'OVRM 1' #Front panel is not locked out
else:
Cmd = 'OVRM 0' #Front panel is locked out
self.SendString(Cmd)
return
def SetKlickOn(self, On=False):
"""
The KCLK i command sets the key click On (i=1) or Off (i=0) state[1].
"""
if On:
Cmd = 'KCLK 1'
else:
Cmd = 'KCLK 0'
self.SendString(Cmd)
return
def GetKlickOn(self,On=False):
"""
The KCLK i command queries the key[1].
"""
KlickOn = self.__GetSomething('KCLK?')
return int(KlickOn)
def SetAlarm(self, On=False):
"""
The ALRM i command sets the alarm On (i=1) or Off (i=0) state[1].
"""
if On:
Cmd = 'ALRM 1'
else:
Cmd = 'ALRM 0'
self.SendString(Cmd)
return
def GetAlarm(self,On=False):
"""
The ALRM? command queries the alarm[1]
Alarm=1(≙On) or Alarm=0(≙Off).
"""
Alarm = self.__GetSomething('ALRM?')
return int(Alarm)
def SaveSettings(self, BufferAddress = 1):
"""
The SSET i command saves the lock-in setup in setting buffer i (1<i<9).
The setting buffers are retained when the power is turned off[1].
"""
self.__SetSomething('SSET', BufferAddress)
def ReactivateSettings(self, BufferAddress = 1):
"""
The RSET i command recalls the lock-in setup from setting buffer i
(1≤i≤9). Interface parameters are not changed when a setting buffer is
recalled with the RSET command. If setting i has not been saved prior to
the RSET i command, then an error will result[1].
"""
self.__SetSomething('RSET', BufferAddress)
def SetAutoGain(self):
"""
The AGAN command performs the Auto Gain function. This command is
the same as pressing the [Auto Gain] key. Auto Gain may take some
time if the time constant is long. AGAN does nothing if the time constant
is greater than 1 second. Check the command execution in progress bit
in the Serial Poll Status Byte (bit 1) to determine when the function is
finished[1].
"""
cmd = 'AGAN'
self.SendString(cmd)
return
def SetFrontOutputSource(self, which = None, Type = None):
"""
The FPOP i,j command sets the front panel (CH1 and CH2) output sources.
The parameter i selects CH1 (i=1) or CH2 (i=2) and is required.
The FPOP i, j command sets output i to quantity j where j is
listed below.
CH1 (i=1) 4 CH2 (i=2)
j output quantity j output quantity
0 CH 1 Display 0 CH 2 Display
1 X 1 Y
[1]
"""
cmd = 'FPOP ' + str(which) + ',' + str(Type)
self.SendString(cmd)
def GetFrontOutputSource(self, which= None):
"""
The FPOP? command queries the front panel (CH1 and CH2) output sources[1].
"""
resp = self.__GetSomething('FPOP?' + str(which))
if str(resp)==0:
Type = 'Display Channel '+ str(which)
else:
if which == 1:
Type = 'X'
else:
Type = 'Y'
return Type
def GetOutputOffsetAndExpand(self, i):
"""
The OEXP? i command queries the output offsets and expand of quantity i.
The parameter i selects X (i=1), Y (i=2) or R (i=3) and is required.
The returned string contains both the offset and
expand separated by a comma. For example, if the OEXP? 2 command
returns "50.00,1" then the Y offset is 50.00% and the Y expand is 10.
Setting an offset to zero turns the offset off. Querying an offset which is
off will return 0% for the offset value[1].
"""
exception_text = "GetOutputOffsetAndExpand: parameter i can only be 1(≙X), 2(≙Y) or 3(≙R)"
self.__checkFractionalDigits(i, exception_text);
try:
i = int(i)
except:
raise Exception(exception_text)
if i < 1 or i > 3 or not(isinstance( i, ( int, long ) )):
raise Exception(exception_text)
Type = ['X','Y','R']
cmd = 'OEXP? '+ str(i)
resp = self.__GetSomething(cmd)
[offset, expand] = resp.rsplit(',')
return Type[i-1], offset, expand
def SetOutputOffsetAndExpand(self, Param, Offset, Expand):
"""
The OEXP i, x, j command will set the offset and expand for quantity i.
This command requires BOTH x and j.
The parameter i selects X (i=1), Y (i=2) or R (i=3) and is required. The
parameter x is the offset in percent (-105.00 ≤ x ≤ 105.00). The parame-
ter j selects no expand (j=0), expand by 10 (j=1) or 100 (j=2)[1].
"""
cmd = 'OEXP ' + str(Param)+ ',' + str(Offset) + ',' + str(Expand)
self.SendString(cmd)
def SetAutoReserve(self):
"""
The ARSV command performs the Auto Reserve function. This com-
mand is the same as pressing the [Auto Reserve] key. Auto Reserve
may take some time. Check the command execution in progress bit in
the Serial Poll Status Byte (bit 1) to determine when the function is
finished[1].
"""
cmd = 'ARSV'
self.SendString(cmd)
def SetAutoPhase(self):
"""
The APHS command performs the Auto Phase function. This command
is the same as pressing the [Auto Phase] key. The outputs will take many
time constants to reach their new values. Do not send the APHS com-
mand again without waiting the appropriate amount of time. If the phase
is unstable, then APHS will do nothing. Query the new value of the phase
shift to see if APHS changed the phase shift[1].
"""
cmd = 'APHS'
self.SendString(cmd)
def SetAutoOffset(self, which):
"""
The AOFF i command automatically offsets X (i=1), Y (i=2) or R (i=3) to
zero. The parameter i is required. This command is equivalent to press-
ing the [Auto Offset] keys[1].
"""
exception_text = "SetAutoOffset: parameter which can only be 1(≙X), 2(≙Y) or 3(≙R)"
self.__checkFractionalDigits(which, exception_text);
try:
which = int(which)
except:
raise Exception(exception_text)
if which < 1 or which > 3 or not(isinstance( which, ( int, long ) )):
raise Exception(exception_text)
self.__SetSomething('AOFF', which)
def SetDataSampleRate(self, rate = 4):
"""
The SRAT i command sets the data sample rate. The parame-
ter i selects the sample rate listed below.
i quantity i quantity
0 62.5 mHz 8 16 Hz
1 125 mHz 9 32 Hz
2 250 mHz 10 64 Hz
3 500 mHz 11 128 Hz
4 1 Hz 12 256 Hz
5 2 Hz 13 512 Hz
6 4 Hz 14 Trigger
7 8 Hz
[1]
"""
self.__SetSomething('SRAT', rate)
def GetDataSampleRate(self, rate = None):
"""
The SRAT? command queries the data sample rate[1].
"""
Rate = self.__GetSomething('SRAT?')
return int(Rate)
def SetEndOfBuffer(self, kind =None):
"""
The SEND i command sets the end of buffer mode. The param-
eter i selects 1 Shot (i=0) or Loop (i=1). If Loop mode is used, make sure
to pause data storage before reading the data to avoid confusion about
which point is the most recent[1].
"""
if kind not in (0,1):
raise Exception("SetEndOfBuffer: parameter kind can only be 0(≙Shot) or 1(≙Loop)")
self.__SetSomething('SEND', kind)
def GetEndOfBuffer(self, kind = None):
"""
The SEND? command queries the end of buffer mode[1].
"""
Kind = self.__GetSomething('SEND?')
return Kind
def Trigger(self):
"""
The TRIG command is the software trigger command. This command
has the same effect as a trigger at the rear panel trigger input[1].
"""
self.SendString('TRIG')
def SetTriggerStartMode(self, kind):
"""
The TSTR i command sets the trigger start mode. The parameter
i=1 selects trigger starts the scan and i=0 turns the trigger start feature off.
"""
if kind not in (0,1):
raise Exception("SetTriggerStartMode: parameter kind can only be 0(≙trigger starts the scan) or 1(≙turns the trigger start feature off)")
self.__SetSomething('TSTR', kind)
def GetTriggerStartMode(self):
"""
The TSTR? command queries the trigger start mode[1].
"""
Kind = self.__GetSomething('TSTR?')
return int(Kind)
def Start(self):
"""
The STRT command starts or resumes data storage. STRT is ignored if
storage is already in progress[1].
"""
self.SendString('STRT')
def Pause(self):
"""
The PAUS command pauses data storage. If storage is already paused
or reset then this command is ignored[1].
"""
self.SendString('PAUS')
def SetTriggerSlope(self, value):
"""
The RSLP command sets the reference trigger when using the
external reference mode. The parameter i selects sine zero crossing
(i=0), TTL rising edge (i=1), , or TTL falling edge (i=2). At frequencies
below 1 Hz, the a TTL reference must be used[1].
"""
if value not in (0,1,2):
raise Exception("SetTriggerSlope: parameter value can only be 0(≙sine zero crossing), 1(≙TTL rising edge/Pos edge) or 2(≙TTL falling edge/neg edge)")
snd = "RSLP%i" % value
self.SendString(snd)
def iToSlope(self, i):
"""
converts the response returned by GetTriggerSlope to the actual slope
"""
options = {0 : 'Sine',
1 : 'Pos edge',
2 : 'neg edge'
}
return options[int(i.strip())]
def GetTriggerSlope(self):
"""
The RSLP? command queries the reference trigger when using the
external reference mode.
use the method self.iToSlope to convert the response of this method to the actual slope
"""
resp = self.__GetSomething('RSLP?');
return resp
def Reset(self):
"""
Reset the unit to its default configurations[1].
"""
self.SendString('*RST')
def ResetDataBuffers(self):
"""
The REST command resets the data buffers. The REST command can
be sent at any time - any storage in progress, paused or not, will be
reset. This command will erase the data buffer[1].
"""
self.SendString('REST')
def GetSelectedOutput(self, which):
"""
The OUTP? i command reads the value of X, Y, R or θ. The parameter
i selects X (i=1), Y (i=2), R (i=3) or θ (i=4). Values are returned as ASCII
floating point numbers with units of Volts or degrees. For example, the
response might be "-1.01026". This command is a query only command[1].
"""
if which not in (1,2,3,4):
raise Exception("GetSelectedOutput: parameter which can only be 1(≙X),2(≙Y),3(≙R) or 4(≙θ)")
Value = self.__GetSomething('OUTP?' + str(which))
if which == 1:
Type = 'X'
elif which == 2:
Type = 'Y'
elif which == 3:
Type = 'R'
elif which == 4:
Type = 'θ'
return [float(Value), Type]
def GetSelectedDisplayValue(self, which):
"""
The OUTR? i command reads the value of the CH1 or CH2 display.
The parameter i selects the display (i=1 or 2). Values are returned as
ASCII floating point numbers with units of the display. For example, the
response might be "-1.01026". This command is a query only command[1].
"""
if which not in (1, 2):
raise Exception("GetSelectedDisplayValue: parameter which can only be 1(≙CH1) or 2(≙CH2)")
Value = self.__GetSomething('OUTR?' + str(which))
time.sleep(0.2);
resp = float(Value)
if DEBUG:
print("GetSelectedDisplayValue: " + Value)
return resp
def __check_snap(self, param):
"""
internal function used by method SNAP
ensures that the SNAP-params are correct
"""
if param not in (1,2,3,4,5,6,7,8,9,10,11):
raise Exception("SNAP: Parameters can only be 1(≙X), 2(≙Y), 3(≙R), 4(≙θ), 5(≙Aux In 1), 6(≙Aux In 2), 7(≙Aux In 3), 8(≙Aux In 4), 9(≙Reference Frequency), 10(≙CH1 display) or 11(≙CH2 display)")
def SNAP(self,Param1,Param2,Param3=None,Param4 =None,Param5=None,Param6=None):
"""
The SNAP? command records the values of either 2, 3, 4, 5 or 6 param-
eters at a single instant. For example, SNAP? is a way to query values of
X and Y (or R and θ) which are taken at the same time. This is important
when the time constant is very short. Using the OUTP? or OUTR? com-
mands will result in time delays, which may be greater than the time con-
stant, between reading X and Y (or R and θ).
The SNAP? command requires at least two parameters and at most six
parameters. The parameters i, j, k, l, m, n select the parameters below.
i,j,k,l,m,n parameter
1 X
2 Y
3 R
4 θ
5 Aux In 1
6 Aux In 2
7 Aux In 3
8 Aux In 4
9 Reference Frequency
10 CH1 display
11 CH2 display
The requested values are returned in a single string with the values sep-
arated by commas and in the order in which they were requested. For
example, the SNAP?1,2,9,5 will return the values of X, Y, Freq and
Aux In 1. These values will be returned in a single string such as
"0.951359,0.0253297,1000.00,1.234".
The first value is X, the second is Y, the third is f, and the fourth is
Aux In 1.
The values of X and Y are recorded at a single instant. The values of R
and θ are also recorded at a single instant. Thus reading X,Y OR R,θ
yields a coherent snapshot of the output signal. If X,Y,R and θ are all
read, then the values of X,Y are recorded approximately 10μs apart from
R,θ. Thus, the values of X and Y may not yield the exact values of R and
θ from a single SNAP? query.
The values of the Aux Inputs may have an uncertainty of up to 32μs. The
frequency is computed only every other period or 40 ms, whichever is
longer.
The SNAP? command is a query only command. The SNAP? command
is used to record various parameters simultaneously, not to transfer data
quickly[1].
"""
self.__check_snap(Param1);
self.__check_snap(Param2);
Cmdstr = 'SNAP?' + ' '+ str(Param1) + ','+ str(Param2);
if Param3 != None:
self.__check_snap(Param3);
Cmdstr += ','+ str(Param3);
if Param4 != None:
self.__check_snap(Param4);
Cmdstr += ','+ str(Param4);
if Param5 != None:
self.__check_snap(Param5);
Cmdstr += ','+ str(Param5);
if Param6 != None:
self.__check_snap(Param6);
Cmdstr += ','+ str(Param6);
resp = self.__GetSomething(Cmdstr);
if Param3 is None: # no value, just the command string to query
Val6 = None; Val5 = None; Val4 = None; Val3 = None
[Val1,Val2] = resp.rsplit(',')
elif Param4 is None:
Val6 = None; Val5 =None; Val4 = None
[Val1,Val2,Val3]= resp.rsplit(',')
elif Param5 is None:
Val6 = None; Val5 = None;
[Val1,Val2,Val3,Val4]= resp.rsplit(',')
elif Param6 is None:
Val6 = None
[Val1,Val2,Val3,Val4,Val5]= resp.rsplit(',')
else:
[Val1,Val2,Val3,Val4,Val5, Val6]= resp.rsplit(',')
return Val1, Val2, Val3, Val4, Val5, Val6, Param1, Param2, Param3, \
Param4, Param5, Param6
def GetAuxValue(self, number):
"""
The OAUX? command reads the Aux Input values. The parameter i
selects an Aux Input (1, 2, 3 or 4) and is required. The Aux Input voltages
are returned as ASCII strings with units of Volts. The resolution is
1/3 mV. This command is a query only command[1].
"""
if number not in (1,2,3,4):
raise Exception("GetAuxValue: parameter number can only be 1(≙Aux Input 1), 2(≙Aux Input 2), 3(≙Aux Input 3) or 4(≙Aux Input 4)")
OutAux = self.__GetSomething('OAUX?' + str(number))
return float(OutAux), number
def GetOccupiedBuffer(self):
"""
The SPTS? command queries the number of points stored in the buffer.
Both displays have the same number of points. If the buffer is reset, then
0 is returned. Remember, SPTS? returns N where N is the number of
points - the points are numbered from 0 (oldest) to N-1 (most recent).
The SPTS? command can be sent at any time, even while storage is in
progress. This command is a query only command[1].
"""
n = self.__GetSomething('SPTS?')
return int(n)
# commented by WilHo, because this method uses GetOccupiedBuffer with parameter 'which',
# but SPTS? is a query only command for further information see the programming manual
# def GetChannelBufferPoints(self,which,length):
# if which not in (1,2):
# raise Exception("which has to be 1 or 2")
# if length <= 0:
# raise Exception("Length hast to be >= 0")
# length = int(self.GetOccupiedBuffer(which)) - 1
## DataBuffer = [((0:length)];
# DataBuffer = []
# for j in range(0,length):
# cmd = 'TRCA? '+str(which)+',' + str(j) + ',1'
# DataBuffer[j] = self.SetOrCheckSomething(cmd, None,0, length, False)
# return DataBuffer[:]
def close(self):
'''
Close the connection to the Instrument, return controle to instruments
controles and switch off output
'''
if sys.platform.startswith('win'):
self.visa_instr.close()
elif sys.platform.startswith('lin'):
self.inst.clear() #close() not implemented in Gpib.py
OUT_CLASS = liaSR830 | [
"# -*- coding: utf-8 -*-\r\n",
"\"\"\"\r\n",
"@author: Jörg Encke, Wilfried Hortschitz, Matthias Kahr, Veronika Schrenk\r\n",
"\r\n",
"This class is able to connect to the Stanford Lock-In Amplifier SR830, \r\n",
"regardless what Gpib-Adress the Amplifier is set.\r\n",
"All major functionalities have been implemented in public methods.\r\n",
"\r\n",
"Literature - References:\r\n",
"[1]\r\n",
"MODEL SR830 DSP Lock-In Amplifier - Manual\r\n",
"by Stanford Research Systems\r\n",
"Revision 2.5 (10/2011)\r\n",
"http://www.thinksrs.com/downloads/PDFs/Manuals/SR830m.pdf\r\n",
"\"\"\"\r\n",
"import time\r\n",
"import imp\r\n",
"import sys\r\n",
"import warnings\r\n",
"import subprocess\r\n",
"import numpy as np\r\n",
"\r\n",
"DEBUG = False\r\n",
"DEVICE_NAME = \"Stanford_Research_Systems,SR830\"\r\n",
"\r\n",
"class liaSR830():\r\n",
" \r\n",
" def __lin_search_logic(self):\r\n",
" \"\"\"\r\n",
" This function is meant to be called from __init__ to automatically search\r\n",
" for the correct gpib-address in ubuntu\r\n",
" \"\"\"\r\n",
" try:\r\n",
" f, filename, descr = imp.find_module('Gpib')\r\n",
" Gpib_package = imp.load_module('Gpib', f, filename, descr)\r\n",
" f, filename, descr = imp.find_module('gpib')\r\n",
" gpib_package = imp.load_module('gpib', f, filename, descr) \r\n",
" gpib_available = True\r\n",
" except ImportError:\r\n",
" gpib_available = False\r\n",
" print('Gpib is not available')\r\n",
" if gpib_available:\r\n",
" print(\"searching for correct gpib-address...\")\r\n",
" for x in range(1, 31):\r\n",
" try:\r\n",
" self.inst = Gpib_package.Gpib(0,x)\r\n",
" self.inst.clear();\r\n",
" self.inst.write('*idn?')\r\n",
" time.sleep(0.8)\r\n",
" print(\"Stanford_Research_System, SR830 on gpib-address \" + str(x) + \" detected!\")\r\n",
" return True\r\n",
" break\r\n",
" except gpib_package.GpibError, e:\r\n",
" print(str(x) + \" ...\")\r\n",
" continue\r\n",
" return False\r\n",
" \r\n",
" def __check_if_GPIB_USB_B_Adapter_linux(self):\r\n",
" \"\"\"\r\n",
" internal method\r\n",
" this method checks if the GPIB-USB-B-Adapter is used instead of the GPIB-USB-HS-Adapter.\r\n",
" if this condition is true the method loads all needed modules in Ubuntu \r\n",
" \"\"\"\r\n",
" a = []\r\n",
" a = subprocess.check_output('lsusb')\r\n",
" x = None\r\n",
" for i in a.split('\\n'):\r\n",
" if 'GPIB-USB-B' in i:\r\n",
" x = i\r\n",
" break\r\n",
" if x is not None:\r\n",
" bus_number = x[4:7]\r\n",
" device_number = x[15:18]\r\n",
" subprocess.Popen('sudo fxload -D /dev/bus/usb/' + str(bus_number)+ '/' + str(device_number) + \r\n",
" ' -I /lib/firmware/ni_usb_gpib/niusbb_firmware.hex -s /lib/firmware/ni_usb_gpib/niusbb_loader.hex', \r\n",
" shell=True, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)\r\n",
" \r\n",
" def __init__(self):\r\n",
" '''\r\n",
" Automatically search for pre-set gpib-address from connected instrument\r\n",
" Check if the connected Intrument is compatible to this driver by\r\n",
" using check_instr-function \r\n",
" '''\r\n",
" found = False\r\n",
" \r\n",
" if sys.platform.startswith('lin'):\r\n",
" self.__check_if_GPIB_USB_B_Adapter_linux();\r\n",
" found = self.__lin_search_logic();\r\n",
" if not found:\r\n",
" #print(\"Run \\'sudo gpib_config\\'\") \r\n",
" subprocess.Popen('sudo gpib_config', shell=True, stdin = subprocess.PIPE, \r\n",
" stdout = subprocess.PIPE,\r\n",
" stderr = subprocess.PIPE)\r\n",
" print(\"Gpib-address could not be detected!\")\r\n",
" print(\"Press F5...\")\r\n",
" \r\n",
" elif sys.platform.startswith('win'):\r\n",
" try:\r\n",
" f, filename, descr = imp.find_module('visa')\r\n",
" visa_package = imp.load_module('visa', f, filename, descr)\r\n",
" visa_available = True\r\n",
" except ImportError:\r\n",
" visa_available = False\r\n",
" print('Visa is not available')\r\n",
" if visa_available:\r\n",
" rm = visa_package.ResourceManager()\r\n",
"\r\n",
" print(\"searching for correct gpib-address...\")\r\n",
" for x in range(1, 31):\r\n",
" with warnings.catch_warnings(record = True) as w:\r\n",
" self.visa_instr = rm.open_resource('GPIB0::' + str(x) + '::INSTR')\r\n",
" if len(w):\r\n",
" print(str(x) + \" ...\")\r\n",
" continue\r\n",
" else:\r\n",
" print(str(x) + \" ...\")\r\n",
" if(self.check_instr()):\r\n",
" print \"Stanford_Research_System, SR830 on gpib-address \" + str(x) + \" detected!\"\r\n",
" found = True \r\n",
" break\r\n",
" if not found:\r\n",
" print(\"Gpib-address could not be detected!\")\r\n",
" \r\n",
" def check_instr(self):\r\n",
" '''\r\n",
" Check if the connected Intrument is compatible to this driver by\r\n",
" comparing the *IDN? answer to the string \"Stanford_Research_Systems,SR830\" \r\n",
" '''\r\n",
" if sys.platform.startswith('lin'):\r\n",
" self.inst.clear();\r\n",
" self.inst.write('*idn?')\r\n",
" time.sleep(0.2)\r\n",
" ident = self.inst.read(100) \r\n",
" self.inst.clear();\r\n",
" elif sys.platform.startswith('win'):\r\n",
" self.visa_instr.clear();\r\n",
" try:\r\n",
" ident = self.visa_instr.query(\"*IDN?\")\r\n",
" time.sleep(3)\r\n",
" except:\r\n",
" ident = \"\"\r\n",
" self.visa_instr.clear();\r\n",
" if DEVICE_NAME in ident:\r\n",
" return True\r\n",
" else:\r\n",
" if DEBUG: print \"DEBUG: Instrument \"+ ident + \" seems not to be Stanford_Research_Systems, SR830\"\r\n",
" return False\r\n",
" \r\n",
" \r\n",
" def correct_phaseshift(self, phase):\r\n",
" \"\"\"\r\n",
" I have no idea what this method is supposed to do (-> untested)\r\n",
" \"\"\"\r\n",
" th=100 \r\n",
" sig = lambda x: x < 0. \r\n",
" \r\n",
" prev_sig = sig(phase[0]) \r\n",
" prev_element = phase[0]\r\n",
" jump = 0\r\n",
" return_array = []\r\n",
" \r\n",
" for element in phase:\r\n",
" save_element = element \r\n",
" if (sig(element) is not prev_sig \r\n",
" and abs(prev_element) > th \r\n",
" and abs(element) > th): \r\n",
" \r\n",
" if jump: jump = 0\r\n",
" \r\n",
" \r\n",
" else: jump = -1 if prev_sig else 1 \r\n",
" \r\n",
" if jump:\r\n",
" save_element=element+ jump * 360\r\n",
" \r\n",
" prev_element=element\r\n",
" prev_sig = sig(element)\r\n",
" return_array.append(save_element)\r\n",
" return return_array\r\n",
" \r\n",
" def __GetSomething(self, cmdString):\r\n",
" \"\"\"\r\n",
" Internal function. The cmdString will be send to the instrument\r\n",
" to get a response. \r\n",
" (cmdString can be for example SENS?, FREQ?,... most likely something with a question mark)\r\n",
" \"\"\"\r\n",
" if sys.platform.startswith('win'):\r\n",
" self.visa_instr.clear();\r\n",
" resp = self.visa_instr.query(cmdString)\r\n",
" elif sys.platform.startswith('lin'):\r\n",
" self.inst.clear();\r\n",
" self.inst.write(cmdString)\r\n",
" resp = self.inst.read(100)\r\n",
" self.inst.clear();\r\n",
" if DEBUG:\r\n",
" print(\"command: \" + cmdString + \"; resp: \" + str(resp))\r\n",
" return resp\r\n",
" \r\n",
" def __SetSomething(self, cmdString, setValue):\r\n",
" \"\"\"\r\n",
" Internal function. The cmdString will be send to the instrument. \r\n",
" Use setValue to set specific Values on the instrument \r\n",
" (setValue can for example be the value of PHAS or FREQ, \r\n",
" when the cmdString contains \"PHAS\" or \"FREQ\")\r\n",
" \"\"\"\r\n",
" if sys.platform.startswith('win'):\r\n",
" self.visa_instr.write(cmdString + ' ' + str(setValue))\r\n",
" elif sys.platform.startswith('lin'):\r\n",
" self.inst.clear();\r\n",
" self.inst.write(cmdString + ' ' + str(setValue))\r\n",
" time.sleep(0.2)\r\n",
" self.inst.clear();\r\n",
" if DEBUG:\r\n",
" print(\"command: \" + cmdString + ' ' + str(setValue))\r\n",
" \r\n",
"\r\n",
" def ConvertiToTimeconstant(self, i):\r\n",
" \"\"\"\r\n",
" converts the i-param needed for the OFLT?-command to the actual timeconstant-value\r\n",
" \"\"\"\r\n",
" options = {0 : 10e-6,\r\n",
" 1 : 30e-6,\r\n",
" 2 : 100e-6,\r\n",
" 3 : 300e-6,\r\n",
" 4 : 1e-3,\r\n",
" 5 : 3e-3,\r\n",
" 6 : 10e-3,\r\n",
" 7 : 30e-3,\r\n",
" 8 : 100e-3,\r\n",
" 9 : 300e-3,\r\n",
" 10: 1,\r\n",
" 11: 3,\r\n",
" 12: 10,\r\n",
" 13: 30,\r\n",
" 14: 100,\r\n",
" 15: 300,\r\n",
" 16: 1000,\r\n",
" 17: 3000,\r\n",
" 18: 10000,\r\n",
" 19: 30000\r\n",
" }\r\n",
" try:\r\n",
" return options[i]\r\n",
" except:\r\n",
" raise Exception(\"ConvertiToTimeconstant: parameter i contains an invalid value\")\r\n",
" \r\n",
" def ConvertTimeconstantToi(self, timeconstant):\r\n",
" \"\"\"\r\n",
" converts the actual timeconstant-value to the i-param, needed for the OFLT-command\r\n",
" \"\"\"\r\n",
" options = {10e-6 : 0,\r\n",
" 30e-6 : 1,\r\n",
" 100e-6 : 2,\r\n",
" 300e-6 : 3,\r\n",
" 1e-3 : 4,\r\n",
" 3e-3 : 5,\r\n",
" 10e-3 : 6,\r\n",
" 30e-3 : 7,\r\n",
" 100e-3 : 8,\r\n",
" 300e-3 : 9,\r\n",
" 1 : 10,\r\n",
" 3 : 11,\r\n",
" 10 : 12,\r\n",
" 30 : 13,\r\n",
" 100 : 14,\r\n",
" 300 :15,\r\n",
" 1000 : 16,\r\n",
" 3000 : 17,\r\n",
" 10000 : 18,\r\n",
" 30000 : 19\r\n",
" }\r\n",
" try:\r\n",
" return options[timeconstant]\r\n",
" except:\r\n",
" raise Exception(\"ConvertTimeconstantToi: parameter timeconstant contains an invalid value\")\r\n",
"\r\n",
"# by HoWil#############\r\n",
" \r\n",
" def __SensitivityToVolt(self, n_In):\r\n",
" \"\"\"\r\n",
" Internal method\r\n",
" This function is meant to be called from .SetSensitivityLIA() to calculate \r\n",
" the sensitivity value out of the sensitivity settings on the lockIn\r\n",
" \"\"\"\r\n",
" # Dim m_In As Integer\r\n",
" m_In = n_In + 1;\r\n",
" voltValue = round(10**((m_In%3) / 3)) * (10**-9 * 10**np.floor(m_In / 3));\r\n",
" return voltValue\r\n",
" # end % function SensitivityToVolt\r\n",
"\r\n",
"\r\n",
" def SetSensitivityLIA(self, timeconstant = None):\r\n",
" \"\"\"\r\n",
" Automatically sets the best Sensitivity.\r\n",
" \r\n",
" When the timeconstant is None the timeconstant set on the device\r\n",
" is being used. Attention: If this pre-set timeconstant is large, this could take awhile!\r\n",
" When the timecontant is not None, the timeconstant on the device is set to this timeconstant,\r\n",
" before the SetSensitivityLIA-Logic starts \r\n",
" \"\"\"\r\n",
" #Configure property value(s).\r\n",
" #set(obj, 'sens', 22.0);\r\n",
"\r\n",
" bKorrekterBereich = 0;\r\n",
" Frequenz = self.getF();\r\n",
" T = 1/Frequenz\r\n",
" \r\n",
" while bKorrekterBereich == 0:\r\n",
" if timeconstant == None:\r\n",
" i = self.GetTimeConst();\r\n",
" timeconstant = self.ConvertiToTimeconstant(i)\r\n",
" time.sleep(3 * timeconstant + T);\r\n",
" else:\r\n",
" i = self.ConvertTimeconstantToi(timeconstant);\r\n",
" self.SetTimeConst(i)\r\n",
" time.sleep(3 * timeconstant + T);\r\n",
" # end\r\n",
" \r\n",
" # Query property value(s).\r\n",
" iSensitivityLIA = self.getSens(); # get the set sensitivity\r\n",
" R = self.getR();\r\n",
" #print \" R = %f\" %R\r\n",
" #print \" iSensitivityLIA = %i\" %iSensitivityLIA\r\n",
" vValue = self.__SensitivityToVolt(iSensitivityLIA);#!!!\r\n",
" #print \" voltValue = %f\" %voltValue\r\n",
" if R > vValue:\r\n",
" iSensitivityLIA = iSensitivityLIA + 1;\r\n",
" if iSensitivityLIA > 26 :\r\n",
" iSensitivityLIA = 26; \r\n",
" # end;\r\n",
" # Configure property value(s). \r\n",
" self.SetSens(iSensitivityLIA);\r\n",
" bKorrekterBereich = 0;\r\n",
" time.sleep(3 * timeconstant + 0.2 * T)\r\n",
" else:\r\n",
" #R = self.getR();\r\n",
" #vValue = self.__SensitivityToVolt(iSensitivityLIA);#!!!\r\n",
" if DEBUG: print str(vValue)\r\n",
" \r\n",
" if R < 0.3 * vValue:\r\n",
" iSensitivityLIA = iSensitivityLIA - 1;\r\n",
" if iSensitivityLIA < 0:\r\n",
" iSensitivityLIA = 0; \r\n",
" # end;\r\n",
" if DEBUG: print(\"iSensitivityLIA: \" + str(iSensitivityLIA))\r\n",
" self.SetSens(iSensitivityLIA);\r\n",
" bKorrekterBereich = 0;\r\n",
" time.sleep(3 * timeconstant + 0.2 * T)\r\n",
" else:\r\n",
" bKorrekterBereich = 1;\r\n",
" \r\n",
" if DEBUG: print str(vValue)\r\n",
" return vValue\r\n",
" # end\r\n",
" # end\r\n",
" # end\r\n",
" \r\n",
" # end # function SetSensitivityLIA\r\n",
" \r\n",
" def SendString(self, CmdString):\r\n",
" \"\"\"\r\n",
" sends CmdString as a command to the instrument\r\n",
" \"\"\"\r\n",
" if DEBUG: \r\n",
" print(\"send string: \" + CmdString) \r\n",
" \r\n",
" if sys.platform.startswith('win'):\r\n",
" self.visa_instr.write(CmdString)\r\n",
" elif sys.platform.startswith('lin'):\r\n",
" self.inst.write(CmdString)\r\n",
" return\r\n",
"\r\n",
" def getR(self):\r\n",
" \"\"\"\r\n",
" Query the value of R (3). Returns ASCII floating point value[1].\r\n",
" [additional information: other options would be: X (1), Y (2), θ (4)]\r\n",
" \"\"\"\r\n",
" R = self.__GetSomething('OUTP?3')\r\n",
" if DEBUG:\r\n",
" print(\"R: \" + R)\r\n",
" return float(R)\r\n",
" \r\n",
" def getPhi(self):\r\n",
" \"\"\"\r\n",
" Query the value of θ (4). Returns ASCII floating point value[1].\r\n",
" [additional information: other options would be: X (1), Y (2), R (3)]\r\n",
" \"\"\"\r\n",
" phi = self.__GetSomething('OUTP?4')\r\n",
" if DEBUG:\r\n",
" print(\"Phi: \" + phi)\r\n",
" return float(phi)\r\n",
" \r\n",
" def getSens(self):\r\n",
" \"\"\"\r\n",
" duplicate to method GetSens\r\n",
" The SENS? command queries the sensitivity[1].\r\n",
" \r\n",
" i=0(≙2 nV/fA), i=1(≙5 nV/fA), i=2(≙10 nV/fA), i=3(≙20 nV/fA), i=4(≙50 nV/fA), i=5(≙100 nV/fA), i=6(≙200 nV/fA), \r\n",
" i=7(≙500 nV/fA), i=8(≙1 μV/pA), i=9(≙2 μV/pA), i=10(≙5 μV/pA), i=11(≙10 μV/pA), i=12(≙20 μV/pA), i=13(≙50 μV/pA),\r\n",
" i=14(≙100 μV/pA), i=15(≙200 μV/pA), i=16(≙500 μV/pA), i=17(≙1 mV/nA), i=18(≙2 mV/nA), i=19(≙5 mV/nA), i=20(≙10 mV/nA),\r\n",
" i=21(≙20 mV/nA), i=22(≙50 mV/nA), i=23(≙100 mV/nA), i=24(≙200 mV/nA), i=25(≙500 mV/nA), i=26(≙1 V/μA)\r\n",
" \"\"\"\r\n",
" i = self.__GetSomething('SENS?')\r\n",
" if DEBUG:\r\n",
" print(\"Sens: \" + i)\r\n",
" return float(i)\r\n",
" \r\n",
" def getF(self):\r\n",
" \"\"\"\r\n",
" duplicate to method GetRefFreq \r\n",
" \r\n",
" The FREQ? query command will return the reference frequency \r\n",
" (in internal or external mode)[1]. \r\n",
" \"\"\"\r\n",
" fr = self.__GetSomething('FREQ?')\r\n",
" if DEBUG:\r\n",
" print(\"F: \" + fr)\r\n",
" return float(fr)\r\n",
"\r\n",
"####################\r\n",
"#Instrument status\r\n",
"\r\n",
" def SerialPollDiagnostic(self):\r\n",
" \"\"\"\r\n",
" I have no idea what this method is supposed to do (-> untested)\r\n",
" \"\"\"\r\n",
" resp = self.__GetSomething('*STB?')\r\n",
" \r\n",
" SPB = int(resp) # SPB ...serial poll byte\r\n",
" \r\n",
" ok = SPB & 1 | SPB & 2 | (not (SPB & 64)) #.....no command in progress\r\n",
" if (not ok):\r\n",
"            SPBbit0 = SPB & 0 #no data is being acquired (NOTE(review): SPB & 0 is always 0 — bit 0 should likely be tested with SPB & 1)\r\n",
" SPBbit1 = SPB & 2 #no command execution in progress\r\n",
" SPBbit2 = SPB & 4 #unmasked bit in error status byte set\r\n",
" SPBbit3 = SPB & 8 #unmasked bit in LIA status byte set\r\n",
" SPBbit4 = SPB & 16 #!!!! the interface output buffer is not empty\r\n",
" SPBbit5 = SPB & 32 #unmasked bit in standard status byte set\r\n",
"            SPBbit6 = SPB & 64 # SRQ (service request) has occurred\r\n",
" SPBbit7 = SPB & 128 #not in use\r\n",
" if SPBbit2:\r\n",
" print 'unmasked bit in error status byte set'\r\n",
" ERRSbyte = self.__GetSomething('ERRS?')# may be subroutine call required\r\n",
" print 'error-status byte: ', ERRSbyte\r\n",
" if SPBbit3:\r\n",
" print 'unmasked bit in LIA status byte set' \r\n",
" LIASbyte = self.__GetSomething('LIAS?') # may be subroutine call required\r\n",
" print 'LIA-status byte: ', LIASbyte \r\n",
" if SPBbit4:\r\n",
"                self.SendString('REST') # not sure that this will help\r\n",
" if SPBbit5:\r\n",
" ESRbyte = self.__GetSomething('*ESR?') # may be subroutine call required \r\n",
" print 'standard event-status byte: ', ESRbyte\r\n",
" if SPBbit6:\r\n",
" SPEbyte = self.__GetSomething('*SRE?') # may be subroutine call required \r\n",
" print 'SRQ occurred SP enable register value ', SPEbyte\r\n",
" return SPB \r\n",
"\r\n",
"#reference settings \r\n",
" def SetRefRms(self,rms):\r\n",
" \"\"\"\r\n",
" The SLVL x command sets the amplitude of the sine output.\r\n",
" The parameter x is a voltage (real number of Volts). The value of x will\r\n",
" be rounded to 0.002V. The value of x is limited to 0.004 ≤ x ≤ 5.000[1]. \r\n",
" \"\"\" \r\n",
" #if rms < 0.004 or rms > 5.0:\r\n",
" # raise Exception(\"SetRefRms: parameter rms can only be set to values from 0.004 to 5.0\")\r\n",
" resp = self.__SetSomething('SLVL', rms)\r\n",
" return resp\r\n",
" \r\n",
" def GetRefRms(self):\r\n",
" \"\"\"\r\n",
" The SLVL? command queries the amplitude of the sine output.\r\n",
" \"\"\"\r\n",
" rms = self.__GetSomething('SLVL?')\r\n",
" return float(rms)\r\n",
"\r\n",
" def SetRefFreq(self,f):\r\n",
" \"\"\"\r\n",
" The FREQ f command sets the frequency of the internal oscillator. This\r\n",
" command is allowed only if the reference source is internal. The parame-\r\n",
" ter f is a frequency (real number of Hz). The value of f will be rounded to\r\n",
" 5 digits or 0.0001 Hz, whichever is greater. The value of f is limited to\r\n",
" 0.001 ≤ f ≤ 102000. If the harmonic number is greater than 1, then the\r\n",
" frequency is limited to nxf ≤ 102 kHz where n is the harmonic number[1].\r\n",
" \"\"\"\r\n",
" #if f < 0.001 or f > 102000:\r\n",
" # raise Exception(\"SetRefFreq: parameter f can only be set to values from 0.001 to 102000.\")\r\n",
" resp = self.__SetSomething('FREQ', str(f))\r\n",
" return resp\r\n",
" \r\n",
" def GetRefFreq(self):\r\n",
" \"\"\"\r\n",
" duplicate to method getF\r\n",
" \r\n",
" The FREQ? query command will return the reference frequency \r\n",
" (in internal or external mode)[1].\r\n",
" \"\"\"\r\n",
" f = self.__GetSomething('Freq?')\r\n",
" return float(f)\r\n",
" \r\n",
" def SetRefPhas(self, phase):\r\n",
" \"\"\"\r\n",
" The PHAS x command will set the phase shift to x. \r\n",
" The parameter x is the phase (real number of degrees). \r\n",
" The value of x will be rounded to 0.01°.\r\n",
" The phase may be programmed from -360.00 ≤ x ≤ 729.99 and will be\r\n",
" wrapped around at ±180°. For example, the PHAS 541.0 command will\r\n",
" set the phase to -179.00° (541-360=181=-179)[1].\r\n",
" \"\"\"\r\n",
" #if phase < -360.0 or phase > 729.99:\r\n",
" # raise Exception(\"SetRefPhas: parameter phase can only be set to values from -360.0 to 729.99\")\r\n",
" resp = self.__SetSomething('PHAS', str(phase))\r\n",
" return resp\r\n",
" \r\n",
" def GetRefPhas(self):\r\n",
" \"\"\"\r\n",
" The PHAS? command queries the reference phase shift[1].\r\n",
" \"\"\"\r\n",
" phase = self.__GetSomething('PHAS?')\r\n",
" return float(phase)\r\n",
" \r\n",
" def SetRefMode(self,refmod):\r\n",
" \"\"\"\r\n",
" The FMOD i command sets the reference source. The parameter\r\n",
" i selects internal (i=1) or external (i=0)[1].\r\n",
" \"\"\"\r\n",
" if refmod not in (0,1):\r\n",
" raise Exception(\"SetRefMode: parameter refmode can only be set to 0 (=external) or 1(=internal)\")\r\n",
" resp = self.__SetSomething('FMOD', str(refmod))\r\n",
" return resp\r\n",
"\r\n",
" def __checkFractionalDigits(self, i, exception_text):\r\n",
" \"\"\"\r\n",
" internal method checks if there are other numbers than 0 among the fractional digits\r\n",
" \"\"\"\r\n",
" import decimal\r\n",
" if \".\" in str(i):\r\n",
" d = decimal.Decimal(i).as_tuple()\r\n",
" preDecimalPlaces = len(d.digits) + d.exponent\r\n",
" try: \r\n",
" fractionalDigits = int(str(i)[(preDecimalPlaces + 1):])\r\n",
" except:\r\n",
" raise Exception(exception_text)\r\n",
" if fractionalDigits != 0:\r\n",
" raise Exception(exception_text) \r\n",
" \r\n",
" def GetRefMode(self):\r\n",
" \"\"\"\r\n",
" The FMOD? command queries the reference source[1].\r\n",
" refmod=0(≙external) or refmode=1(≙internal)\r\n",
" \"\"\" \r\n",
" refmod = self.__GetSomething('FMOD?')\r\n",
" return int(refmod)\r\n",
" \r\n",
" def SetRefHarm(self,harm):\r\n",
" \"\"\"\r\n",
" The HARM i command sets the detection harmonic. This\r\n",
" parameter is an integer from 1 to 19999. The HARM i command will set\r\n",
" the lock-in to detect at the i th harmonic of the reference frequency. The\r\n",
" value of i is limited by ixf ≤ 102 kHz. If the value of i requires a detection\r\n",
" frequency greater than 102 kHz, then the harmonic number will be set to\r\n",
" the largest value of i such that ixf ≤ 102 kHz[1].\r\n",
" \"\"\"\r\n",
" #if harm < 1 or harm > 19999:\r\n",
" # raise Exception(\"harm can only be set to values from 1 to 19999\")\r\n",
" exception_text = \"SetRefHarm: parameter harm has to be int or long from 1 to 19999\"\r\n",
" self.__checkFractionalDigits(harm, exception_text);\r\n",
" try:\r\n",
" harm = int(harm)\r\n",
" except:\r\n",
" raise Exception(exception_text)\r\n",
" \r\n",
" if not isinstance( harm, ( int, long ) ):\r\n",
" raise Exception(exception_text)\r\n",
" resp = self.__SetSomething('HARM', str(harm))\r\n",
" return resp\r\n",
" \r\n",
" def GetRefHarm(self):\r\n",
" \"\"\"\r\n",
" The HARM? command queries the detection harmonic[1].\r\n",
" \"\"\"\r\n",
" harm = self.__GetSomething('HARM?')\r\n",
" return int(harm)\r\n",
"\r\n",
"#input and filter \r\n",
" def SetInputConfig(self,iconf):\r\n",
" \"\"\"\r\n",
" The ISRC command sets the input configuration. The parameter\r\n",
" i selects A (i=0), A-B (i=1), I (1 MΩ) (i=2) or I (100 MΩ) (i=3).\r\n",
" \r\n",
" Changing the current gain does not change the instrument sensitivity.\r\n",
" Sensitivities above 10 nA require a current gain of 1 MΩ. Sensitivities\r\n",
" between 20 nA and 1 μA automatically select the 1 MΩ current gain. At\r\n",
" sensitivities below 20 nA, changing the sensitivity does not change the\r\n",
" current gain[1].\r\n",
" \"\"\"\r\n",
" if iconf not in (0, 1, 2, 3):\r\n",
" raise Exception(\"SetInputConfig: parameter iconf can only be set to value from 0 to 3\\nA (iconf=0), A-B (iconf=1), I (1 MΩ) (iconf=2) or I (100 MΩ) (iconf=3)\")\r\n",
" resp = self.__SetSomething('ISRC', str(iconf))\r\n",
" return resp\r\n",
" \r\n",
" def GetInputConfig(self):\r\n",
" \"\"\"\r\n",
" The ISRC? command queries the input configuration[1].\r\n",
" iconf=0 (≙A), iconf=1(≙A-B), iconf=2 (≙I(1 MΩ)) or iconf=3(≙I(100 MΩ))\r\n",
" \"\"\"\r\n",
" iconf = self.__GetSomething('ISRC?')\r\n",
" return int(iconf)\r\n",
" \r\n",
" def SetGNDConfig(self, gndconf):\r\n",
" \"\"\"\r\n",
" The IGND command queries the input shield grounding[1]. The\r\n",
" parameter gndconf selects Float (gndconf=0) or Ground (gndconf=1).\r\n",
" \"\"\"\r\n",
" if gndconf not in (0,1):\r\n",
" raise Exception(\"SetGNDConfig: parameter gndconf can only be 0(≙Float) or 1(≙Ground)\")\r\n",
" self.__SetSomething('IGND', gndconf)\r\n",
" \r\n",
" def GetGNDConfig(self):\r\n",
" \"\"\"\r\n",
" The IGND? command queries the input shield grounding[1]. The\r\n",
" gndconf=0(≙Float) or gndconf=1(≙Ground)\r\n",
" \"\"\"\r\n",
" gndconf = self.__GetSomething('IGND?')\r\n",
" return int(gndconf)\r\n",
" \r\n",
" def SetInputCoupling(self,icoup):\r\n",
" \"\"\"\r\n",
" The ICPL i command sets the input coupling. \r\n",
" The parameter i selects AC (i=0) or DC (i=1)[1].\r\n",
" \"\"\"\r\n",
" if icoup not in (0,1):\r\n",
" raise Exception(\"SetInputCoupling: parameter icoup can only be 0(≙AC) or 1(≙DC)\")\r\n",
" resp = self.__SetSomething('ICPL', icoup)\r\n",
" return resp\r\n",
" \r\n",
" def GetInputCoupling(self):\r\n",
" \"\"\"\r\n",
" The ICPL? command queries the input coupling[1].\r\n",
" icoup=0(≙AC) or icoup=1(≙DC)\r\n",
" \"\"\"\r\n",
" icoup = self.__GetSomething('ICPL?')\r\n",
" return int(icoup)\r\n",
" \r\n",
" def SetLineNotch(self, linotch):\r\n",
" \"\"\"\r\n",
" The ILIN i command sets the input line notch filter status. The\r\n",
" parameter i selects Out or no filters (i=0), Line notch in (i=1), 2xLine\r\n",
" notch in (i=2) or Both notch filters in (i=3)[1].\r\n",
" \"\"\"\r\n",
" if linotch not in (0,1,2,3):\r\n",
" raise Exception(\"SetLineNotch: parameter linotch can only be set to 0(≙Out or no filters), 1(≙Line notch in), 2(≙2xLine notch in) or 3(≙Both notch filters in)\")\r\n",
" self.__SetSomething('ILIN', str(linotch))\r\n",
" \r\n",
" def GetLineNotch(self):\r\n",
" \"\"\"\r\n",
" The ILIN? command queries the input line notch filter status[1].\r\n",
" \"\"\"\r\n",
" linotch = self.__GetSomething('ILIN?')\r\n",
" return int(linotch)\r\n",
" \r\n",
" def SetSens(self, i):\r\n",
" \"\"\"\r\n",
" The SENS command sets the sensitivity[1].\r\n",
" \r\n",
" i=0(≙2 nV/fA), i=1(≙5 nV/fA), i=2(≙10 nV/fA), i=3(≙20 nV/fA), i=4(≙50 nV/fA), i=5(≙100 nV/fA), i=6(≙200 nV/fA), \r\n",
" i=7(≙500 nV/fA), i=8(≙1 μV/pA), i=9(≙2 μV/pA), i=10(≙5 μV/pA), i=11(≙10 μV/pA), i=12(≙20 μV/pA), i=13(≙50 μV/pA),\r\n",
" i=14(≙100 μV/pA), i=15(≙200 μV/pA), i=16(≙500 μV/pA), i=17(≙1 mV/nA), i=18(≙2 mV/nA), i=19(≙5 mV/nA), i=20(≙10 mV/nA),\r\n",
" i=21(≙20 mV/nA), i=22(≙50 mV/nA), i=23(≙100 mV/nA), i=24(≙200 mV/nA), i=25(≙500 mV/nA), i=26(≙1 V/μA)\r\n",
" \"\"\"\r\n",
" exception_text = \"SetSens: parameter i can only be set to int or long values from 0 to 26\\n\";\r\n",
" exception_text += \"i=0(≙2 nV/fA), i=1(≙5 nV/fA), i=2(≙10 nV/fA), i=3(≙20 nV/fA), i=4(≙50 nV/fA), i=5(≙100 nV/fA), i=6(≙200 nV/fA), \"\r\n",
" exception_text += \"i=7(≙500 nV/fA), i=8(≙1 μV/pA), i=9(≙2 μV/pA), i=10(≙5 μV/pA), i=11(≙10 μV/pA), i=12(≙20 μV/pA), i=13(≙50 μV/pA), \"\r\n",
" exception_text += \"i=14(≙100 μV/pA), i=15(≙200 μV/pA), i=16(≙500 μV/pA), i=17(≙1 mV/nA), i=18(≙2 mV/nA), i=19(≙5 mV/nA), i=20(≙10 mV/nA), \"\r\n",
" exception_text += \"i=21(≙20 mV/nA), i=22(≙50 mV/nA), i=23(≙100 mV/nA), i=24(≙200 mV/nA), i=25(≙500 mV/nA), i=26(≙1 V/μA)\"\r\n",
" self.__checkFractionalDigits(i, exception_text); \r\n",
" try:\r\n",
" i = int(i)\r\n",
" except:\r\n",
" raise Exception(exception_text)\r\n",
" if i < 0 or i > 26 or not(isinstance( i, ( int, long ) )):\r\n",
" raise Exception(exception_text)\r\n",
" self.__SetSomething('SENS', i)\r\n",
" \r\n",
" def GetSens(self):\r\n",
" \"\"\" \r\n",
" duplicate to method getSens\r\n",
" \r\n",
" The SENS? command queries the sensitivity[1].\r\n",
" \r\n",
" i=0(≙2 nV/fA), i=1(≙5 nV/fA), i=2(≙10 nV/fA), i=3(≙20 nV/fA), i=4(≙50 nV/fA), i=5(≙100 nV/fA), i=6(≙200 nV/fA), \r\n",
" i=7(≙500 nV/fA), i=8(≙1 μV/pA), i=9(≙2 μV/pA), i=10(≙5 μV/pA), i=11(≙10 μV/pA), i=12(≙20 μV/pA), i=13(≙50 μV/pA),\r\n",
" i=14(≙100 μV/pA), i=15(≙200 μV/pA), i=16(≙500 μV/pA), i=17(≙1 mV/nA), i=18(≙2 mV/nA), i=19(≙5 mV/nA), i=20(≙10 mV/nA),\r\n",
" i=21(≙20 mV/nA), i=22(≙50 mV/nA), i=23(≙100 mV/nA), i=24(≙200 mV/nA), i=25(≙500 mV/nA), i=26(≙1 V/μA)\r\n",
" \"\"\"\r\n",
" R = self.__GetSomething('SENS?')\r\n",
" return int(R)\r\n",
"\r\n",
" def SetReserve(self, reserve):\r\n",
" \"\"\"\r\n",
" The RMOD i command sets the reserve mode. The parameter i\r\n",
" selects High Reserve (i=0), Normal (i=1) or Low Noise (minimum) (i=2).\r\n",
" See in the manual-description of the [Reserve] key for the actual reserves for each\r\n",
" sensitivity[1].\r\n",
" \"\"\"\r\n",
" if reserve not in (0,1,2):\r\n",
" raise Exception(\"SetReserve: parameter reserve can only be set to the values 0(≙High Reserve), 1(≙Normal) or 2(≙Low Noise)\")\r\n",
" self.__SetSomething('RMOD', str(reserve))\r\n",
" \r\n",
" def GetReserve(self):\r\n",
" \"\"\"\r\n",
" The RMOD? command queries the reserve mode[1].\r\n",
" reserve=0(≙High Reserve), reserve=1(≙Normal) or reserve=2(≙Low Noise)\r\n",
" \"\"\"\r\n",
" reserve = self.__GetSomething('RMOD?')\r\n",
" return int(reserve)\r\n",
"\r\n",
" def SetTimeConst(self,i):\r\n",
" \"\"\"\r\n",
" The OFLT i command sets the time constant[1].\r\n",
" \r\n",
" i=0(≙10 μs), i=1(≙30 μs), i=2(≙100 μs), i=3(≙300 μs), i=4(≙1 ms), i=5(≙3 ms), i=6(≙10 ms), \r\n",
" i=7(≙30 ms), i=8(≙100 ms), i=9(≙300 ms), i=10(≙1 s), i=11(≙3 s), i=12(≙10 s), i=13(≙30 s),\r\n",
" i=14(≙100 s), i=15(≙300 s), i=16(≙1 ks), i=17(≙3 ks), i=18(≙10 ks), i=19(≙30 ks)\r\n",
" use the method self.ConvertTimeconstantToi to convert your timeconstant to the needed parameter for this method\r\n",
" \r\n",
" Time constants greater than 30s may NOT be set if the harmonic x ref. frequency (detection frequency) exceeds 200 Hz. \r\n",
" Time constants shorter than the minimum time constant (based upon the filter slope and dynamic reserve) will set the \r\n",
" time constant to the minimum allowed time constant[1]. See the Gain and Time Constant operation section in the manual.\r\n",
" \"\"\"\r\n",
" exception_text = \"SetTimeConst: parameter i can only be set to values from 0 to 19\\n\"\r\n",
" exception_text += \"i=0(≙10 μs), i=1(≙30 μs), i=2(≙100 μs), i=3(≙300 μs), i=4(≙1 ms), i=5(≙3 ms), i=6(≙10 ms), \"\r\n",
" exception_text += \"i=7(≙30 ms), i=8(≙100 ms), i=9(≙300 ms), i=10(≙1 s), i=11(≙3 s), i=12(≙10 s), i=13(≙30 s), \"\r\n",
" exception_text += \"i=14(≙100 s), i=15(≙300 s), i=16(≙1 ks), i=17(≙3 ks), i=18(≙10 ks), i=19(≙30 ks)\"\r\n",
" self.__checkFractionalDigits(i, exception_text); \r\n",
" try:\r\n",
" i = int(i)\r\n",
" except:\r\n",
" raise Exception(exception_text) \r\n",
" if i < 0 or i > 19 or not(isinstance( i, ( int, long ) )):\r\n",
" raise Exception(exception_text)\r\n",
" self.__SetSomething('OFLT', i)\r\n",
"\r\n",
" def GetTimeConst(self):\r\n",
" \"\"\"\r\n",
" The OFLT? command queries the time constant[1].\r\n",
" use the method self.ConvertiToTimeconstant to convert the return-value of this method to the actual timeconstant \r\n",
" \"\"\"\r\n",
" tc = self.__GetSomething('OFLT?')\r\n",
" # 1e-5 * 10**np.floor(int(tc)/2) * (1+2*(int(tc)%2)) #numerischer Wert \r\n",
" return int(tc)\r\n",
" \r\n",
" def SetSlope(self,slope):\r\n",
" \"\"\"\r\n",
"        The OFSL i command sets the low pass filter slope. The\r\n",
" parameter slope selects 6 dB/oct (slope=0), 12 dB/oct (slope=1), 18 dB/oct (slope=2) or\r\n",
" 24 dB/oct (slope=3)[1].\r\n",
" \"\"\"\r\n",
" exception_text = \"SetSlope: parameter slope can only be set to the values 0(≙6 dB/oct), 1(≙12 dB/oct), 2(≙18 dB/oct) or 3(≙24 dB/oct).\"\r\n",
" self.__checkFractionalDigits(slope, exception_text); \r\n",
" try:\r\n",
" slope = int(slope)\r\n",
" except:\r\n",
" raise Exception(exception_text) \r\n",
" if slope < 0 or slope > 3 or not(isinstance( slope, ( int, long ) )):\r\n",
" raise Exception(exception_text)\r\n",
" self.__SetSomething('OFSL', slope)\r\n",
" \r\n",
" def GetSlope(self):\r\n",
" \"\"\"\r\n",
" The OFSL? command queries the low pass filter slope[1].\r\n",
" slope=0(≙6 dB/oct), slope=1(≙12 dB/oct), slope=2(≙18 dB/oct) or\r\n",
" slope=3(≙24 dB/oct)\r\n",
" \"\"\"\r\n",
" slope = self.__GetSomething('OFSL?')\r\n",
" return int(slope)\r\n",
" \r\n",
" def SetSyncFilter(self, sync):\r\n",
" \"\"\"\r\n",
" The SYNC i command sets the synchronous filter status. The\r\n",
" parameter i selects Off (i=0) or synchronous filtering below 200 Hz (i=1).\r\n",
" Synchronous filtering is turned on only if the detection frequency (refer-\r\n",
" ence x harmonic number) is less than 200 Hz[1].\r\n",
" \"\"\"\r\n",
" exception_text = \"SetSyncFilter: parameter sync can only be set to 0(≙Off) or 1(≙synchronous filtering below 200 Hz).\"\r\n",
" self.__checkFractionalDigits(sync, exception_text); \r\n",
" try:\r\n",
" sync = int(sync)\r\n",
" except:\r\n",
" raise Exception(exception_text) \r\n",
" if sync < 0 or sync > 1 or not(isinstance( sync, ( int, long ) )):\r\n",
" raise Exception(exception_text)\r\n",
" self.__SetSomething('SYNC', sync)\r\n",
" \r\n",
" def GetSyncFilter(self):\r\n",
" \"\"\"\r\n",
" The SYNC? command queries the synchronous filter status[1].\r\n",
" sync=0(≙Off) or sync=1(≙synchronous filtering below 200 Hz).\r\n",
" \"\"\"\r\n",
" sync = self.__GetSomething('SYNC?')\r\n",
" return int(sync)\r\n",
" \r\n",
" def SetDisplay(self, channel, j, ratio=0):\r\n",
" \"\"\"\r\n",
" The DDEF i, j, k command selects the CH1 and CH2 displays. The parameter\r\n",
" channel selects CH1 (channel=1) or CH2 (channel=2) and is required. \r\n",
" This command sets channel i to parameter j with ratio k as listed below.\r\n",
"        CH1 (i=1)            CH2 (i=2) \r\n",
" j display j display\r\n",
" 0 X 0 Y\r\n",
" 1 R 1 θ\r\n",
" 2 X Noise 2 Y Noise\r\n",
" 3 Aux In 1 3 Aux In 3\r\n",
" 4 Aux In 2 4 Aux In 4\r\n",
"\r\n",
" k ratio k ratio\r\n",
" 0 none 0 none\r\n",
" 1 Aux In 1 1 Aux In 3\r\n",
" 2 Aux In 2 2 Aux In 4\r\n",
" [1] \r\n",
" \"\"\"\r\n",
" ch = str(channel)\r\n",
" k = str(j)\r\n",
" rat = str(ratio)\r\n",
" Cmd = 'DDEF'+ ch + ',' + k + ',' + rat\r\n",
" self.SendString(Cmd)\r\n",
" return \r\n",
" \r\n",
" def GetDisplay(self, channel = 1):\r\n",
" \"\"\"\r\n",
" The DDEF? i command queries the display and ratio of display i. The\r\n",
" returned string contains both j and k separated by a comma. For exam-\r\n",
" ple, if the DDEF? 1 command returns \"1,0\" then the CH1 display is R\r\n",
" with no ratio[1].\r\n",
" \"\"\"\r\n",
" resp = self.__GetSomething('DDEF? ' + str(channel));\r\n",
" [j,ratio] = resp.rsplit(',')\r\n",
" return [j,ratio] \r\n",
" \r\n",
" def SetInterface(self, GPIB = True, RS232 =False): \r\n",
" \"\"\"\r\n",
" The OUTX i command sets the output interface to RS232 (i=0) or GPIB(i=1).\r\n",
" The OUTX i command should be sent before any query com-\r\n",
" mands to direct the responses to the interface in use[1].\r\n",
" \"\"\"\r\n",
" if GPIB:\r\n",
"            Cmd = 'OUTX 1'#sets the output interface to GPIB\r\n",
" else: \r\n",
" Cmd = 'OUTX 0'#sets the output interface to RS232\r\n",
" self.SendString(Cmd)\r\n",
" return\r\n",
" \r\n",
" def GetInterface(self, GPIB = False, RS232 =False): \r\n",
" \"\"\"\r\n",
" The OUTX? command queries the interface[1].\r\n",
" Interface=0(≙RS232) or Interface=1(≙GPIB).\r\n",
" \"\"\"\r\n",
" Ifc = self.__GetSomething('OUTX?')\r\n",
" if int(Ifc) == 1 :\r\n",
" Interface = 'GPIB'\r\n",
" else:\r\n",
" Interface = 'RS232'\r\n",
" return int(Ifc), Interface\r\n",
"\r\n",
" def SetDisableRemoteLockoutState(self, On = True):\r\n",
" \"\"\"\r\n",
" In general, every GPIB interface command will put the SR830 into the\r\n",
" REMOTE state with the front panel deactivated. To defeat this feature,\r\n",
" use the OVRM 1 command to overide the GPIB remote. In this mode, the\r\n",
" front panel is not locked out when the unit is in the REMOTE state. The\r\n",
" OVRM 0 command returns the unit to normal remote operation[1].\r\n",
" \"\"\"\r\n",
" if On:\r\n",
" Cmd = 'OVRM 1' #Front panel is not locked out \r\n",
" else:\r\n",
" Cmd = 'OVRM 0' #Front panel is locked out\r\n",
" self.SendString(Cmd)\r\n",
" return\r\n",
" \r\n",
" def SetKlickOn(self, On=False):\r\n",
" \"\"\"\r\n",
" The KCLK i command sets the key click On (i=1) or Off (i=0) state[1].\r\n",
" \"\"\"\r\n",
" if On:\r\n",
" Cmd = 'KCLK 1'\r\n",
" else:\r\n",
" Cmd = 'KCLK 0'\r\n",
" self.SendString(Cmd)\r\n",
" return\r\n",
" \r\n",
" def GetKlickOn(self,On=False):\r\n",
" \"\"\"\r\n",
"        The KCLK? command queries the key click On/Off state[1].\r\n",
" \"\"\"\r\n",
" KlickOn = self.__GetSomething('KCLK?')\r\n",
" return int(KlickOn) \r\n",
" \r\n",
" def SetAlarm(self, On=False): \r\n",
" \"\"\"\r\n",
" The ALRM i command sets the alarm On (i=1) or Off (i=0) state[1].\r\n",
" \"\"\"\r\n",
" if On:\r\n",
" Cmd = 'ALRM 1' \r\n",
" else:\r\n",
" Cmd = 'ALRM 0'\r\n",
" self.SendString(Cmd)\r\n",
" return\r\n",
" \r\n",
" def GetAlarm(self,On=False):\r\n",
" \"\"\"\r\n",
" The ALRM? command queries the alarm[1] \r\n",
" Alarm=1(≙On) or Alarm=0(≙Off).\r\n",
" \"\"\"\r\n",
" Alarm = self.__GetSomething('ALRM?')\r\n",
" return int(Alarm)\r\n",
" \r\n",
" def SaveSettings(self, BufferAddress = 1):\r\n",
" \"\"\"\r\n",
"        The SSET i command saves the lock-in setup in setting buffer i (1≤i≤9).\r\n",
" The setting buffers are retained when the power is turned off[1].\r\n",
" \"\"\"\r\n",
" self.__SetSomething('SSET', BufferAddress)\r\n",
" \r\n",
" def ReactivateSettings(self, BufferAddress = 1):\r\n",
" \"\"\"\r\n",
" The RSET i command recalls the lock-in setup from setting buffer i\r\n",
" (1≤i≤9). Interface parameters are not changed when a setting buffer is\r\n",
" recalled with the RSET command. If setting i has not been saved prior to\r\n",
" the RSET i command, then an error will result[1].\r\n",
" \"\"\"\r\n",
" self.__SetSomething('RSET', BufferAddress)\r\n",
" \r\n",
" def SetAutoGain(self):\r\n",
" \"\"\"\r\n",
" The AGAN command performs the Auto Gain function. This command is\r\n",
" the same as pressing the [Auto Gain] key. Auto Gain may take some\r\n",
" time if the time constant is long. AGAN does nothing if the time constant\r\n",
" is greater than 1 second. Check the command execution in progress bit\r\n",
" in the Serial Poll Status Byte (bit 1) to determine when the function is\r\n",
" finished[1].\r\n",
" \"\"\"\r\n",
" cmd = 'AGAN'\r\n",
" self.SendString(cmd)\r\n",
" return\r\n",
"\r\n",
" def SetFrontOutputSource(self, which = None, Type = None):\r\n",
" \"\"\"\r\n",
" The FPOP i,j command sets the front panel (CH1 and CH2) output sources. \r\n",
" The parameter i selects CH1 (i=1) or CH2 (i=2) and is required. \r\n",
" The FPOP i, j command sets output i to quantity j where j is\r\n",
" listed below.\r\n",
"        CH1 (i=1)            CH2 (i=2) \r\n",
" j output quantity j output quantity\r\n",
" 0 CH 1 Display 0 CH 2 Display\r\n",
" 1 X 1 Y\r\n",
" [1]\r\n",
" \"\"\"\r\n",
" cmd = 'FPOP ' + str(which) + ',' + str(Type)\r\n",
" self.SendString(cmd)\r\n",
" \r\n",
" def GetFrontOutputSource(self, which= None):\r\n",
" \"\"\"\r\n",
" The FPOP? command queries the front panel (CH1 and CH2) output sources[1].\r\n",
" \"\"\"\r\n",
" resp = self.__GetSomething('FPOP?' + str(which))\r\n",
" if str(resp)==0:\r\n",
" Type = 'Display Channel '+ str(which)\r\n",
" else:\r\n",
" if which == 1:\r\n",
" Type = 'X'\r\n",
" else:\r\n",
" Type = 'Y'\r\n",
" return Type \r\n",
" \r\n",
" def GetOutputOffsetAndExpand(self, i):\r\n",
" \"\"\"\r\n",
" The OEXP? i command queries the output offsets and expand of quantity i.\r\n",
" The parameter i selects X (i=1), Y (i=2) or R (i=3) and is required.\r\n",
" The returned string contains both the offset and\r\n",
" expand separated by a comma. For example, if the OEXP? 2 command\r\n",
" returns \"50.00,1\" then the Y offset is 50.00% and the Y expand is 10.\r\n",
" Setting an offset to zero turns the offset off. Querying an offset which is\r\n",
" off will return 0% for the offset value[1].\r\n",
" \"\"\"\r\n",
" exception_text = \"GetOutputOffsetAndExpand: parameter i can only be 1(≙X), 2(≙Y) or 3(≙R)\"\r\n",
" self.__checkFractionalDigits(i, exception_text); \r\n",
" try:\r\n",
" i = int(i)\r\n",
" except:\r\n",
" raise Exception(exception_text) \r\n",
" if i < 1 or i > 3 or not(isinstance( i, ( int, long ) )):\r\n",
" raise Exception(exception_text)\r\n",
" \r\n",
" Type = ['X','Y','R'] \r\n",
" cmd = 'OEXP? '+ str(i) \r\n",
" resp = self.__GetSomething(cmd)\r\n",
" [offset, expand] = resp.rsplit(',') \r\n",
" return Type[i-1], offset, expand \r\n",
" \r\n",
" def SetOutputOffsetAndExpand(self, Param, Offset, Expand):\r\n",
" \"\"\"\r\n",
" The OEXP i, x, j command will set the offset and expand for quantity i. \r\n",
" This command requires BOTH x and j.\r\n",
" The parameter i selects X (i=1), Y (i=2) or R (i=3) and is required. The\r\n",
" parameter x is the offset in percent (-105.00 ≤ x ≤ 105.00). The parame-\r\n",
" ter j selects no expand (j=0), expand by 10 (j=1) or 100 (j=2)[1].\r\n",
" \"\"\"\r\n",
" cmd = 'OEXP ' + str(Param)+ ',' + str(Offset) + ',' + str(Expand) \r\n",
" self.SendString(cmd)\r\n",
" \r\n",
" def SetAutoReserve(self):\r\n",
" \"\"\"\r\n",
" The ARSV command performs the Auto Reserve function. This com-\r\n",
" mand is the same as pressing the [Auto Reserve] key. Auto Reserve\r\n",
" may take some time. Check the command execution in progress bit in\r\n",
" the Serial Poll Status Byte (bit 1) to determine when the function is\r\n",
" finished[1].\r\n",
" \"\"\"\r\n",
" cmd = 'ARSV'\r\n",
" self.SendString(cmd)\r\n",
" \r\n",
" def SetAutoPhase(self):\r\n",
" \"\"\"\r\n",
" The APHS command performs the Auto Phase function. This command\r\n",
" is the same as pressing the [Auto Phase] key. The outputs will take many\r\n",
" time constants to reach their new values. Do not send the APHS com-\r\n",
" mand again without waiting the appropriate amount of time. If the phase\r\n",
" is unstable, then APHS will do nothing. Query the new value of the phase\r\n",
" shift to see if APHS changed the phase shift[1].\r\n",
" \"\"\"\r\n",
" cmd = 'APHS'\r\n",
" self.SendString(cmd)\r\n",
" \r\n",
" def SetAutoOffset(self, which):\r\n",
" \"\"\"\r\n",
" The AOFF i command automatically offsets X (i=1), Y (i=2) or R (i=3) to\r\n",
" zero. The parameter i is required. This command is equivalent to press-\r\n",
" ing the [Auto Offset] keys[1].\r\n",
" \"\"\"\r\n",
" exception_text = \"SetAutoOffset: parameter which can only be 1(≙X), 2(≙Y) or 3(≙R)\"\r\n",
" self.__checkFractionalDigits(which, exception_text); \r\n",
" try:\r\n",
" which = int(which)\r\n",
" except:\r\n",
" raise Exception(exception_text) \r\n",
" if which < 1 or which > 3 or not(isinstance( which, ( int, long ) )):\r\n",
" raise Exception(exception_text)\r\n",
" \r\n",
" self.__SetSomething('AOFF', which)\r\n",
"\r\n",
" def SetDataSampleRate(self, rate = 4):\r\n",
" \"\"\"\r\n",
" The SRAT i command sets the data sample rate. The parame-\r\n",
" ter i selects the sample rate listed below.\r\n",
" i quantity i quantity\r\n",
" 0 62.5 mHz 8 16 Hz\r\n",
" 1 125 mHz 9 32 Hz\r\n",
" 2 250 mHz 10 64 Hz\r\n",
" 3 500 mHz 11 128 Hz\r\n",
" 4 1 Hz 12 256 Hz\r\n",
" 5 2 Hz 13 512 Hz\r\n",
" 6 4 Hz 14 Trigger\r\n",
" 7 8 Hz\r\n",
" [1]\r\n",
" \"\"\"\r\n",
" self.__SetSomething('SRAT', rate)\r\n",
" \r\n",
" def GetDataSampleRate(self, rate = None):\r\n",
" \"\"\"\r\n",
" The SRAT? command queries the data sample rate[1].\r\n",
" \"\"\"\r\n",
" Rate = self.__GetSomething('SRAT?')\r\n",
" return int(Rate) \r\n",
"\r\n",
" def SetEndOfBuffer(self, kind =None):\r\n",
" \"\"\"\r\n",
" The SEND i command sets the end of buffer mode. The param-\r\n",
" eter i selects 1 Shot (i=0) or Loop (i=1). If Loop mode is used, make sure\r\n",
" to pause data storage before reading the data to avoid confusion about\r\n",
" which point is the most recent[1].\r\n",
" \"\"\"\r\n",
" if kind not in (0,1):\r\n",
" raise Exception(\"SetEndOfBuffer: parameter kind can only be 0(≙Shot) or 1(≙Loop)\")\r\n",
" self.__SetSomething('SEND', kind)\r\n",
" \r\n",
" def GetEndOfBuffer(self, kind = None):\r\n",
" \"\"\"\r\n",
" The SEND? command queries the end of buffer mode[1].\r\n",
" \"\"\"\r\n",
" Kind = self.__GetSomething('SEND?')\r\n",
" return Kind\r\n",
" \r\n",
" def Trigger(self): \r\n",
" \"\"\"\r\n",
" The TRIG command is the software trigger command. This command\r\n",
" has the same effect as a trigger at the rear panel trigger input[1].\r\n",
" \"\"\"\r\n",
" self.SendString('TRIG')\r\n",
" \r\n",
" def SetTriggerStartMode(self, kind):\r\n",
" \"\"\"\r\n",
" The TSTR i command sets the trigger start mode. The parameter \r\n",
" i=1 selects trigger starts the scan and i=0 turns the trigger start feature off.\r\n",
" \"\"\"\r\n",
" if kind not in (0,1):\r\n",
" raise Exception(\"SetTriggerStartMode: parameter kind can only be 0(≙trigger starts the scan) or 1(≙turns the trigger start feature off)\")\r\n",
" self.__SetSomething('TSTR', kind)\r\n",
"\r\n",
" def GetTriggerStartMode(self):\r\n",
" \"\"\"\r\n",
" The TSTR? command queries the trigger start mode[1].\r\n",
" \"\"\"\r\n",
" Kind = self.__GetSomething('TSTR?')\r\n",
" return int(Kind)\r\n",
"\r\n",
" def Start(self):\r\n",
" \"\"\"\r\n",
" The STRT command starts or resumes data storage. STRT is ignored if\r\n",
" storage is already in progress[1].\r\n",
" \"\"\"\r\n",
" self.SendString('STRT')\r\n",
"\r\n",
" def Pause(self): \r\n",
" \"\"\"\r\n",
" The PAUS command pauses data storage. If storage is already paused\r\n",
" or reset then this command is ignored[1].\r\n",
" \"\"\"\r\n",
" self.SendString('PAUS')\r\n",
"\r\n",
" def SetTriggerSlope(self, value):\r\n",
" \"\"\"\r\n",
" The RSLP command sets the reference trigger when using the\r\n",
" external reference mode. The parameter i selects sine zero crossing\r\n",
"        (i=0), TTL rising edge (i=1), or TTL falling edge (i=2). At frequencies\r\n",
"        below 1 Hz, a TTL reference must be used[1].\r\n",
" \"\"\"\r\n",
" if value not in (0,1,2):\r\n",
" raise Exception(\"SetTriggerSlope: parameter value can only be 0(≙sine zero crossing), 1(≙TTL rising edge/Pos edge) or 2(≙TTL falling edge/neg edge)\")\r\n",
" snd = \"RSLP%i\" % value \r\n",
" self.SendString(snd)\r\n",
"\r\n",
" def iToSlope(self, i):\r\n",
" \"\"\"\r\n",
" converts the response returned by GetTriggerSlope to the actual slope\r\n",
" \"\"\"\r\n",
" options = {0 : 'Sine',\r\n",
" 1 : 'Pos edge',\r\n",
" 2 : 'neg edge'\r\n",
" }\r\n",
" return options[int(i.strip())] \r\n",
" \r\n",
" def GetTriggerSlope(self):\r\n",
" \"\"\"\r\n",
" The RSLP? command queries the reference trigger when using the\r\n",
" external reference mode.\r\n",
" use the method self.iToSlope to convert the response of this method to the actual slope\r\n",
" \"\"\"\r\n",
" resp = self.__GetSomething('RSLP?');\r\n",
" return resp\r\n",
" \r\n",
" def Reset(self):\r\n",
" \"\"\"\r\n",
" Reset the unit to its default configurations[1].\r\n",
" \"\"\"\r\n",
" self.SendString('*RST')\r\n",
" \r\n",
" def ResetDataBuffers(self):\r\n",
" \"\"\"\r\n",
" The REST command resets the data buffers. The REST command can\r\n",
" be sent at any time - any storage in progress, paused or not, will be\r\n",
" reset. This command will erase the data buffer[1].\r\n",
" \"\"\"\r\n",
" self.SendString('REST')\r\n",
" \r\n",
" def GetSelectedOutput(self, which):\r\n",
" \"\"\"\r\n",
" The OUTP? i command reads the value of X, Y, R or θ. The parameter\r\n",
" i selects X (i=1), Y (i=2), R (i=3) or θ (i=4). Values are returned as ASCII\r\n",
" floating point numbers with units of Volts or degrees. For example, the\r\n",
" response might be \"-1.01026\". This command is a query only command[1].\r\n",
" \"\"\"\r\n",
" if which not in (1,2,3,4):\r\n",
" raise Exception(\"GetSelectedOutput: parameter which can only be 1(≙X),2(≙Y),3(≙R) or 4(≙θ)\") \r\n",
" Value = self.__GetSomething('OUTP?' + str(which))\r\n",
" if which == 1:\r\n",
" Type = 'X'\r\n",
" elif which == 2:\r\n",
" Type = 'Y'\r\n",
" elif which == 3:\r\n",
" Type = 'R'\r\n",
" elif which == 4: \r\n",
" Type = 'θ'\r\n",
" \r\n",
" return [float(Value), Type]\r\n",
"\r\n",
" def GetSelectedDisplayValue(self, which):\r\n",
" \"\"\"\r\n",
" The OUTR? i command reads the value of the CH1 or CH2 display.\r\n",
" The parameter i selects the display (i=1 or 2). Values are returned as\r\n",
" ASCII floating point numbers with units of the display. For example, the\r\n",
" response might be \"-1.01026\". This command is a query only command[1].\r\n",
" \"\"\"\r\n",
" if which not in (1, 2):\r\n",
" raise Exception(\"GetSelectedDisplayValue: parameter which can only be 1(≙CH1) or 2(≙CH2)\")\r\n",
" Value = self.__GetSomething('OUTR?' + str(which)) \r\n",
" time.sleep(0.2);\r\n",
" resp = float(Value)\r\n",
" if DEBUG:\r\n",
" print(\"GetSelectedDisplayValue: \" + Value)\r\n",
" return resp\r\n",
" \r\n",
" def __check_snap(self, param):\r\n",
" \"\"\"\r\n",
" internal function used by method SNAP\r\n",
" ensures that the SNAP-params are correct\r\n",
" \"\"\"\r\n",
" if param not in (1,2,3,4,5,6,7,8,9,10,11):\r\n",
" raise Exception(\"SNAP: Parameters can only be 1(≙X), 2(≙Y), 3(≙R), 4(≙θ), 5(≙Aux In 1), 6(≙Aux In 2), 7(≙Aux In 3), 8(≙Aux In 4), 9(≙Reference Frequency), 10(≙CH1 display) or 11(≙CH2 display)\") \r\n",
" \r\n",
" def SNAP(self,Param1,Param2,Param3=None,Param4 =None,Param5=None,Param6=None):\r\n",
" \"\"\"\r\n",
" The SNAP? command records the values of either 2, 3, 4, 5 or 6 param-\r\n",
" eters at a single instant. For example, SNAP? is a way to query values of\r\n",
" X and Y (or R and θ) which are taken at the same time. This is important\r\n",
" when the time constant is very short. Using the OUTP? or OUTR? com-\r\n",
" mands will result in time delays, which may be greater than the time con-\r\n",
" stant, between reading X and Y (or R and θ).\r\n",
" The SNAP? command requires at least two parameters and at most six\r\n",
" parameters. The parameters i, j, k, l, m, n select the parameters below.\r\n",
" \r\n",
" i,j,k,l,m,n parameter\r\n",
" 1 X\r\n",
" 2 Y\r\n",
" 3 R\r\n",
" 4 θ\r\n",
" 5 Aux In 1\r\n",
" 6 Aux In 2\r\n",
" 7 Aux In 3\r\n",
" 8 Aux In 4\r\n",
" 9 Reference Frequency\r\n",
" 10 CH1 display\r\n",
" 11 CH2 display\r\n",
"\r\n",
" The requested values are returned in a single string with the values sep-\r\n",
" arated by commas and in the order in which they were requested. For\r\n",
" example, the SNAP?1,2,9,5 will return the values of X, Y, Freq and\r\n",
" Aux In 1. These values will be returned in a single string such as\r\n",
" \"0.951359,0.0253297,1000.00,1.234\".\r\n",
" The first value is X, the second is Y, the third is f, and the fourth is\r\n",
" Aux In 1.\r\n",
" The values of X and Y are recorded at a single instant. The values of R\r\n",
" and θ are also recorded at a single instant. Thus reading X,Y OR R,θ\r\n",
" yields a coherent snapshot of the output signal. If X,Y,R and θ are all\r\n",
" read, then the values of X,Y are recorded approximately 10μs apart from\r\n",
" R,θ. Thus, the values of X and Y may not yield the exact values of R and\r\n",
" θ from a single SNAP? query.\r\n",
" The values of the Aux Inputs may have an uncertainty of up to 32μs. The\r\n",
" frequency is computed only every other period or 40 ms, whichever is\r\n",
" longer.\r\n",
" \r\n",
" The SNAP? command is a query only command. The SNAP? command\r\n",
" is used to record various parameters simultaneously, not to transfer data\r\n",
" quickly[1].\r\n",
" \"\"\"\r\n",
" self.__check_snap(Param1); \r\n",
" self.__check_snap(Param2); \r\n",
" Cmdstr = 'SNAP?' + ' '+ str(Param1) + ','+ str(Param2);\r\n",
" if Param3 != None:\r\n",
" self.__check_snap(Param3); \r\n",
" Cmdstr += ','+ str(Param3);\r\n",
" if Param4 != None:\r\n",
" self.__check_snap(Param4); \r\n",
" Cmdstr += ','+ str(Param4);\r\n",
" if Param5 != None:\r\n",
" self.__check_snap(Param5); \r\n",
" Cmdstr += ','+ str(Param5);\r\n",
" if Param6 != None:\r\n",
" self.__check_snap(Param6); \r\n",
" Cmdstr += ','+ str(Param6);\r\n",
"\r\n",
" resp = self.__GetSomething(Cmdstr);\r\n",
" \r\n",
" if Param3 is None: # no value, just the command string to query\r\n",
" Val6 = None; Val5 = None; Val4 = None; Val3 = None \r\n",
" [Val1,Val2] = resp.rsplit(',')\r\n",
" elif Param4 is None: \r\n",
" Val6 = None; Val5 =None; Val4 = None \r\n",
" [Val1,Val2,Val3]= resp.rsplit(',')\r\n",
" elif Param5 is None:\r\n",
" Val6 = None; Val5 = None; \r\n",
" [Val1,Val2,Val3,Val4]= resp.rsplit(',')\r\n",
" elif Param6 is None:\r\n",
" Val6 = None \r\n",
" [Val1,Val2,Val3,Val4,Val5]= resp.rsplit(',')\r\n",
" else:\r\n",
" [Val1,Val2,Val3,Val4,Val5, Val6]= resp.rsplit(',')\r\n",
"\r\n",
" return Val1, Val2, Val3, Val4, Val5, Val6, Param1, Param2, Param3, \\\r\n",
" Param4, Param5, Param6\r\n",
"\r\n",
" def GetAuxValue(self, number):\r\n",
" \"\"\"\r\n",
" The OAUX? command reads the Aux Input values. The parameter i\r\n",
" selects an Aux Input (1, 2, 3 or 4) and is required. The Aux Input voltages\r\n",
" are returned as ASCII strings with units of Volts. The resolution is\r\n",
" 1/3 mV. This command is a query only command[1].\r\n",
" \"\"\"\r\n",
" if number not in (1,2,3,4):\r\n",
" raise Exception(\"GetAuxValue: parameter number can only be 1(≙Aux Input 1), 2(≙Aux Input 2), 3(≙Aux Input 3) or 4(≙Aux Input 4)\") \r\n",
" OutAux = self.__GetSomething('OAUX?' + str(number))\r\n",
" return float(OutAux), number\r\n",
"\r\n",
" def GetOccupiedBuffer(self):\r\n",
" \"\"\"\r\n",
" The SPTS? command queries the number of points stored in the buffer.\r\n",
" Both displays have the same number of points. If the buffer is reset, then\r\n",
" 0 is returned. Remember, SPTS? returns N where N is the number of\r\n",
" points - the points are numbered from 0 (oldest) to N-1 (most recent).\r\n",
" The SPTS? command can be sent at any time, even while storage is in\r\n",
" progress. This command is a query only command[1].\r\n",
" \"\"\"\r\n",
" n = self.__GetSomething('SPTS?')\r\n",
" return int(n)\r\n",
"\r\n",
"# commented by WilHo, because this method uses GetOccupiedBuffer with parameter 'which', \r\n",
"# but SPTS? is a query only command for further information see the programming manual\r\n",
"# def GetChannelBufferPoints(self,which,length):\r\n",
"# if which not in (1,2):\r\n",
"# raise Exception(\"which has to be 1 or 2\")\r\n",
"# if length <= 0:\r\n",
"# raise Exception(\"Length hast to be >= 0\") \r\n",
"# length = int(self.GetOccupiedBuffer(which)) - 1\r\n",
"## DataBuffer = [((0:length)]; \r\n",
"# DataBuffer = [] \r\n",
"# for j in range(0,length):\r\n",
"# cmd = 'TRCA? '+str(which)+',' + str(j) + ',1'\r\n",
"# DataBuffer[j] = self.SetOrCheckSomething(cmd, None,0, length, False)\r\n",
"# return DataBuffer[:]\r\n",
"\r\n",
" def close(self):\r\n",
" '''\r\n",
" Close the connection to the Instrument, return controle to instruments \r\n",
" controles and switch off output\r\n",
" '''\r\n",
" if sys.platform.startswith('win'):\r\n",
" self.visa_instr.close()\r\n",
" elif sys.platform.startswith('lin'):\r\n",
" self.inst.clear() #close() not implemented in Gpib.py\r\n",
"\r\n",
"OUT_CLASS = liaSR830"
] | [
0,
0,
0,
0,
0.0136986301369863,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.16666666666666666,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0.017857142857142856,
0.025,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0.16666666666666666,
0,
0,
0,
0.01020408163265306,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0.022388059701492536,
0.05737704918032787,
0.16666666666666666,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0.05555555555555555,
0,
0.017543859649122806,
0.020833333333333332,
0,
0.0273972602739726,
0.043478260869565216,
0.06382978723404255,
0.06382978723404255,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.028169014084507043,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0.00909090909090909,
0.014285714285714285,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0.021739130434782608,
0,
0,
0.03125,
0,
0,
0.023255813953488372,
0.03125,
0,
0.02631578947368421,
0,
0,
0,
0.047619047619047616,
0,
0.02631578947368421,
0,
0,
0,
0.02702702702702703,
0,
0.09090909090909091,
0.3333333333333333,
0.023809523809523808,
0,
0,
0,
0.09090909090909091,
0.05405405405405406,
0.1,
0.047619047619047616,
0,
0.038461538461538464,
0,
0.1,
0,
0.022727272727272728,
0.02127659574468085,
0.022222222222222223,
0.06382978723404255,
0.05555555555555555,
0.02857142857142857,
0.045454545454545456,
0.045454545454545456,
0.045454545454545456,
0.07142857142857142,
0,
0.04,
0.05555555555555555,
0.029411764705882353,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.034482758620689655,
0.01,
0,
0,
0.047619047619047616,
0,
0,
0.05555555555555555,
0,
0,
0.027777777777777776,
0,
0.0136986301369863,
0,
0.16666666666666666,
0,
0,
0.013333333333333334,
0.015625,
0.015151515151515152,
0,
0,
0,
0.013888888888888888,
0,
0.05555555555555555,
0,
0,
0.027777777777777776,
0,
0.014285714285714285,
0.16666666666666666,
0,
0.023809523809523808,
0,
0.010869565217391304,
0,
0.03225806451612903,
0.08695652173913043,
0.08333333333333333,
0.08333333333333333,
0.09090909090909091,
0.09090909090909091,
0.08695652173913043,
0.08695652173913043,
0.08333333333333333,
0.08333333333333333,
0.05263157894736842,
0.05263157894736842,
0.05,
0.05,
0.047619047619047616,
0.047619047619047616,
0.045454545454545456,
0.045454545454545456,
0.043478260869565216,
0.045454545454545456,
0.09090909090909091,
0,
0,
0.058823529411764705,
0.010638297872340425,
0.1,
0,
0,
0.010869565217391304,
0,
0.03225806451612903,
0.08695652173913043,
0.08333333333333333,
0.08333333333333333,
0.09090909090909091,
0.09090909090909091,
0.08695652173913043,
0.08695652173913043,
0.08333333333333333,
0.08333333333333333,
0.1,
0.1,
0.09523809523809523,
0.09523809523809523,
0.09090909090909091,
0.14285714285714285,
0.08695652173913043,
0.08695652173913043,
0.08333333333333333,
0.08695652173913043,
0.09090909090909091,
0,
0,
0.058823529411764705,
0.009523809523809525,
0,
0,
0.125,
0,
0,
0,
0.023529411764705882,
0,
0,
0,
0.038461538461538464,
0.03571428571428571,
0,
0,
0,
0,
0.05454545454545454,
0,
0,
0.1,
0,
0.01020408163265306,
0.009708737864077669,
0.016666666666666666,
0,
0.02564102564102564,
0.029411764705882353,
0,
0.03125,
0.030303030303030304,
0,
0.1,
0,
0.02631578947368421,
0.023809523809523808,
0,
0.0196078431372549,
0,
0.015625,
0,
0.0196078431372549,
0,
0.1,
0,
0.0273972602739726,
0.03333333333333333,
0.030303030303030304,
0.01639344262295082,
0.043478260869565216,
0.02040816326530612,
0,
0.017857142857142856,
0.023255813953488372,
0.045454545454545456,
0,
0.02040816326530612,
0.020833333333333332,
0.025,
0,
0,
0.02857142857142857,
0.013513513513513514,
0.022222222222222223,
0.1,
0,
0.016666666666666666,
0,
0.0425531914893617,
0,
0.012345679012345678,
0.019230769230769232,
0.022727272727272728,
0,
0,
0.022727272727272728,
0.045454545454545456,
0.02702702702702703,
0,
0.043478260869565216,
0.05263157894736842,
0,
0.16666666666666666,
0,
0.16666666666666666,
0,
0,
0,
0,
0.037037037037037035,
0.017857142857142856,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.16666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0.1,
0.01639344262295082,
0.008130081300813009,
0.0078125,
0.009009009009009009,
0,
0,
0,
0,
0,
0.1,
0,
0,
0.020833333333333332,
0.1,
0.014285714285714285,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0.1,
0.0196078431372549,
0.1,
0.025,
0,
0.015151515151515152,
0.013888888888888888,
0.012987012987012988,
0.013333333333333334,
0.023529411764705882,
0.0125,
0.01818181818181818,
0.02,
0,
0,
0.022222222222222223,
0,
0,
0.016129032258064516,
0.03260869565217391,
0.017857142857142856,
0,
0.0136986301369863,
0,
0.03296703296703297,
0,
0,
0.03296703296703297,
0,
0.045454545454545456,
0,
0.06896551724137931,
0.03333333333333333,
0,
0,
0.012195121951219513,
0.023809523809523808,
0.047619047619047616,
0.02564102564102564,
0.00980392156862745,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0.012195121951219513,
0.011764705882352941,
0.012048192771084338,
0,
0.012195121951219513,
0,
0.02631578947368421,
0.009523809523809525,
0,
0,
0.1,
0,
0,
0,
0.1,
0.014285714285714285,
0,
0,
0,
0,
0.1,
0,
0,
0.016666666666666666,
0.013888888888888888,
0,
0,
0,
0,
0,
0.02127659574468085,
0.009174311926605505,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0.1,
0.029411764705882353,
0,
0,
0,
0,
0.030303030303030304,
0.009009009009009009,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0.047619047619047616,
0,
0,
0.017543859649122806,
0.16666666666666666,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0.1,
0.03125,
0,
0,
0,
0.011904761904761904,
0.011363636363636364,
0,
0,
0,
0.02564102564102564,
0,
0.010752688172043012,
0.01639344262295082,
0,
0,
0.058823529411764705,
0,
0.1,
0.0784313725490196,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0.07407407407407407,
0.02702702702702703,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0.005780346820809248,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0.029411764705882353,
0.01,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0.1,
0.02564102564102564,
0,
0.018518518518518517,
0,
0,
0.03125,
0.010526315789473684,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0.012195121951219513,
0,
0,
0.07894736842105263,
0.005747126436781609,
0,
0.1,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1,
0.01639344262295082,
0.008130081300813009,
0.0078125,
0.009009009009009009,
0,
0.019417475728155338,
0.007042253521126761,
0.006944444444444444,
0.006711409395973154,
0.007633587786259542,
0.030303030303030304,
0,
0,
0.058823529411764705,
0,
0.058823529411764705,
0,
0,
0.1,
0,
0.047619047619047616,
0,
0.1,
0,
0.1,
0.01639344262295082,
0.008130081300813009,
0.0078125,
0.009009009009009009,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0.05555555555555555,
0.007246376811594203,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0.1,
0.019801980198019802,
0.01,
0.011111111111111112,
0.008264462809917356,
0.1,
0.015625,
0.015748031496062992,
0.0078125,
0,
0.010526315789473684,
0.008264462809917356,
0.008264462809917356,
0.00909090909090909,
0.030303030303030304,
0,
0,
0.058823529411764705,
0.018867924528301886,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0.016260162601626018,
0,
0,
0.011363636363636364,
0,
0.1,
0.03225806451612903,
0,
0,
0.010309278350515464,
0,
0,
0.006896551724137931,
0.02857142857142857,
0,
0,
0.058823529411764705,
0.018867924528301886,
0.05063291139240506,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0.011904761904761904,
0.011904761904761904,
0,
0,
0.0078125,
0.028985507246376812,
0,
0,
0.058823529411764705,
0.018867924528301886,
0.05263157894736842,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0.012195121951219513,
0.01282051282051282,
0.012195121951219513,
0.020833333333333332,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0.020833333333333332,
0,
0.058823529411764705,
0.1,
0.05,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0.02631578947368421,
0.06060606060606061,
0.1,
0.07017543859649122,
0,
0.012048192771084338,
0,
0,
0,
0,
0.03278688524590164,
0.0625,
0.031746031746031744,
0,
0,
0.16666666666666666,
0.06896551724137931,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0.03636363636363636,
0,
0,
0.16666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0.027777777777777776,
0,
0,
0,
0,
0.030303030303030304,
0.16666666666666666,
0.027777777777777776,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0.1,
0.029411764705882353,
0,
0.02040816326530612,
0,
0,
0,
0,
0.1,
0.041666666666666664,
0,
0,
0,
0,
0,
0.1111111111111111,
0.037037037037037035,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0.1,
0,
0,
0,
0,
0.012048192771084338,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0.012195121951219513,
0.013513513513513514,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0,
0.1,
0.02,
0,
0.011904761904761904,
0,
0,
0.038461538461538464,
0.0196078431372549,
0,
0,
0,
0,
0,
0.034482758620689655,
0.1,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0.01,
0.030303030303030304,
0,
0,
0.058823529411764705,
0.018867924528301886,
0.05970149253731343,
0,
0.07142857142857142,
0.07894736842105263,
0.075,
0,
0.018867924528301886,
0.021739130434782608,
0.1,
0,
0,
0.012195121951219513,
0,
0.012195121951219513,
0.012195121951219513,
0,
0,
0.03896103896103896,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0.012195121951219513,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0.02857142857142857,
0,
0,
0.058823529411764705,
0.018867924528301886,
0.05063291139240506,
0,
0.07142857142857142,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0.0425531914893617,
0,
0,
0,
0,
0.029411764705882353,
0,
0.023255813953488372,
0,
0,
0.011904761904761904,
0,
0,
0,
0.03225806451612903,
0.010416666666666666,
0,
0.1,
0.045454545454545456,
0,
0,
0,
0,
0,
0.125,
0.03571428571428571,
0,
0,
0,
0,
0,
0.1,
0,
0,
0.013888888888888888,
0.011111111111111112,
0,
0.03225806451612903,
0.006622516556291391,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0.058823529411764705,
0.006134969325153374,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0.03125,
0.07142857142857142,
0.07407407407407407,
0.09090909090909091,
0.02,
0.16666666666666666,
0,
0,
0,
0,
0.010309278350515464,
0,
0.021739130434782608,
0,
0.1,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0.08333333333333333,
0.018691588785046728,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0.07142857142857142,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0.009615384615384616,
0.03225806451612903,
0.038461538461538464,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0.19230769230769232,
0.009615384615384616,
0.1,
0.09523809523809523,
0,
0,
0.012048192771084338,
0.012195121951219513,
0,
0.012048192771084338,
0,
0,
0.012195121951219513,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.1,
0,
0.012048192771084338,
0,
0,
0.047619047619047616,
0.05405405405405406,
0.046153846153846156,
0.03571428571428571,
0.04878048780487805,
0.04878048780487805,
0.03571428571428571,
0.04878048780487805,
0.04878048780487805,
0.03571428571428571,
0.04878048780487805,
0.04878048780487805,
0.03571428571428571,
0.04878048780487805,
0.04878048780487805,
0,
0.022222222222222223,
0.1,
0.0136986301369863,
0.06153846153846154,
0.022727272727272728,
0.03225806451612903,
0.07692307692307693,
0.0625,
0,
0.075,
0.07547169811320754,
0,
0.038461538461538464,
0.08620689655172414,
0,
0.078125,
0,
0,
0.025,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0.08108108108108109,
0.013888888888888888,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0.02197802197802198,
0.011363636363636364,
0,
0,
0,
0,
0.017543859649122806,
0,
0.0425531914893617,
0.029411764705882353,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0.029850746268656716,
0,
0.1
] | 1,351 | 0.0183 | false |
import asyncio
import aiohttp
import bot
from typing import Any, Dict, Optional # noqa: F401
async def getGlobalEmotes() -> Optional[Dict[str, str]]:
    """Fetch the global BetterTTV emote set.

    Returns:
        A mapping of emote id -> emote code on success, an empty dict when
        the API answers 404 (no emotes defined), or None on any other HTTP
        error or on timeout.
    """
    url: str = 'https://api.betterttv.net/2/emotes'
    response: aiohttp.ClientResponse
    try:
        async with aiohttp.ClientSession(raise_for_status=True) as session, \
                session.get(url, timeout=bot.config.httpTimeout) as response:
            if response.status != 200:
                return None
            bttvData: Dict[str, Any] = await response.json()
            return {emote['id']: emote['code'] for emote in bttvData['emotes']}
    except aiohttp.ClientResponseError as e:
        # ClientResponseError.code is a deprecated alias of .status
        # (aiohttp >= 3.1); a 404 means "no emotes", not a failure.
        if e.status == 404:
            return {}
    except asyncio.TimeoutError:
        pass
    return None
async def getBroadcasterEmotes(broadcaster: str) -> Optional[Dict[str, str]]:
    """Fetch the BetterTTV emote set for one broadcaster's channel.

    Args:
        broadcaster: channel name appended to the BetterTTV channels URL.

    Returns:
        A mapping of emote id -> emote code on success, an empty dict when
        the API answers 404 (channel has no BTTV emotes), or None on any
        other HTTP error or on timeout.
    """
    url: str = 'https://api.betterttv.net/2/channels/' + broadcaster
    response: aiohttp.ClientResponse
    try:
        async with aiohttp.ClientSession(raise_for_status=True) as session, \
                session.get(url, timeout=bot.config.httpTimeout) as response:
            if response.status != 200:
                return None
            bttvData: Dict[str, Any] = await response.json()
            return {emote['id']: emote['code'] for emote in bttvData['emotes']}
    except aiohttp.ClientResponseError as e:
        # ClientResponseError.code is a deprecated alias of .status
        # (aiohttp >= 3.1); a 404 means "channel not registered", not a failure.
        if e.status == 404:
            return {}
    except asyncio.TimeoutError:
        pass
    return None
| [
"import asyncio\n",
"\n",
"import aiohttp\n",
"\n",
"import bot\n",
"\n",
"from typing import Any, Dict, Optional # noqa: F401\n",
"\n",
"\n",
"async def getGlobalEmotes() -> Optional[Dict[str, str]]:\n",
" url: str = 'https://api.betterttv.net/2/emotes'\n",
" response: aiohttp.ClientResponse\n",
" try:\n",
" async with aiohttp.ClientSession(raise_for_status=True) as session, \\\n",
" session.get(url, timeout=bot.config.httpTimeout) as response:\n",
" if response.status != 200:\n",
" return None\n",
" bttvData: Dict[str, Any] = await response.json()\n",
" emotes: Dict[str, str] = {}\n",
" emote: Dict[str, str]\n",
" for emote in bttvData['emotes']:\n",
" emotes[emote['id']] = emote['code']\n",
" return emotes\n",
" except aiohttp.ClientResponseError as e:\n",
" if e.code == 404:\n",
" return {}\n",
" except asyncio.TimeoutError:\n",
" pass\n",
" return None\n",
"\n",
"\n",
"async def getBroadcasterEmotes(broadcaster: str) -> Optional[Dict[str, str]]:\n",
" url: str = 'https://api.betterttv.net/2/channels/' + broadcaster\n",
" response: aiohttp.ClientResponse\n",
" try:\n",
" async with aiohttp.ClientSession(raise_for_status=True) as session, \\\n",
" session.get(url, timeout=bot.config.httpTimeout) as response:\n",
" if response.status != 200:\n",
" return None\n",
" bttvData: Dict[str, Any] = await response.json()\n",
" emotes: Dict[str, str] = {}\n",
" emote: Dict[str, str]\n",
" for emote in bttvData['emotes']:\n",
" emotes[emote['id']] = emote['code']\n",
" return emotes\n",
" except aiohttp.ClientResponseError as e:\n",
" if e.code == 404:\n",
" return {}\n",
" except asyncio.TimeoutError:\n",
" pass\n",
" return None\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 51 | 0 | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import *
from utilities import execution_path, run_all
import os, mapnik
from glob import glob
default_logging_severity = mapnik.logger.get_severity()
def setup():
    """Silence mapnik logging and chdir to the test execution path."""
    # 'None' is a reserved word in Python 3, so the severity enum member
    # cannot be spelled mapnik.severity_type.None (SyntaxError); fetch it
    # dynamically instead.
    mapnik.logger.set_severity(getattr(mapnik.severity_type, 'None'))
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))
def teardown():
    # Restore the logging severity captured at import time, undoing the
    # change made in setup() so later test modules see default logging.
    mapnik.logger.set_severity(default_logging_severity)
# Map a fixture file extension to the datasource plugin(s) that should be
# able to open it.
plugin_mapping = {
    '.csv': ['csv'],
    '.json': ['geojson', 'ogr'],
    '.tif': ['gdal'],
    # '.tif': ['gdal', 'raster'],
    '.kml': ['ogr'],
    '.gpx': ['ogr'],
    '.vrt': ['gdal'],
}
def test_opening_data():
    """Try opening every fixture in the mapnik-test-data checkout with the
    plugin(s) mapped to its extension, printing any datasource that fails
    to open.

    To obtain the fixtures:
        # https://github.com/mapbox/mapnik-test-data
        # cd tests/data
        # git clone --depth 1 https://github.com/mapbox/mapnik-test-data
    """
    if os.path.exists('../data/mapnik-test-data/'):
        files = glob('../data/mapnik-test-data/data/*/*.*')
        for filepath in files:
            ext = os.path.splitext(filepath)[1]
            if plugin_mapping.get(ext):
                print('testing opening %s' % filepath)
                if 'topo' in filepath:
                    # TopoJSON fixtures only open through OGR by layer index.
                    kwargs = {'type': 'ogr', 'file': filepath}
                    kwargs['layer_by_index'] = 0
                    try:
                        mapnik.Datasource(**kwargs)
                    except Exception as e:
                        print('could not open, %s: %s' % (kwargs, e))
                else:
                    for plugin in plugin_mapping[ext]:
                        kwargs = {'type': plugin, 'file': filepath}
                        # '==' (value equality), not 'is': identity
                        # comparison of string literals is
                        # implementation-dependent.
                        if plugin == 'ogr':
                            kwargs['layer_by_index'] = 0
                        try:
                            mapnik.Datasource(**kwargs)
                        except Exception as e:
                            print('could not open, %s: %s' % (kwargs, e))
            else:
                print('skipping opening %s' % filepath)
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
| [
"#!/usr/bin/env python\n",
"# -*- coding: utf-8 -*-\n",
"\n",
"from nose.tools import *\n",
"from utilities import execution_path, run_all\n",
"import os, mapnik\n",
"from glob import glob\n",
"\n",
"default_logging_severity = mapnik.logger.get_severity()\n",
"\n",
"def setup():\n",
" mapnik.logger.set_severity(mapnik.severity_type.None)\n",
" # All of the paths used are relative, if we run the tests\n",
" # from another directory we need to chdir()\n",
" os.chdir(execution_path('.'))\n",
"\n",
"def teardown():\n",
" mapnik.logger.set_severity(default_logging_severity)\n",
"\n",
"plugin_mapping = {\n",
" '.csv' : ['csv'],\n",
" '.json': ['geojson','ogr'],\n",
" '.tif' : ['gdal'],\n",
" #'.tif' : ['gdal','raster'],\n",
" '.kml' : ['ogr'],\n",
" '.gpx' : ['ogr'],\n",
" '.vrt' : ['gdal']\n",
"}\n",
"\n",
"def test_opening_data():\n",
" # https://github.com/mapbox/mapnik-test-data\n",
" # cd tests/data\n",
" # git clone --depth 1 https://github.com/mapbox/mapnik-test-data\n",
" if os.path.exists('../data/mapnik-test-data/'):\n",
" files = glob('../data/mapnik-test-data/data/*/*.*')\n",
" for filepath in files:\n",
" ext = os.path.splitext(filepath)[1]\n",
" if plugin_mapping.get(ext):\n",
" print 'testing opening %s' % filepath\n",
" if 'topo' in filepath:\n",
" kwargs = {'type': 'ogr','file': filepath}\n",
" kwargs['layer_by_index'] = 0\n",
" try:\n",
" ds = mapnik.Datasource(**kwargs)\n",
" except Exception, e:\n",
" print 'could not open, %s: %s' % (kwargs,e)\n",
" else:\n",
" for plugin in plugin_mapping[ext]:\n",
" kwargs = {'type': plugin,'file': filepath}\n",
" if plugin is 'ogr':\n",
" kwargs['layer_by_index'] = 0\n",
" try:\n",
" ds = mapnik.Datasource(**kwargs)\n",
" except Exception, e:\n",
" print 'could not open, %s: %s' % (kwargs,e)\n",
" else:\n",
" print 'skipping opening %s' % filepath\n",
"\n",
"if __name__ == \"__main__\":\n",
" setup()\n",
" exit(run_all(eval(x) for x in dir() if x.startswith(\"test_\")))\n"
] | [
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0.05263157894736842,
0.045454545454545456,
0.03125,
0.043478260869565216,
0.030303030303030304,
0.045454545454545456,
0.045454545454545456,
0.045454545454545456,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0.014705882352941176,
0,
0.018518518518518517,
0.03076923076923077,
0.023809523809523808,
0.01818181818181818,
0.037037037037037035,
0.01694915254237288,
0.023255813953488372,
0.02857142857142857,
0,
0,
0,
0.037037037037037035,
0,
0
] | 61 | 0.013761 | false |
from steam import WebAPI
from config import SteamAPI
import discord
async def csgo(cmd, message, args):
    """Look up a user's CS:GO statistics on the Steam Web API and post
    them to the message's channel as a Discord embed.

    :param cmd: command context; its logger records failures.
    :param message: triggering Discord message; replies go to its channel.
    :param args: command arguments; joined with spaces to form the Steam
        vanity URL fragment. No-op when empty.
    """
    if not args:
        return
    csgo_input = ' '.join(args)

    try:
        api = WebAPI(SteamAPI)
        userID = api.call('ISteamUser.ResolveVanityURL', vanityurl=csgo_input, url_type=1)['response']['steamid']
        stats = api.call('ISteamUserStats.GetUserStatsForGame', steamid=userID, appid='730')['playerstats']['stats']
        summary = api.call('ISteamUser.GetPlayerSummaries', steamids=userID)['response']['players'][0]

        nickname = str(summary['personaname'])
        avatar_url = str(summary['avatarfull'])
        # Stats we care about, defaulted to 0 in case the API omits any.
        stat_bases = {
            "total_kills": 0,
            "total_deaths": 0,
            "total_time_played": 0,
            "total_kills_knife": 0,
            "total_kills_headshot": 0,
            "total_shots_fired": 0,
            "total_shots_hit": 0,
            "total_rounds_played": 0,
            "total_mvps": 0,
            "total_matches_won": 0,
            "total_matches_played": 0}

        for stat in stats:
            if stat['name'] in stat_bases:
                stat_bases[stat['name']] = stat['value']

        # Guard every denominator: fresh accounts can legitimately report
        # 0 deaths, 0 shots fired or 0 matches played, which previously
        # raised ZeroDivisionError and surfaced as the generic error reply.
        kdr = stat_bases['total_kills'] / (stat_bases['total_deaths'] or 1)
        accuracy = stat_bases['total_shots_hit'] / (stat_bases['total_shots_fired'] or 1)
        total_matches_lost = stat_bases['total_matches_played'] - stat_bases['total_matches_won']
        win_percent = stat_bases['total_matches_won'] / (stat_bases['total_matches_played'] or 1)

        # Field title -> rendered value, in embed display order.
        data = {
            'Playtime': str(stat_bases['total_time_played'] // 3600) + ' Hours',
            'Kills': str(stat_bases['total_kills']),
            'Deaths': str(stat_bases['total_deaths']),
            'Kill/Death Ratio': "{0:.2f}".format(kdr),
            'Shots Fired': str(stat_bases['total_shots_fired']),
            'Shots Hit': str(stat_bases['total_shots_hit']),
            'Accuracy': "{0:.2f}".format(accuracy * 100) + '%',
            'Headshots': str(stat_bases['total_kills_headshot']),
            'Knife Kills': str(stat_bases['total_kills_knife']),
            'Rounds Played': str(stat_bases['total_rounds_played']),
            'Total MVPs': str(stat_bases['total_mvps']),
            'Matches Played': str(stat_bases['total_matches_played']),
            'Matches Won': str(stat_bases['total_matches_won']),
            'Matches Lost': str(total_matches_lost),
            'Win Percentage': "{0:.2f}".format(win_percent * 100) + '%'
        }
        embed = discord.Embed(color=0x1ABC9C)
        embed.set_author(name=nickname, icon_url=avatar_url, url=avatar_url)
        for unit in data:
            embed.add_field(name=unit, value=data[unit])
        await message.channel.send(None, embed=embed)

    except Exception as e:
        cmd.log.error(e)
        await message.channel.send('Something went wrong or the user was not found.')
| [
"from steam import WebAPI\n",
"from config import SteamAPI\n",
"import discord\n",
"\n",
"\n",
"async def csgo(cmd, message, args):\n",
" if not args:\n",
" return\n",
" csgo_input = ' '.join(args)\n",
"\n",
" try:\n",
" api = WebAPI(SteamAPI)\n",
" userID = api.call('ISteamUser.ResolveVanityURL', vanityurl=csgo_input, url_type=1)['response']['steamid']\n",
" stats = api.call('ISteamUserStats.GetUserStatsForGame', steamid=userID, appid='730')['playerstats']['stats']\n",
" summary = api.call('ISteamUser.GetPlayerSummaries', steamids=userID)['response']['players'][0]\n",
"\n",
" nickname = str(summary['personaname'])\n",
" avatar_url = str(summary['avatarfull'])\n",
" v = 'value'\n",
" n = 'name'\n",
" stat_bases = {\n",
" \"total_kills\": 0,\n",
" \"total_deaths\": 0,\n",
" \"total_time_played\": 0,\n",
" \"total_kills_knife\": 0,\n",
" \"total_kills_headshot\": 0,\n",
" \"total_shots_fired\": 0,\n",
" \"total_shots_hit\": 0,\n",
" \"total_rounds_played\": 0,\n",
" \"total_mvps\": 0,\n",
" \"total_matches_won\": 0,\n",
" \"total_matches_played\": 0}\n",
"\n",
" for stat in stats:\n",
" nam = stat[n]\n",
" val = stat[v]\n",
" if nam in stat_bases:\n",
" stat_bases[nam] = val\n",
"\n",
" kdr = stat_bases['total_kills'] / stat_bases['total_deaths']\n",
" accuracy = stat_bases['total_shots_hit'] / stat_bases['total_shots_fired']\n",
" total_matches_lost = stat_bases['total_matches_played'] - stat_bases['total_matches_won']\n",
" win_percent = stat_bases['total_matches_won'] / stat_bases['total_matches_played']\n",
"\n",
" data = {\n",
" 'Playtime': str(stat_bases['total_time_played'] // 3600) + ' Hours',\n",
" 'Kills': str(stat_bases['total_kills']),\n",
" 'Deaths': str(stat_bases['total_deaths']),\n",
" 'Kill/Death Ratio': \"{0:.2f}\".format(kdr),\n",
" 'Shots Fired': str(stat_bases['total_shots_fired']),\n",
" 'Shots Hit': str(stat_bases['total_shots_hit']),\n",
" 'Accuracy': \"{0:.2f}\".format(accuracy * 100) + '%',\n",
" 'Headshots': str(stat_bases['total_kills_headshot']),\n",
" 'Knife Kills': str(stat_bases['total_kills_knife']),\n",
" 'Rounds Played': str(stat_bases['total_rounds_played']),\n",
" 'Total MVPs': str(stat_bases['total_mvps']),\n",
" 'Matches Played': str(stat_bases['total_matches_played']),\n",
" 'Matches Won': str(stat_bases['total_matches_won']),\n",
" 'Matches Lost': str(total_matches_lost),\n",
" 'Win Percentage': \"{0:.2f}\".format(win_percent * 100) + '%'\n",
" }\n",
" embed = discord.Embed(color=0x1ABC9C)\n",
" embed.set_author(name=nickname, icon_url=avatar_url, url=avatar_url)\n",
" for unit in data:\n",
" embed.add_field(name=unit, value=data[unit])\n",
" await message.channel.send(None, embed=embed)\n",
"\n",
" except Exception as e:\n",
" cmd.log.error(e)\n",
" await message.channel.send('Something went wrong or the user was not found.')\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008771929824561403,
0.008547008547008548,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.01020408163265306,
0.01098901098901099,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186
] | 70 | 0.001203 | false |
import os
import sqlite3
#######################
# TRANSFER OBJECTS
#######################
####################################################################################################
class Partition:
    """Transfer object mirroring one row of the ``partition`` table."""

    def __init__(self):
        # Every field starts unset; the DAO layer fills them in from a
        # query row, or the caller sets them before an insert.
        (self.pk, self.idx, self.uid, self.num_edges, self.instr,
         self.opt, self.exec_time, self.num_axioms, self.stdev) = (None,) * 9
####################################################################################################
class Subgraph:
    """Transfer object mirroring one row of the ``subgraph`` table."""

    def __init__(self):
        # Fields are populated by SubgraphDAO (or by the caller before insert).
        for attr in ("pk", "partition_pk", "idx", "freq", "exec_time", "stdev"):
            setattr(self, attr, None)
####################################################################################################
class Edge:
    """Transfer object mirroring one row of the ``edge`` table."""

    def __init__(self):
        # Fields are populated by EdgeDAO (or by the caller before insert).
        self.pk = self.partition_pk = self.idx = None
        self.type = self.freq = self.exec_time = None
        self.subgraph_pk = self.stdev = None
###########
# MISC
###########
####################################################################################################
def assembleWhereClause(fields, prefix = ""):
return str.join(" and ", [(prefix + key) + " = " + fieldToStr(fields[key]) for key in iter(fields)])
####################################################################################################
def fieldToStr(field):
    """Render a Python value as a SQL literal fragment.

    None -> "null", bool -> "1"/"0", str -> single-quoted, anything
    else -> str(field).

    NOTE(review): string values are not escaped, so a value containing a
    single quote produces broken SQL -- only use with trusted values.
    """
    if field is None:
        return "null"
    # Exact-type checks (not isinstance) are deliberate, matching the
    # original behavior: subclasses fall through to str(field).
    if type(field) is bool:
        return "1" if field else "0"
    if type(field) is str:
        return "'" + field + "'"
    return str(field)
####################################################################################################
def intToBool(value):
    """Map a SQLite integer flag to bool: 0 -> False, anything else -> True.

    (Parameter renamed from ``int``, which shadowed the builtin; the file
    only calls this positionally.)
    """
    return value != 0
####################################################################################################
class Database:
    """Thin static wrapper around a single module-wide sqlite3 connection."""

    # Shared connection; None until open() succeeds.
    connection = None

    @staticmethod
    def open(name):
        """Open (or create) ``<name>.db`` with Row access and foreign keys on."""
        Database.connection = sqlite3.connect(name + ".db")
        Database.connection.row_factory = sqlite3.Row
        Database.connection.execute('pragma foreign_keys=ON;')
        foreign_keys = Database.connection.execute('pragma foreign_keys;').fetchone()[0]
        if foreign_keys != 1:
            raise Exception("foreign_keys != 1")
        # autocommit by default
        Database.connection.isolation_level = None

    @staticmethod
    def execute(sql, params):
        """Run a parameterized statement and return its rowcount."""
        if Database.connection is None:
            raise Exception("Database.connection == None")
        rowcount = Database.connection.execute(sql, params).rowcount
        return rowcount

    @staticmethod
    def executeAndReturnLastRowId(sql, params):
        """Run a parameterized statement and return the last inserted rowid."""
        if Database.connection is None:
            raise Exception("Database.connection == None")
        lastrowid = Database.connection.execute(sql, params).lastrowid
        return lastrowid

    @staticmethod
    def fetch(sql):
        """Return all rows produced by ``sql``."""
        if Database.connection is None:
            raise Exception("Database.connection == None")
        cursor = Database.connection.cursor()
        cursor.execute(sql)
        rows = cursor.fetchall()
        cursor.close()
        return rows

    @staticmethod
    def fetchOne(sql):
        """Return the first row produced by ``sql`` (or None if empty)."""
        if Database.connection is None:
            # message made consistent with the other guards
            raise Exception("Database.connection == None")
        cursor = Database.connection.cursor()
        cursor.execute(sql)
        row = cursor.fetchone()
        cursor.close()
        return row

    @staticmethod
    def __create(name, script_filename):
        """Create ``<name>.db``, optionally initializing it from a SQL script."""
        Database.open(name)
        if script_filename is not None:
            if not os.path.exists(script_filename):
                raise Exception("script \"{0}\" not found".format(script_filename))
            with open(script_filename, "r") as script_file:
                script = script_file.read()
            Database.connection.executescript(script)
        Database.close()

    @staticmethod
    def createIfNeeded(name, script_filename=None):
        """Create the database only if ``<name>.db`` does not exist yet."""
        if os.path.exists(name + ".db"):
            return
        Database.__create(name, script_filename)

    @staticmethod
    def create(name, script_filename=None):
        """(Re)create the database, deleting any existing ``<name>.db``."""
        if os.path.exists(name + ".db"):
            os.remove(name + ".db")
        Database.__create(name, script_filename)

    @staticmethod
    def close():
        """Close the shared connection if one is open."""
        if Database.connection is not None:
            Database.connection.close()
            # Drop the stale reference so later calls fail with this class's
            # own guard instead of sqlite3.ProgrammingError.
            Database.connection = None
##########################
# DATA ACCESS OBJECTS
##########################
####################################################################################################
class PartitionDAO:
    """Data access object for the ``partition`` table."""

    @staticmethod
    def insert(partition):
        """Insert ``partition`` and set its ``pk`` from the new rowid."""
        if partition.pk is not None:
            raise Exception("partition.pk != None")
        lastrowid = Database.executeAndReturnLastRowId(
            "insert into partition (idx, uid, num_edges, instr, opt, exec_time, stdev, num_axioms) values (?, ?, ?, ?, ?, ?, ?, ?);",
            [partition.idx, partition.uid, partition.num_edges, partition.instr, partition.opt, partition.exec_time, partition.stdev, partition.num_axioms])
        partition.pk = lastrowid

    @staticmethod
    def update(partition):
        """Update the row identified by ``partition.pk``."""
        if partition.pk is None:
            raise Exception("partition.pk == None")
        # BUG FIX: removed the stray comma before "where" ("stdev=?, where"),
        # which made the statement invalid SQL.
        rowcount = Database.execute(
            "update partition set idx=?, uid=?, num_edges=?, instr=?, opt=?, exec_time=?, num_axioms=?, stdev=? where pk=?;",
            [partition.idx, partition.uid, partition.num_edges, partition.instr, partition.opt, partition.exec_time, partition.num_axioms, partition.stdev, partition.pk])
        if rowcount == 0:
            raise Exception("rowcount == 0")

    @staticmethod
    def __fromRow(row):
        """Build a Partition transfer object from a sqlite3.Row."""
        if row is None:
            raise Exception("row == None")
        result = Partition()
        result.pk = row["pk"]
        result.idx = row["idx"]
        result.uid = row["uid"]
        result.num_edges = row["num_edges"]
        result.instr = intToBool(row["instr"])
        result.opt = row["opt"]
        result.exec_time = row["exec_time"]
        result.stdev = row["stdev"]
        result.num_axioms = row["num_axioms"]
        return result

    @staticmethod
    def list():
        """Return every partition as a list of transfer objects."""
        rows = Database.fetch("select pk, idx, uid, num_edges, instr, opt, exec_time, num_axioms, stdev from partition;")
        return [PartitionDAO.__fromRow(row) for row in rows]

    @staticmethod
    def find(fields):
        """Return the first partition matching the given field values."""
        if len(fields) == 0:
            raise Exception("len(fields) == 0")
        return PartitionDAO.__fromRow(Database.fetchOne("select pk, idx, uid, num_edges, instr, opt, exec_time, stdev, num_axioms from partition where " + assembleWhereClause(fields) + ";"))

    @staticmethod
    def count():
        """Return the number of rows in ``partition``."""
        return Database.fetchOne("select count(*) from partition;")[0]

    @staticmethod
    def lightWeightList():
        """Return partial Partition objects (idx, uid, num_axioms only)."""
        rows = Database.fetch("select idx, uid, num_axioms from partition;")
        results = []
        for row in rows:
            partition = Partition()
            partition.idx = row["idx"]
            partition.uid = row["uid"]
            partition.num_axioms = row["num_axioms"]
            results.append(partition)
        return results

    @staticmethod
    def globalMinMaxNumAxioms(fields):
        """Return (min, max) of num_axioms over partitions matching ``fields``."""
        if len(fields) == 0:
            raise Exception("len(fields) == 0")
        row = Database.fetchOne("select min(num_axioms) as min, max(num_axioms) as max from (select num_axioms from partition where " + assembleWhereClause(fields) + ");")
        return (row["min"], row["max"])

    @staticmethod
    def execTimes(where, orderBy = ("instr", "opt")):
        """Return the exec_time column for matching partitions, ordered.

        ``orderBy`` default is now a tuple to avoid a mutable default argument.
        """
        if len(where) == 0:
            # BUG FIX: message previously referred to "fields"
            raise Exception("len(where) == 0")
        rows = Database.fetch("select exec_time from partition where " + assembleWhereClause(where) + " order by " + ", ".join(orderBy) + ";")
        return [row["exec_time"] for row in rows]
####################################################################################################
class SubgraphDAO:
    """Data access object for the ``subgraph`` table."""

    @staticmethod
    def insert(subgraph):
        """Insert ``subgraph`` and set its ``pk`` from the new rowid."""
        if subgraph.pk is not None:
            raise Exception("subgraph.pk != None")
        lastrowid = Database.executeAndReturnLastRowId(
            "insert into subgraph (partition_pk, idx, freq, exec_time, stdev) values (?, ?, ?, ?, ?);",
            [subgraph.partition_pk, subgraph.idx, subgraph.freq, subgraph.exec_time, subgraph.stdev])
        subgraph.pk = lastrowid

    @staticmethod
    def update(subgraph):
        """Update the row identified by ``subgraph.pk``."""
        if subgraph.pk is None:
            raise Exception("subgraph.pk == None")
        rowcount = Database.execute(
            "update subgraph set idx=?, freq=?, exec_time=?, stdev=? where pk=?;",
            [subgraph.idx, subgraph.freq, subgraph.exec_time, subgraph.stdev, subgraph.pk])
        if rowcount == 0:
            raise Exception("rowcount == 0")

    @staticmethod
    def __fromRow(row):
        """Build a Subgraph transfer object from a sqlite3.Row."""
        if row is None:
            raise Exception("row == None")
        result = Subgraph()
        result.pk = row["pk"]
        result.partition_pk = row["partition_pk"]
        result.idx = row["idx"]
        result.freq = row["freq"]
        result.exec_time = row["exec_time"]
        result.stdev = row["stdev"]
        return result

    @staticmethod
    def list():
        """Return every subgraph, ordered by (partition_pk, idx)."""
        rows = Database.fetch("select pk, partition_pk, idx, freq, exec_time, stdev from subgraph order by partition_pk, idx;")
        return [SubgraphDAO.__fromRow(row) for row in rows]

    @staticmethod
    def find(fields):
        """Return the first subgraph matching the given field values."""
        if len(fields) == 0:
            raise Exception("len(fields) == 0")
        # BUG FIX: the select previously read "from subgraph from subgraph",
        # which is invalid SQL.
        return SubgraphDAO.__fromRow(Database.fetchOne("select pk, partition_pk, idx, freq, exec_time, stdev from subgraph where " + assembleWhereClause(fields) + " order by partition_pk, idx;"))

    @staticmethod
    def fromPartition(fields):
        """Return all subgraphs of the single partition matching ``fields``."""
        rows = Database.fetch("select pk, partition_pk, idx, freq, exec_time, stdev from subgraph where partition_pk = (select pk from partition where " + assembleWhereClause(fields) + ") order by partition_pk, idx;")
        return [SubgraphDAO.__fromRow(row) for row in rows]

    @staticmethod
    def count():
        """Return the number of rows in ``subgraph``."""
        return Database.fetchOne("select count(*) from subgraph;")[0]
####################################################################################################
class EdgeDAO:
    """Data access object for the ``edge`` table."""

    @staticmethod
    def insert(edge):
        """Insert ``edge`` and set its ``pk`` from the new rowid.

        BUG FIX: insert/update had the Database helpers transposed relative
        to the other DAOs -- insert used execute() and never set edge.pk.
        """
        if edge.pk is not None:
            raise Exception("edge.pk != None")
        lastrowid = Database.executeAndReturnLastRowId(
            "insert into edge (partition_pk, idx, freq, exec_time, stdev, type, subgraph_pk) values (?, ?, ?, ?, ?, ?, ?);",
            [edge.partition_pk, edge.idx, edge.freq, edge.exec_time, edge.stdev, edge.type, edge.subgraph_pk])
        edge.pk = lastrowid

    @staticmethod
    def update(edge):
        """Update the row identified by ``edge.pk``.

        BUG FIX: update previously called executeAndReturnLastRowId and
        overwrote edge.pk with a meaningless lastrowid; it now checks the
        rowcount like the other DAOs.
        """
        if edge.pk is None:
            raise Exception("edge.pk == None")
        rowcount = Database.execute(
            "update edge set idx=?, freq=?, exec_time=?, stdev=?, type=?, subgraph_pk = ? where pk=?;",
            [edge.idx, edge.freq, edge.exec_time, edge.stdev, edge.type, edge.subgraph_pk, edge.pk])
        if rowcount == 0:
            raise Exception("rowcount == 0")

    @staticmethod
    def __fromRow(row):
        """Build an Edge transfer object from a sqlite3.Row."""
        if row is None:
            raise Exception("row == None")
        result = Edge()
        result.pk = row["pk"]
        result.partition_pk = row["partition_pk"]
        result.idx = row["idx"]
        result.freq = row["freq"]
        result.exec_time = row["exec_time"]
        result.stdev = row["stdev"]
        result.type = row["type"]
        result.subgraph_pk = row["subgraph_pk"]
        return result

    @staticmethod
    def list():
        """Return every edge, ordered by (partition_pk, idx)."""
        rows = Database.fetch("select pk, partition_pk, idx, freq, exec_time, stdev, type, subgraph_pk from edge order by partition_pk, idx;")
        return [EdgeDAO.__fromRow(row) for row in rows]

    @staticmethod
    def find(fields):
        """Return the first edge matching the given field values."""
        if len(fields) == 0:
            raise Exception("len(fields) == 0")
        # BUG FIX: the select previously read "from edge from edge",
        # which is invalid SQL.
        return EdgeDAO.__fromRow(Database.fetchOne("select pk, partition_pk, idx, freq, exec_time, stdev, type, subgraph_pk from edge where " + assembleWhereClause(fields) + " order by partition_pk, idx;"))

    @staticmethod
    def fromPartition(fields):
        """Return all edges of the single partition matching ``fields``."""
        rows = Database.fetch("select pk, partition_pk, idx, freq, exec_time, stdev, type, subgraph_pk from edge where partition_pk = (select pk from partition where " + assembleWhereClause(fields) + ") order by partition_pk, idx;")
        return [EdgeDAO.__fromRow(row) for row in rows]

    @staticmethod
    def count():
        """Return the number of rows in ``edge``."""
        return Database.fetchOne("select count(*) from edge;")[0]
| [
"import os\n",
"import sqlite3\n",
"\n",
"#######################\n",
"# TRANSFER OBJECTS\n",
"#######################\n",
"\n",
"####################################################################################################\n",
"class Partition:\n",
" def __init__(self):\n",
" self.pk = None\n",
" self.idx = None\n",
" self.uid = None\n",
" self.num_edges = None\n",
" self.instr = None\n",
" self.opt = None\n",
" self.exec_time = None\n",
" self.num_axioms = None\n",
" self.stdev = None\n",
"\n",
"####################################################################################################\n",
"class Subgraph:\n",
" def __init__(self):\n",
" self.pk = None\n",
" self.partition_pk = None\n",
" self.idx = None\n",
" self.freq = None\n",
" self.exec_time = None\n",
" self.stdev = None\n",
"\n",
"####################################################################################################\n",
"class Edge:\n",
" def __init__(self):\n",
" self.pk = None\n",
" self.partition_pk = None\n",
" self.idx = None\n",
" self.type = None\n",
" self.freq = None\n",
" self.exec_time = None\n",
" self.subgraph_pk = None\n",
" self.stdev = None\n",
"\n",
"###########\n",
"# MISC\n",
"###########\n",
"\n",
"####################################################################################################\n",
"def assembleWhereClause(fields, prefix = \"\"):\n",
" return str.join(\" and \", [(prefix + key) + \" = \" + fieldToStr(fields[key]) for key in iter(fields)])\n",
"\n",
"####################################################################################################\n",
"def fieldToStr(field):\n",
" if field == None:\n",
" return \"null\"\n",
" elif type(field) is str:\n",
" return \"'\" + field + \"'\"\n",
" elif type(field) is bool:\n",
" if field == True:\n",
" return \"1\"\n",
" else:\n",
" return \"0\"\n",
" else:\n",
" return str(field)\n",
"\n",
"####################################################################################################\n",
"def intToBool(int):\n",
" if int == 0:\n",
" return False\n",
" else:\n",
" return True\n",
"\n",
"####################################################################################################\n",
"class Database:\n",
" connection = None\n",
"\n",
" @staticmethod\n",
" def open(name):\n",
" Database.connection = sqlite3.connect(name + \".db\")\n",
" Database.connection.row_factory = sqlite3.Row\n",
" Database.connection.execute('pragma foreign_keys=ON;')\n",
" foreign_keys = Database.connection.execute('pragma foreign_keys;').fetchone()[0]\n",
" if foreign_keys != 1:\n",
" raise Exception(\"foreign_keys != 1\")\n",
" # autocommit by default\n",
" Database.connection.isolation_level = None\n",
"\n",
" @staticmethod\n",
" def execute(sql, params):\n",
" if Database.connection == None:\n",
" raise Exception(\"Database.connection == None\")\n",
"\n",
" rowcount = Database.connection.execute(sql, params).rowcount\n",
" return rowcount\n",
"\n",
" @staticmethod\n",
" def executeAndReturnLastRowId(sql, params):\n",
" if Database.connection == None:\n",
" raise Exception(\"Database.connection == None\")\n",
"\n",
" lastrowid = Database.connection.execute(sql, params).lastrowid\n",
" return lastrowid\n",
"\n",
" @staticmethod\n",
" def fetch(sql):\n",
" if Database.connection == None:\n",
" raise Exception(\"Database.connection == None\")\n",
"\n",
" cursor = Database.connection.cursor()\n",
" cursor.execute(sql)\n",
" rows = cursor.fetchall()\n",
" cursor.close()\n",
" return rows\n",
"\n",
" @staticmethod\n",
" def fetchOne(sql):\n",
" if Database.connection == None:\n",
" raise Exception(\"connection == None\")\n",
"\n",
" cursor = Database.connection.cursor()\n",
" cursor.execute(sql)\n",
" row = cursor.fetchone()\n",
" cursor.close()\n",
" return row\n",
"\n",
" @staticmethod\n",
" def __create(name, script_filename):\n",
" Database.open(name)\n",
" if script_filename != None:\n",
" if not os.path.exists(script_filename):\n",
" raise Exception(\"script \\\"{0}\\\" not found\".format(script_filename))\n",
" with open(script_filename, \"r\") as script_file:\n",
" script = script_file.read()\n",
" Database.connection.executescript(script)\n",
" Database.close()\n",
"\n",
" @staticmethod\n",
" def createIfNeeded(name, script_filename=None):\n",
" if os.path.exists(name + \".db\"):\n",
" return\n",
" Database.__create(name, script_filename)\n",
"\n",
" @staticmethod\n",
" def create(name, script_filename=None):\n",
" if os.path.exists(name + \".db\"):\n",
" os.remove(name + \".db\")\n",
" Database.__create(name, script_filename)\n",
"\n",
" @staticmethod\n",
" def close():\n",
" if Database.connection != None:\n",
" Database.connection.close()\n",
"\n",
"##########################\n",
"# DATA ACCESS OBJECTS\n",
"##########################\n",
"\n",
"####################################################################################################\n",
"class PartitionDAO:\n",
" @staticmethod\n",
" def insert(partition):\n",
" if partition.pk != None:\n",
" raise Exception(\"partition.pk != None\")\n",
" lastrowid = Database.executeAndReturnLastRowId(\"insert into partition (idx, uid, num_edges, instr, opt, exec_time, stdev, num_axioms) values (?, ?, ?, ?, ?, ?, ?, ?);\", [partition.idx, partition.uid, partition.num_edges, partition.instr, partition.opt, partition.exec_time, partition.stdev, partition.num_axioms])\n",
" partition.pk = lastrowid\n",
"\n",
" @staticmethod\n",
" def update(partition):\n",
" if partition.pk == None:\n",
" raise Exception(\"partition.pk != None\")\n",
" rowcount = Database.execute(\"update partition set idx=?, uid=?, num_edges=?, instr=?, opt=?, exec_time=?, num_axioms=?, stdev=?, where pk=?;\", [partition.idx, partition.uid, partition.num_edges, partition.instr, partition.opt, partition.exec_time, partition.num_axioms, partition.stdev, partition.pk])\n",
" if rowcount == 0:\n",
" raise Exception(\"rowcount == 0\")\n",
"\n",
" @staticmethod\n",
" def __fromRow(row):\n",
" if row == None:\n",
" raise Exception(\"row == None\")\n",
" result = Partition()\n",
" result.pk = row[\"pk\"]\n",
" result.idx = row[\"idx\"]\n",
" result.uid = row[\"uid\"]\n",
" result.num_edges = row[\"num_edges\"]\n",
" result.instr = intToBool(row[\"instr\"])\n",
" result.opt = row[\"opt\"]\n",
" result.exec_time = row[\"exec_time\"]\n",
" result.stdev = row[\"stdev\"]\n",
" result.num_axioms = row[\"num_axioms\"]\n",
" return result\n",
"\n",
" @staticmethod\n",
" def list():\n",
" rows = Database.fetch(\"select pk, idx, uid, num_edges, instr, opt, exec_time, num_axioms, stdev from partition;\")\n",
" results = []\n",
" for row in rows:\n",
" results.append(PartitionDAO.__fromRow(row))\n",
" return results\n",
"\n",
" @staticmethod\n",
" def find(fields):\n",
" if len(fields) == 0:\n",
" raise Exception(\"len(fields) == 0\")\n",
" return PartitionDAO.__fromRow(Database.fetchOne(\"select pk, idx, uid, num_edges, instr, opt, exec_time, stdev, num_axioms from partition where \" + assembleWhereClause(fields) + \";\"))\n",
"\n",
" @staticmethod\n",
" def count():\n",
" return Database.fetchOne(\"select count(*) from partition;\")[0]\n",
"\n",
" @staticmethod\n",
" def lightWeightList():\n",
" rows = Database.fetch(\"select idx, uid, num_axioms from partition;\")\n",
" results = []\n",
" for row in rows:\n",
" partition = Partition()\n",
" partition.idx = row[\"idx\"]\n",
" partition.uid = row[\"uid\"]\n",
" partition.num_axioms= row[\"num_axioms\"]\n",
" results.append(partition)\n",
" return results\n",
"\n",
" @staticmethod\n",
" def globalMinMaxNumAxioms(fields):\n",
" if len(fields) == 0:\n",
" raise Exception(\"len(fields) == 0\")\n",
" row = Database.fetchOne(\"select min(num_axioms) as min, max(num_axioms) as max from (select num_axioms from partition where \" + assembleWhereClause(fields) + \");\")\n",
" return (row[\"min\"], row[\"max\"])\n",
"\n",
" @staticmethod\n",
" def execTimes(where, orderBy = [\"instr\", \"opt\"]):\n",
" if len(where) == 0:\n",
" raise Exception(\"len(fields) == 0\")\n",
" rows = Database.fetch(\"select exec_time from partition where \" + assembleWhereClause(where) + \" order by \" + str.join(\", \", orderBy) + \";\")\n",
" results = []\n",
" for row in rows:\n",
" results.append(row[\"exec_time\"])\n",
" return results\n",
"\n",
"####################################################################################################\n",
"class SubgraphDAO:\n",
" @staticmethod\n",
" def insert(subgraph):\n",
" if subgraph.pk != None:\n",
" raise Exception(\"subgraph.pk != None\")\n",
" lastrowid = Database.executeAndReturnLastRowId(\"insert into subgraph (partition_pk, idx, freq, exec_time, stdev) values (?, ?, ?, ?, ?);\", [subgraph.partition_pk, subgraph.idx, subgraph.freq, subgraph.exec_time, subgraph.stdev])\n",
" subgraph.pk = lastrowid\n",
"\n",
" @staticmethod\n",
" def update(subgraph):\n",
" if subgraph.pk == None:\n",
" raise Exception(\"subgraph.pk != None\")\n",
" rowcount = Database.execute(\"update subgraph set idx=?, freq=?, exec_time=?, stdev=? where pk=?;\", [subgraph.idx, subgraph.freq, subgraph.exec_time, subgraph.stdev, subgraph.pk])\n",
" if rowcount == 0:\n",
" raise Exception(\"rowcount == 0\")\n",
"\n",
" @staticmethod\n",
" def __fromRow(row):\n",
" if row == None:\n",
" raise Exception(\"row == None\")\n",
" result = Subgraph()\n",
" result.pk = row[\"pk\"]\n",
" result.partition_pk = row[\"partition_pk\"]\n",
" result.idx = row[\"idx\"]\n",
" result.freq = row[\"freq\"]\n",
" result.exec_time = row[\"exec_time\"]\n",
" result.stdev = row[\"stdev\"]\n",
" return result\n",
"\n",
" @staticmethod\n",
" def list():\n",
" rows = Database.fetch(\"select pk, partition_pk, idx, freq, exec_time, stdev from subgraph order by partition_pk, idx;\")\n",
" results = []\n",
" for row in rows:\n",
" results.append(SubgraphDAO.__fromRow(row))\n",
" return results\n",
"\n",
" @staticmethod\n",
" def find(fields):\n",
" if len(fields) == 0:\n",
" raise Exception(\"len(fields) == 0\")\n",
" return SubgraphDAO.__fromRow(Database.fetchOne(\"select pk, partition_pk, idx, freq, exec_time, stdev from subgraph from subgraph where \" + assembleWhereClause(fields) + \" order by partition_pk, idx;\"))\n",
"\n",
" @staticmethod\n",
" def fromPartition(fields):\n",
" rows = Database.fetch(\"select pk, partition_pk, idx, freq, exec_time, stdev from subgraph where partition_pk = (select pk from partition where \" + assembleWhereClause(fields) + \") order by partition_pk, idx;\")\n",
" results = []\n",
" for row in rows:\n",
" results.append(SubgraphDAO.__fromRow(row))\n",
" return results\n",
"\n",
" @staticmethod\n",
" def count():\n",
" return Database.fetchOne(\"select count(*) from subgraph;\")[0]\n",
" \n",
"####################################################################################################\n",
"class EdgeDAO:\n",
" @staticmethod\n",
" def insert(edge):\n",
" if edge.pk != None:\n",
" raise Exception(\"edge.pk != None\")\n",
" rowcount = Database.execute(\"insert into edge (partition_pk, idx, freq, exec_time, stdev, type, subgraph_pk) values (?, ?, ?, ?, ?, ?, ?);\", [edge.partition_pk, edge.idx, edge.freq, edge.exec_time, edge.stdev, edge.type, edge.subgraph_pk])\n",
" if rowcount == 0:\n",
" raise Exception(\"rowcount == 0\")\n",
"\n",
" @staticmethod\n",
" def update(edge):\n",
" if edge.pk == None:\n",
" raise Exception(\"edge.pk != None\")\n",
" lastrowid = Database.executeAndReturnLastRowId(\"update edge set idx=?, freq=?, exec_time=?, stdev=?, type=?, subgraph_pk = ? where pk=?;\", [edge.idx, edge.freq, edge.exec_time, edge.stdev, edge.type, edge.subgraph_pk, edge.pk])\n",
" edge.pk = lastrowid\n",
"\n",
" @staticmethod\n",
" def __fromRow(row):\n",
" if row == None:\n",
" raise Exception(\"row == None\")\n",
" result = Edge()\n",
" result.pk = row[\"pk\"]\n",
" result.partition_pk = row[\"partition_pk\"]\n",
" result.idx = row[\"idx\"]\n",
" result.freq = row[\"freq\"]\n",
" result.exec_time = row[\"exec_time\"]\n",
" result.stdev = row[\"stdev\"]\n",
" result.type = row[\"type\"]\n",
" result.subgraph_pk = row[\"subgraph_pk\"]\n",
" return result\n",
"\n",
" @staticmethod\n",
" def list():\n",
" rows = Database.fetch(\"select pk, partition_pk, idx, freq, exec_time, stdev, type, subgraph_pk from edge order by partition_pk, idx;\")\n",
" results = []\n",
" for row in rows:\n",
" results.append(EdgeDAO.__fromRow(row))\n",
" return results\n",
"\n",
" @staticmethod\n",
" def find(fields):\n",
" if len(fields) == 0:\n",
" raise Exception(\"len(fields) == 0\")\n",
" return EdgeDAO.__fromRow(Database.fetchOne(\"select pk, partition_pk, idx, freq, exec_time, stdev, type, subgraph_pk from edge from edge where \" + assembleWhereClause(fields) + \" order by partition_pk, idx;\"))\n",
"\n",
" @staticmethod\n",
" def fromPartition(fields):\n",
" rows = Database.fetch(\"select pk, partition_pk, idx, freq, exec_time, stdev, type, subgraph_pk from edge where partition_pk = (select pk from partition where \" + assembleWhereClause(fields) + \") order by partition_pk, idx;\")\n",
" results = []\n",
" for row in rows:\n",
" results.append(EdgeDAO.__fromRow(row))\n",
" return results\n",
"\n",
" @staticmethod\n",
" def count():\n",
" return Database.fetchOne(\"select count(*) from edge;\")[0]\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.06521739130434782,
0.009523809523809525,
0,
0.009900990099009901,
0.043478260869565216,
0.045454545454545456,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.05,
0,
0,
0,
0,
0,
0.009900990099009901,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.05,
0,
0,
0.030303030303030304,
0,
0.003105590062111801,
0,
0,
0,
0,
0.030303030303030304,
0,
0.0032258064516129032,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00819672131147541,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.005235602094240838,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0.005813953488372093,
0,
0,
0,
0.037037037037037035,
0,
0,
0.006756756756756757,
0,
0,
0,
0,
0,
0.009900990099009901,
0.05263157894736842,
0,
0,
0.03125,
0,
0.004219409282700422,
0,
0,
0,
0,
0.03125,
0,
0.0053475935828877,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0078125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.004761904761904762,
0,
0,
0,
0.0045871559633027525,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0.009900990099009901,
0.06666666666666667,
0,
0,
0.03571428571428571,
0,
0.004032258064516129,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0.00423728813559322,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.006993006993006993,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.004608294930875576,
0,
0,
0,
0.004291845493562232,
0,
0,
0,
0,
0,
0,
0,
0
] | 349 | 0.004649 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data import SubscriptionDataSource
from QuantConnect.Python import PythonData
from datetime import date, timedelta, datetime
from System.Collections.Generic import List
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
import decimal as d
import numpy as np
import math
import json
### <summary>
### In this algorithm we show how you can easily use the universe selection feature to fetch symbols
### to be traded using the BaseData custom data system in combination with the AddUniverse{T} method.
### AddUniverse{T} requires a function that will return the symbols to be traded.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="custom universes" />
class DropboxBaseDataUniverseSelectionAlgorithm(QCAlgorithm):
def Initialize(self):
self.UniverseSettings.Resolution = Resolution.Daily;
self.SetStartDate(2013,1,1)
self.SetEndDate(2013,12,31)
self.AddUniverse(StockDataSource, "my-stock-data-source", self.stockDataSource)
def stockDataSource(self, data):
list = []
for item in data:
for symbol in item["Symbols"]:
list.append(symbol)
return list
def OnData(self, slice):
if slice.Bars.Count == 0: return
if self._changes == SecurityChanges.None: return
# start fresh
self.Liquidate()
percentage = 1 / d.Decimal(slice.Bars.Count)
for tradeBar in slice.Bars.Values:
self.SetHoldings(tradeBar.Symbol, percentage)
# reset changes
self._changes = SecurityChanges.None
def OnSecuritiesChanged(self, changes):
self._changes = changes
class StockDataSource(PythonData):
    """Custom BaseData type whose rows carry the day's stock picks."""

    def GetSource(self, config, date, isLiveMode):
        # Live and backtest modes read different dropbox-hosted CSVs.
        if isLiveMode:
            url = "https://www.dropbox.com/s/2az14r5xbx4w5j6/daily-stock-picker-live.csv?dl=1"
        else:
            url = "https://www.dropbox.com/s/rmiiktz0ntpff3a/daily-stock-picker-backtest.csv?dl=1"
        return SubscriptionDataSource(url, SubscriptionTransportMedium.RemoteFile)

    def Reader(self, config, line, date, isLiveMode):
        # Skip blank lines and non-data rows (data rows start with a digit).
        if not line.strip() or not line[0].isdigit():
            return None
        instance = StockDataSource()
        instance.Symbol = config.Symbol
        fields = line.split(',')
        if isLiveMode:
            # Live file has no date column; stamp with the requested date.
            instance.Time = date
            instance["Symbols"] = fields
        else:
            # Backtest rows lead with a YYYYMMDD date, then the symbols.
            instance.Time = datetime.strptime(fields[0], "%Y%m%d")
            instance["Symbols"] = fields[1:]
        return instance
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Algorithm import *\n",
"from QuantConnect.Data import SubscriptionDataSource\n",
"from QuantConnect.Python import PythonData\n",
"from datetime import date, timedelta, datetime\n",
"from System.Collections.Generic import List\n",
"from QuantConnect.Algorithm import QCAlgorithm\n",
"from QuantConnect.Data.UniverseSelection import *\n",
"import decimal as d\n",
"import numpy as np\n",
"import math\n",
"import json\n",
"\n",
"### <summary>\n",
"### In this algortihm we show how you can easily use the universe selection feature to fetch symbols\n",
"### to be traded using the BaseData custom data system in combination with the AddUniverse{T} method.\n",
"### AddUniverse{T} requires a function that will return the symbols to be traded.\n",
"### </summary>\n",
"### <meta name=\"tag\" content=\"using data\" />\n",
"### <meta name=\"tag\" content=\"universes\" />\n",
"### <meta name=\"tag\" content=\"custom universes\" />\n",
"class DropboxBaseDataUniverseSelectionAlgorithm(QCAlgorithm):\n",
"\n",
" def Initialize(self):\n",
"\n",
" self.UniverseSettings.Resolution = Resolution.Daily;\n",
"\n",
" self.SetStartDate(2013,1,1)\n",
" self.SetEndDate(2013,12,31)\n",
" \n",
" self.AddUniverse(StockDataSource, \"my-stock-data-source\", self.stockDataSource)\n",
" \n",
" def stockDataSource(self, data):\n",
" list = []\n",
" for item in data:\n",
" for symbol in item[\"Symbols\"]:\n",
" list.append(symbol)\n",
" return list\n",
"\n",
" def OnData(self, slice):\n",
"\n",
" if slice.Bars.Count == 0: return\n",
" if self._changes == SecurityChanges.None: return\n",
" \n",
" # start fresh\n",
" self.Liquidate()\n",
"\n",
" percentage = 1 / d.Decimal(slice.Bars.Count)\n",
" for tradeBar in slice.Bars.Values:\n",
" self.SetHoldings(tradeBar.Symbol, percentage)\n",
" \n",
" # reset changes\n",
" self._changes = SecurityChanges.None\n",
" \n",
" def OnSecuritiesChanged(self, changes):\n",
" self._changes = changes\n",
" \n",
"class StockDataSource(PythonData):\n",
" \n",
" def GetSource(self, config, date, isLiveMode):\n",
" url = \"https://www.dropbox.com/s/2az14r5xbx4w5j6/daily-stock-picker-live.csv?dl=1\" if isLiveMode else \\\n",
" \"https://www.dropbox.com/s/rmiiktz0ntpff3a/daily-stock-picker-backtest.csv?dl=1\"\n",
"\n",
" return SubscriptionDataSource(url, SubscriptionTransportMedium.RemoteFile)\n",
" \n",
" def Reader(self, config, line, date, isLiveMode):\n",
" if not (line.strip() and line[0].isdigit()): return None\n",
" \n",
" stocks = StockDataSource()\n",
" stocks.Symbol = config.Symbol\n",
" \n",
" csv = line.split(',')\n",
" if isLiveMode:\n",
" stocks.Time = date\n",
" stocks[\"Symbols\"] = csv\n",
" else:\n",
" stocks.Time = datetime.strptime(csv[0], \"%Y%m%d\")\n",
" stocks[\"Symbols\"] = csv[1:]\n",
" return stocks"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.02702702702702703,
0.018867924528301886,
0.023255813953488372,
0.02127659574468085,
0.022727272727272728,
0.02127659574468085,
0.02,
0.05,
0.05263157894736842,
0.08333333333333333,
0.08333333333333333,
0,
0.07142857142857142,
0.019801980198019802,
0.0196078431372549,
0.024390243902439025,
0.06666666666666667,
0.022222222222222223,
0.022727272727272728,
0.0196078431372549,
0.016129032258064516,
0,
0,
0,
0.01639344262295082,
0,
0.05555555555555555,
0.05555555555555555,
0.1111111111111111,
0.011363636363636364,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0.017543859649122806,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.2,
0,
0,
0.1111111111111111,
0.02857142857142857,
0.2,
0,
0.008928571428571428,
0.010752688172043012,
0,
0.012048192771084338,
0.2,
0,
0.015384615384615385,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 98 | 0.026386 | false |
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""CUDA implementation of n-dimensional Cartesian spaces."""
from builtins import int
import numpy as np
from odl.set.sets import RealNumbers
from odl.space.base_ntuples import FnBase, FnBaseVector
from odl.space.weighting import (
Weighting, ArrayWeighting, ConstWeighting, NoWeighting,
CustomInner, CustomNorm, CustomDist)
from odl.util.utility import dtype_repr, signature_string
from odlcuda.ufuncs import CudaFnUfuncs
try:
import odlcuda.odlcuda_ as backend
CUDA_AVAILABLE = True
except ImportError:
backend = None
CUDA_AVAILABLE = False
__all__ = ('CudaFn', 'CudaFnVector',
'CUDA_DTYPES', 'CUDA_AVAILABLE',
'CudaFnConstWeighting', 'CudaFnArrayWeighting')
def _get_int_type():
"""Return the correct int vector type on the current platform."""
if np.dtype(np.int).itemsize == 4:
return 'CudaVectorInt32'
elif np.dtype(np.int).itemsize == 8:
return 'CudaVectorInt64'
else:
return 'CudaVectorIntNOT_AVAILABLE'
def _add_if_exists(dtype, name):
"""Add ``dtype`` to ``CUDA_DTYPES`` if it's available."""
if hasattr(backend, name):
_TYPE_MAP_NPY2CUDA[np.dtype(dtype)] = getattr(backend, name)
CUDA_DTYPES.append(np.dtype(dtype))
# A list of all available dtypes
CUDA_DTYPES = []
# Typemap from numpy dtype to implementations
_TYPE_MAP_NPY2CUDA = {}
# Initialize the available dtypes
_add_if_exists(np.float, 'CudaVectorFloat64')
_add_if_exists(np.float32, 'CudaVectorFloat32')
_add_if_exists(np.float64, 'CudaVectorFloat64')
_add_if_exists(np.int, _get_int_type())
_add_if_exists(np.int8, 'CudaVectorInt8')
_add_if_exists(np.int16, 'CudaVectorInt16')
_add_if_exists(np.int32, 'CudaVectorInt32')
_add_if_exists(np.int64, 'CudaVectorInt64')
_add_if_exists(np.uint8, 'CudaVectorUInt8')
_add_if_exists(np.uint16, 'CudaVectorUInt16')
_add_if_exists(np.uint32, 'CudaVectorUInt32')
_add_if_exists(np.uint64, 'CudaVectorUInt64')
CUDA_DTYPES = list(set(CUDA_DTYPES)) # Remove duplicates
class CudaFn(FnBase):
"""The space `FnBase`, implemented in CUDA.
Requires the compiled ODL extension ``odlcuda``.
"""
def __init__(self, size, dtype='float32', **kwargs):
"""Initialize a new instance.
Parameters
----------
size : positive `int`
The number of dimensions of the space
dtype : `object`
The data type of the storage array. Can be provided in any
way the `numpy.dtype` function understands, most notably
as built-in type, as one of NumPy's internal datatype
objects or as string.
Only scalar data types are allowed.
weighting : optional
Use weighted inner product, norm, and dist. The following
types are supported as ``weight``:
`FnWeightingBase` :
Use this weighting as-is. Compatibility with this
space's elements is not checked during init.
`float` :
Weighting by a constant
`array-like` :
Weighting by a vector (1-dim. array, corresponds to
a diagonal matrix). Note that the array is stored in
main memory, which results in slower space functions
due to a copy during evaluation.
`CudaFnVector` :
same as 1-dim. array-like, except that copying is
avoided if the ``dtype`` of the vector is the
same as this space's ``dtype``.
Default: no weighting
This option cannot be combined with ``dist``, ``norm``
or ``inner``.
exponent : positive `float`, optional
Exponent of the norm. For values other than 2.0, no
inner product is defined.
This option is ignored if ``dist``, ``norm`` or
``inner`` is given.
Default: 2.0
dist : `callable`, optional
The distance function defining a metric on the space.
It must accept two `FnVector` arguments and
fulfill the following mathematical conditions for any
three vectors ``x, y, z``:
- ``dist(x, y) >= 0``
- ``dist(x, y) = 0`` if and only if ``x = y``
- ``dist(x, y) = dist(y, x)``
- ``dist(x, y) <= dist(x, z) + dist(z, y)``
This option cannot be combined with ``weight``,
``norm`` or ``inner``.
norm : `callable`, optional
The norm implementation. It must accept an
`FnVector` argument, return a `float` and satisfy the
following conditions for all vectors ``x, y`` and scalars
``s``:
- ``||x|| >= 0``
- ``||x|| = 0`` if and only if ``x = 0``
- ``||s * x|| = |s| * ||x||``
- ``||x + y|| <= ||x|| + ||y||``
By default, ``norm(x)`` is calculated as ``inner(x, x)``.
This option cannot be combined with ``weight``,
``dist`` or ``inner``.
inner : `callable`, optional
The inner product implementation. It must accept two
`FnVector` arguments, return a element from
the field of the space (real or complex number) and
satisfy the following conditions for all vectors
``x, y, z`` and scalars ``s``:
- ``<x, y> = conj(<y, x>)``
- ``<s*x + y, z> = s * <x, z> + <y, z>``
- ``<x, x> = 0`` if and only if ``x = 0``
This option cannot be combined with ``weight``,
``dist`` or ``norm``.
"""
if np.dtype(dtype) not in _TYPE_MAP_NPY2CUDA.keys():
raise TypeError('data type {!r} not supported in CUDA'
''.format(dtype))
super(CudaFn, self).__init__(size, dtype)
self._vector_impl = _TYPE_MAP_NPY2CUDA[self.dtype]
dist = kwargs.pop('dist', None)
norm = kwargs.pop('norm', None)
inner = kwargs.pop('inner', None)
weighting = kwargs.pop('weighting', None)
exponent = kwargs.pop('exponent', 2.0)
# Check validity of option combination (3 or 4 out of 4 must be None)
if sum(x is None for x in (dist, norm, inner, weighting)) < 3:
raise ValueError('invalid combination of options `weight`, '
'`dist`, `norm` and `inner`')
if weighting is not None:
if isinstance(weighting, Weighting):
self.__weighting = weighting
elif np.isscalar(weighting):
self.__weighting = CudaFnConstWeighting(
weighting, exponent=exponent)
elif isinstance(weighting, CudaFnVector):
self.__weighting = CudaFnArrayWeighting(
weighting, exponent=exponent)
else:
# Must make a CudaFnVector from the array
weighting = self.element(np.asarray(weighting))
if weighting.ndim == 1:
self.__weighting = CudaFnArrayWeighting(
weighting, exponent=exponent)
else:
raise ValueError('invalid weighting argument {!r}'
''.format(weighting))
elif dist is not None:
self.__weighting = CudaFnCustomDist(dist)
elif norm is not None:
self.__weighting = CudaFnCustomNorm(norm)
elif inner is not None:
# Use fast dist implementation
self.__weighting = CudaFnCustomInner(inner)
else: # all None -> no weighing
self.__weighting = CudaFnNoWeighting(exponent)
@property
def exponent(self):
"""Exponent of the norm and distance."""
return self.weighting.exponent
@property
def weighting(self):
"""This space's weighting scheme."""
return self.__weighting
@property
def is_weighted(self):
"""Return `True` if the weighting is not `CudaFnNoWeighting`."""
return not isinstance(self.weighting, CudaFnNoWeighting)
def element(self, inp=None, data_ptr=None):
"""Create a new element.
Parameters
----------
inp : `array-like` or scalar, optional
Input to initialize the new element.
If ``inp`` is a `numpy.ndarray` of shape ``(size,)``
and the same data type as this space, the array is wrapped,
not copied.
Other array-like objects are copied (with broadcasting
if necessary).
If a single value is given, it is copied to all entries.
If both ``inp`` and ``data_ptr`` are `None`, an empty
element is created with no guarantee of its state
(memory allocation only).
data_ptr : `int`, optional
Memory address of a CUDA array container
Cannot be combined with ``inp``.
Returns
-------
element : `CudaFnVector`
The new element
Notes
-----
This method preserves "array views" of correct size and type,
see the examples below.
TODO: No, it does not yet!
Examples
--------
>>> uc3 = CudaFn(3, 'uint8')
>>> x = uc3.element(np.array([1, 2, 3], dtype='uint8'))
>>> x
CudaFn(3, 'uint8').element([1, 2, 3])
>>> y = uc3.element([1, 2, 3])
>>> y
CudaFn(3, 'uint8').element([1, 2, 3])
"""
if inp is None:
if data_ptr is None:
return self.element_type(self, self._vector_impl(self.size))
else: # TODO: handle non-1 length strides
return self.element_type(
self, self._vector_impl.from_pointer(data_ptr, self.size,
1))
else:
if data_ptr is None:
if isinstance(inp, self._vector_impl):
return self.element_type(self, inp)
elif isinstance(inp, self.element_type):
if inp in self:
return inp
else:
# Bulk-copy for non-matching dtypes
elem = self.element()
elem[:] = inp
return elem
else:
# Array-like input. Need to go through a NumPy array
arr = np.array(inp, copy=False, dtype=self.dtype, ndmin=1)
if arr.shape != (self.size,):
raise ValueError('expected input shape {}, got {}'
''.format((self.size,), arr.shape))
elem = self.element()
elem[:] = arr
return elem
else:
raise ValueError('cannot provide both `inp` and `data_ptr`')
def _lincomb(self, a, x1, b, x2, out):
"""Linear combination of ``x1`` and ``x2``, assigned to ``out``.
Calculate ``z = a * x + b * y`` using optimized CUDA routines.
Parameters
----------
a, b : `field` element
Scalar to multiply ``x`` and ``y`` with.
x, y : `CudaFnVector`
The summands.
out : `CudaFnVector`
The Vector that the result is written to.
Returns
-------
`None`
Examples
--------
>>> r3 = CudaFn(3)
>>> x = r3.element([1, 2, 3])
>>> y = r3.element([4, 5, 6])
>>> out = r3.element()
>>> r3.lincomb(2, x, 3, y, out) # out is returned
CudaFn(3).element([14.0, 19.0, 24.0])
>>> out
CudaFn(3).element([14.0, 19.0, 24.0])
"""
out.data.lincomb(a, x1.data, b, x2.data)
def _inner(self, x1, x2):
"""Calculate the inner product of x and y.
Parameters
----------
x1, x2 : `CudaFnVector`
Returns
-------
inner: `float` or `complex`
The inner product of x and y
Examples
--------
>>> uc3 = CudaFn(3, 'uint8')
>>> x = uc3.element([1, 2, 3])
>>> y = uc3.element([3, 1, 5])
>>> uc3.inner(x, y)
20.0
"""
return self.weighting.inner(x1, x2)
def _integral(self, x):
"""Raw integral of vector.
Parameters
----------
x : `CudaFnVector`
The vector whose integral should be computed.
Returns
-------
inner : `field` element
Inner product of the vectors
Examples
--------
>>> r3 = CudaFn(2, dtype='float32')
>>> x = r3.element([3, -1])
>>> r3.integral(x)
2.0
Notes
-----
Integration of vectors is defined as the sum of the elements
of the vector, i.e. the discrete measure.
In weighted spaces, the unweighted measure is used for the integral.
"""
return x.ufuncs.sum()
def _dist(self, x1, x2):
"""Calculate the distance between two vectors.
Parameters
----------
x1, x2 : `CudaFnVector`
The vectors whose mutual distance is calculated
Returns
-------
dist : `float`
Distance between the vectors
Examples
--------
>>> r2 = CudaFn(2)
>>> x = r2.element([3, 8])
>>> y = r2.element([0, 4])
>>> r2.dist(x, y)
5.0
"""
return self.weighting.dist(x1, x2)
def _norm(self, x):
"""Calculate the norm of ``x``.
This method is implemented separately from ``sqrt(inner(x,x))``
for efficiency reasons.
Parameters
----------
x : `CudaFnVector`
Returns
-------
norm : `float`
The norm of x
Examples
--------
>>> uc3 = CudaFn(3, 'uint8')
>>> x = uc3.element([2, 3, 6])
>>> uc3.norm(x)
7.0
"""
return self.weighting.norm(x)
def _multiply(self, x1, x2, out):
"""The pointwise product of two vectors, assigned to ``out``.
This is defined as:
multiply(x, y, out) := [x[0]*y[0], x[1]*y[1], ..., x[n-1]*y[n-1]]
Parameters
----------
x1, x2 : `CudaFnVector`
Factors in product
out : `CudaFnVector`
Element to which the result is written
Returns
-------
`None`
Examples
--------
>>> rn = CudaFn(3)
>>> x1 = rn.element([5, 3, 2])
>>> x2 = rn.element([1, 2, 3])
>>> out = rn.element()
>>> rn.multiply(x1, x2, out) # out is returned
CudaFn(3).element([5.0, 6.0, 6.0])
>>> out
CudaFn(3).element([5.0, 6.0, 6.0])
"""
out.data.multiply(x1.data, x2.data)
def _divide(self, x1, x2, out):
"""The pointwise division of two vectors, assigned to ``out``.
This is defined as:
multiply(z, x, y) := [x[0]/y[0], x[1]/y[1], ..., x[n-1]/y[n-1]]
Parameters
----------
x1, x2 : `CudaFnVector`
Factors in the product
out : `CudaFnVector`
Element to which the result is written
Returns
-------
None
Examples
--------
>>> rn = CudaFn(3)
>>> x1 = rn.element([5, 3, 2])
>>> x2 = rn.element([1, 2, 2])
>>> out = rn.element()
>>> rn.divide(x1, x2, out) # out is returned
CudaFn(3).element([5.0, 1.5, 1.0])
>>> out
CudaFn(3).element([5.0, 1.5, 1.0])
"""
out.data.divide(x1.data, x2.data)
def zero(self):
"""Create a vector of zeros."""
return self.element_type(self, self._vector_impl(self.size, 0))
def one(self):
"""Create a vector of ones."""
return self.element_type(self, self._vector_impl(self.size, 1))
def __eq__(self, other):
"""s.__eq__(other) <==> s == other.
Returns
-------
equals : `bool`
`True` if other is an instance of this space's type
with the same ``size``, ``dtype`` and space functions,
otherwise `False`.
Examples
--------
>>> from numpy.linalg import norm
>>> def dist(x, y, p):
... return norm(x - y, ord=p)
>>> from functools import partial
>>> dist2 = partial(dist, p=2)
>>> r3 = CudaFn(3, dist=dist2)
>>> r3_same = CudaFn(3, dist=dist2)
>>> r3 == r3_same
True
Different ``dist`` functions result in different spaces - the
same applies for ``norm`` and ``inner``:
>>> dist1 = partial(dist, p=1)
>>> r3_1 = CudaFn(3, dist=dist1)
>>> r3_2 = CudaFn(3, dist=dist2)
>>> r3_1 == r3_2
False
Be careful with Lambdas - they result in non-identical function
objects:
>>> r3_lambda1 = CudaFn(3, dist=lambda x, y: norm(x-y, p=1))
>>> r3_lambda2 = CudaFn(3, dist=lambda x, y: norm(x-y, p=1))
>>> r3_lambda1 == r3_lambda2
False
"""
if other is self:
return True
return (super(CudaFn, self).__eq__(other) and
self.weighting == other.weighting)
@property
def impl(self):
"""Name of the implementation: ``'cuda'``."""
return 'cuda'
@staticmethod
def available_dtypes():
"""Return the available data types."""
return CUDA_DTYPES
@staticmethod
def default_dtype(field=None):
"""Return the default of `CudaFn` data type for a given field.
Parameters
----------
field : `Field`
Set of numbers to be represented by a data type.
Currently supported: `RealNumbers`.
Returns
-------
dtype : `type`
Numpy data type specifier. The returned defaults are:
``RealNumbers()`` : , ``np.dtype('float32')``
"""
if field is None or field == RealNumbers():
return np.dtype('float32')
else:
raise ValueError('no default data type defined for field {}'
''.format(field))
def __repr__(self):
"""Return ``repr(self)``."""
if self.is_real:
ctor = 'rn'
elif self.is_complex:
ctor = 'cn'
else:
ctor = 'fn'
posargs = [self.size]
default_dtype_str = dtype_repr(self.default_dtype(self.field))
optargs = [('dtype', dtype_repr(self.dtype), default_dtype_str),
('impl', self.impl, 'numpy')]
inner_str = signature_string(posargs, optargs)
weight_str = self.weighting.repr_part
if weight_str:
inner_str += ', ' + weight_str
return '{}({})'.format(ctor, inner_str)
def __str__(self):
"""Return ``str(self)``."""
return repr(self)
@property
def element_type(self):
""" `CudaFnVector` """
return CudaFnVector
class CudaFnVector(FnBaseVector):
"""Representation of a `CudaFn` element."""
def __init__(self, space, data):
"""Initialize a new instance."""
super(CudaFnVector, self).__init__(space)
self.__data = data
@property
def data(self):
"""The data container of this vector, type ``CudaFnImplVector``."""
return self.__data
@property
def data_ptr(self):
"""A raw pointer to the data of this vector."""
return self.data.data_ptr()
def __eq__(self, other):
"""Return ``self == other``.
Returns
-------
equals : `bool`
`True` if all elements of ``other`` are equal to this
vector's elements, `False` otherwise
Examples
--------
>>> r3 = CudaFn(3, 'float32')
>>> x = r3.element([1, 2, 3])
>>> x == x
True
>>> y = r3.element([1, 2, 3])
>>> x == y
True
>>> y = r3.element([0, 0, 0])
>>> x == y
False
>>> r3_2 = CudaFn(3, 'uint8')
>>> z = r3_2.element([1, 2, 3])
>>> x != z
True
"""
if other is self:
return True
elif other not in self.space:
return False
else:
return self.data == other.data
def copy(self):
"""Create an identical (deep) copy of this vector.
Returns
-------
copy : `CudaFnVector`
The deep copy
Examples
--------
>>> vec1 = CudaFn(3, 'uint8').element([1, 2, 3])
>>> vec2 = vec1.copy()
>>> vec2
CudaFn(3, 'uint8').element([1, 2, 3])
>>> vec1 == vec2
True
>>> vec1 is vec2
False
"""
return self.space.element_type(self.space, self.data.copy())
def asarray(self, start=None, stop=None, step=None, out=None):
"""Extract the data of this array as a numpy array.
Parameters
----------
start : `int`, optional
Start position. None means the first element.
start : `int`, optional
One element past the last element to be extracted.
None means the last element.
start : `int`, optional
Step length. None means 1.
out : `numpy.ndarray`
Array in which the result should be written in-place.
Has to be contiguous and of the correct dtype.
Returns
-------
asarray : `numpy.ndarray`
Numpy array of the same type as the space.
Examples
--------
>>> uc3 = CudaFn(3, 'uint8')
>>> y = uc3.element([1, 2, 3])
>>> y.asarray()
array([1, 2, 3], dtype=uint8)
>>> y.asarray(1, 3)
array([2, 3], dtype=uint8)
Using the out parameter
>>> out = np.empty((3,), dtype='uint8')
>>> result = y.asarray(out=out)
>>> out
array([1, 2, 3], dtype=uint8)
>>> result is out
True
"""
if out is None:
return self.data.get_to_host(slice(start, stop, step))
else:
self.data.copy_device_to_host(slice(start, stop, step), out)
return out
def __getitem__(self, indices):
"""Access values of this vector.
This will cause the values to be copied to CPU
which is a slow operation.
Parameters
----------
indices : `int` or `slice`
The position(s) that should be accessed
Returns
-------
values : scalar or `CudaFnVector`
The value(s) at the index (indices)
Examples
--------
>>> uc3 = CudaFn(3, 'uint8')
>>> y = uc3.element([1, 2, 3])
>>> y[0]
1
>>> z = y[1:3]
>>> z
CudaFn(2, 'uint8').element([2, 3])
>>> y[::2]
CudaFn(2, 'uint8').element([1, 3])
>>> y[::-1]
CudaFn(3, 'uint8').element([3, 2, 1])
The returned value is a view, modifications are reflected
in the original data:
>>> z[:] = [4, 5]
>>> y
CudaFn(3, 'uint8').element([1, 4, 5])
"""
if isinstance(indices, slice):
data = self.data.getslice(indices)
return type(self.space)(data.size, data.dtype).element(data)
else:
return self.data.__getitem__(indices)
def __setitem__(self, indices, values):
"""Set values of this vector.
This will cause the values to be copied to CPU
which is a slow operation.
Parameters
----------
indices : `int` or `slice`
The position(s) that should be set
values : scalar, `array-like` or `CudaFnVector`
The value(s) that are to be assigned.
If ``index`` is an `int`, ``value`` must be single value.
If ``index`` is a `slice`, ``value`` must be broadcastable
to the size of the slice (same size, shape (1,)
or single value).
Returns
-------
`None`
Examples
--------
>>> uc3 = CudaFn(3, 'uint8')
>>> y = uc3.element([1, 2, 3])
>>> y[0] = 5
>>> y
CudaFn(3, 'uint8').element([5, 2, 3])
>>> y[1:3] = [7, 8]
>>> y
CudaFn(3, 'uint8').element([5, 7, 8])
>>> y[:] = np.array([0, 0, 0])
>>> y
CudaFn(3, 'uint8').element([0, 0, 0])
Scalar assignment
>>> y[:] = 5
>>> y
CudaFn(3, 'uint8').element([5, 5, 5])
"""
if (isinstance(values, type(self)) and
indices in (slice(None), Ellipsis)):
self.assign(values) # use lincomb magic
else:
if isinstance(indices, slice):
# Convert value to the correct type if needed
value_array = np.asarray(values, dtype=self.space.dtype)
if value_array.ndim == 0:
self.data.fill(values)
else:
# Size checking is performed in c++
self.data.setslice(indices, value_array)
else:
self.data[int(indices)] = values
@property
def ufuncs(self):
"""`CudaFnUfuncs`, access to numpy style ufuncs.
Examples
--------
>>> r2 = CudaFn(2)
>>> x = r2.element([1, -2])
>>> x.ufuncs.absolute()
CudaFn(2).element([1.0, 2.0])
These functions can also be used with broadcasting
>>> x.ufuncs.add(3)
CudaFn(2).element([4.0, 1.0])
and non-space elements
>>> x.ufuncs.subtract([3, 3])
CudaFn(2).element([-2.0, -5.0])
There is also support for various reductions (sum, prod, min, max)
>>> x.ufuncs.sum()
-1.0
Also supports out parameter
>>> y = r2.element([3, 4])
>>> out = r2.element()
>>> result = x.ufuncs.add(y, out=out)
>>> result
CudaFn(2).element([4.0, 2.0])
>>> result is out
True
Notes
-----
Not all ufuncs are currently optimized, some use the default numpy
implementation. This can be improved in the future.
See also
--------
odl.util.ufuncs.FnBaseUfuncs
Base class for ufuncs in `FnBase` spaces.
"""
return CudaFnUfuncs(self)
def _weighting(weighting, exponent):
"""Return a weighting whose type is inferred from the arguments."""
if np.isscalar(weighting):
weighting = CudaFnConstWeighting(
weighting, exponent)
elif isinstance(weighting, CudaFnVector):
weighting = CudaFnArrayWeighting(
weighting, exponent=exponent)
else:
weight_ = np.asarray(weighting)
if weight_.dtype == object:
raise ValueError('bad weighting {}'.format(weighting))
if weight_.ndim == 1:
weighting = CudaFnArrayWeighting(
weight_, exponent)
elif weight_.ndim == 2:
raise NotImplementedError('matrix weighting not implemented '
'for CUDA spaces')
# weighting = CudaFnMatrixWeighting(
# weight_, exponent)
else:
raise ValueError('array-like weight must have 1 or 2 dimensions, '
'but {} has {} dimensions'
''.format(weighting, weighting.ndim))
return weighting
def _dist_default(x1, x2):
"""Default Euclidean distance implementation."""
return x1.data.dist(x2.data)
def _pdist_default(x1, x2, p):
"""Default p-distance implementation."""
if p == float('inf'):
raise NotImplementedError('inf-norm not implemented')
return x1.data.dist_power(x2.data, p)
def _pdist_diagweight(x1, x2, p, w):
"""Diagonally weighted p-distance implementation."""
return x1.data.dist_weight(x2.data, p, w.data)
def _norm_default(x):
"""Default Euclidean norm implementation."""
return x.data.norm()
def _pnorm_default(x, p):
"""Default p-norm implementation."""
if p == float('inf'):
raise NotImplementedError('inf-norm not implemented')
return x.data.norm_power(p)
def _pnorm_diagweight(x, p, w):
"""Diagonally weighted p-norm implementation."""
if p == float('inf'):
raise NotImplementedError('inf-norm not implemented')
return x.data.norm_weight(p, w.data)
def _inner_default(x1, x2):
"""Default Euclidean inner product implementation."""
return x1.data.inner(x2.data)
def _inner_diagweight(x1, x2, w):
return x1.data.inner_weight(x2.data, w.data)
class CudaFnArrayWeighting(ArrayWeighting):
"""Vector weighting for `CudaFn`.
For exponent 2.0, a new weighted inner product with vector ``w``
is defined as::
<a, b>_w := <w * a, b> = b^H (w * a)
with ``b^H`` standing for transposed complex conjugate and
``w * a`` for element-wise multiplication.
For other exponents, only norm and dist are defined. In the case of
exponent ``inf``, the weighted norm is
||a||_{w, inf} := ||w * a||_inf
otherwise it is::
||a||_{w, p} := ||w^{1/p} * a||
Note that this definition does **not** fulfill the limit property
in ``p``, i.e.::
||x||_{w, p} --/-> ||x||_{w, inf} for p --> inf
unless ``w = (1,...,1)``.
The vector may only have positive entries, otherwise it does not
define an inner product or norm, respectively. This is not checked
during initialization.
"""
def __init__(self, vector, exponent=2.0):
"""Initialize a new instance.
Parameters
----------
vector : `CudaFnVector`
Weighting vector of the inner product, norm and distance
exponent : positive `float`
Exponent of the norm. For values other than 2.0, the inner
product is not defined.
"""
if not isinstance(vector, CudaFnVector):
raise TypeError('vector {!r} is not a CudaFnVector instance'
''.format(vector))
super(CudaFnArrayWeighting, self).__init__(
vector, impl='cuda', exponent=exponent)
def inner(self, x1, x2):
"""Calculate the vector weighted inner product of two vectors.
Parameters
----------
x1, x2 : `CudaFnVector`
Vectors whose inner product is calculated
Returns
-------
inner : `float` or `complex`
The inner product of the two provided vectors
"""
if self.exponent != 2.0:
raise NotImplementedError('No inner product defined for '
'exponent != 2 (got {})'
''.format(self.exponent))
else:
return _inner_diagweight(x1, x2, self.array)
def norm(self, x):
"""Calculate the vector-weighted norm of a vector.
Parameters
----------
x : `CudaFnVector`
Vector whose norm is calculated
Returns
-------
norm : `float`
The norm of the provided vector
"""
if self.exponent == float('inf'):
raise NotImplementedError('inf norm not implemented yet')
else:
return _pnorm_diagweight(x, self.exponent, self.array)
def dist(self, x1, x2):
"""Calculate the vector-weighted distance between two vectors.
Parameters
----------
x1, x2 : `CudaFnVector`
Vectors whose mutual distance is calculated
Returns
-------
dist : `float`
The distance between the vectors
"""
if self.exponent == float('inf'):
raise NotImplementedError('inf norm not implemented yet')
else:
return _pdist_diagweight(x1, x2, self.exponent, self.array)
class CudaFnConstWeighting(ConstWeighting):

    """Constant weighting scheme for `CudaFn` spaces.

    With a positive constant ``c`` and exponent 2.0, the weighted
    inner product is::

        <a, b>_c = c * <a, b> = c * b^H a

    where ``b^H`` denotes the transposed complex conjugate of ``b``.

    For exponents other than 2.0, only norm and distance are defined.
    With exponent ``inf`` the weighted norm is::

        ||a||_{c, inf} := c ||a||_inf

    and for finite exponent ``p`` it is::

        ||a||_{c, p} := c^{1/p} ||a||_p

    This definition does **not** satisfy the limit property in ``p``,
    i.e.::

        ||a||_{c,p} --/-> ||a||_{c,inf} for p --> inf

    except when ``c = 1``. A non-positive constant does not define a
    valid inner product or norm.
    """

    def __init__(self, constant, exponent=2.0):
        """Initialize a new instance.

        Parameters
        ----------
        constant : positive finite `float`
            Weighting constant of the inner product.
        exponent : positive `float`
            Exponent of the norm. The inner product is only defined
            for exponent 2.0.
        """
        super(CudaFnConstWeighting, self).__init__(
            constant, impl='cuda', exponent=exponent)

    def inner(self, x1, x2):
        """Return the constant-weighted inner product of two vectors.

        Parameters
        ----------
        x1, x2 : `CudaFnVector`
            Vectors whose inner product is calculated

        Returns
        -------
        inner : `float` or `complex`
            The inner product of the two vectors

        Raises
        ------
        NotImplementedError
            If ``exponent`` is not 2.0, since no inner product is
            defined in that case.
        """
        if self.exponent != 2.0:
            raise NotImplementedError('no inner product defined for '
                                      'exponent != 2 (got {})'
                                      ''.format(self.exponent))
        return self.const * _inner_default(x1, x2)

    def norm(self, x):
        """Return the constant-weighted norm of a vector.

        Parameters
        ----------
        x1 : `CudaFnVector`
            Vector whose norm is calculated

        Returns
        -------
        norm : `float`
            The norm of the vector

        Raises
        ------
        NotImplementedError
            If ``exponent`` is ``inf``; the inf-norm variant is not
            available in this backend.
        """
        if self.exponent == float('inf'):
            # Would be ``self.const * float(_pnorm_default(x, inf))``,
            # but the backend provides no inf-norm implementation.
            raise NotImplementedError
        scaling = self.const ** (1 / self.exponent)
        return scaling * float(_pnorm_default(x, self.exponent))

    def dist(self, x1, x2):
        """Return the constant-weighted distance between two vectors.

        Parameters
        ----------
        x1, x2 : `CudaFnVector`
            Vectors whose mutual distance is calculated

        Returns
        -------
        dist : `float`
            The distance between the vectors

        Raises
        ------
        NotImplementedError
            If ``exponent`` is ``inf``; see `norm`.
        """
        if self.exponent == float('inf'):
            raise NotImplementedError
        scaling = self.const ** (1 / self.exponent)
        return scaling * _pdist_default(x1, x2, self.exponent)
class CudaFnNoWeighting(NoWeighting, CudaFnConstWeighting):
    """Weighting of `CudaFn` with constant 1.

    For exponent 2.0, the unweighted inner product is defined as::

        <a, b> := b^H a

    with ``b^H`` standing for transposed complex conjugate.

    For other exponents, only norm and dist are defined.
    """
    # Implement singleton pattern for efficiency in the default case
    # Cached instance for the default ``exponent == 2.0`` construction.
    _instance = None
    def __new__(cls, *args, **kwargs):
        """Implement singleton pattern if ``exponent==2.0``."""
        # The exponent may arrive positionally or as a keyword; extract
        # it either way so the remaining arguments can be forwarded.
        if len(args) == 0:
            exponent = kwargs.pop('exponent', 2.0)
        else:
            exponent = args[0]
            args = args[1:]
        if exponent == 2.0:
            # Default case: create the shared instance once and reuse it.
            if not cls._instance:
                # NOTE(review): super() is anchored at CudaFnConstWeighting
                # rather than CudaFnNoWeighting, so attribute lookup starts
                # past CudaFnConstWeighting in the MRO — presumably to skip
                # intermediate ``__new__`` implementations; confirm against
                # the base classes.
                cls._instance = super(CudaFnConstWeighting, cls).__new__(
                    cls, *args, **kwargs)
            return cls._instance
        else:
            # Non-default exponent: no caching, plain construction.
            return super(CudaFnConstWeighting, cls).__new__(
                cls, *args, **kwargs)
    def __init__(self, exponent=2.0):
        """Initialize a new instance.

        Parameters
        ----------
        exponent : positive `float`
            Exponent of the norm. For values other than 2.0, the inner
            product is not defined.
        """
        # The weighting constant 1.0 is presumably supplied by the
        # `NoWeighting` base reached via the MRO — TODO confirm.
        super(CudaFnNoWeighting, self).__init__(exponent=exponent, impl='cuda')
class CudaFnCustomInner(CustomInner):

    """User-defined inner product on `CudaFn`."""

    def __init__(self, inner):
        """Initialize a new instance.

        Parameters
        ----------
        inner : `callable`
            Function implementing the inner product. It takes two
            `FnVector` arguments and returns an element of their
            space's field (a real or complex number). For all vectors
            ``x, y, z`` and scalars ``s`` it must satisfy:

            - ``<x, y> = conj(<y, x>)``
            - ``<s*x + y, z> = s * <x, z> + <y, z>``
            - ``<x, x> = 0`` if and only if ``x = 0``
        """
        super(CudaFnCustomInner, self).__init__(inner, impl='cuda')
class CudaFnCustomNorm(CustomNorm):

    """User-defined norm on `CudaFn`.

    Note that this removes ``inner``.
    """

    def __init__(self, norm):
        """Initialize a new instance.

        Parameters
        ----------
        norm : `callable`
            Function implementing the norm. It takes one
            `CudaFnVector` argument and returns a `float`. For all
            vectors ``x, y`` and scalars ``s`` it must satisfy:

            - ``||x|| >= 0``
            - ``||x|| = 0`` if and only if ``x = 0``
            - ``||s * x|| = |s| * ||x||``
            - ``||x + y|| <= ||x|| + ||y||``
        """
        super(CudaFnCustomNorm, self).__init__(norm, impl='cuda')
class CudaFnCustomDist(CustomDist):

    """User-defined distance on `CudaFn`.

    Note that this removes ``inner`` and ``norm``.
    """

    def __init__(self, dist):
        """Initialize a new instance.

        Parameters
        ----------
        dist : `callable`
            Function defining a metric on `Fn`. It takes two
            `FnVector` arguments and returns a `float`. For any three
            vectors ``x, y, z`` it must satisfy:

            - ``dist(x, y) >= 0``
            - ``dist(x, y) = 0`` if and only if ``x = y``
            - ``dist(x, y) = dist(y, x)``
            - ``dist(x, y) <= dist(x, z) + dist(z, y)``
        """
        super(CudaFnCustomDist, self).__init__(dist, impl='cuda')
if __name__ == '__main__':
    # pylint: disable=wrong-import-position
    # Run this module's doctest examples when executed as a script.
    from odl.util.testutils import run_doctests
    run_doctests()
| [
"# Copyright 2014-2016 The ODL development group\n",
"#\n",
"# This file is part of ODL.\n",
"#\n",
"# ODL is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# ODL is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with ODL. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\"\"\"CUDA implementation of n-dimensional Cartesian spaces.\"\"\"\n",
"\n",
"from builtins import int\n",
"import numpy as np\n",
"\n",
"from odl.set.sets import RealNumbers\n",
"from odl.space.base_ntuples import FnBase, FnBaseVector\n",
"from odl.space.weighting import (\n",
" Weighting, ArrayWeighting, ConstWeighting, NoWeighting,\n",
" CustomInner, CustomNorm, CustomDist)\n",
"from odl.util.utility import dtype_repr, signature_string\n",
"\n",
"from odlcuda.ufuncs import CudaFnUfuncs\n",
"\n",
"try:\n",
" import odlcuda.odlcuda_ as backend\n",
" CUDA_AVAILABLE = True\n",
"except ImportError:\n",
" backend = None\n",
" CUDA_AVAILABLE = False\n",
"\n",
"\n",
"__all__ = ('CudaFn', 'CudaFnVector',\n",
" 'CUDA_DTYPES', 'CUDA_AVAILABLE',\n",
" 'CudaFnConstWeighting', 'CudaFnArrayWeighting')\n",
"\n",
"\n",
"def _get_int_type():\n",
" \"\"\"Return the correct int vector type on the current platform.\"\"\"\n",
" if np.dtype(np.int).itemsize == 4:\n",
" return 'CudaVectorInt32'\n",
" elif np.dtype(np.int).itemsize == 8:\n",
" return 'CudaVectorInt64'\n",
" else:\n",
" return 'CudaVectorIntNOT_AVAILABLE'\n",
"\n",
"\n",
"def _add_if_exists(dtype, name):\n",
" \"\"\"Add ``dtype`` to ``CUDA_DTYPES`` if it's available.\"\"\"\n",
" if hasattr(backend, name):\n",
" _TYPE_MAP_NPY2CUDA[np.dtype(dtype)] = getattr(backend, name)\n",
" CUDA_DTYPES.append(np.dtype(dtype))\n",
"\n",
"\n",
"# A list of all available dtypes\n",
"CUDA_DTYPES = []\n",
"\n",
"# Typemap from numpy dtype to implementations\n",
"_TYPE_MAP_NPY2CUDA = {}\n",
"\n",
"# Initialize the available dtypes\n",
"_add_if_exists(np.float, 'CudaVectorFloat64')\n",
"_add_if_exists(np.float32, 'CudaVectorFloat32')\n",
"_add_if_exists(np.float64, 'CudaVectorFloat64')\n",
"_add_if_exists(np.int, _get_int_type())\n",
"_add_if_exists(np.int8, 'CudaVectorInt8')\n",
"_add_if_exists(np.int16, 'CudaVectorInt16')\n",
"_add_if_exists(np.int32, 'CudaVectorInt32')\n",
"_add_if_exists(np.int64, 'CudaVectorInt64')\n",
"_add_if_exists(np.uint8, 'CudaVectorUInt8')\n",
"_add_if_exists(np.uint16, 'CudaVectorUInt16')\n",
"_add_if_exists(np.uint32, 'CudaVectorUInt32')\n",
"_add_if_exists(np.uint64, 'CudaVectorUInt64')\n",
"CUDA_DTYPES = list(set(CUDA_DTYPES)) # Remove duplicates\n",
"\n",
"\n",
"class CudaFn(FnBase):\n",
"\n",
" \"\"\"The space `FnBase`, implemented in CUDA.\n",
"\n",
" Requires the compiled ODL extension ``odlcuda``.\n",
" \"\"\"\n",
"\n",
" def __init__(self, size, dtype='float32', **kwargs):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" size : positive `int`\n",
" The number of dimensions of the space\n",
" dtype : `object`\n",
" The data type of the storage array. Can be provided in any\n",
" way the `numpy.dtype` function understands, most notably\n",
" as built-in type, as one of NumPy's internal datatype\n",
" objects or as string.\n",
"\n",
" Only scalar data types are allowed.\n",
"\n",
" weighting : optional\n",
" Use weighted inner product, norm, and dist. The following\n",
" types are supported as ``weight``:\n",
"\n",
" `FnWeightingBase` :\n",
" Use this weighting as-is. Compatibility with this\n",
" space's elements is not checked during init.\n",
"\n",
" `float` :\n",
" Weighting by a constant\n",
"\n",
" `array-like` :\n",
" Weighting by a vector (1-dim. array, corresponds to\n",
" a diagonal matrix). Note that the array is stored in\n",
" main memory, which results in slower space functions\n",
" due to a copy during evaluation.\n",
"\n",
" `CudaFnVector` :\n",
" same as 1-dim. array-like, except that copying is\n",
" avoided if the ``dtype`` of the vector is the\n",
" same as this space's ``dtype``.\n",
"\n",
" Default: no weighting\n",
"\n",
" This option cannot be combined with ``dist``, ``norm``\n",
" or ``inner``.\n",
"\n",
" exponent : positive `float`, optional\n",
" Exponent of the norm. For values other than 2.0, no\n",
" inner product is defined.\n",
"\n",
" This option is ignored if ``dist``, ``norm`` or\n",
" ``inner`` is given.\n",
"\n",
" Default: 2.0\n",
"\n",
" dist : `callable`, optional\n",
" The distance function defining a metric on the space.\n",
" It must accept two `FnVector` arguments and\n",
" fulfill the following mathematical conditions for any\n",
" three vectors ``x, y, z``:\n",
"\n",
" - ``dist(x, y) >= 0``\n",
" - ``dist(x, y) = 0`` if and only if ``x = y``\n",
" - ``dist(x, y) = dist(y, x)``\n",
" - ``dist(x, y) <= dist(x, z) + dist(z, y)``\n",
"\n",
" This option cannot be combined with ``weight``,\n",
" ``norm`` or ``inner``.\n",
"\n",
" norm : `callable`, optional\n",
" The norm implementation. It must accept an\n",
" `FnVector` argument, return a `float` and satisfy the\n",
" following conditions for all vectors ``x, y`` and scalars\n",
" ``s``:\n",
"\n",
" - ``||x|| >= 0``\n",
" - ``||x|| = 0`` if and only if ``x = 0``\n",
" - ``||s * x|| = |s| * ||x||``\n",
" - ``||x + y|| <= ||x|| + ||y||``\n",
"\n",
" By default, ``norm(x)`` is calculated as ``inner(x, x)``.\n",
"\n",
" This option cannot be combined with ``weight``,\n",
" ``dist`` or ``inner``.\n",
"\n",
" inner : `callable`, optional\n",
" The inner product implementation. It must accept two\n",
" `FnVector` arguments, return a element from\n",
" the field of the space (real or complex number) and\n",
" satisfy the following conditions for all vectors\n",
" ``x, y, z`` and scalars ``s``:\n",
"\n",
" - ``<x, y> = conj(<y, x>)``\n",
" - ``<s*x + y, z> = s * <x, z> + <y, z>``\n",
" - ``<x, x> = 0`` if and only if ``x = 0``\n",
"\n",
" This option cannot be combined with ``weight``,\n",
" ``dist`` or ``norm``.\n",
" \"\"\"\n",
" if np.dtype(dtype) not in _TYPE_MAP_NPY2CUDA.keys():\n",
" raise TypeError('data type {!r} not supported in CUDA'\n",
" ''.format(dtype))\n",
"\n",
" super(CudaFn, self).__init__(size, dtype)\n",
" self._vector_impl = _TYPE_MAP_NPY2CUDA[self.dtype]\n",
"\n",
" dist = kwargs.pop('dist', None)\n",
" norm = kwargs.pop('norm', None)\n",
" inner = kwargs.pop('inner', None)\n",
" weighting = kwargs.pop('weighting', None)\n",
" exponent = kwargs.pop('exponent', 2.0)\n",
"\n",
" # Check validity of option combination (3 or 4 out of 4 must be None)\n",
" if sum(x is None for x in (dist, norm, inner, weighting)) < 3:\n",
" raise ValueError('invalid combination of options `weight`, '\n",
" '`dist`, `norm` and `inner`')\n",
" if weighting is not None:\n",
" if isinstance(weighting, Weighting):\n",
" self.__weighting = weighting\n",
" elif np.isscalar(weighting):\n",
" self.__weighting = CudaFnConstWeighting(\n",
" weighting, exponent=exponent)\n",
" elif isinstance(weighting, CudaFnVector):\n",
" self.__weighting = CudaFnArrayWeighting(\n",
" weighting, exponent=exponent)\n",
" else:\n",
" # Must make a CudaFnVector from the array\n",
" weighting = self.element(np.asarray(weighting))\n",
" if weighting.ndim == 1:\n",
" self.__weighting = CudaFnArrayWeighting(\n",
" weighting, exponent=exponent)\n",
" else:\n",
" raise ValueError('invalid weighting argument {!r}'\n",
" ''.format(weighting))\n",
" elif dist is not None:\n",
" self.__weighting = CudaFnCustomDist(dist)\n",
" elif norm is not None:\n",
" self.__weighting = CudaFnCustomNorm(norm)\n",
" elif inner is not None:\n",
" # Use fast dist implementation\n",
" self.__weighting = CudaFnCustomInner(inner)\n",
" else: # all None -> no weighing\n",
" self.__weighting = CudaFnNoWeighting(exponent)\n",
"\n",
" @property\n",
" def exponent(self):\n",
" \"\"\"Exponent of the norm and distance.\"\"\"\n",
" return self.weighting.exponent\n",
"\n",
" @property\n",
" def weighting(self):\n",
" \"\"\"This space's weighting scheme.\"\"\"\n",
" return self.__weighting\n",
"\n",
" @property\n",
" def is_weighted(self):\n",
" \"\"\"Return `True` if the weighting is not `CudaFnNoWeighting`.\"\"\"\n",
" return not isinstance(self.weighting, CudaFnNoWeighting)\n",
"\n",
" def element(self, inp=None, data_ptr=None):\n",
" \"\"\"Create a new element.\n",
"\n",
" Parameters\n",
" ----------\n",
" inp : `array-like` or scalar, optional\n",
" Input to initialize the new element.\n",
"\n",
" If ``inp`` is a `numpy.ndarray` of shape ``(size,)``\n",
" and the same data type as this space, the array is wrapped,\n",
" not copied.\n",
" Other array-like objects are copied (with broadcasting\n",
" if necessary).\n",
"\n",
" If a single value is given, it is copied to all entries.\n",
"\n",
" If both ``inp`` and ``data_ptr`` are `None`, an empty\n",
" element is created with no guarantee of its state\n",
" (memory allocation only).\n",
"\n",
" data_ptr : `int`, optional\n",
" Memory address of a CUDA array container\n",
"\n",
" Cannot be combined with ``inp``.\n",
"\n",
" Returns\n",
" -------\n",
" element : `CudaFnVector`\n",
" The new element\n",
"\n",
" Notes\n",
" -----\n",
" This method preserves \"array views\" of correct size and type,\n",
" see the examples below.\n",
"\n",
" TODO: No, it does not yet!\n",
"\n",
" Examples\n",
" --------\n",
" >>> uc3 = CudaFn(3, 'uint8')\n",
" >>> x = uc3.element(np.array([1, 2, 3], dtype='uint8'))\n",
" >>> x\n",
" CudaFn(3, 'uint8').element([1, 2, 3])\n",
" >>> y = uc3.element([1, 2, 3])\n",
" >>> y\n",
" CudaFn(3, 'uint8').element([1, 2, 3])\n",
" \"\"\"\n",
" if inp is None:\n",
" if data_ptr is None:\n",
" return self.element_type(self, self._vector_impl(self.size))\n",
" else: # TODO: handle non-1 length strides\n",
" return self.element_type(\n",
" self, self._vector_impl.from_pointer(data_ptr, self.size,\n",
" 1))\n",
" else:\n",
" if data_ptr is None:\n",
" if isinstance(inp, self._vector_impl):\n",
" return self.element_type(self, inp)\n",
" elif isinstance(inp, self.element_type):\n",
" if inp in self:\n",
" return inp\n",
" else:\n",
" # Bulk-copy for non-matching dtypes\n",
" elem = self.element()\n",
" elem[:] = inp\n",
" return elem\n",
" else:\n",
" # Array-like input. Need to go through a NumPy array\n",
" arr = np.array(inp, copy=False, dtype=self.dtype, ndmin=1)\n",
" if arr.shape != (self.size,):\n",
" raise ValueError('expected input shape {}, got {}'\n",
" ''.format((self.size,), arr.shape))\n",
" elem = self.element()\n",
" elem[:] = arr\n",
" return elem\n",
" else:\n",
" raise ValueError('cannot provide both `inp` and `data_ptr`')\n",
"\n",
" def _lincomb(self, a, x1, b, x2, out):\n",
" \"\"\"Linear combination of ``x1`` and ``x2``, assigned to ``out``.\n",
"\n",
" Calculate ``z = a * x + b * y`` using optimized CUDA routines.\n",
"\n",
" Parameters\n",
" ----------\n",
" a, b : `field` element\n",
" Scalar to multiply ``x`` and ``y`` with.\n",
" x, y : `CudaFnVector`\n",
" The summands.\n",
" out : `CudaFnVector`\n",
" The Vector that the result is written to.\n",
"\n",
" Returns\n",
" -------\n",
" `None`\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = CudaFn(3)\n",
" >>> x = r3.element([1, 2, 3])\n",
" >>> y = r3.element([4, 5, 6])\n",
" >>> out = r3.element()\n",
" >>> r3.lincomb(2, x, 3, y, out) # out is returned\n",
" CudaFn(3).element([14.0, 19.0, 24.0])\n",
" >>> out\n",
" CudaFn(3).element([14.0, 19.0, 24.0])\n",
" \"\"\"\n",
" out.data.lincomb(a, x1.data, b, x2.data)\n",
"\n",
" def _inner(self, x1, x2):\n",
" \"\"\"Calculate the inner product of x and y.\n",
"\n",
" Parameters\n",
" ----------\n",
" x1, x2 : `CudaFnVector`\n",
"\n",
" Returns\n",
" -------\n",
" inner: `float` or `complex`\n",
" The inner product of x and y\n",
"\n",
"\n",
" Examples\n",
" --------\n",
" >>> uc3 = CudaFn(3, 'uint8')\n",
" >>> x = uc3.element([1, 2, 3])\n",
" >>> y = uc3.element([3, 1, 5])\n",
" >>> uc3.inner(x, y)\n",
" 20.0\n",
" \"\"\"\n",
" return self.weighting.inner(x1, x2)\n",
"\n",
" def _integral(self, x):\n",
" \"\"\"Raw integral of vector.\n",
"\n",
" Parameters\n",
" ----------\n",
" x : `CudaFnVector`\n",
" The vector whose integral should be computed.\n",
"\n",
" Returns\n",
" -------\n",
" inner : `field` element\n",
" Inner product of the vectors\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = CudaFn(2, dtype='float32')\n",
" >>> x = r3.element([3, -1])\n",
" >>> r3.integral(x)\n",
" 2.0\n",
"\n",
" Notes\n",
" -----\n",
" Integration of vectors is defined as the sum of the elements\n",
" of the vector, i.e. the discrete measure.\n",
"\n",
" In weighted spaces, the unweighted measure is used for the integral.\n",
" \"\"\"\n",
" return x.ufuncs.sum()\n",
"\n",
" def _dist(self, x1, x2):\n",
" \"\"\"Calculate the distance between two vectors.\n",
"\n",
" Parameters\n",
" ----------\n",
" x1, x2 : `CudaFnVector`\n",
" The vectors whose mutual distance is calculated\n",
"\n",
" Returns\n",
" -------\n",
" dist : `float`\n",
" Distance between the vectors\n",
"\n",
" Examples\n",
" --------\n",
" >>> r2 = CudaFn(2)\n",
" >>> x = r2.element([3, 8])\n",
" >>> y = r2.element([0, 4])\n",
" >>> r2.dist(x, y)\n",
" 5.0\n",
" \"\"\"\n",
" return self.weighting.dist(x1, x2)\n",
"\n",
" def _norm(self, x):\n",
" \"\"\"Calculate the norm of ``x``.\n",
"\n",
" This method is implemented separately from ``sqrt(inner(x,x))``\n",
" for efficiency reasons.\n",
"\n",
" Parameters\n",
" ----------\n",
" x : `CudaFnVector`\n",
"\n",
" Returns\n",
" -------\n",
" norm : `float`\n",
" The norm of x\n",
"\n",
" Examples\n",
" --------\n",
" >>> uc3 = CudaFn(3, 'uint8')\n",
" >>> x = uc3.element([2, 3, 6])\n",
" >>> uc3.norm(x)\n",
" 7.0\n",
" \"\"\"\n",
" return self.weighting.norm(x)\n",
"\n",
" def _multiply(self, x1, x2, out):\n",
" \"\"\"The pointwise product of two vectors, assigned to ``out``.\n",
"\n",
" This is defined as:\n",
"\n",
" multiply(x, y, out) := [x[0]*y[0], x[1]*y[1], ..., x[n-1]*y[n-1]]\n",
"\n",
" Parameters\n",
" ----------\n",
"\n",
" x1, x2 : `CudaFnVector`\n",
" Factors in product\n",
" out : `CudaFnVector`\n",
" Element to which the result is written\n",
"\n",
" Returns\n",
" -------\n",
" `None`\n",
"\n",
" Examples\n",
" --------\n",
"\n",
" >>> rn = CudaFn(3)\n",
" >>> x1 = rn.element([5, 3, 2])\n",
" >>> x2 = rn.element([1, 2, 3])\n",
" >>> out = rn.element()\n",
" >>> rn.multiply(x1, x2, out) # out is returned\n",
" CudaFn(3).element([5.0, 6.0, 6.0])\n",
" >>> out\n",
" CudaFn(3).element([5.0, 6.0, 6.0])\n",
" \"\"\"\n",
" out.data.multiply(x1.data, x2.data)\n",
"\n",
" def _divide(self, x1, x2, out):\n",
" \"\"\"The pointwise division of two vectors, assigned to ``out``.\n",
"\n",
" This is defined as:\n",
"\n",
" divide(x, y, out) := [x[0]/y[0], x[1]/y[1], ..., x[n-1]/y[n-1]]\n",
"\n",
" Parameters\n",
" ----------\n",
"\n",
" x1, x2 : `CudaFnVector`\n",
" Factors in the product\n",
" out : `CudaFnVector`\n",
" Element to which the result is written\n",
"\n",
" Returns\n",
" -------\n",
" None\n",
"\n",
" Examples\n",
" --------\n",
"\n",
" >>> rn = CudaFn(3)\n",
" >>> x1 = rn.element([5, 3, 2])\n",
" >>> x2 = rn.element([1, 2, 2])\n",
" >>> out = rn.element()\n",
" >>> rn.divide(x1, x2, out) # out is returned\n",
" CudaFn(3).element([5.0, 1.5, 1.0])\n",
" >>> out\n",
" CudaFn(3).element([5.0, 1.5, 1.0])\n",
" \"\"\"\n",
" out.data.divide(x1.data, x2.data)\n",
"\n",
" def zero(self):\n",
" \"\"\"Create a vector of zeros.\"\"\"\n",
" return self.element_type(self, self._vector_impl(self.size, 0))\n",
"\n",
" def one(self):\n",
" \"\"\"Create a vector of ones.\"\"\"\n",
" return self.element_type(self, self._vector_impl(self.size, 1))\n",
"\n",
" def __eq__(self, other):\n",
" \"\"\"s.__eq__(other) <==> s == other.\n",
"\n",
" Returns\n",
" -------\n",
" equals : `bool`\n",
" `True` if other is an instance of this space's type\n",
" with the same ``size``, ``dtype`` and space functions,\n",
" otherwise `False`.\n",
"\n",
" Examples\n",
" --------\n",
" >>> from numpy.linalg import norm\n",
" >>> def dist(x, y, p):\n",
" ... return norm(x - y, ord=p)\n",
"\n",
" >>> from functools import partial\n",
" >>> dist2 = partial(dist, p=2)\n",
" >>> r3 = CudaFn(3, dist=dist2)\n",
" >>> r3_same = CudaFn(3, dist=dist2)\n",
" >>> r3 == r3_same\n",
" True\n",
"\n",
" Different ``dist`` functions result in different spaces - the\n",
" same applies for ``norm`` and ``inner``:\n",
"\n",
" >>> dist1 = partial(dist, p=1)\n",
" >>> r3_1 = CudaFn(3, dist=dist1)\n",
" >>> r3_2 = CudaFn(3, dist=dist2)\n",
" >>> r3_1 == r3_2\n",
" False\n",
"\n",
" Be careful with Lambdas - they result in non-identical function\n",
" objects:\n",
"\n",
" >>> r3_lambda1 = CudaFn(3, dist=lambda x, y: norm(x-y, p=1))\n",
" >>> r3_lambda2 = CudaFn(3, dist=lambda x, y: norm(x-y, p=1))\n",
" >>> r3_lambda1 == r3_lambda2\n",
" False\n",
" \"\"\"\n",
" if other is self:\n",
" return True\n",
"\n",
" return (super(CudaFn, self).__eq__(other) and\n",
" self.weighting == other.weighting)\n",
"\n",
" @property\n",
" def impl(self):\n",
" \"\"\"Name of the implementation: ``'cuda'``.\"\"\"\n",
" return 'cuda'\n",
"\n",
" @staticmethod\n",
" def available_dtypes():\n",
" \"\"\"Return the available data types.\"\"\"\n",
" return CUDA_DTYPES\n",
"\n",
" @staticmethod\n",
" def default_dtype(field=None):\n",
" \"\"\"Return the default of `CudaFn` data type for a given field.\n",
"\n",
" Parameters\n",
" ----------\n",
" field : `Field`\n",
" Set of numbers to be represented by a data type.\n",
" Currently supported: `RealNumbers`.\n",
"\n",
" Returns\n",
" -------\n",
" dtype : `type`\n",
" Numpy data type specifier. The returned defaults are:\n",
"\n",
" ``RealNumbers()`` : ``np.dtype('float32')``\n",
" \"\"\"\n",
" if field is None or field == RealNumbers():\n",
" return np.dtype('float32')\n",
" else:\n",
" raise ValueError('no default data type defined for field {}'\n",
" ''.format(field))\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" if self.is_real:\n",
" ctor = 'rn'\n",
" elif self.is_complex:\n",
" ctor = 'cn'\n",
" else:\n",
" ctor = 'fn'\n",
"\n",
" posargs = [self.size]\n",
" default_dtype_str = dtype_repr(self.default_dtype(self.field))\n",
" optargs = [('dtype', dtype_repr(self.dtype), default_dtype_str),\n",
" ('impl', self.impl, 'numpy')]\n",
"\n",
" inner_str = signature_string(posargs, optargs)\n",
"\n",
" weight_str = self.weighting.repr_part\n",
" if weight_str:\n",
" inner_str += ', ' + weight_str\n",
"\n",
" return '{}({})'.format(ctor, inner_str)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return repr(self)\n",
"\n",
" @property\n",
" def element_type(self):\n",
" \"\"\" `CudaFnVector` \"\"\"\n",
" return CudaFnVector\n",
"\n",
"\n",
"class CudaFnVector(FnBaseVector):\n",
"\n",
" \"\"\"Representation of a `CudaFn` element.\"\"\"\n",
"\n",
" def __init__(self, space, data):\n",
" \"\"\"Initialize a new instance.\"\"\"\n",
" super(CudaFnVector, self).__init__(space)\n",
" self.__data = data\n",
"\n",
" @property\n",
" def data(self):\n",
" \"\"\"The data container of this vector, type ``CudaFnImplVector``.\"\"\"\n",
" return self.__data\n",
"\n",
" @property\n",
" def data_ptr(self):\n",
" \"\"\"A raw pointer to the data of this vector.\"\"\"\n",
" return self.data.data_ptr()\n",
"\n",
" def __eq__(self, other):\n",
" \"\"\"Return ``self == other``.\n",
"\n",
" Returns\n",
" -------\n",
" equals : `bool`\n",
" `True` if all elements of ``other`` are equal to this\n",
" vector's elements, `False` otherwise\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = CudaFn(3, 'float32')\n",
" >>> x = r3.element([1, 2, 3])\n",
" >>> x == x\n",
" True\n",
" >>> y = r3.element([1, 2, 3])\n",
" >>> x == y\n",
" True\n",
" >>> y = r3.element([0, 0, 0])\n",
" >>> x == y\n",
" False\n",
" >>> r3_2 = CudaFn(3, 'uint8')\n",
" >>> z = r3_2.element([1, 2, 3])\n",
" >>> x != z\n",
" True\n",
" \"\"\"\n",
" if other is self:\n",
" return True\n",
" elif other not in self.space:\n",
" return False\n",
" else:\n",
" return self.data == other.data\n",
"\n",
" def copy(self):\n",
" \"\"\"Create an identical (deep) copy of this vector.\n",
"\n",
" Returns\n",
" -------\n",
" copy : `CudaFnVector`\n",
" The deep copy\n",
"\n",
" Examples\n",
" --------\n",
" >>> vec1 = CudaFn(3, 'uint8').element([1, 2, 3])\n",
" >>> vec2 = vec1.copy()\n",
" >>> vec2\n",
" CudaFn(3, 'uint8').element([1, 2, 3])\n",
" >>> vec1 == vec2\n",
" True\n",
" >>> vec1 is vec2\n",
" False\n",
" \"\"\"\n",
" return self.space.element_type(self.space, self.data.copy())\n",
"\n",
" def asarray(self, start=None, stop=None, step=None, out=None):\n",
" \"\"\"Extract the data of this array as a numpy array.\n",
"\n",
" Parameters\n",
" ----------\n",
" start : `int`, optional\n",
" Start position. None means the first element.\n",
" stop : `int`, optional\n",
" One element past the last element to be extracted.\n",
" None means the last element.\n",
" step : `int`, optional\n",
" Step length. None means 1.\n",
" out : `numpy.ndarray`\n",
" Array in which the result should be written in-place.\n",
" Has to be contiguous and of the correct dtype.\n",
"\n",
" Returns\n",
" -------\n",
" asarray : `numpy.ndarray`\n",
" Numpy array of the same type as the space.\n",
"\n",
" Examples\n",
" --------\n",
" >>> uc3 = CudaFn(3, 'uint8')\n",
" >>> y = uc3.element([1, 2, 3])\n",
" >>> y.asarray()\n",
" array([1, 2, 3], dtype=uint8)\n",
" >>> y.asarray(1, 3)\n",
" array([2, 3], dtype=uint8)\n",
"\n",
" Using the out parameter\n",
"\n",
" >>> out = np.empty((3,), dtype='uint8')\n",
" >>> result = y.asarray(out=out)\n",
" >>> out\n",
" array([1, 2, 3], dtype=uint8)\n",
" >>> result is out\n",
" True\n",
" \"\"\"\n",
" if out is None:\n",
" return self.data.get_to_host(slice(start, stop, step))\n",
" else:\n",
" self.data.copy_device_to_host(slice(start, stop, step), out)\n",
" return out\n",
"\n",
" def __getitem__(self, indices):\n",
" \"\"\"Access values of this vector.\n",
"\n",
" This will cause the values to be copied to CPU\n",
" which is a slow operation.\n",
"\n",
" Parameters\n",
" ----------\n",
" indices : `int` or `slice`\n",
" The position(s) that should be accessed\n",
"\n",
" Returns\n",
" -------\n",
" values : scalar or `CudaFnVector`\n",
" The value(s) at the index (indices)\n",
"\n",
"\n",
" Examples\n",
" --------\n",
" >>> uc3 = CudaFn(3, 'uint8')\n",
" >>> y = uc3.element([1, 2, 3])\n",
" >>> y[0]\n",
" 1\n",
" >>> z = y[1:3]\n",
" >>> z\n",
" CudaFn(2, 'uint8').element([2, 3])\n",
" >>> y[::2]\n",
" CudaFn(2, 'uint8').element([1, 3])\n",
" >>> y[::-1]\n",
" CudaFn(3, 'uint8').element([3, 2, 1])\n",
"\n",
" The returned value is a view, modifications are reflected\n",
" in the original data:\n",
"\n",
" >>> z[:] = [4, 5]\n",
" >>> y\n",
" CudaFn(3, 'uint8').element([1, 4, 5])\n",
" \"\"\"\n",
" if isinstance(indices, slice):\n",
" data = self.data.getslice(indices)\n",
" return type(self.space)(data.size, data.dtype).element(data)\n",
" else:\n",
" return self.data.__getitem__(indices)\n",
"\n",
" def __setitem__(self, indices, values):\n",
" \"\"\"Set values of this vector.\n",
"\n",
" This will cause the values to be copied to CPU\n",
" which is a slow operation.\n",
"\n",
" Parameters\n",
" ----------\n",
" indices : `int` or `slice`\n",
" The position(s) that should be set\n",
" values : scalar, `array-like` or `CudaFnVector`\n",
" The value(s) that are to be assigned.\n",
"\n",
" If ``index`` is an `int`, ``value`` must be single value.\n",
"\n",
" If ``index`` is a `slice`, ``value`` must be broadcastable\n",
" to the size of the slice (same size, shape (1,)\n",
" or single value).\n",
"\n",
" Returns\n",
" -------\n",
" `None`\n",
"\n",
" Examples\n",
" --------\n",
" >>> uc3 = CudaFn(3, 'uint8')\n",
" >>> y = uc3.element([1, 2, 3])\n",
" >>> y[0] = 5\n",
" >>> y\n",
" CudaFn(3, 'uint8').element([5, 2, 3])\n",
" >>> y[1:3] = [7, 8]\n",
" >>> y\n",
" CudaFn(3, 'uint8').element([5, 7, 8])\n",
" >>> y[:] = np.array([0, 0, 0])\n",
" >>> y\n",
" CudaFn(3, 'uint8').element([0, 0, 0])\n",
"\n",
" Scalar assignment\n",
"\n",
" >>> y[:] = 5\n",
" >>> y\n",
" CudaFn(3, 'uint8').element([5, 5, 5])\n",
" \"\"\"\n",
" if (isinstance(values, type(self)) and\n",
" indices in (slice(None), Ellipsis)):\n",
" self.assign(values) # use lincomb magic\n",
" else:\n",
" if isinstance(indices, slice):\n",
" # Convert value to the correct type if needed\n",
" value_array = np.asarray(values, dtype=self.space.dtype)\n",
"\n",
" if value_array.ndim == 0:\n",
" self.data.fill(values)\n",
" else:\n",
" # Size checking is performed in c++\n",
" self.data.setslice(indices, value_array)\n",
" else:\n",
" self.data[int(indices)] = values\n",
"\n",
" @property\n",
" def ufuncs(self):\n",
" \"\"\"`CudaFnUfuncs`, access to numpy style ufuncs.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r2 = CudaFn(2)\n",
" >>> x = r2.element([1, -2])\n",
" >>> x.ufuncs.absolute()\n",
" CudaFn(2).element([1.0, 2.0])\n",
"\n",
" These functions can also be used with broadcasting\n",
"\n",
" >>> x.ufuncs.add(3)\n",
" CudaFn(2).element([4.0, 1.0])\n",
"\n",
" and non-space elements\n",
"\n",
" >>> x.ufuncs.subtract([3, 3])\n",
" CudaFn(2).element([-2.0, -5.0])\n",
"\n",
" There is also support for various reductions (sum, prod, min, max)\n",
"\n",
" >>> x.ufuncs.sum()\n",
" -1.0\n",
"\n",
" Also supports out parameter\n",
"\n",
" >>> y = r2.element([3, 4])\n",
" >>> out = r2.element()\n",
" >>> result = x.ufuncs.add(y, out=out)\n",
" >>> result\n",
" CudaFn(2).element([4.0, 2.0])\n",
" >>> result is out\n",
" True\n",
"\n",
" Notes\n",
" -----\n",
" Not all ufuncs are currently optimized, some use the default numpy\n",
" implementation. This can be improved in the future.\n",
"\n",
" See also\n",
" --------\n",
" odl.util.ufuncs.FnBaseUfuncs\n",
" Base class for ufuncs in `FnBase` spaces.\n",
" \"\"\"\n",
" return CudaFnUfuncs(self)\n",
"\n",
"\n",
"def _weighting(weighting, exponent):\n",
" \"\"\"Return a weighting whose type is inferred from the arguments.\"\"\"\n",
" if np.isscalar(weighting):\n",
" weighting = CudaFnConstWeighting(\n",
" weighting, exponent)\n",
" elif isinstance(weighting, CudaFnVector):\n",
" weighting = CudaFnArrayWeighting(\n",
" weighting, exponent=exponent)\n",
" else:\n",
" weight_ = np.asarray(weighting)\n",
" if weight_.dtype == object:\n",
" raise ValueError('bad weighting {}'.format(weighting))\n",
" if weight_.ndim == 1:\n",
" weighting = CudaFnArrayWeighting(\n",
" weight_, exponent)\n",
" elif weight_.ndim == 2:\n",
" raise NotImplementedError('matrix weighting not implemented '\n",
" 'for CUDA spaces')\n",
"# weighting = CudaFnMatrixWeighting(\n",
"# weight_, exponent)\n",
" else:\n",
" raise ValueError('array-like weight must have 1 or 2 dimensions, '\n",
" 'but {} has {} dimensions'\n",
" ''.format(weighting, weighting.ndim))\n",
" return weighting\n",
"\n",
"\n",
"def _dist_default(x1, x2):\n",
" \"\"\"Default Euclidean distance implementation.\"\"\"\n",
" return x1.data.dist(x2.data)\n",
"\n",
"\n",
"def _pdist_default(x1, x2, p):\n",
" \"\"\"Default p-distance implementation.\"\"\"\n",
" if p == float('inf'):\n",
" raise NotImplementedError('inf-norm not implemented')\n",
" return x1.data.dist_power(x2.data, p)\n",
"\n",
"\n",
"def _pdist_diagweight(x1, x2, p, w):\n",
" \"\"\"Diagonally weighted p-distance implementation.\"\"\"\n",
" return x1.data.dist_weight(x2.data, p, w.data)\n",
"\n",
"\n",
"def _norm_default(x):\n",
" \"\"\"Default Euclidean norm implementation.\"\"\"\n",
" return x.data.norm()\n",
"\n",
"\n",
"def _pnorm_default(x, p):\n",
" \"\"\"Default p-norm implementation.\"\"\"\n",
" if p == float('inf'):\n",
" raise NotImplementedError('inf-norm not implemented')\n",
" return x.data.norm_power(p)\n",
"\n",
"\n",
"def _pnorm_diagweight(x, p, w):\n",
" \"\"\"Diagonally weighted p-norm implementation.\"\"\"\n",
" if p == float('inf'):\n",
" raise NotImplementedError('inf-norm not implemented')\n",
" return x.data.norm_weight(p, w.data)\n",
"\n",
"\n",
"def _inner_default(x1, x2):\n",
" \"\"\"Default Euclidean inner product implementation.\"\"\"\n",
" return x1.data.inner(x2.data)\n",
"\n",
"\n",
"def _inner_diagweight(x1, x2, w):\n",
" return x1.data.inner_weight(x2.data, w.data)\n",
"\n",
"\n",
"class CudaFnArrayWeighting(ArrayWeighting):\n",
"\n",
" \"\"\"Vector weighting for `CudaFn`.\n",
"\n",
" For exponent 2.0, a new weighted inner product with vector ``w``\n",
" is defined as::\n",
"\n",
" <a, b>_w := <w * a, b> = b^H (w * a)\n",
"\n",
" with ``b^H`` standing for transposed complex conjugate and\n",
" ``w * a`` for element-wise multiplication.\n",
"\n",
" For other exponents, only norm and dist are defined. In the case of\n",
" exponent ``inf``, the weighted norm is\n",
"\n",
" ||a||_{w, inf} := ||w * a||_inf\n",
"\n",
" otherwise it is::\n",
"\n",
" ||a||_{w, p} := ||w^{1/p} * a||\n",
"\n",
" Note that this definition does **not** fulfill the limit property\n",
" in ``p``, i.e.::\n",
"\n",
" ||x||_{w, p} --/-> ||x||_{w, inf} for p --> inf\n",
"\n",
" unless ``w = (1,...,1)``.\n",
"\n",
" The vector may only have positive entries, otherwise it does not\n",
" define an inner product or norm, respectively. This is not checked\n",
" during initialization.\n",
" \"\"\"\n",
"\n",
" def __init__(self, vector, exponent=2.0):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" vector : `CudaFnVector`\n",
" Weighting vector of the inner product, norm and distance\n",
" exponent : positive `float`\n",
" Exponent of the norm. For values other than 2.0, the inner\n",
" product is not defined.\n",
" \"\"\"\n",
" if not isinstance(vector, CudaFnVector):\n",
" raise TypeError('vector {!r} is not a CudaFnVector instance'\n",
" ''.format(vector))\n",
"\n",
" super(CudaFnArrayWeighting, self).__init__(\n",
" vector, impl='cuda', exponent=exponent)\n",
"\n",
" def inner(self, x1, x2):\n",
" \"\"\"Calculate the vector weighted inner product of two vectors.\n",
"\n",
" Parameters\n",
" ----------\n",
" x1, x2 : `CudaFnVector`\n",
" Vectors whose inner product is calculated\n",
"\n",
" Returns\n",
" -------\n",
" inner : `float` or `complex`\n",
" The inner product of the two provided vectors\n",
" \"\"\"\n",
" if self.exponent != 2.0:\n",
" raise NotImplementedError('No inner product defined for '\n",
" 'exponent != 2 (got {})'\n",
" ''.format(self.exponent))\n",
" else:\n",
" return _inner_diagweight(x1, x2, self.array)\n",
"\n",
" def norm(self, x):\n",
" \"\"\"Calculate the vector-weighted norm of a vector.\n",
"\n",
" Parameters\n",
" ----------\n",
" x : `CudaFnVector`\n",
" Vector whose norm is calculated\n",
"\n",
" Returns\n",
" -------\n",
" norm : `float`\n",
" The norm of the provided vector\n",
" \"\"\"\n",
" if self.exponent == float('inf'):\n",
" raise NotImplementedError('inf norm not implemented yet')\n",
" else:\n",
" return _pnorm_diagweight(x, self.exponent, self.array)\n",
"\n",
" def dist(self, x1, x2):\n",
" \"\"\"Calculate the vector-weighted distance between two vectors.\n",
"\n",
" Parameters\n",
" ----------\n",
" x1, x2 : `CudaFnVector`\n",
" Vectors whose mutual distance is calculated\n",
"\n",
" Returns\n",
" -------\n",
" dist : `float`\n",
" The distance between the vectors\n",
" \"\"\"\n",
" if self.exponent == float('inf'):\n",
" raise NotImplementedError('inf norm not implemented yet')\n",
" else:\n",
" return _pdist_diagweight(x1, x2, self.exponent, self.array)\n",
"\n",
"\n",
"class CudaFnConstWeighting(ConstWeighting):\n",
"\n",
" \"\"\"Weighting of `CudaFn` by a constant.\n",
"\n",
" For exponent 2.0, a new weighted inner product with constant\n",
" ``c`` is defined as::\n",
"\n",
" <a, b>_c = c * <a, b> = c * b^H a\n",
"\n",
" with ``b^H`` standing for transposed complex conjugate.\n",
"\n",
" For other exponents, only norm and dist are defined. In the case of\n",
" exponent ``inf``, the weighted norm is defined as::\n",
"\n",
" ||a||_{c, inf} := c ||a||_inf\n",
"\n",
" otherwise it is::\n",
"\n",
" ||a||_{c, p} := c^{1/p} ||a||_p\n",
"\n",
" Note that this definition does **not** fulfill the limit property\n",
" in ``p``, i.e.::\n",
"\n",
" ||a||_{c,p} --/-> ||a||_{c,inf} for p --> inf\n",
"\n",
" unless ``c = 1``.\n",
"\n",
" The constant must be positive, otherwise it does not define an\n",
" inner product or norm, respectively.\n",
" \"\"\"\n",
"\n",
" def __init__(self, constant, exponent=2.0):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" constant : positive finite `float`\n",
" Weighting constant of the inner product.\n",
" exponent : positive `float`\n",
" Exponent of the norm. For values other than 2.0, the inner\n",
" product is not defined.\n",
" \"\"\"\n",
" super(CudaFnConstWeighting, self).__init__(\n",
" constant, impl='cuda', exponent=exponent)\n",
"\n",
" def inner(self, x1, x2):\n",
" \"\"\"Calculate the constant-weighted inner product of two vectors.\n",
"\n",
" Parameters\n",
" ----------\n",
" x1, x2 : `CudaFnVector`\n",
" Vectors whose inner product is calculated\n",
"\n",
" Returns\n",
" -------\n",
" inner : `float` or `complex`\n",
" The inner product of the two vectors\n",
" \"\"\"\n",
" if self.exponent != 2.0:\n",
" raise NotImplementedError('no inner product defined for '\n",
" 'exponent != 2 (got {})'\n",
" ''.format(self.exponent))\n",
" else:\n",
" return self.const * _inner_default(x1, x2)\n",
"\n",
" def norm(self, x):\n",
" \"\"\"Calculate the constant-weighted norm of a vector.\n",
"\n",
" Parameters\n",
" ----------\n",
" x1 : `CudaFnVector`\n",
" Vector whose norm is calculated\n",
"\n",
" Returns\n",
" -------\n",
" norm : `float`\n",
" The norm of the vector\n",
" \"\"\"\n",
" if self.exponent == float('inf'):\n",
" raise NotImplementedError\n",
" # Example impl\n",
" # return self.const * float(_pnorm_default(x, self.exponent))\n",
" else:\n",
" return (self.const ** (1 / self.exponent) *\n",
" float(_pnorm_default(x, self.exponent)))\n",
"\n",
" def dist(self, x1, x2):\n",
" \"\"\"Calculate the constant-weighted distance between two vectors.\n",
"\n",
" Parameters\n",
" ----------\n",
" x1, x2 : `CudaFnVector`\n",
" Vectors whose mutual distance is calculated\n",
"\n",
" Returns\n",
" -------\n",
" dist : `float`\n",
" The distance between the vectors\n",
" \"\"\"\n",
" if self.exponent == float('inf'):\n",
" raise NotImplementedError\n",
" else:\n",
" return (self.const ** (1 / self.exponent) *\n",
" _pdist_default(x1, x2, self.exponent))\n",
"\n",
"\n",
"class CudaFnNoWeighting(NoWeighting, CudaFnConstWeighting):\n",
"\n",
" \"\"\"Weighting of `CudaFn` with constant 1.\n",
"\n",
" For exponent 2.0, the unweighted inner product is defined as::\n",
"\n",
" <a, b> := b^H a\n",
"\n",
" with ``b^H`` standing for transposed complex conjugate.\n",
"\n",
" For other exponents, only norm and dist are defined.\n",
" \"\"\"\n",
"\n",
" # Implement singleton pattern for efficiency in the default case\n",
" _instance = None\n",
"\n",
" def __new__(cls, *args, **kwargs):\n",
" \"\"\"Implement singleton pattern if ``exponent==2.0``.\"\"\"\n",
" if len(args) == 0:\n",
" exponent = kwargs.pop('exponent', 2.0)\n",
" else:\n",
" exponent = args[0]\n",
" args = args[1:]\n",
"\n",
" if exponent == 2.0:\n",
" if not cls._instance:\n",
" cls._instance = super(CudaFnConstWeighting, cls).__new__(\n",
" cls, *args, **kwargs)\n",
" return cls._instance\n",
" else:\n",
" return super(CudaFnConstWeighting, cls).__new__(\n",
" cls, *args, **kwargs)\n",
"\n",
" def __init__(self, exponent=2.0):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" exponent : positive `float`\n",
" Exponent of the norm. For values other than 2.0, the inner\n",
" product is not defined.\n",
" \"\"\"\n",
" super(CudaFnNoWeighting, self).__init__(exponent=exponent, impl='cuda')\n",
"\n",
"\n",
"class CudaFnCustomInner(CustomInner):\n",
"\n",
" \"\"\"Class for handling a user-specified inner product on `CudaFn`.\"\"\"\n",
"\n",
" def __init__(self, inner):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" inner : `callable`\n",
" The inner product implementation. It must accept two\n",
" `FnVector` arguments, return an element from their space's\n",
" field (real or complex number) and satisfy the following\n",
" conditions for all vectors ``x, y, z`` and scalars ``s``:\n",
"\n",
" - ``<x, y> = conj(<y, x>)``\n",
" - ``<s*x + y, z> = s * <x, z> + <y, z>``\n",
" - ``<x, x> = 0`` if and only if ``x = 0``\n",
" \"\"\"\n",
" super(CudaFnCustomInner, self).__init__(inner, impl='cuda')\n",
"\n",
"\n",
"class CudaFnCustomNorm(CustomNorm):\n",
"\n",
" \"\"\"Class for handling a user-specified norm in `CudaFn`.\n",
"\n",
" Note that this removes ``inner``.\n",
" \"\"\"\n",
"\n",
" def __init__(self, norm):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" norm : `callable`\n",
" The norm implementation. It must accept a `CudaFnVector`\n",
" argument, return a `float` and satisfy the following\n",
" conditions for all vectors ``x, y`` and scalars ``s``:\n",
"\n",
" - ``||x|| >= 0``\n",
" - ``||x|| = 0`` if and only if ``x = 0``\n",
" - ``||s * x|| = |s| * ||x||``\n",
" - ``||x + y|| <= ||x|| + ||y||``\n",
" \"\"\"\n",
" super(CudaFnCustomNorm, self).__init__(norm, impl='cuda')\n",
"\n",
"\n",
"class CudaFnCustomDist(CustomDist):\n",
"\n",
" \"\"\"Class for handling a user-specified distance in `CudaFn`.\n",
"\n",
" Note that this removes ``inner`` and ``norm``.\n",
" \"\"\"\n",
"\n",
" def __init__(self, dist):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" dist : `callable`\n",
" The distance function defining a metric on `Fn`. It must\n",
" accept two `FnVector` arguments, return a `float` and and\n",
" fulfill the following mathematical conditions for any three\n",
" vectors ``x, y, z``:\n",
"\n",
" - ``dist(x, y) >= 0``\n",
" - ``dist(x, y) = 0`` if and only if ``x = y``\n",
" - ``dist(x, y) = dist(y, x)``\n",
" - ``dist(x, y) <= dist(x, z) + dist(z, y)``\n",
" \"\"\"\n",
" super(CudaFnCustomDist, self).__init__(dist, impl='cuda')\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" # pylint: disable=wrong-import-position\n",
" from odl.util.testutils import run_doctests\n",
" run_doctests()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 1,316 | 0 | false |
import json
class QiStream(object):
    """Qi stream definition.

    Holds the stream metadata (Id, Name, Description, TypeId, BehaviourId)
    and provides JSON/dict (de)serialization helpers. The getter/setter
    pairs and the corresponding properties are kept for backward
    compatibility with existing callers.
    """

    def __init__(self):
        self.__Id = 0
        self.__Name = None
        self.__Description = None
        self.__TypeId = None
        self.__BehaviourId = None

    def getId(self):
        return self.__Id

    def setId(self, Id):
        self.__Id = Id

    Id = property(getId, setId)

    def getName(self):
        return self.__Name

    def setName(self, Name):
        self.__Name = Name

    Name = property(getName, setName)

    def getDescription(self):
        return self.__Description

    def setDescription(self, Description):
        self.__Description = Description

    Description = property(getDescription, setDescription)

    def getTypeId(self):
        return self.__TypeId

    def setTypeId(self, TypeId):
        self.__TypeId = TypeId

    TypeId = property(getTypeId, setTypeId)

    def getBehaviourId(self):
        return self.__BehaviourId

    def setBehaviourId(self, BehaviourId):
        self.__BehaviourId = BehaviourId

    BehaviourId = property(getBehaviourId, setBehaviourId)

    def toString(self):
        """Serialize this stream to a JSON string."""
        return json.dumps(self.toDictionary())

    def toDictionary(self):
        """Return a dict representation.

        "Id" is always present; the optional fields are included only
        when they carry a non-empty value.
        """
        dictionary = {
            "Id" : self.__Id }
        if self.__Name is not None and len(self.__Name) > 0:
            dictionary["Name"] = self.__Name
        if self.__Description is not None and len(self.__Description) > 0:
            dictionary["Description"] = self.__Description
        if self.__TypeId is not None:
            # BUG FIX: was `self.TypeId` (property access) while every other
            # field used the private attribute; use the attribute consistently.
            dictionary["TypeId"] = self.__TypeId
        if self.__BehaviourId is not None:
            dictionary["BehaviourId"] = self.__BehaviourId
        return dictionary

    @staticmethod
    def fromString(content):
        """Build a QiStream from a JSON string."""
        dictionary = json.loads(content)
        return QiStream.fromDictionary(dictionary)

    @staticmethod
    def fromDictionary(content):
        """Build a QiStream from a dict; missing keys keep their defaults.

        BUG FIX: for an empty dict this used to return the undefined name
        ``typeProperty`` (raising NameError); it now returns the default
        stream instance.
        """
        stream = QiStream()
        if len(content) == 0:
            return stream
        if "Id" in content:
            stream.Id = content["Id"]
        if "Name" in content:
            stream.Name = content["Name"]
        if "Description" in content:
            stream.Description = content["Description"]
        if "TypeId" in content:
            stream.TypeId = content["TypeId"]
        if "BehaviourId" in content:
            stream.BehaviourId = content["BehaviourId"]
        return stream
| [
"import json\n",
"\n",
"class QiStream(object):\n",
" \"\"\"Qi stream definition\"\"\"\n",
"\n",
" def __init__(self):\n",
" self.__Id = 0\n",
" self.__Name = None\n",
" self.__Description = None\n",
" self.__TypeId = None\n",
" self.__BehaviourId = None\n",
" \n",
" def getId(self):\n",
" return self.__Id\n",
"\n",
" def setId(self, Id):\n",
" self.__Id = Id\n",
"\n",
" Id = property(getId, setId)\n",
"\n",
"\n",
" def getName(self):\n",
" return self.__Name\n",
"\n",
" def setName(self, Name):\n",
" self.__Name = Name\n",
"\n",
" Name = property(getName, setName)\n",
"\n",
" \n",
" def getDescription(self):\n",
" return self.__Description\n",
"\n",
" def setDescription(self, Description):\n",
" self.__Description = Description\n",
"\n",
" Description = property(getDescription, setDescription)\n",
"\n",
"\n",
" def getTypeId(self):\n",
" return self.__TypeId\n",
"\n",
" def setTypeId(self, TypeId):\n",
" self.__TypeId = TypeId\n",
"\n",
" TypeId = property(getTypeId, setTypeId)\n",
"\n",
" def getBehaviourId(self):\n",
" return self.__BehaviourId\n",
"\n",
" def setBehaviourId(self, BehaviourId):\n",
" self.__BehaviourId = BehaviourId\n",
"\n",
" BehaviourId = property(getBehaviourId, setBehaviourId)\n",
" \n",
" def toString(self):\n",
" return json.dumps(self.toDictionary())\n",
"\n",
" def toDictionary(self):\n",
" \n",
" dictionary = {\n",
" \"Id\" : self.__Id }\n",
"\n",
" if self.__Name is not None and len(self.__Name) > 0:\n",
" dictionary[\"Name\"] = self.__Name\n",
"\n",
" if self.__Description is not None and len(self.__Description) > 0:\n",
" dictionary[\"Description\"] = self.__Description\n",
"\n",
" if self.__TypeId is not None:\n",
" dictionary[\"TypeId\"] = self.TypeId\n",
" \n",
" if self.__BehaviourId is not None:\n",
" dictionary[\"BehaviourId\"] = self.__BehaviourId\n",
"\n",
" return dictionary\n",
"\n",
" @staticmethod\n",
" def fromString(content):\n",
" dictionary = json.loads(content)\n",
" return QiStream.fromDictionary(dictionary)\n",
"\n",
" @staticmethod\n",
" def fromDictionary(content):\n",
"\n",
" stream = QiStream()\n",
"\n",
" if len(content) == 0:\n",
" return typeProperty\n",
"\n",
" if \"Id\" in content:\n",
" stream.Id = content[\"Id\"]\n",
"\n",
" if \"Name\" in content:\n",
" stream.Name = content[\"Name\"]\n",
"\n",
" if \"Description\" in content:\n",
" stream.Description = content[\"Description\"]\n",
"\n",
" if \"TypeId\" in content:\n",
" stream.TypeId = content[\"TypeId\"]\n",
" \n",
" if \"BehaviourId\" in content:\n",
" stream.BehaviourId = content[\"BehaviourId\"]\n",
" \n",
" return stream\n",
"\n"
] | [
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0.2,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0.1111111111111111,
0,
0.06451612903225806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.07692307692307693,
0,
1
] | 107 | 0.021496 | false |
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2015 Lucas Koegel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from piko import Piko
from hm import HM
from pyowm import OWM
import time
import sys
import logging, logging.handlers
# ----------------------------------------------------------------------
# Logging setup: INFO and above to the console, full DEBUG to a rotating
# log file (2 backups of 512 MiB each). urllib3's connection pool logger
# is raised to WARNING to keep per-request noise out of the file.
# ----------------------------------------------------------------------
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
fh = logging.handlers.RotatingFileHandler('/home/pi/Desktop/piko/pikoToHM.log', maxBytes=1024*1024*512, backupCount=2)
fh.setLevel(logging.DEBUG)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
# Renamed from `format`, which shadowed the builtin of the same name.
log_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(log_formatter)
fh.setFormatter(log_formatter)
logger.addHandler(ch)
logger.addHandler(fh)

# Poll intervals (seconds) and Homematic datapoint ids.
PIKO_INTERVAL = 30 # seconds
OWM_INTERVAL = 1800 # seconds
HM_PV_REMAINING_POWER_ID = 12772
HM_PV_STRING_1_POWER_ID = 15241
HM_PV_STRING_2_POWER_ID = 15242
HM_WEATHER_FORECAST_CLOUDS_ID = 20144
HM_WEATHER_CURRENT_TEMPERATURE_ID = 21442
HM_WEATHER_FORECAST_TEMPERATURE_ID = 21443
OWM_API_KEY = 'insert'
OWM_CITY_ID = 2835477

logging.info('Started')
p = Piko(host='http://192.168.178.123')
hm = HM('http://192.168.178.49')
owm = OWM(OWM_API_KEY)
last_weather_update = time.time() - OWM_INTERVAL  # - OWM_INTERVAL to update on first run

while(True):
    try:
        # -------------------------------
        # Weather
        now = time.time()
        if (now - last_weather_update) >= OWM_INTERVAL:
            try:
                # Queries the OWM web API for three hours weather forecast for the specified city ID.
                # A Forecaster object is returned, containing a Forecast instance covering a global streak of five days:
                # this instance encapsulates Weather objects, with a time interval of three hours one from each other
                logging.debug('Calling: owm.three_hours_forecast_at_id')
                forecast = owm.three_hours_forecast_at_id(OWM_CITY_ID).get_forecast()

                # get current weather
                logging.debug('Calling: owm.weather_at_id')
                weather = owm.weather_at_id(OWM_CITY_ID).get_weather()

                # set the cloud coverage of the weather to homematic
                # .get_clouds(): Returns the cloud coverage percentage as an int
                logging.debug('Calling: set_state HM_WEATHER_FORECAST_CLOUDS_ID')
                hm.set_state(HM_WEATHER_FORECAST_CLOUDS_ID, weather.get_clouds())

                # set the current temperature of the weather to homematic
                # .get_temperature(): Returns a dict with temperature info {'temp': 293.4, 'temp_kf': None, 'temp_max': 297.5, 'temp_min': 290.9}
                hm.set_state(HM_WEATHER_CURRENT_TEMPERATURE_ID, weather.get_temperature(unit="celsius")["temp"])

                # set the temperature of the weather in 12 hours to homematic
                # .get(): Lookups up into the Weather items list for the item at the specified index
                # .get_temperature(): Returns a dict with temperature info {'temp': 293.4, 'temp_kf': None, 'temp_max': 297.5, 'temp_min': 290.9}
                hm.set_state(HM_WEATHER_FORECAST_TEMPERATURE_ID, forecast.get(3).get_temperature(unit="celsius")["temp"])

                # Update last_weather_update time
                last_weather_update = time.time()
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit and made Ctrl-C unreliable.
                err = sys.exc_info()[0]
                logging.exception('Error on updating weather: {0}'.format(err))

        # -------------------------------
        # Piko
        # Get values for remaining power calculation
        logging.debug('Calling: get_current_power')
        current_solar_power = p.get_current_power()
        logging.debug('Calling: get_consumption_phase_1')
        consumption_phase_1 = p.get_consumption_phase_1()
        # BUG FIX: the phase-2 debug line was logged *after* the call,
        # unlike every other call site; log before calling.
        logging.debug('Calling: get_consumption_phase_2')
        consumption_phase_2 = p.get_consumption_phase_2()
        logging.debug('Calling: get_consumption_phase_3')
        consumption_phase_3 = p.get_consumption_phase_3()

        # Get values for string 1 power and string 2 power
        logging.debug('Calling: get_string1_current')
        string1Current = p.get_string1_current()
        logging.debug('Calling: get_string2_current')
        string2Current = p.get_string2_current()
        logging.debug('Calling: get_string1_voltage')
        string1Voltage = p.get_string1_voltage()
        logging.debug('Calling: get_string2_voltage')
        string2Voltage = p.get_string2_voltage()

        if current_solar_power < 0:
            # A negative reading means the inverter is powered down
            # (e.g. at night): zero out the datapoints and back off.
            logging.info('Piko is off, going to sleep 10 minutes.')
            # Set state of homematic
            logging.debug('Calling: set_state HM_PV_REMAINING_POWER_ID')
            hm.set_state(HM_PV_REMAINING_POWER_ID, 0)
            logging.debug('Calling: set_state HM_PV_STRING_1_POWER_ID')
            hm.set_state(HM_PV_STRING_1_POWER_ID, 0)
            logging.debug('Calling: set_state HM_PV_STRING_2_POWER_ID')
            hm.set_state(HM_PV_STRING_2_POWER_ID, 0)
            logging.debug('Calling: time.sleep 600')
            time.sleep(600)
            continue

        # Remaining power = production minus the three phase consumptions,
        # clamped at zero (never report a negative surplus).
        logging.debug('Rounding for remaining_power')
        remaining_power = round(current_solar_power - (consumption_phase_1 + consumption_phase_2 + consumption_phase_3))
        if remaining_power < 0:
            remaining_power = 0

        # Calculate string 1 power and string 2 power (P = U * I)
        string1 = round(string1Current * string1Voltage)
        string2 = round(string2Current * string2Voltage)

        # Set state of homematic
        logging.debug('Calling: set_state HM_PV_REMAINING_POWER_ID')
        hm.set_state(HM_PV_REMAINING_POWER_ID, remaining_power)
        logging.debug('Calling: set_state HM_PV_STRING_1_POWER_ID')
        hm.set_state(HM_PV_STRING_1_POWER_ID, string1)
        logging.debug('Calling: set_state HM_PV_STRING_2_POWER_ID')
        hm.set_state(HM_PV_STRING_2_POWER_ID, string2)

        # Sleep
        logging.debug('Calling: time.sleep PIKO_INTERVAL')
        time.sleep(PIKO_INTERVAL)
    except KeyboardInterrupt:
        break
    except Exception:
        # BUG FIX: was a bare `except:`; keep the loop alive on real
        # errors but let SystemExit propagate.
        err = sys.exc_info()[0]
        logging.exception('Error: {0}'.format(err))
        continue
| [
"#!/usr/bin/env python3\n",
"\n",
"# The MIT License (MIT)\n",
"#\n",
"# Copyright (c) 2015 Lucas Koegel\n",
"#\n",
"# Permission is hereby granted, free of charge, to any person obtaining a copy\n",
"# of this software and associated documentation files (the \"Software\"), to deal\n",
"# in the Software without restriction, including without limitation the rights\n",
"# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n",
"# copies of the Software, and to permit persons to whom the Software is\n",
"# furnished to do so, subject to the following conditions:\n",
"#\n",
"# The above copyright notice and this permission notice shall be included in all\n",
"# copies or substantial portions of the Software.\n",
"#\n",
"# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n",
"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n",
"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n",
"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n",
"# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n",
"# SOFTWARE.\n",
"\n",
"from piko import Piko\n",
"from hm import HM\n",
"from pyowm import OWM\n",
"\n",
"import time\n",
"import sys\n",
"import logging, logging.handlers\n",
"\n",
"\n",
"logger = logging.getLogger()\n",
"logger.setLevel(logging.DEBUG)\n",
"ch = logging.StreamHandler()\n",
"ch.setLevel(logging.INFO)\n",
"fh = logging.handlers.RotatingFileHandler('/home/pi/Desktop/piko/pikoToHM.log', maxBytes=1024*1024*512, backupCount=2)\n",
"fh.setLevel(logging.DEBUG)\n",
"logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n",
"format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')\n",
"ch.setFormatter(format)\n",
"fh.setFormatter(format)\n",
"logger.addHandler(ch)\n",
"logger.addHandler(fh)\n",
"\n",
"PIKO_INTERVAL = 30 # seconds\n",
"OWM_INTERVAL = 1800 # seconds\n",
"HM_PV_REMAINING_POWER_ID = 12772\n",
"HM_PV_STRING_1_POWER_ID = 15241\n",
"HM_PV_STRING_2_POWER_ID = 15242\n",
"HM_WEATHER_FORECAST_CLOUDS_ID = 20144\n",
"HM_WEATHER_CURRENT_TEMPERATURE_ID = 21442\n",
"HM_WEATHER_FORECAST_TEMPERATURE_ID = 21443\n",
"OWM_API_KEY = 'insert'\n",
"OWM_CITY_ID = 2835477\n",
"\n",
"logging.info('Started')\n",
"p = Piko(host='http://192.168.178.123')\n",
"hm = HM('http://192.168.178.49')\n",
"owm = OWM(OWM_API_KEY)\n",
"last_weather_update = time.time() - OWM_INTERVAL # - OWM_INTERVAL to update on first run\n",
"\n",
"\n",
"while(True):\n",
" try:\n",
" # -------------------------------\n",
" # Weather\n",
" now = time.time()\n",
" if (now - last_weather_update) >= OWM_INTERVAL:\n",
" try:\n",
"\n",
" # Queries the OWM web API for three hours weather forecast for the specified city ID. \n",
" # A Forecaster object is returned, containing a Forecast instance covering a global streak of five days: \n",
" # this instance encapsulates Weather objects, with a time interval of three hours one from each other\n",
" logging.debug('Calling: owm.three_hours_forecast_at_id')\n",
" forecast = owm.three_hours_forecast_at_id(OWM_CITY_ID).get_forecast()\n",
" \n",
" # get current weather\n",
" logging.debug('Calling: owm.weather_at_id')\n",
" weather = owm.weather_at_id(OWM_CITY_ID).get_weather()\n",
" \n",
" # set the cloud coverage of the weather to homematic\n",
" # .get_clouds(): Returns the cloud coverage percentage as an int\n",
" logging.debug('Calling: set_state HM_WEATHER_FORECAST_CLOUDS_ID')\n",
" hm.set_state(HM_WEATHER_FORECAST_CLOUDS_ID, weather.get_clouds())\n",
" \n",
" # set the current temperature of the weather to homematic\n",
" # .get_temperature(): Returns a dict with temperature info {'temp': 293.4, 'temp_kf': None, 'temp_max': 297.5, 'temp_min': 290.9}\n",
" hm.set_state(HM_WEATHER_CURRENT_TEMPERATURE_ID, weather.get_temperature(unit=\"celsius\")[\"temp\"])\n",
" \n",
" # set the temperature of the weather in 12 hours to homematic\n",
" # .get(): Lookups up into the Weather items list for the item at the specified index\n",
" # .get_temperature(): Returns a dict with temperature info {'temp': 293.4, 'temp_kf': None, 'temp_max': 297.5, 'temp_min': 290.9}\n",
" hm.set_state(HM_WEATHER_FORECAST_TEMPERATURE_ID, forecast.get(3).get_temperature(unit=\"celsius\")[\"temp\"])\n",
" \n",
" # Update last_weather_update time\n",
" last_weather_update = time.time()\n",
" except: # catch *all* exceptions\n",
" err = sys.exc_info()[0]\n",
" logging.exception('Error on updating weather: {0}'.format(err))\n",
" \n",
" \n",
" # -------------------------------\n",
" # Piko\n",
" # Get values for remaining power calculation\n",
" logging.debug('Calling: get_current_power')\n",
" current_solar_power = p.get_current_power()\n",
" \n",
" logging.debug('Calling: get_consumption_phase_1')\n",
" consumption_phase_1 = p.get_consumption_phase_1()\n",
" \n",
" consumption_phase_2 = p.get_consumption_phase_2()\n",
" logging.debug('Calling: get_consumption_phase_2')\n",
" \n",
" logging.debug('Calling: get_consumption_phase_3')\n",
" consumption_phase_3 = p.get_consumption_phase_3()\n",
" \n",
" # Get values for string 1 power and string 2 power\n",
" logging.debug('Calling: get_string1_current')\n",
" string1Current = p.get_string1_current()\n",
" \n",
" logging.debug('Calling: get_string2_current')\n",
" string2Current = p.get_string2_current()\n",
" \n",
" logging.debug('Calling: get_string1_voltage')\n",
" string1Voltage = p.get_string1_voltage()\n",
" \n",
" logging.debug('Calling: get_string2_voltage')\n",
" string2Voltage = p.get_string2_voltage()\n",
" \n",
" if current_solar_power < 0:\n",
" # Piko is off\n",
" logging.info('Piko is off, going to sleep 10 minutes.')\n",
" # Set state of homematic\n",
" logging.debug('Calling: set_state HM_PV_REMAINING_POWER_ID')\n",
" hm.set_state(HM_PV_REMAINING_POWER_ID, 0)\n",
" \n",
" logging.debug('Calling: set_state HM_PV_STRING_1_POWER_ID')\n",
" hm.set_state(HM_PV_STRING_1_POWER_ID, 0)\n",
" \n",
" logging.debug('Calling: set_state HM_PV_STRING_2_POWER_ID')\n",
" hm.set_state(HM_PV_STRING_2_POWER_ID, 0)\n",
" \n",
" logging.debug('Calling: time.sleep 600')\n",
" time.sleep(600)\n",
" continue\n",
" \n",
" # Calculate remaining power\n",
" logging.debug('Rounding for remaining_power')\n",
" remaining_power = round(current_solar_power - (consumption_phase_1 + consumption_phase_2 + consumption_phase_3))\n",
" if remaining_power < 0:\n",
" remaining_power = 0\n",
" \n",
" # Calculate string 1 power and string 2 power\n",
" string1 = round(string1Current * string1Voltage)\n",
" string2 = round(string2Current * string2Voltage)\n",
" \n",
" # Set state of homematic\n",
" logging.debug('Calling: set_state HM_PV_REMAINING_POWER_ID')\n",
" hm.set_state(HM_PV_REMAINING_POWER_ID, remaining_power)\n",
" \n",
" logging.debug('Calling: set_state HM_PV_STRING_1_POWER_ID')\n",
" hm.set_state(HM_PV_STRING_1_POWER_ID, string1)\n",
" \n",
" logging.debug('Calling: set_state HM_PV_STRING_2_POWER_ID')\n",
" hm.set_state(HM_PV_STRING_2_POWER_ID, string2)\n",
" \n",
" # Sleep\n",
" logging.debug('Calling: time.sleep PIKO_INTERVAL')\n",
" time.sleep(PIKO_INTERVAL)\n",
" \n",
" except KeyboardInterrupt:\n",
" break\n",
" except: # catch *all* exceptions\n",
" err = sys.exc_info()[0]\n",
" logging.exception('Error: {0}'.format(err))\n",
" continue\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0.008403361344537815,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02247191011235955,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019417475728155338,
0.01639344262295082,
0.00847457627118644,
0,
0.011627906976744186,
0.058823529411764705,
0,
0,
0,
0.058823529411764705,
0,
0.012345679012345678,
0.012195121951219513,
0.012195121951219513,
0.058823529411764705,
0,
0.00684931506849315,
0.008849557522123894,
0.058823529411764705,
0,
0.009900990099009901,
0.00684931506849315,
0.00819672131147541,
0.058823529411764705,
0,
0,
0.044444444444444446,
0,
0,
0.2,
0.2,
0.023809523809523808,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0.07692307692307693,
0,
0,
0.07692307692307693,
0,
0,
0,
0.1111111111111111,
0,
0,
0.008264462809917356,
0,
0,
0.07692307692307693,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0.05405405405405406,
0,
0,
0
] | 178 | 0.016644 | false |
#-----------------------------------------------------------------------------------------------
# Name: dbutils.py
# Purpose: Realizar operaciones sobre Bases de Datos
#
# Author: William Lopez
#
# Created: 08/05/2015
#-----------------------------------------------------------------------------------------------
''' Módulo dbutils para realizar operaciones basicas sobre sqllite3
Clases:
clsSQLite -> Manejo de sqlite3
'''
__author__ = "William Lopez"
__copyright__ = ""
__credits__ = ["", "", "",""]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "William Lopez"
__email__ = "wlopez.a@gmail.com"
__status__ = "Development"
#-------------------------------------------------------------------------------
#Importacion de Modulos
#-------------------------------------------------------------------------------
import sqlite3
#-------------------------------------------------------------------------------
#Clase: clsSQLite
#-------------------------------------------------------------------------------
class clsSQLite(object):
''' Clase para realizar operaciones basicas sobre bases de datos sqlite
Propiedades:
database <string> : Ruta y nombre de la base de datos
Metodos:
execsql(sql,args()) : Ejecuta una sentencia sql que no retorna resultados
getresults(sql,args=(),one=False) : Ejecuta una sentencia sql que retorna uno o varios resultados
'''
def __init__(self,database=""):
self.database = database
def execsql(self,sql,args=()):
''' Ejecutar una sentencia SQL que no retorna ningun resultado
Argumentos:
sql <string> -> Sentencia sql, para pasar partes variables usar ?
args <lista> -> Argumentos como Lista, opcional
'''
try:
db = sqlite3.connect(self.database)
#cur = db.cursor()
db.text_factory = str
db.execute(sql,args)
db.commit()
db.close()
except sqlite3.Error, e:
print "Error %s:" % e.args[0]
db.rollback()
db.close()
def getresults(self,sql,args=(),one=False):
''' Ejecuta una sentencia SQL que retorna uno o más resultados
Argumentos:
sql <string> -> Sentencia sql, para pasar partes variables usar ?
args <lista> -> Argumentos como Lista, opcional
one <boolean> -> True: Devuelve un dato como resultado
False: Retorna filas
Retorna:
resultados
'''
try:
db = sqlite3.connect(self.database)
db.text_factory = str
db.row_factory = sqlite3.Row
cur = db.cursor()
cur.execute(sql,args)
if one:
resultados = cur.fetchone()[0]
else:
resultados = cur.fetchall()
db.close()
except sqlite3.Error, e:
print "Error %s:" % e.args[0]
db.rollback()
db.close()
return resultados | [
"#-----------------------------------------------------------------------------------------------\n",
"# Name: dbutils.py\n",
"# Purpose: Realizar operaciones sobre Bases de Datos\n",
"#\n",
"# Author: William Lopez\n",
"#\n",
"# Created: 08/05/2015\n",
"#-----------------------------------------------------------------------------------------------\n",
"''' Módulo dbutils para realizar operaciones basicas sobre sqllite3\n",
"\n",
" Clases:\n",
" clsSQLite -> Manejo de sqlite3\n",
"'''\n",
"__author__ = \"William Lopez\"\n",
"__copyright__ = \"\"\n",
"__credits__ = [\"\", \"\", \"\",\"\"]\n",
"__license__ = \"GPL\"\n",
"__version__ = \"1.0.0\"\n",
"__maintainer__ = \"William Lopez\"\n",
"__email__ = \"wlopez.a@gmail.com\"\n",
"__status__ = \"Development\"\n",
"\n",
"\n",
"#-------------------------------------------------------------------------------\n",
"#Importacion de Modulos\n",
"#-------------------------------------------------------------------------------\n",
"import sqlite3\n",
"\n",
"#-------------------------------------------------------------------------------\n",
"#Clase: clsSQLite\n",
"#-------------------------------------------------------------------------------\n",
"class clsSQLite(object):\n",
" ''' Clase para realizar operaciones basicas sobre bases de datos sqlite\n",
"\n",
" Propiedades:\n",
" database <string> : Ruta y nombre de la base de datos\n",
"\n",
" Metodos:\n",
" execsql(sql,args()) : Ejecuta una sentencia sql que no retorna resultados\n",
" getresults(sql,args=(),one=False) : Ejecuta una sentencia sql que retorna uno o varios resultados\n",
" '''\n",
"\n",
" def __init__(self,database=\"\"):\n",
" self.database = database\n",
"\n",
" def execsql(self,sql,args=()):\n",
" ''' Ejecutar una sentencia SQL que no retorna ningun resultado\n",
"\n",
" Argumentos:\n",
" sql <string> -> Sentencia sql, para pasar partes variables usar ?\n",
" args <lista> -> Argumentos como Lista, opcional\n",
" '''\n",
" try:\n",
" db = sqlite3.connect(self.database)\n",
" #cur = db.cursor()\n",
" db.text_factory = str\n",
" db.execute(sql,args)\n",
" db.commit()\n",
" db.close()\n",
" except sqlite3.Error, e:\n",
" print \"Error %s:\" % e.args[0]\n",
" db.rollback()\n",
" db.close()\n",
"\n",
" def getresults(self,sql,args=(),one=False):\n",
" ''' Ejecuta una sentencia SQL que retorna uno o más resultados\n",
"\n",
" Argumentos:\n",
" sql <string> -> Sentencia sql, para pasar partes variables usar ?\n",
" args <lista> -> Argumentos como Lista, opcional\n",
" one <boolean> -> True: Devuelve un dato como resultado\n",
" False: Retorna filas\n",
"\n",
" Retorna:\n",
" resultados\n",
" '''\n",
" try:\n",
" db = sqlite3.connect(self.database)\n",
" db.text_factory = str\n",
" db.row_factory = sqlite3.Row\n",
" cur = db.cursor()\n",
" cur.execute(sql,args)\n",
" if one:\n",
" resultados = cur.fetchone()[0]\n",
" else:\n",
" resultados = cur.fetchall()\n",
" db.close()\n",
" except sqlite3.Error, e:\n",
" print \"Error %s:\" % e.args[0]\n",
" db.rollback()\n",
" db.close()\n",
"\n",
" return resultados"
] | [
0.020618556701030927,
0,
0,
0,
0,
0,
0,
0.020618556701030927,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0.041666666666666664,
0.024691358024691357,
0.06666666666666667,
0,
0.024691358024691357,
0.05555555555555555,
0.024691358024691357,
0.04,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.009433962264150943,
0,
0,
0.027777777777777776,
0,
0,
0.05714285714285714,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04
] | 93 | 0.007609 | false |
# Задача 12
# Разработайте игру "Крестики-нолики". (см. М.Доусон Программируем на Python
#гл. 6).
# Danilenkov M.A.
X = "X"
O = "O"
EMPTY = " "
TIE = "TIE"
NUM_SQUARES = 9
def display_instruct():
"""Инструкции"""
print(
"""
На ходу, введи цифру, согласно рисунку:
0 | 1 | 2
---------
3 | 4 | 5
---------
6 | 7 | 8
\n
"""
)
def ask_yes_no(question):
"""Вопрос да/нет"""
response = None
while response not in ("д", "н"):
response = input(question).lower()
return response
def ask_number(question, low, high):
"""Число из диапазона"""
response = None
while response not in range(low, high):
response = int(input(question))
return response
def pieces():
"""Первый ход"""
go_first = ask_yes_no("Возьмешь первый ход? (д/н): ")
if go_first == "д":
print("\nТвои крестики")
human = X
computer = O
else:
print("\nТвои нули")
computer = X
human = O
return computer, human
def new_board():
"""Ноовое поле"""
board = []
for square in range(NUM_SQUARES):
board.append(EMPTY)
return board
def display_board(board):
"""Вывести поле на экран"""
print("\n\t", board[0], "|", board[1], "|", board[2])
print("\t", "---------")
print("\t", board[3], "|", board[4], "|", board[5])
print("\t", "---------")
print("\t", board[6], "|", board[7], "|", board[8], "\n")
def legal_moves(board):
"""Возможные ходы"""
moves = []
for square in range(NUM_SQUARES):
if board[square] == EMPTY:
moves.append(square)
return moves
def winner(board):
"""Определяемпобедителя"""
WAYS_TO_WIN = ((0, 1, 2),
(3, 4, 5),
(6, 7, 8),
(0, 3, 6),
(1, 4, 7),
(2, 5, 8),
(0, 4, 8),
(2, 4, 6))
for row in WAYS_TO_WIN:
if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:
winner = board[row[0]]
return winner
if EMPTY not in board:
return TIE
return None
def human_move(board, human):
"""Ход игрока"""
legal = legal_moves(board)
move = None
while move not in legal:
move = ask_number("Твой ход (0 - 8):", 0, NUM_SQUARES)
if move not in legal:
print("\nЭтот квадрат занят\n")
print("Хорошо")
return move
def computer_move(board, computer, human):
"""Компьютер делает ход"""
board = board[:]
BEST_MOVES = (4, 0, 2, 6, 8, 1, 3, 5, 7)
print("Я выберу поле", end=" ")
for move in legal_moves(board):
board[move] = computer
if winner(board) == computer:
print(move)
return move
board[move] = EMPTY
for move in legal_moves(board):
board[move] = human
if winner(board) == human:
print(move)
return move
board[move] = EMPTY
for move in BEST_MOVES:
if move in legal_moves(board):
print(move)
return move
def next_turn(turn):
"""Изменить ход"""
if turn == X:
return O
else:
return X
def congrat_winner(the_winner, computer, human):
"""Поздравление победителя"""
if the_winner != TIE:
print(the_winner, "выиграл\n")
else:
print("Ничья\n")
if the_winner == computer:
print("Машины завоёвывают мир \n")
elif the_winner == human:
print("Неееет")
elif the_winner == TIE:
print("Никто")
def main():
display_instruct()
computer, human = pieces()
turn = X
board = new_board()
display_board(board)
while not winner(board):
if turn == human:
move = human_move(board, human)
board[move] = human
else:
move = computer_move(board, computer, human)
board[move] = computer
display_board(board)
turn = next_turn(turn)
the_winner = winner(board)
congrat_winner(the_winner, computer, human)
main()
input("\n\nНажмите ENTER длы выхода")
| [
"# Задача 12\n",
"# Разработайте игру \"Крестики-нолики\". (см. М.Доусон Программируем на Python\n",
"#гл. 6).\n",
"\n",
"# Danilenkov M.A.\n",
"\n",
"X = \"X\"\n",
"O = \"O\"\n",
"EMPTY = \" \"\n",
"TIE = \"TIE\"\n",
"NUM_SQUARES = 9\n",
"\n",
"\n",
"def display_instruct():\n",
" \"\"\"Инструкции\"\"\" \n",
" print(\n",
" \"\"\"\n",
" На ходу, введи цифру, согласно рисунку:\n",
" \n",
" 0 | 1 | 2\n",
" ---------\n",
" 3 | 4 | 5\n",
" ---------\n",
" 6 | 7 | 8\n",
"\n",
" \n",
"\\n\n",
" \"\"\"\n",
" )\n",
"\n",
"\n",
"def ask_yes_no(question):\n",
" \"\"\"Вопрос да/нет\"\"\"\n",
" response = None\n",
" while response not in (\"д\", \"н\"):\n",
" response = input(question).lower()\n",
" return response\n",
"\n",
"\n",
"def ask_number(question, low, high):\n",
" \"\"\"Число из диапазона\"\"\"\n",
" response = None\n",
" while response not in range(low, high):\n",
" response = int(input(question))\n",
" return response\n",
"\n",
"\n",
"def pieces():\n",
" \"\"\"Первый ход\"\"\"\n",
" go_first = ask_yes_no(\"Возьмешь первый ход? (д/н): \")\n",
" if go_first == \"д\":\n",
" print(\"\\nТвои крестики\")\n",
" human = X\n",
" computer = O\n",
" else:\n",
" print(\"\\nТвои нули\")\n",
" computer = X\n",
" human = O\n",
" return computer, human\n",
"\n",
"\n",
"def new_board():\n",
" \"\"\"Ноовое поле\"\"\"\n",
" board = []\n",
" for square in range(NUM_SQUARES):\n",
" board.append(EMPTY)\n",
" return board\n",
"\n",
"\n",
"def display_board(board):\n",
" \"\"\"Вывести поле на экран\"\"\"\n",
" print(\"\\n\\t\", board[0], \"|\", board[1], \"|\", board[2])\n",
" print(\"\\t\", \"---------\")\n",
" print(\"\\t\", board[3], \"|\", board[4], \"|\", board[5])\n",
" print(\"\\t\", \"---------\")\n",
" print(\"\\t\", board[6], \"|\", board[7], \"|\", board[8], \"\\n\")\n",
"\n",
"\n",
"def legal_moves(board):\n",
" \"\"\"Возможные ходы\"\"\"\n",
" moves = []\n",
" for square in range(NUM_SQUARES):\n",
" if board[square] == EMPTY:\n",
" moves.append(square)\n",
" return moves\n",
"\n",
"\n",
"def winner(board):\n",
" \"\"\"Определяемпобедителя\"\"\"\n",
" WAYS_TO_WIN = ((0, 1, 2),\n",
" (3, 4, 5),\n",
" (6, 7, 8),\n",
" (0, 3, 6),\n",
" (1, 4, 7),\n",
" (2, 5, 8),\n",
" (0, 4, 8),\n",
" (2, 4, 6))\n",
" \n",
" for row in WAYS_TO_WIN:\n",
" if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:\n",
" winner = board[row[0]]\n",
" return winner\n",
"\n",
" if EMPTY not in board:\n",
" return TIE\n",
"\n",
" return None\n",
"\n",
"\n",
"def human_move(board, human):\n",
" \"\"\"Ход игрока\"\"\" \n",
" legal = legal_moves(board)\n",
" move = None\n",
" while move not in legal:\n",
" move = ask_number(\"Твой ход (0 - 8):\", 0, NUM_SQUARES)\n",
" if move not in legal:\n",
" print(\"\\nЭтот квадрат занят\\n\")\n",
" print(\"Хорошо\")\n",
" return move\n",
"\n",
"\n",
"def computer_move(board, computer, human):\n",
" \"\"\"Компьютер делает ход\"\"\"\n",
" board = board[:]\n",
" BEST_MOVES = (4, 0, 2, 6, 8, 1, 3, 5, 7)\n",
"\n",
" print(\"Я выберу поле\", end=\" \")\n",
" \n",
" for move in legal_moves(board):\n",
" board[move] = computer\n",
" if winner(board) == computer:\n",
" print(move)\n",
" return move\n",
" board[move] = EMPTY\n",
" \n",
" for move in legal_moves(board):\n",
" board[move] = human\n",
" if winner(board) == human:\n",
" print(move)\n",
" return move\n",
" board[move] = EMPTY\n",
"\n",
" for move in BEST_MOVES:\n",
" if move in legal_moves(board):\n",
" print(move)\n",
" return move\n",
"\n",
"\n",
"def next_turn(turn):\n",
" \"\"\"Изменить ход\"\"\"\n",
" if turn == X:\n",
" return O\n",
" else:\n",
" return X\n",
"\n",
" \n",
"def congrat_winner(the_winner, computer, human):\n",
" \"\"\"Поздравление победителя\"\"\"\n",
" if the_winner != TIE:\n",
" print(the_winner, \"выиграл\\n\")\n",
" else:\n",
" print(\"Ничья\\n\")\n",
"\n",
" if the_winner == computer:\n",
" print(\"Машины завоёвывают мир \\n\")\n",
"\n",
" elif the_winner == human:\n",
" print(\"Неееет\")\n",
"\n",
" elif the_winner == TIE:\n",
" print(\"Никто\")\n",
"\n",
"\n",
"def main():\n",
" display_instruct()\n",
" computer, human = pieces()\n",
" turn = X\n",
" board = new_board()\n",
" display_board(board)\n",
"\n",
" while not winner(board):\n",
" if turn == human:\n",
" move = human_move(board, human)\n",
" board[move] = human\n",
" else:\n",
" move = computer_move(board, computer, human)\n",
" board[move] = computer\n",
" display_board(board)\n",
" turn = next_turn(turn)\n",
"\n",
" the_winner = winner(board)\n",
" congrat_winner(the_winner, computer, human)\n",
"\n",
"\n",
"main()\n",
"input(\"\\n\\nНажмите ENTER длы выхода\")\n"
] | [
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0.125,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0.125,
0,
0.2,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 196 | 0.008409 | false |
import c4d
from c4d import documents, plugins, storage
#Author: Joey Gaspe
#OBJ export settings example
def main():
# Get OBJ export plugin, 1030178 is its ID
plug = plugins.FindPlugin(1030178, c4d.PLUGINTYPE_SCENESAVER)
if plug is None:
return
# Get a path to save the exported file
filePath = c4d.storage.LoadDialog(title="Save File for OBJ Export", flags=c4d.FILESELECT_SAVE, force_suffix="obj")
if filePath is None:
return
op = {}
# Send MSG_RETRIEVEPRIVATEDATA to OBJ export plugin
if plug.Message(c4d.MSG_RETRIEVEPRIVATEDATA, op):
print op
if "imexporter" not in op:
return
# BaseList2D object stored in "imexporter" key hold the settings
objExport = op["imexporter"]
if objExport is None:
return
# Define the settings
# Example of OBJ export settings from the UI:
objExport[c4d.OBJEXPORTOPTIONS_TEXTURECOORDINATES] = True
objExport[c4d.OBJEXPORTOPTIONS_MATERIAL] = c4d.OBJEXPORTOPTIONS_MATERIAL_MATERIAL
# objExport[c4d.] =
# export without dialogs
if c4d.documents.SaveDocument(doc, filePath, c4d.SAVEDOCUMENTFLAGS_DONTADDTORECENTLIST, 1030178):
print "Document successfully exported to:"
print filePath
else:
print "Export failed!"
c4d.EventAdd()
if __name__=='__main__':
main()
| [
"import c4d\n",
"from c4d import documents, plugins, storage\n",
"\n",
"#Author: Joey Gaspe\n",
"#OBJ export settings example\n",
"\n",
"def main():\n",
"\n",
" # Get OBJ export plugin, 1030178 is its ID\n",
" plug = plugins.FindPlugin(1030178, c4d.PLUGINTYPE_SCENESAVER)\n",
" if plug is None:\n",
" return\n",
"\n",
" # Get a path to save the exported file\n",
" filePath = c4d.storage.LoadDialog(title=\"Save File for OBJ Export\", flags=c4d.FILESELECT_SAVE, force_suffix=\"obj\")\n",
"\n",
" if filePath is None:\n",
" return\n",
"\n",
" op = {}\n",
" # Send MSG_RETRIEVEPRIVATEDATA to OBJ export plugin\n",
" if plug.Message(c4d.MSG_RETRIEVEPRIVATEDATA, op):\n",
" print op\n",
" if \"imexporter\" not in op:\n",
" return\n",
"\n",
" # BaseList2D object stored in \"imexporter\" key hold the settings\n",
" objExport = op[\"imexporter\"]\n",
" if objExport is None:\n",
" return\n",
"\n",
" # Define the settings\n",
" # Example of OBJ export settings from the UI:\n",
" objExport[c4d.OBJEXPORTOPTIONS_TEXTURECOORDINATES] = True\n",
" objExport[c4d.OBJEXPORTOPTIONS_MATERIAL] = c4d.OBJEXPORTOPTIONS_MATERIAL_MATERIAL\n",
"\n",
" # objExport[c4d.] =\n",
"\n",
" # export without dialogs\n",
" if c4d.documents.SaveDocument(doc, filePath, c4d.SAVEDOCUMENTFLAGS_DONTADDTORECENTLIST, 1030178):\n",
" print \"Document successfully exported to:\"\n",
" print filePath\n",
" else:\n",
" print \"Export failed!\"\n",
"\n",
" c4d.EventAdd()\n",
"\n",
"if __name__=='__main__':\n",
" main()\n"
] | [
0,
0,
0,
0.05,
0.034482758620689655,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.008403361344537815,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0.009433962264150943,
0,
0,
0,
0,
0,
0,
0,
0.08,
0
] | 49 | 0.005648 | false |
# Copyright (C) 2015 Kevin S. Graer
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
# import xbmc
# import xbmcaddon
# # xbmcaddon.Addon('plugin.program.super.favourites').openSettings()
# label = xbmc.getInfoLabel('ListItem.Label')
# path = xbmc.getInfoLabel('ListItem.FolderPath')
# filename = xbmc.getInfoLabel('ListItem.FilenameAndPath')
# name = xbmc.getInfoLabel('ListItem.Label')
# thumb = xbmc.getInfoLabel('ListItem.Thumb')
# playable = xbmc.getInfoLabel('ListItem.Property(IsPlayable)').lower() == 'true'
# fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
# isFolder = xbmc.getCondVisibility('ListItem.IsFolder') == 1
# menu = []
# menu.append(label)
# menu.append('Settings')
# choice = xbmcgui.Dialog().select('PLTV', menu)
# if choice == None:
# return
# if choice == 0:
# #call you function
# return
# if choice == 1:
# xbmcaddon.Addon('plugin.program.super.favourites').openSettings()
# return
# Copyright (C) 2015 Kevin S. Graer
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
import subprocess, os, sys, re, threading
import time, datetime, threading
import httplib, urllib, urllib2
import base64, shutil, random, errno
import xbmc, xbmcgui, xbmcaddon, xbmcvfs
from Globals import *
#Check settings2.xml for channel nums, channel types, channel names.
def readSettings2():
print 'readSettings2'
settingsFile = xbmc.translatePath(os.path.join(SETTINGS_LOC, 'settings2.xml'))
channelLST = []
channelsLST = []
NEWLST = []
channelTYPE = 0
if xbmcvfs.exists(settingsFile):
f = open(settingsFile,'r')
lineLST = f.readlines()
for x in range(999):
channelNUM = x + 1
channelNAME = ''
channelINFO = [channelNUM, channelNAME]
channelsLST.append(channelINFO)
for i in range(len(lineLST)):
line = lineLST[i]
if '_type" value="' in line:
channelINFO = (line.split('<setting id="Channel_')[1]).replace('" />','')
channelTYPE = int((channelINFO.split('_type" value="')[1]).replace('\n',''))
channelNUM = int((channelINFO.split('_type" value="')[0]).replace('\n',''))
if channelTYPE <= 6:
if '<setting id="Channel_' + str(channelNUM) + '_1" value="' in line:
channelNAME = (line.split('value="')[1]).replace('" />','').replace('\n','')
channelINFO = [channelNUM, channelNAME]
channelLST.append(channelINFO)
elif channelTYPE == 7:
if '<setting id="Channel_' + str(channelNUM) + '_1" value="' in line:
channelNAME = 'Directory Channel'
channelINFO = [channelNUM, channelNAME]
channelLST.append(channelINFO)
elif channelTYPE >= 8:
if '<setting id="Channel_' + str(channelNUM) + '_rule_1_opt_1' in line:
channelNAME = (line.split('value="')[1]).replace('" />','').replace('\n','')
channelINFO = [channelNUM, channelNAME]
channelLST.append(channelINFO)
for n in range(len(channelsLST)):
try:
chanLST = channelLST[n]
NEW = chanLST
except:
NUMLST = channelsLST[n]
NEW = NUMLST
pass
NEWLST.append(NEW)
return NEWLST
def AppendPlugin(type, path, name):
print 'AppendPlugin'
try:
plugin = path.split('/')[2]
except:
plugin = path
pass
print plugin
if type == 'directory':
print 'directory'
#write setting2 config for chtype 16
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "16")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", path)
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", "")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_4", "0")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "1")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", name)
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
else:
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "9")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", "5400")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", path)
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", name)
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_4", plugin)
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "1")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", name)
ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
# _STD_SETTINGS = 0
# _ADDTOFAVES = 100
# _SF_SETTINGS = 200
# _LAUNCH_SF = 300
# _SEARCH = 400
# _DOWNLOAD = 500
# _PLAYLIST = 600
# def doStandard():
# window = xbmcgui.getCurrentWindowId()
# if window == 12005: #video playing
# xbmc.executebuiltin('ActivateWindow(videoplaylist)')
# return
# xbmc.executebuiltin('Action(ContextMenu)')
# def copyFave(name, thumb, cmd):
# import favourite
# import utils
# text = utils.GETTEXT(30019)
# folder = utils.GetFolder(text)
# if not folder:
# return False
# file = os.path.join(folder, utils.FILENAME)
# faves = favourite.getFavourites(file)
# #if it is already in there don't add again
# for fave in faves:
# if fave[2] == cmd:
# return False
# fave = [name, thumb, cmd]
# faves.append(fave)
# favourite.writeFavourites(file, faves)
# return True
# def activateCommand(cmd):
# cmds = cmd.split(',', 1)
# activate = cmds[0]+',return)'
# plugin = cmds[1][:-1]
# #check if it is a different window and if so activate it
# id = str(xbmcgui.getCurrentWindowId())
# if id not in activate:
# xbmc.executebuiltin(activate)
# xbmc.executebuiltin('Container.Update(%s)' % plugin)
# def doMenu():
# try:
# import utils
# except:
# doStandard()
# return
# import contextmenu
# # to prevent master profile setting being used in other profiles
# if (REAL_SETTINGS.getSetting("Context")) != 'true':
# doStandard()
# return
# choice = 0
# label = xbmc.getInfoLabel('ListItem.Label')
# path = xbmc.getInfoLabel('ListItem.FolderPath')
# filename = xbmc.getInfoLabel('ListItem.FilenameAndPath')
# name = xbmc.getInfoLabel('ListItem.Label')
# thumb = xbmc.getInfoLabel('ListItem.Thumb')
# window = xbmcgui.getCurrentWindowId()
# playable = xbmc.getInfoLabel('ListItem.Property(IsPlayable)').lower() == 'true'
# fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
# isFolder = xbmc.getCondVisibility('ListItem.IsFolder') == 1
# try: file = xbmc.Player().getPlayingFile()
# except: file = None
# isStream = False
# if file:
# isStream = file.startswith('http://')
# #GOTHAM only
# #if hasattr(xbmc.Player(), 'isInternetStream'):
# # isStream = xbmc.Player().isInternetStream()
# #elif file:
# # isStream = file.startswith('http://')
# print '**** Context Menu Information ****'
# print 'Label : %s' % label
# print 'Folder : %s' % folder
# print 'Path : %s' % path
# print 'Filename : %s' % filename
# print 'Name : %s' % name
# print 'Thumb : %s' % thumb
# print 'Fanart : %s' % fanart
# print 'Window : %d' % window
# print 'IsPlayable : %s' % playable
# print 'IsFolder : %s' % isFolder
# print 'File : %s' % file
# print 'IsStream : %s' % isStream
# menu = []
# if (len(menu) == 0) and window == 12005: #video playing
# if isStream:
# menu.append(('Download %s' % label , _DOWNLOAD))
# menu.append(('Show Playlist', _PLAYLIST))
# else:
# return doStandard()
# #cancel download feature for now
# return doStandard()
# if (len(menu) == 0) and len(path) > 0:
# menu.append(('Add to PseudoTV Live', _ADDTOFAVES))
# menu.append(('PseudoTV Live Settings', _SF_SETTINGS))
# #elif window == 10000: #Home screen
# # menu.append((utils.GETTEXT(30053), _LAUNCH_SF))
# # menu.append((utils.GETTEXT(30049), _SF_SETTINGS))
# if len(menu) == 0:
# doStandard()
# return
# xbmcgui.Window(10000).setProperty('SF_MENU_VISIBLE', 'true')
# choice = contextmenu.showMenu(utils.ADDONID, menu)
# if choice == _PLAYLIST:
# xbmc.executebuiltin('ActivateWindow(videoplaylist)')
# if choice == _DOWNLOAD:
# import download
# download.download(file, 'c:\\temp\\file.mpg', 'Super Favourites')
# if choice == _STD_SETTINGS:
# xbmc.executebuiltin('XBMC.Action(ContextMenu)')
# if choice == _SF_SETTINGS:
# utils.ADDON.openSettings()
# if choice == _ADDTOFAVES:
# if isFolder:
# cmd = 'ActivateWindow(%d,"%s")' % (window, path)
# elif path.lower().startswith('script'):
# if path[-1] == '/':
# path = path[:-1]
# cmd = 'RunScript("%s")' % path.replace('script://', '')
# elif path.lower().startswith('videodb') and len(filename) > 0:
# cmd = 'PlayMedia("%s")' % filename
# #elif path.lower().startswith('musicdb') and len(filename) > 0:
# # cmd = 'PlayMedia("%s")' % filename
# else:
# cmd = 'PlayMedia("%s&sf_win_id=%d_")' % (path, window)
# copyFave(name, thumb, cmd)
# if choice == _LAUNCH_SF:
# xbmc.executebuiltin('ActivateWindow(programs,plugin://%s)' % utils.ADDONID)
# if choice == _SEARCH:
# thumb = thumb if len(thumb) > 0 else 'null'
# fanart = fanart if len(fanart) > 0 else 'null'
# import urllib
# _SUPERSEARCH = 0 #declared as 0 in default.py
# winID = 10025 #video
# cmd = 'ActivateWindow(%d,"plugin://%s/?mode=%d&keyword=%s&image=%s&fanart=%s")' % (window, utils.ADDONID, _SUPERSEARCH, urllib.quote_plus(name), urllib.quote_plus(thumb), urllib.quote_plus(fanart))
# activateCommand(cmd)
# if xbmcgui.Window(10000).getProperty('SF_MENU_VISIBLE') != 'true':
# doMenu()
# xbmcgui.Window(10000).clearProperty('SF_MENU_VISIBLE') | [
"# Copyright (C) 2015 Kevin S. Graer\n",
"#\n",
"#\n",
"# This file is part of PseudoTV Live.\n",
"#\n",
"# PseudoTV is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# PseudoTV is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\n",
"# import xbmc\n",
"# import xbmcaddon\n",
" # # xbmcaddon.Addon('plugin.program.super.favourites').openSettings()\n",
" # label = xbmc.getInfoLabel('ListItem.Label')\n",
" # path = xbmc.getInfoLabel('ListItem.FolderPath')\n",
" # filename = xbmc.getInfoLabel('ListItem.FilenameAndPath')\n",
" # name = xbmc.getInfoLabel('ListItem.Label')\n",
" # thumb = xbmc.getInfoLabel('ListItem.Thumb')\n",
" # playable = xbmc.getInfoLabel('ListItem.Property(IsPlayable)').lower() == 'true'\n",
" # fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')\n",
" # isFolder = xbmc.getCondVisibility('ListItem.IsFolder') == 1\n",
" \n",
" \n",
"# menu = []\n",
"# menu.append(label)\n",
"# menu.append('Settings')\n",
"# choice = xbmcgui.Dialog().select('PLTV', menu)\n",
"\n",
"# if choice == None:\n",
" # return\n",
"\n",
"# if choice == 0:\n",
" # #call you function\n",
" # return\n",
"\n",
"# if choice == 1:\n",
" # xbmcaddon.Addon('plugin.program.super.favourites').openSettings()\n",
" # return\n",
" \n",
" # Copyright (C) 2015 Kevin S. Graer\n",
"#\n",
"#\n",
"# This file is part of PseudoTV Live.\n",
"#\n",
"# PseudoTV is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# PseudoTV is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\n",
"import subprocess, os, sys, re, threading\n",
"import time, datetime, threading\n",
"import httplib, urllib, urllib2\n",
"import base64, shutil, random, errno\n",
"import xbmc, xbmcgui, xbmcaddon, xbmcvfs\n",
"\n",
"from Globals import *\n",
"\n",
"#Check settings2.xml for channel nums, channel types, channel names.\n",
"\n",
"def readSettings2():\n",
" print 'readSettings2'\n",
" settingsFile = xbmc.translatePath(os.path.join(SETTINGS_LOC, 'settings2.xml')) \n",
" channelLST = []\n",
" channelsLST = []\n",
" NEWLST = []\n",
" channelTYPE = 0\n",
"\n",
" if xbmcvfs.exists(settingsFile):\n",
" f = open(settingsFile,'r')\n",
" lineLST = f.readlines()\n",
"\n",
" for x in range(999):\n",
" channelNUM = x + 1\n",
" channelNAME = ''\n",
" channelINFO = [channelNUM, channelNAME]\n",
" channelsLST.append(channelINFO)\n",
"\n",
" for i in range(len(lineLST)):\n",
" line = lineLST[i]\n",
"\n",
" if '_type\" value=\"' in line:\n",
" channelINFO = (line.split('<setting id=\"Channel_')[1]).replace('\" />','')\n",
" channelTYPE = int((channelINFO.split('_type\" value=\"')[1]).replace('\\n',''))\n",
" channelNUM = int((channelINFO.split('_type\" value=\"')[0]).replace('\\n',''))\n",
"\n",
" if channelTYPE <= 6:\n",
" if '<setting id=\"Channel_' + str(channelNUM) + '_1\" value=\"' in line:\n",
" channelNAME = (line.split('value=\"')[1]).replace('\" />','').replace('\\n','')\n",
" channelINFO = [channelNUM, channelNAME]\n",
" channelLST.append(channelINFO)\n",
"\n",
" elif channelTYPE == 7:\n",
" if '<setting id=\"Channel_' + str(channelNUM) + '_1\" value=\"' in line:\n",
" channelNAME = 'Directory Channel'\n",
" channelINFO = [channelNUM, channelNAME]\n",
" channelLST.append(channelINFO)\n",
"\n",
" elif channelTYPE >= 8:\n",
" if '<setting id=\"Channel_' + str(channelNUM) + '_rule_1_opt_1' in line:\n",
" channelNAME = (line.split('value=\"')[1]).replace('\" />','').replace('\\n','')\n",
" channelINFO = [channelNUM, channelNAME]\n",
" channelLST.append(channelINFO)\n",
"\n",
" for n in range(len(channelsLST)):\n",
" try:\n",
" chanLST = channelLST[n]\n",
" NEW = chanLST\n",
" except:\n",
" NUMLST = channelsLST[n]\n",
" NEW = NUMLST\n",
" pass\n",
"\n",
" NEWLST.append(NEW)\n",
"\n",
" return NEWLST\n",
"\n",
" \n",
"def AppendPlugin(type, path, name):\n",
" print 'AppendPlugin'\n",
"\n",
" try:\n",
" plugin = path.split('/')[2]\n",
" except:\n",
" plugin = path\n",
" pass\n",
" print plugin\n",
"\n",
" if type == 'directory':\n",
" print 'directory'\n",
" #write setting2 config for chtype 16\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_type\", \"16\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_time\", \"0\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_1\", path)\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_2\", \"\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_3\", \"\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_4\", \"0\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_rulecount\", \"1\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_rule_1_id\", \"1\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_rule_1_opt_1\", name)\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_changed\", \"true\")\n",
" else:\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_type\", \"9\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_time\", \"0\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_1\", \"5400\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_2\", path)\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_3\", name)\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_4\", plugin)\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_rulecount\", \"1\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_rule_1_id\", \"1\")\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_rule_1_opt_1\", name)\n",
" ADDON_SETTINGS.setSetting(\"Channel_\" + str(channelNum) + \"_changed\", \"true\")\n",
"\n",
" \n",
" \n",
"# _STD_SETTINGS = 0\n",
"# _ADDTOFAVES = 100\n",
"# _SF_SETTINGS = 200\n",
"# _LAUNCH_SF = 300\n",
"# _SEARCH = 400\n",
"# _DOWNLOAD = 500\n",
"# _PLAYLIST = 600\n",
"\n",
"\n",
"# def doStandard():\n",
" # window = xbmcgui.getCurrentWindowId()\n",
"\n",
" # if window == 12005: #video playing\n",
" # xbmc.executebuiltin('ActivateWindow(videoplaylist)')\n",
" # return\n",
"\n",
" # xbmc.executebuiltin('Action(ContextMenu)')\n",
"\n",
"\n",
"# def copyFave(name, thumb, cmd):\n",
" # import favourite\n",
" # import utils\n",
"\n",
" # text = utils.GETTEXT(30019)\n",
"\n",
" # folder = utils.GetFolder(text)\n",
" # if not folder:\n",
" # return False\n",
" \n",
" # file = os.path.join(folder, utils.FILENAME)\n",
" # faves = favourite.getFavourites(file)\n",
"\n",
" # #if it is already in there don't add again\n",
" # for fave in faves:\n",
" # if fave[2] == cmd: \n",
" # return False\n",
"\n",
" # fave = [name, thumb, cmd] \n",
" \n",
" # faves.append(fave)\n",
" # favourite.writeFavourites(file, faves)\n",
"\n",
" # return True\n",
"\n",
"\n",
"# def activateCommand(cmd):\n",
" # cmds = cmd.split(',', 1)\n",
"\n",
" # activate = cmds[0]+',return)'\n",
" # plugin = cmds[1][:-1]\n",
"\n",
" # #check if it is a different window and if so activate it\n",
" # id = str(xbmcgui.getCurrentWindowId())\n",
"\n",
" # if id not in activate:\n",
" # xbmc.executebuiltin(activate)\n",
" \n",
" # xbmc.executebuiltin('Container.Update(%s)' % plugin)\n",
"\n",
"\n",
"# def doMenu():\n",
" # try:\n",
" # import utils\n",
" # except:\n",
" # doStandard()\n",
" # return \n",
"\n",
" # import contextmenu\n",
"\n",
" # # to prevent master profile setting being used in other profiles\n",
" # if (REAL_SETTINGS.getSetting(\"Context\")) != 'true':\n",
" # doStandard()\n",
" # return\n",
"\n",
" # choice = 0\n",
" # label = xbmc.getInfoLabel('ListItem.Label')\n",
" # path = xbmc.getInfoLabel('ListItem.FolderPath')\n",
" # filename = xbmc.getInfoLabel('ListItem.FilenameAndPath')\n",
" # name = xbmc.getInfoLabel('ListItem.Label')\n",
" # thumb = xbmc.getInfoLabel('ListItem.Thumb')\n",
" # window = xbmcgui.getCurrentWindowId()\n",
" # playable = xbmc.getInfoLabel('ListItem.Property(IsPlayable)').lower() == 'true'\n",
" # fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')\n",
" # isFolder = xbmc.getCondVisibility('ListItem.IsFolder') == 1\n",
"\n",
" # try: file = xbmc.Player().getPlayingFile()\n",
" # except: file = None\n",
"\n",
" # isStream = False\n",
" # if file:\n",
" # isStream = file.startswith('http://')\n",
"\n",
" # #GOTHAM only \n",
" # #if hasattr(xbmc.Player(), 'isInternetStream'):\n",
" # # isStream = xbmc.Player().isInternetStream()\n",
" # #elif file:\n",
" # # isStream = file.startswith('http://')\n",
"\n",
" # print '**** Context Menu Information ****'\n",
" # print 'Label : %s' % label\n",
" # print 'Folder : %s' % folder \n",
" # print 'Path : %s' % path \n",
" # print 'Filename : %s' % filename\n",
" # print 'Name : %s' % name \n",
" # print 'Thumb : %s' % thumb\n",
" # print 'Fanart : %s' % fanart \n",
" # print 'Window : %d' % window \n",
" # print 'IsPlayable : %s' % playable\n",
" # print 'IsFolder : %s' % isFolder\n",
" # print 'File : %s' % file\n",
" # print 'IsStream : %s' % isStream\n",
"\n",
" # menu = []\n",
"\n",
" # if (len(menu) == 0) and window == 12005: #video playing\n",
" # if isStream:\n",
" # menu.append(('Download %s' % label , _DOWNLOAD))\n",
" # menu.append(('Show Playlist', _PLAYLIST))\n",
" # else:\n",
" # return doStandard()\n",
" # #cancel download feature for now\n",
" # return doStandard()\n",
" \n",
" # if (len(menu) == 0) and len(path) > 0: \n",
" # menu.append(('Add to PseudoTV Live', _ADDTOFAVES))\n",
" # menu.append(('PseudoTV Live Settings', _SF_SETTINGS))\n",
" \n",
" # #elif window == 10000: #Home screen\n",
" # # menu.append((utils.GETTEXT(30053), _LAUNCH_SF))\n",
" # # menu.append((utils.GETTEXT(30049), _SF_SETTINGS))\n",
"\n",
"\n",
" # if len(menu) == 0:\n",
" # doStandard()\n",
" # return\n",
"\n",
" # xbmcgui.Window(10000).setProperty('SF_MENU_VISIBLE', 'true')\n",
" # choice = contextmenu.showMenu(utils.ADDONID, menu)\n",
"\n",
" # if choice == _PLAYLIST:\n",
" # xbmc.executebuiltin('ActivateWindow(videoplaylist)')\n",
"\n",
" # if choice == _DOWNLOAD: \n",
" # import download\n",
" # download.download(file, 'c:\\\\temp\\\\file.mpg', 'Super Favourites')\n",
" \n",
" # if choice == _STD_SETTINGS:\n",
" # xbmc.executebuiltin('XBMC.Action(ContextMenu)')\n",
"\n",
" # if choice == _SF_SETTINGS:\n",
" # utils.ADDON.openSettings()\n",
"\n",
" # if choice == _ADDTOFAVES:\n",
" # if isFolder:\n",
" # cmd = 'ActivateWindow(%d,\"%s\")' % (window, path)\n",
" # elif path.lower().startswith('script'):\n",
" # if path[-1] == '/':\n",
" # path = path[:-1]\n",
" # cmd = 'RunScript(\"%s\")' % path.replace('script://', '')\n",
" # elif path.lower().startswith('videodb') and len(filename) > 0:\n",
" # cmd = 'PlayMedia(\"%s\")' % filename\n",
" # #elif path.lower().startswith('musicdb') and len(filename) > 0:\n",
" # # cmd = 'PlayMedia(\"%s\")' % filename\n",
" # else:\n",
" # cmd = 'PlayMedia(\"%s&sf_win_id=%d_\")' % (path, window)\n",
"\n",
" # copyFave(name, thumb, cmd)\n",
"\n",
" # if choice == _LAUNCH_SF:\n",
" # xbmc.executebuiltin('ActivateWindow(programs,plugin://%s)' % utils.ADDONID)\n",
"\n",
" # if choice == _SEARCH:\n",
" # thumb = thumb if len(thumb) > 0 else 'null'\n",
" # fanart = fanart if len(fanart) > 0 else 'null'\n",
" # import urllib\n",
" # _SUPERSEARCH = 0 #declared as 0 in default.py\n",
" # winID = 10025 #video\n",
" # cmd = 'ActivateWindow(%d,\"plugin://%s/?mode=%d&keyword=%s&image=%s&fanart=%s\")' % (window, utils.ADDONID, _SUPERSEARCH, urllib.quote_plus(name), urllib.quote_plus(thumb), urllib.quote_plus(fanart))\n",
" # activateCommand(cmd)\n",
"\n",
"\n",
"# if xbmcgui.Window(10000).getProperty('SF_MENU_VISIBLE') != 'true':\n",
" # doMenu()\n",
" # xbmcgui.Window(10000).clearProperty('SF_MENU_VISIBLE')"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013513513513513514,
0.018867924528301886,
0.017241379310344827,
0.015873015873015872,
0.018867924528301886,
0.018867924528301886,
0.023255813953488372,
0.014285714285714285,
0.015151515151515152,
0.2,
0.2,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0.04,
0.07692307692307693,
0,
0,
0.013888888888888888,
0.07692307692307693,
0.2,
0.04878048780487805,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0.030303030303030304,
0.03125,
0.02702702702702703,
0.024390243902439025,
0,
0,
0,
0.014492753623188406,
0,
0.047619047619047616,
0,
0.022988505747126436,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022222222222222223,
0.021505376344086023,
0.021739130434782608,
0,
0,
0.011627906976744186,
0.030927835051546393,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.011363636363636364,
0.030927835051546393,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.011904761904761904,
0.011363636363636364,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.011904761904761904,
0.011363636363636364,
0.011764705882352941,
0,
0.1111111111111111,
0.1111111111111111,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.3333333333333333,
0,
0,
0,
0,
0,
0.024390243902439025,
0.037037037037037035,
0,
0.030303030303030304,
0.3333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0.025,
0.024390243902439025,
0,
0.024390243902439025,
0,
0.023809523809523808,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0.015625,
0,
0.029411764705882353,
0,
0,
0.2,
0.02040816326530612,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0,
0.029411764705882353,
0.02857142857142857,
0.014285714285714285,
0,
0.02040816326530612,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0.004807692307692308,
0,
0,
0,
0,
0,
0.016666666666666666
] | 356 | 0.011642 | false |
from config import permitted_id
import discord
import inspect
async def evaluate(cmd, message, args):
if message.author.id in permitted_id:
if not args:
await message.channel.send(cmd.help())
else:
try:
execution = " ".join(args)
output = eval(execution)
if inspect.isawaitable(output):
output = await output
status = discord.Embed(title='✅ Executed', color=0x66CC66)
if output:
try:
status.add_field(name='Results', value='\n```\n' + str(output) + '\n```')
except:
pass
except Exception as e:
cmd.log.error(e)
status = discord.Embed(type='rich', color=0xDB0000,
title='❗ Error')
status.add_field(name='Execution Failed', value=str(e))
await message.channel.send(None, embed=status)
else:
status = discord.Embed(type='rich', color=0xDB0000,
title='⛔ Insufficient Permissions. Bot Owner or Server Admin Only.')
await message.channel.send(None, embed=status)
| [
"from config import permitted_id\n",
"import discord\n",
"import inspect\n",
"\n",
"\n",
"async def evaluate(cmd, message, args):\n",
" if message.author.id in permitted_id:\n",
" if not args:\n",
" await message.channel.send(cmd.help())\n",
" else:\n",
" try:\n",
" execution = \" \".join(args)\n",
" output = eval(execution)\n",
" if inspect.isawaitable(output):\n",
" output = await output\n",
" status = discord.Embed(title='✅ Executed', color=0x66CC66)\n",
" if output:\n",
" try:\n",
" status.add_field(name='Results', value='\\n```\\n' + str(output) + '\\n```')\n",
" except:\n",
" pass\n",
" except Exception as e:\n",
" cmd.log.error(e)\n",
" status = discord.Embed(type='rich', color=0xDB0000,\n",
" title='❗ Error')\n",
" status.add_field(name='Execution Failed', value=str(e))\n",
" await message.channel.send(None, embed=status)\n",
" else:\n",
" status = discord.Embed(type='rich', color=0xDB0000,\n",
" title='⛔ Insufficient Permissions. Bot Owner or Server Admin Only.')\n",
" await message.channel.send(None, embed=status)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0
] | 31 | 0.001804 | false |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from .._error import (
_validate_not_none,
_validate_type_bytes,
_validate_encryption_required,
_validate_encryption_unsupported,
_ERROR_VALUE_NEGATIVE,
)
from .._common_conversion import (
_int_to_str,
_to_str,
_datetime_to_utc_string,
_get_content_md5,
)
from .._serialization import (
_get_data_bytes_only,
_add_metadata_headers,
)
from .._http import HTTPRequest
from ._error import (
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
)
from ._upload_chunking import (
_PageBlobChunkUploader,
_upload_blob_chunks,
)
from .models import (
_BlobTypes,
PageBlobProperties,
)
from .._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from ._encryption import _generate_blob_encryption_data
from ._serialization import (
_get_path,
_validate_and_format_range_headers,
)
from ._deserialization import (
_convert_xml_to_page_ranges,
_parse_page_properties,
_parse_base_properties,
)
from .baseblobservice import BaseBlobService
from os import path
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
_PAGE_ALIGNMENT = 512
class PageBlobService(BaseBlobService):
'''
Page blobs are a collection of 512-byte pages optimized for random read and
write operations. To create a page blob, you initialize the page blob and
specify the maximum size the page blob will grow. To add or update the
contents of a page blob, you write a page or pages by specifying an offset
and a range that align to 512-byte page boundaries. A write to a page blob
can overwrite just one page, some pages, or up to 4 MB of the page blob.
Writes to page blobs happen in-place and are immediately committed to the
blob. The maximum size for a page blob is 1 TB.
:ivar int MAX_PAGE_SIZE:
The size of the pages put by create_blob_from_* methods. Smaller pages
may be put if there is less data provided. The maximum page size the service
supports is 4MB.
'''
MAX_PAGE_SIZE = 4 * 1024 * 1024
def __init__(self, account_name=None, account_key=None, sas_token=None,
is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
custom_domain=None, request_session=None, connection_string=None):
'''
:param str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless a connection string is given, or if a custom
domain is used with anonymous authentication.
:param str account_key:
The storage account key. This is used for shared key authentication.
If neither account key or sas token is specified, anonymous access
will be used.
:param str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign. If neither are
specified, anonymous access will be used.
:param bool is_emulated:
Whether to use the emulator. Defaults to False. If specified, will
override all other parameters besides connection string and request
session.
:param str protocol:
The protocol to use for requests. Defaults to https.
:param str endpoint_suffix:
The host base component of the url, minus the account name. Defaults
to Azure (core.windows.net). Override this to use the China cloud
(core.chinacloudapi.cn).
:param str custom_domain:
The custom domain to use. This can be set in the Azure Portal. For
example, 'www.mydomain.com'.
:param requests.Session request_session:
The session object to use for http requests.
:param str connection_string:
If specified, this will override all other parameters besides
request session. See
http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
for the connection string format.
'''
self.blob_type = _BlobTypes.PageBlob
super(PageBlobService, self).__init__(
account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
custom_domain, request_session, connection_string)
def create_blob(
self, container_name, blob_name, content_length, content_settings=None,
sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''
Creates a new Page Blob.
See create_blob_from_* for high level functions that handle the
creation and upload of large blobs with automatic chunking and
progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param int content_length:
Required. This header specifies the maximum size
for the page blob, up to 1 TB. The page blob size must be aligned
to a 512-byte boundary.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set properties on the blob.
:param int sequence_number:
The sequence number is a user-controlled value that you can use to
track requests. The value of the sequence number must be between 0
and 2^63 - 1.The default value is 0.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: a dict mapping str to str
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the new Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
return self._create_blob(
container_name,
blob_name,
content_length,
content_settings=content_settings,
sequence_number=sequence_number,
metadata=metadata,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout
)
def update_page(
self, container_name, blob_name, page, start_range, end_range,
validate_content=False, lease_id=None, if_sequence_number_lte=None,
if_sequence_number_lt=None, if_sequence_number_eq=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Updates a range of pages.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param bytes page:
Content of the page.
:param int start_range:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param int end_range:
End of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
blob.
:param str lease_id:
Required if the blob has an active lease.
:param int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:param int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:param int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value matches the
value specified. If the values do not match, the Blob service fails.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value does not
match the value specified. If the values are identical, the Blob
service fails.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
return self._update_page(
container_name,
blob_name,
page,
start_range,
end_range,
validate_content=validate_content,
lease_id=lease_id,
if_sequence_number_lte=if_sequence_number_lte,
if_sequence_number_lt=if_sequence_number_lt,
if_sequence_number_eq=if_sequence_number_eq,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout
)
def clear_page(
self, container_name, blob_name, start_range, end_range,
lease_id=None, if_sequence_number_lte=None,
if_sequence_number_lt=None, if_sequence_number_eq=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Clears a range of pages.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param int start_range:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param int end_range:
End of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param str lease_id:
Required if the blob has an active lease.
:param int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:param int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:param int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value matches the
value specified. If the values do not match, the Blob service fails.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value does not
match the value specified. If the values are identical, the Blob
service fails.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'page',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-page-write': 'clear',
'x-ms-lease-id': _to_str(lease_id),
'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_validate_and_format_range_headers(
request,
start_range,
end_range,
align_to_page=True)
return self._perform_request(request, _parse_page_properties)
def get_page_ranges(
self, container_name, blob_name, snapshot=None, start_range=None,
end_range=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''
Returns the list of valid page ranges for a Page Blob or snapshot
of a page blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve information
from.
:param int start_range:
Start of byte range to use for getting valid page ranges.
If no end_range is given, all bytes after the start_range will be searched.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param int end_range:
End of byte range to use for getting valid page ranges.
If end_range is given, start_range must be provided.
This range will return valid page ranges for from the offset start up to
offset end.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: A list of valid Page Ranges for the Page Blob.
:rtype: list of :class:`~azure.storage.blob.models.PageRange`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'pagelist',
'snapshot': _to_str(snapshot),
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
if start_range is not None:
_validate_and_format_range_headers(
request,
start_range,
end_range,
start_range_required=False,
end_range_required=False,
align_to_page=True)
return self._perform_request(request, _convert_xml_to_page_ranges)
def get_page_ranges_diff(
        self, container_name, blob_name, previous_snapshot, snapshot=None,
        start_range=None, end_range=None, lease_id=None, if_modified_since=None,
        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
    '''
    Returns the collection of page ranges that differ between a previous
    snapshot and either a more recent snapshot or the current blob,
    including pages that were cleared.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of existing blob.
    :param str previous_snapshot:
        An opaque DateTime value identifying the older blob snapshot that
        the comparison is made against.
    :param str snapshot:
        An opaque DateTime value identifying the newer blob snapshot to
        compare; when omitted, the current blob is compared against
        previous_snapshot.
    :param int start_range:
        Start of the byte range to search for differing pages. When
        end_range is omitted, all bytes after start_range are searched.
        Ranges must be 512-byte aligned: the start offset must be a
        modulus of 512 and the end offset a modulus of 512-1 (valid
        examples: 0-511, 512-, etc.).
    :param int end_range:
        End of the byte range to search for differing pages; requires
        start_range. Subject to the same 512-byte alignment rules.
    :param str lease_id:
        Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has been modified since this time.
    :param datetime if_unmodified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has not been modified since this time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag does not match; with (*),
        only if the resource does not exist.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: A list of different Page Ranges for the Page Blob.
    :rtype: list of :class:`~azure.storage.blob.models.PageRange`
    '''
    # Fail fast on missing required arguments before touching the network.
    for arg_name, arg_value in (('container_name', container_name),
                                ('blob_name', blob_name),
                                ('previous_snapshot', previous_snapshot)):
        _validate_not_none(arg_name, arg_value)

    # Listing differing page ranges is read-only, so the secondary
    # endpoint may serve the request as well.
    request = HTTPRequest()
    request.method = 'GET'
    request.host_locations = self._get_host_locations(secondary=True)
    request.path = _get_path(container_name, blob_name)
    request.query = {
        'comp': 'pagelist',
        'snapshot': _to_str(snapshot),
        'prevsnapshot': _to_str(previous_snapshot),
        'timeout': _int_to_str(timeout),
    }

    conditional_headers = {
        'x-ms-lease-id': _to_str(lease_id),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'If-Match': _to_str(if_match),
        'If-None-Match': _to_str(if_none_match),
    }
    request.headers = conditional_headers

    # Range headers are only emitted when the caller restricted the span.
    if start_range is not None:
        _validate_and_format_range_headers(
            request, start_range, end_range,
            start_range_required=False,
            end_range_required=False,
            align_to_page=True)

    return self._perform_request(request, _convert_xml_to_page_ranges)
def set_sequence_number(
        self, container_name, blob_name, sequence_number_action, sequence_number=None,
        lease_id=None, if_modified_since=None, if_unmodified_since=None,
        if_match=None, if_none_match=None, timeout=None):
    '''
    Sets the page blob's sequence number.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of existing blob.
    :param str sequence_number_action:
        Indicates how the service should modify the blob's sequence
        number; see :class:`.SequenceNumberAction` for more information.
    :param str sequence_number:
        The sequence number to set. The sequence number is a
        user-controlled property usable for tracking requests and
        managing concurrency issues.
    :param str lease_id:
        Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has been modified since this time.
    :param datetime if_unmodified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has not been modified since this time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag does not match; with (*),
        only if the resource does not exist.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: ETag and last modified properties for the updated Page Blob
    :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('sequence_number_action', sequence_number_action)

    # Sequence-number changes go through the blob "properties" endpoint.
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    request.query = {
        'comp': 'properties',
        'timeout': _int_to_str(timeout),
    }

    headers = {}
    headers['x-ms-blob-sequence-number'] = _to_str(sequence_number)
    headers['x-ms-sequence-number-action'] = _to_str(sequence_number_action)
    headers['x-ms-lease-id'] = _to_str(lease_id)
    headers['If-Modified-Since'] = _datetime_to_utc_string(if_modified_since)
    headers['If-Unmodified-Since'] = _datetime_to_utc_string(if_unmodified_since)
    headers['If-Match'] = _to_str(if_match)
    headers['If-None-Match'] = _to_str(if_none_match)
    request.headers = headers

    return self._perform_request(request, _parse_page_properties)
def resize_blob(
        self, container_name, blob_name, content_length,
        lease_id=None, if_modified_since=None, if_unmodified_since=None,
        if_match=None, if_none_match=None, timeout=None):
    '''
    Resizes a page blob to the specified size. If the specified value is
    less than the current size of the blob, then all pages above the
    specified value are cleared.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of existing blob.
    :param int content_length:
        Size to resize blob to.
    :param str lease_id:
        Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has been modified since this time.
    :param datetime if_unmodified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has not been modified since this time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag does not match; with (*),
        only if the resource does not exist.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: ETag and last modified properties for the updated Page Blob
    :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
    '''
    for arg_name, arg_value in (('container_name', container_name),
                                ('blob_name', blob_name),
                                ('content_length', content_length)):
        _validate_not_none(arg_name, arg_value)

    # Resizing is expressed as a properties update carrying the new
    # x-ms-blob-content-length.
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    request.query = {
        'comp': 'properties',
        'timeout': _int_to_str(timeout),
    }
    request.headers = {
        'x-ms-blob-content-length': _to_str(content_length),
        'x-ms-lease-id': _to_str(lease_id),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'If-Match': _to_str(if_match),
        'If-None-Match': _to_str(if_none_match),
    }

    return self._perform_request(request, _parse_page_properties)
#----Convenience APIs-----------------------------------------------------
def create_blob_from_path(
        self, container_name, blob_name, file_path, content_settings=None,
        metadata=None, validate_content=False, progress_callback=None, max_connections=2,
        lease_id=None, if_modified_since=None, if_unmodified_since=None,
        if_match=None, if_none_match=None, timeout=None):
    '''
    Creates a new blob from a file path, or updates the content of an
    existing blob, with automatic chunking and progress notifications.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of blob to create or update.
    :param str file_path:
        Path of the file to upload as the blob content.
    :param ~azure.storage.blob.models.ContentSettings content_settings:
        ContentSettings object used to set blob properties.
    :param metadata:
        Name-value pairs associated with the blob as metadata.
    :type metadata: a dict mapping str to str
    :param bool validate_content:
        If true, calculates an MD5 hash for each page of the blob so the
        storage service can verify the content that arrived. Primarily
        valuable for detecting bitflips over plain http; https (the
        default) already validates. This MD5 hash is not stored with the
        blob.
    :param progress_callback:
        Callback for progress with signature function(current, total)
        where current is the number of bytes transfered so far, and total
        is the size of the blob, or None if the total size is unknown.
    :type progress_callback: callback function in format of func(current, total)
    :param int max_connections:
        Maximum number of parallel connections to use.
    :param str lease_id:
        Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has been modified since this time.
    :param datetime if_unmodified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has not been modified since this time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag does not match; with (*),
        only if the resource does not exist.
    :param int timeout:
        The timeout parameter is expressed in seconds. This method may
        make multiple calls to the Azure service and the timeout will
        apply to each call individually.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('file_path', file_path)

    # Page blobs require a known size up front, so measure the file first
    # and delegate the actual upload to the stream-based variant.
    blob_size = path.getsize(file_path)
    with open(file_path, 'rb') as file_handle:
        self.create_blob_from_stream(
            container_name=container_name,
            blob_name=blob_name,
            stream=file_handle,
            count=blob_size,
            content_settings=content_settings,
            metadata=metadata,
            validate_content=validate_content,
            progress_callback=progress_callback,
            max_connections=max_connections,
            lease_id=lease_id,
            if_modified_since=if_modified_since,
            if_unmodified_since=if_unmodified_since,
            if_match=if_match,
            if_none_match=if_none_match,
            timeout=timeout)
def create_blob_from_stream(
        self, container_name, blob_name, stream, count, content_settings=None,
        metadata=None, validate_content=False, progress_callback=None,
        max_connections=2, lease_id=None, if_modified_since=None,
        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
    '''
    Creates a new blob from a file/stream, or updates the content of an
    existing blob, with automatic chunking and progress notifications.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of blob to create or update.
    :param io.IOBase stream:
        Opened file/stream to upload as the blob content.
    :param int count:
        Number of bytes to read from the stream. This is required; a page
        blob cannot be created if the count is unknown.
    :param ~azure.storage.blob.models.ContentSettings content_settings:
        ContentSettings object used to set the blob properties.
    :param metadata:
        Name-value pairs associated with the blob as metadata.
    :type metadata: a dict mapping str to str
    :param bool validate_content:
        If true, calculates an MD5 hash for each page of the blob so the
        storage service can verify the content that arrived. Primarily
        valuable for detecting bitflips over plain http; https (the
        default) already validates. This MD5 hash is not stored with the
        blob.
    :param progress_callback:
        Callback for progress with signature function(current, total)
        where current is the number of bytes transfered so far, and total
        is the size of the blob, or None if the total size is unknown.
    :type progress_callback: callback function in format of func(current, total)
    :param int max_connections:
        Maximum number of parallel connections to use. Note that parallel
        upload requires the stream to be seekable.
    :param str lease_id:
        Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has been modified since this time.
    :param datetime if_unmodified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has not been modified since this time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag does not match; with (*),
        only if the resource does not exist.
    :param int timeout:
        The timeout parameter is expressed in seconds. This method may
        make multiple calls to the Azure service and the timeout will
        apply to each call individually.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('stream', stream)
    _validate_not_none('count', count)
    _validate_encryption_required(self.require_encryption, self.key_encryption_key)

    # Page blobs are fixed-size: the size must be non-negative and
    # page-aligned.
    if count < 0:
        raise ValueError(_ERROR_VALUE_NEGATIVE.format('count'))
    if count % _PAGE_ALIGNMENT != 0:
        raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))

    # With client-side encryption configured, generate the content
    # encryption key, initialization vector and serialized encryption
    # metadata before creating the blob.
    content_key, init_vector, enc_metadata = None, None, None
    if self.key_encryption_key is not None:
        content_key, init_vector, enc_metadata = \
            _generate_blob_encryption_data(self.key_encryption_key)

    # Step 1: create the empty page blob of the requested size.
    create_resp = self._create_blob(
        container_name=container_name,
        blob_name=blob_name,
        content_length=count,
        content_settings=content_settings,
        metadata=metadata,
        lease_id=lease_id,
        if_modified_since=if_modified_since,
        if_unmodified_since=if_unmodified_since,
        if_match=if_match,
        if_none_match=if_none_match,
        timeout=timeout,
        encryption_data=enc_metadata
    )

    # Step 2: upload the pages in chunks, conditioned on the ETag from the
    # create call so a concurrent writer cannot interleave with the upload.
    _upload_blob_chunks(
        blob_service=self,
        container_name=container_name,
        blob_name=blob_name,
        blob_size=count,
        block_size=self.MAX_PAGE_SIZE,
        stream=stream,
        max_connections=max_connections,
        progress_callback=progress_callback,
        validate_content=validate_content,
        lease_id=lease_id,
        uploader_class=_PageBlobChunkUploader,
        if_match=create_resp.etag,
        timeout=timeout,
        content_encryption_key=content_key,
        initialization_vector=init_vector
    )
def create_blob_from_bytes(
        self, container_name, blob_name, blob, index=0, count=None,
        content_settings=None, metadata=None, validate_content=False,
        progress_callback=None, max_connections=2, lease_id=None,
        if_modified_since=None, if_unmodified_since=None, if_match=None,
        if_none_match=None, timeout=None):
    '''
    Creates a new blob from an array of bytes, or updates the content of
    an existing blob, with automatic chunking and progress notifications.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of blob to create or update.
    :param bytes blob:
        Content of blob as an array of bytes.
    :param int index:
        Start index in the byte array.
    :param int count:
        Number of bytes to upload. Set to None or negative value to
        upload all bytes starting from index.
    :param ~azure.storage.blob.models.ContentSettings content_settings:
        ContentSettings object used to set blob properties.
    :param metadata:
        Name-value pairs associated with the blob as metadata.
    :type metadata: a dict mapping str to str
    :param bool validate_content:
        If true, calculates an MD5 hash for each page of the blob so the
        storage service can verify the content that arrived. Primarily
        valuable for detecting bitflips over plain http; https (the
        default) already validates. This MD5 hash is not stored with the
        blob.
    :param progress_callback:
        Callback for progress with signature function(current, total)
        where current is the number of bytes transfered so far, and total
        is the size of the blob, or None if the total size is unknown.
    :type progress_callback: callback function in format of func(current, total)
    :param int max_connections:
        Maximum number of parallel connections to use.
    :param str lease_id:
        Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has been modified since this time.
    :param datetime if_unmodified_since:
        A DateTime value expected in UTC (naive values are assumed UTC,
        non-UTC values are converted). Perform the operation only if the
        resource has not been modified since this time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Perform the
        operation only if the resource's ETag does not match; with (*),
        only if the resource does not exist.
    :param int timeout:
        The timeout parameter is expressed in seconds. This method may
        make multiple calls to the Azure service and the timeout will
        apply to each call individually.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('blob', blob)
    _validate_type_bytes('blob', blob)

    if index < 0:
        raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))

    # None or a negative count means "everything from index onwards".
    if count is None or count < 0:
        count = len(blob) - index

    # Wrap the byte array in a stream positioned at the start index and
    # delegate to the stream-based implementation.
    byte_stream = BytesIO(blob)
    byte_stream.seek(index)

    self.create_blob_from_stream(
        container_name=container_name,
        blob_name=blob_name,
        stream=byte_stream,
        count=count,
        content_settings=content_settings,
        metadata=metadata,
        validate_content=validate_content,
        lease_id=lease_id,
        progress_callback=progress_callback,
        max_connections=max_connections,
        if_modified_since=if_modified_since,
        if_unmodified_since=if_unmodified_since,
        if_match=if_match,
        if_none_match=if_none_match,
        timeout=timeout)
#-----Helper methods-----------------------------------------------------
def _create_blob(
        self, container_name, blob_name, content_length, content_settings=None,
        sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
        if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
        encryption_data=None):
    '''
    See create_blob for more details. This helper method
    allows for encryption or other such special behavior because
    it is safely handled by the library. These behaviors are
    prohibited in the public version of this function.

    :param str _encryption_data:
        The JSON formatted encryption metadata to upload as a part of the
        blob. This should only be passed internally from other methods
        and only applied when uploading entire blob contents immediately
        follows creation of the blob.
    '''
    for arg_name, arg_value in (('container_name', container_name),
                                ('blob_name', blob_name),
                                ('content_length', content_length)):
        _validate_not_none(arg_name, arg_value)

    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    request.query = {'timeout': _int_to_str(timeout)}

    headers = {}
    headers['x-ms-blob-type'] = _to_str(self.blob_type)
    headers['x-ms-blob-content-length'] = _to_str(content_length)
    headers['x-ms-lease-id'] = _to_str(lease_id)
    headers['x-ms-blob-sequence-number'] = _to_str(sequence_number)
    headers['If-Modified-Since'] = _datetime_to_utc_string(if_modified_since)
    headers['If-Unmodified-Since'] = _datetime_to_utc_string(if_unmodified_since)
    headers['If-Match'] = _to_str(if_match)
    headers['If-None-Match'] = _to_str(if_none_match)
    request.headers = headers

    # Metadata and content settings contribute additional headers.
    _add_metadata_headers(metadata, request)
    if content_settings is not None:
        request.headers.update(content_settings._to_headers())

    # Encryption metadata rides along as a reserved metadata header.
    if encryption_data is not None:
        request.headers['x-ms-meta-encryptiondata'] = encryption_data

    return self._perform_request(request, _parse_base_properties)
def _update_page(
        self, container_name, blob_name, page, start_range, end_range,
        validate_content=False, lease_id=None, if_sequence_number_lte=None,
        if_sequence_number_lt=None, if_sequence_number_eq=None,
        if_modified_since=None, if_unmodified_since=None,
        if_match=None, if_none_match=None, timeout=None):
    '''
    Internal counterpart of update_page; see that method for parameter details.

    This helper exists so that encryption and other special behaviors can be
    applied safely by the library itself; those behaviors are prohibited in
    the public version of this function.
    '''
    req = HTTPRequest()
    req.method = 'PUT'
    req.host_locations = self._get_host_locations()
    req.path = _get_path(container_name, blob_name)
    req.query = {'comp': 'page', 'timeout': _int_to_str(timeout)}
    req.headers = {
        'x-ms-page-write': 'update',
        'x-ms-lease-id': _to_str(lease_id),
        'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
        'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
        'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'If-Match': _to_str(if_match),
        'If-None-Match': _to_str(if_none_match),
    }
    # Page writes must be aligned to 512-byte boundaries; the helper both
    # validates the range and adds the x-ms-range header.
    _validate_and_format_range_headers(
        req, start_range, end_range, align_to_page=True)

    req.body = _get_data_bytes_only('page', page)
    if validate_content:
        # Transactional MD5 of the page content, checked server-side on arrival.
        req.headers['Content-MD5'] = _to_str(_get_content_md5(req.body))

    return self._perform_request(req, _parse_page_properties)
"#-------------------------------------------------------------------------\r\n",
"# Copyright (c) Microsoft. All rights reserved.\r\n",
"#\r\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n",
"# you may not use this file except in compliance with the License.\r\n",
"# You may obtain a copy of the License at\r\n",
"# http://www.apache.org/licenses/LICENSE-2.0\r\n",
"#\r\n",
"# Unless required by applicable law or agreed to in writing, software\r\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n",
"# See the License for the specific language governing permissions and\r\n",
"# limitations under the License.\r\n",
"#--------------------------------------------------------------------------\r\n",
"from .._error import (\r\n",
" _validate_not_none,\r\n",
" _validate_type_bytes,\r\n",
" _validate_encryption_required,\r\n",
" _validate_encryption_unsupported,\r\n",
" _ERROR_VALUE_NEGATIVE,\r\n",
")\r\n",
"from .._common_conversion import (\r\n",
" _int_to_str,\r\n",
" _to_str,\r\n",
" _datetime_to_utc_string,\r\n",
" _get_content_md5,\r\n",
")\r\n",
"from .._serialization import (\r\n",
" _get_data_bytes_only,\r\n",
" _add_metadata_headers,\r\n",
")\r\n",
"from .._http import HTTPRequest\r\n",
"from ._error import (\r\n",
" _ERROR_PAGE_BLOB_SIZE_ALIGNMENT,\r\n",
")\r\n",
"from ._upload_chunking import (\r\n",
" _PageBlobChunkUploader,\r\n",
" _upload_blob_chunks,\r\n",
")\r\n",
"from .models import (\r\n",
" _BlobTypes,\r\n",
" PageBlobProperties,\r\n",
")\r\n",
"from .._constants import (\r\n",
" SERVICE_HOST_BASE,\r\n",
" DEFAULT_PROTOCOL,\r\n",
")\r\n",
"from ._encryption import _generate_blob_encryption_data\r\n",
"from ._serialization import (\r\n",
" _get_path,\r\n",
" _validate_and_format_range_headers,\r\n",
")\r\n",
"from ._deserialization import (\r\n",
" _convert_xml_to_page_ranges,\r\n",
" _parse_page_properties,\r\n",
" _parse_base_properties,\r\n",
")\r\n",
"from .baseblobservice import BaseBlobService\r\n",
"from os import path\r\n",
"import sys\r\n",
"if sys.version_info >= (3,):\r\n",
" from io import BytesIO\r\n",
"else:\r\n",
" from cStringIO import StringIO as BytesIO\r\n",
"\r\n",
"# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT\r\n",
"_PAGE_ALIGNMENT = 512\r\n",
"\r\n",
"\r\n",
"class PageBlobService(BaseBlobService):\r\n",
" '''\r\n",
" Page blobs are a collection of 512-byte pages optimized for random read and\r\n",
" write operations. To create a page blob, you initialize the page blob and\r\n",
" specify the maximum size the page blob will grow. To add or update the\r\n",
" contents of a page blob, you write a page or pages by specifying an offset\r\n",
" and a range that align to 512-byte page boundaries. A write to a page blob\r\n",
" can overwrite just one page, some pages, or up to 4 MB of the page blob.\r\n",
" Writes to page blobs happen in-place and are immediately committed to the\r\n",
" blob. The maximum size for a page blob is 1 TB.\r\n",
"\r\n",
" :ivar int MAX_PAGE_SIZE: \r\n",
" The size of the pages put by create_blob_from_* methods. Smaller pages \r\n",
" may be put if there is less data provided. The maximum page size the service \r\n",
" supports is 4MB.\r\n",
" '''\r\n",
"\r\n",
" MAX_PAGE_SIZE = 4 * 1024 * 1024\r\n",
"\r\n",
" def __init__(self, account_name=None, account_key=None, sas_token=None, \r\n",
" is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,\r\n",
" custom_domain=None, request_session=None, connection_string=None):\r\n",
" '''\r\n",
" :param str account_name:\r\n",
" The storage account name. This is used to authenticate requests \r\n",
" signed with an account key and to construct the storage endpoint. It \r\n",
" is required unless a connection string is given, or if a custom \r\n",
" domain is used with anonymous authentication.\r\n",
" :param str account_key:\r\n",
" The storage account key. This is used for shared key authentication. \r\n",
" If neither account key or sas token is specified, anonymous access \r\n",
" will be used.\r\n",
" :param str sas_token:\r\n",
" A shared access signature token to use to authenticate requests \r\n",
" instead of the account key. If account key and sas token are both \r\n",
" specified, account key will be used to sign. If neither are \r\n",
" specified, anonymous access will be used.\r\n",
" :param bool is_emulated:\r\n",
" Whether to use the emulator. Defaults to False. If specified, will \r\n",
" override all other parameters besides connection string and request \r\n",
" session.\r\n",
" :param str protocol:\r\n",
" The protocol to use for requests. Defaults to https.\r\n",
" :param str endpoint_suffix:\r\n",
" The host base component of the url, minus the account name. Defaults \r\n",
" to Azure (core.windows.net). Override this to use the China cloud \r\n",
" (core.chinacloudapi.cn).\r\n",
" :param str custom_domain:\r\n",
" The custom domain to use. This can be set in the Azure Portal. For \r\n",
" example, 'www.mydomain.com'.\r\n",
" :param requests.Session request_session:\r\n",
" The session object to use for http requests.\r\n",
" :param str connection_string:\r\n",
" If specified, this will override all other parameters besides \r\n",
" request session. See\r\n",
" http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/\r\n",
" for the connection string format.\r\n",
" '''\r\n",
" self.blob_type = _BlobTypes.PageBlob\r\n",
" super(PageBlobService, self).__init__(\r\n",
" account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, \r\n",
" custom_domain, request_session, connection_string)\r\n",
"\r\n",
" def create_blob(\r\n",
" self, container_name, blob_name, content_length, content_settings=None,\r\n",
" sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,\r\n",
" if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" Creates a new Page Blob.\r\n",
"\r\n",
" See create_blob_from_* for high level functions that handle the\r\n",
" creation and upload of large blobs with automatic chunking and\r\n",
" progress notifications.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of blob to create or update.\r\n",
" :param int content_length:\r\n",
" Required. This header specifies the maximum size\r\n",
" for the page blob, up to 1 TB. The page blob size must be aligned\r\n",
" to a 512-byte boundary.\r\n",
" :param ~azure.storage.blob.models.ContentSettings content_settings:\r\n",
" ContentSettings object used to set properties on the blob.\r\n",
" :param int sequence_number:\r\n",
" The sequence number is a user-controlled value that you can use to\r\n",
" track requests. The value of the sequence number must be between 0\r\n",
" and 2^63 - 1.The default value is 0.\r\n",
" :param metadata:\r\n",
" Name-value pairs associated with the blob as metadata.\r\n",
" :type metadata: a dict mapping str to str\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header to perform\r\n",
" the operation only if the resource's ETag matches the value specified.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header\r\n",
" to perform the operation only if the resource's ETag does not match\r\n",
" the value specified. Specify the wildcard character (*) to perform\r\n",
" the operation only if the resource does not exist, and fail the\r\n",
" operation if it does exist.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds.\r\n",
" :return: ETag and last modified properties for the new Page Blob\r\n",
" :rtype: :class:`~azure.storage.blob.models.ResourceProperties`\r\n",
" '''\r\n",
" _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) \r\n",
"\r\n",
" return self._create_blob(\r\n",
" container_name,\r\n",
" blob_name,\r\n",
" content_length,\r\n",
" content_settings=content_settings,\r\n",
" sequence_number=sequence_number,\r\n",
" metadata=metadata,\r\n",
" lease_id=lease_id,\r\n",
" if_modified_since=if_modified_since,\r\n",
" if_unmodified_since=if_unmodified_since,\r\n",
" if_match=if_match,\r\n",
" if_none_match=if_none_match,\r\n",
" timeout=timeout\r\n",
" )\r\n",
"\r\n",
" def update_page(\r\n",
" self, container_name, blob_name, page, start_range, end_range,\r\n",
" validate_content=False, lease_id=None, if_sequence_number_lte=None,\r\n",
" if_sequence_number_lt=None, if_sequence_number_eq=None,\r\n",
" if_modified_since=None, if_unmodified_since=None,\r\n",
" if_match=None, if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" Updates a range of pages.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of existing blob.\r\n",
" :param bytes page:\r\n",
" Content of the page.\r\n",
" :param int start_range:\r\n",
" Start of byte range to use for writing to a section of the blob.\r\n",
" Pages must be aligned with 512-byte boundaries, the start offset\r\n",
" must be a modulus of 512 and the end offset must be a modulus of\r\n",
" 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.\r\n",
" :param int end_range:\r\n",
" End of byte range to use for writing to a section of the blob.\r\n",
" Pages must be aligned with 512-byte boundaries, the start offset\r\n",
" must be a modulus of 512 and the end offset must be a modulus of\r\n",
" 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.\r\n",
" :param bool validate_content:\r\n",
" If true, calculates an MD5 hash of the page content. The storage \r\n",
" service checks the hash of the content that has arrived\r\n",
" with the hash that was sent. This is primarily valuable for detecting \r\n",
" bitflips on the wire if using http instead of https as https (the default) \r\n",
" will already validate. Note that this MD5 hash is not stored with the \r\n",
" blob.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param int if_sequence_number_lte:\r\n",
" If the blob's sequence number is less than or equal to\r\n",
" the specified value, the request proceeds; otherwise it fails.\r\n",
" :param int if_sequence_number_lt:\r\n",
" If the blob's sequence number is less than the specified\r\n",
" value, the request proceeds; otherwise it fails.\r\n",
" :param int if_sequence_number_eq:\r\n",
" If the blob's sequence number is equal to the specified\r\n",
" value, the request proceeds; otherwise it fails.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify an ETag value for this conditional\r\n",
" header to write the page only if the blob's ETag value matches the\r\n",
" value specified. If the values do not match, the Blob service fails.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify an ETag value for this conditional\r\n",
" header to write the page only if the blob's ETag value does not\r\n",
" match the value specified. If the values are identical, the Blob\r\n",
" service fails.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds.\r\n",
" :return: ETag and last modified properties for the updated Page Blob\r\n",
" :rtype: :class:`~azure.storage.blob.models.ResourceProperties`\r\n",
" '''\r\n",
"\r\n",
" _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)\r\n",
" \r\n",
" return self._update_page(\r\n",
" container_name,\r\n",
" blob_name,\r\n",
" page,\r\n",
" start_range,\r\n",
" end_range,\r\n",
" validate_content=validate_content,\r\n",
" lease_id=lease_id,\r\n",
" if_sequence_number_lte=if_sequence_number_lte,\r\n",
" if_sequence_number_lt=if_sequence_number_lt,\r\n",
" if_sequence_number_eq=if_sequence_number_eq,\r\n",
" if_modified_since=if_modified_since,\r\n",
" if_unmodified_since=if_unmodified_since,\r\n",
" if_match=if_match,\r\n",
" if_none_match=if_none_match,\r\n",
" timeout=timeout\r\n",
" )\r\n",
"\r\n",
" def clear_page(\r\n",
" self, container_name, blob_name, start_range, end_range,\r\n",
" lease_id=None, if_sequence_number_lte=None,\r\n",
" if_sequence_number_lt=None, if_sequence_number_eq=None,\r\n",
" if_modified_since=None, if_unmodified_since=None,\r\n",
" if_match=None, if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" Clears a range of pages.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of existing blob.\r\n",
" :param int start_range:\r\n",
" Start of byte range to use for writing to a section of the blob.\r\n",
" Pages must be aligned with 512-byte boundaries, the start offset\r\n",
" must be a modulus of 512 and the end offset must be a modulus of\r\n",
" 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.\r\n",
" :param int end_range:\r\n",
" End of byte range to use for writing to a section of the blob.\r\n",
" Pages must be aligned with 512-byte boundaries, the start offset\r\n",
" must be a modulus of 512 and the end offset must be a modulus of\r\n",
" 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param int if_sequence_number_lte:\r\n",
" If the blob's sequence number is less than or equal to\r\n",
" the specified value, the request proceeds; otherwise it fails.\r\n",
" :param int if_sequence_number_lt:\r\n",
" If the blob's sequence number is less than the specified\r\n",
" value, the request proceeds; otherwise it fails.\r\n",
" :param int if_sequence_number_eq:\r\n",
" If the blob's sequence number is equal to the specified\r\n",
" value, the request proceeds; otherwise it fails.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify an ETag value for this conditional\r\n",
" header to write the page only if the blob's ETag value matches the\r\n",
" value specified. If the values do not match, the Blob service fails.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify an ETag value for this conditional\r\n",
" header to write the page only if the blob's ETag value does not\r\n",
" match the value specified. If the values are identical, the Blob\r\n",
" service fails.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds.\r\n",
" :return: ETag and last modified properties for the updated Page Blob\r\n",
" :rtype: :class:`~azure.storage.blob.models.ResourceProperties`\r\n",
" '''\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
"\r\n",
" request = HTTPRequest()\r\n",
" request.method = 'PUT'\r\n",
" request.host_locations = self._get_host_locations()\r\n",
" request.path = _get_path(container_name, blob_name)\r\n",
" request.query = {\r\n",
" 'comp': 'page',\r\n",
" 'timeout': _int_to_str(timeout),\r\n",
" }\r\n",
" request.headers = {\r\n",
" 'x-ms-page-write': 'clear',\r\n",
" 'x-ms-lease-id': _to_str(lease_id),\r\n",
" 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),\r\n",
" 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),\r\n",
" 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),\r\n",
" 'If-Modified-Since': _datetime_to_utc_string(if_modified_since),\r\n",
" 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),\r\n",
" 'If-Match': _to_str(if_match),\r\n",
" 'If-None-Match': _to_str(if_none_match)\r\n",
" }\r\n",
" _validate_and_format_range_headers(\r\n",
" request,\r\n",
" start_range,\r\n",
" end_range,\r\n",
" align_to_page=True)\r\n",
"\r\n",
" return self._perform_request(request, _parse_page_properties)\r\n",
"\r\n",
" def get_page_ranges(\r\n",
" self, container_name, blob_name, snapshot=None, start_range=None,\r\n",
" end_range=None, lease_id=None, if_modified_since=None,\r\n",
" if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" Returns the list of valid page ranges for a Page Blob or snapshot\r\n",
" of a page blob.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of existing blob.\r\n",
" :param str snapshot:\r\n",
" The snapshot parameter is an opaque DateTime value that,\r\n",
" when present, specifies the blob snapshot to retrieve information\r\n",
" from.\r\n",
" :param int start_range:\r\n",
" Start of byte range to use for getting valid page ranges.\r\n",
" If no end_range is given, all bytes after the start_range will be searched.\r\n",
" Pages must be aligned with 512-byte boundaries, the start offset\r\n",
" must be a modulus of 512 and the end offset must be a modulus of\r\n",
" 512-1. Examples of valid byte ranges are 0-511, 512-, etc.\r\n",
" :param int end_range:\r\n",
" End of byte range to use for getting valid page ranges.\r\n",
" If end_range is given, start_range must be provided.\r\n",
" This range will return valid page ranges for from the offset start up to\r\n",
" offset end.\r\n",
" Pages must be aligned with 512-byte boundaries, the start offset\r\n",
" must be a modulus of 512 and the end offset must be a modulus of\r\n",
" 512-1. Examples of valid byte ranges are 0-511, 512-, etc.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header to perform\r\n",
" the operation only if the resource's ETag matches the value specified.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header\r\n",
" to perform the operation only if the resource's ETag does not match\r\n",
" the value specified. Specify the wildcard character (*) to perform\r\n",
" the operation only if the resource does not exist, and fail the\r\n",
" operation if it does exist.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds.\r\n",
" :return: A list of valid Page Ranges for the Page Blob.\r\n",
" :rtype: list of :class:`~azure.storage.blob.models.PageRange`\r\n",
" '''\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
" request = HTTPRequest()\r\n",
" request.method = 'GET'\r\n",
" request.host_locations = self._get_host_locations(secondary=True)\r\n",
" request.path = _get_path(container_name, blob_name)\r\n",
" request.query = {\r\n",
" 'comp': 'pagelist',\r\n",
" 'snapshot': _to_str(snapshot),\r\n",
" 'timeout': _int_to_str(timeout),\r\n",
" }\r\n",
" request.headers = {\r\n",
" 'x-ms-lease-id': _to_str(lease_id),\r\n",
" 'If-Modified-Since': _datetime_to_utc_string(if_modified_since),\r\n",
" 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),\r\n",
" 'If-Match': _to_str(if_match),\r\n",
" 'If-None-Match': _to_str(if_none_match),\r\n",
" }\r\n",
" if start_range is not None:\r\n",
" _validate_and_format_range_headers(\r\n",
" request,\r\n",
" start_range,\r\n",
" end_range,\r\n",
" start_range_required=False,\r\n",
" end_range_required=False,\r\n",
" align_to_page=True)\r\n",
"\r\n",
" return self._perform_request(request, _convert_xml_to_page_ranges)\r\n",
"\r\n",
" def get_page_ranges_diff(\r\n",
" self, container_name, blob_name, previous_snapshot, snapshot=None,\r\n",
" start_range=None, end_range=None, lease_id=None, if_modified_since=None,\r\n",
" if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" The response will include only the pages that are different between either a\r\n",
" recent snapshot or the current blob and a previous snapshot, including pages\r\n",
" that were cleared.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of existing blob.\r\n",
" :param str previous_snapshot:\r\n",
" The snapshot parameter is an opaque DateTime value that\r\n",
" specifies a previous blob snapshot to be compared\r\n",
" against a more recent snapshot or the current blob.\r\n",
" :param str snapshot:\r\n",
" The snapshot parameter is an opaque DateTime value that\r\n",
" specifies a more recent blob snapshot to be compared\r\n",
" against a previous snapshot (previous_snapshot).\r\n",
" :param int start_range:\r\n",
" Start of byte range to use for getting different page ranges.\r\n",
" If no end_range is given, all bytes after the start_range will be searched.\r\n",
" Pages must be aligned with 512-byte boundaries, the start offset\r\n",
" must be a modulus of 512 and the end offset must be a modulus of\r\n",
" 512-1. Examples of valid byte ranges are 0-511, 512-, etc.\r\n",
" :param int end_range:\r\n",
" End of byte range to use for getting different page ranges.\r\n",
" If end_range is given, start_range must be provided.\r\n",
" This range will return valid page ranges for from the offset start up to\r\n",
" offset end.\r\n",
" Pages must be aligned with 512-byte boundaries, the start offset\r\n",
" must be a modulus of 512 and the end offset must be a modulus of\r\n",
" 512-1. Examples of valid byte ranges are 0-511, 512-, etc.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header to perform\r\n",
" the operation only if the resource's ETag matches the value specified.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header\r\n",
" to perform the operation only if the resource's ETag does not match\r\n",
" the value specified. Specify the wildcard character (*) to perform\r\n",
" the operation only if the resource does not exist, and fail the\r\n",
" operation if it does exist.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds.\r\n",
" :return: A list of different Page Ranges for the Page Blob.\r\n",
" :rtype: list of :class:`~azure.storage.blob.models.PageRange`\r\n",
" '''\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
" _validate_not_none('previous_snapshot', previous_snapshot)\r\n",
" request = HTTPRequest()\r\n",
" request.method = 'GET'\r\n",
" request.host_locations = self._get_host_locations(secondary=True)\r\n",
" request.path = _get_path(container_name, blob_name)\r\n",
" request.query = {\r\n",
" 'comp': 'pagelist',\r\n",
" 'snapshot': _to_str(snapshot),\r\n",
" 'prevsnapshot': _to_str(previous_snapshot),\r\n",
" 'timeout': _int_to_str(timeout),\r\n",
" }\r\n",
" request.headers = {\r\n",
" 'x-ms-lease-id': _to_str(lease_id),\r\n",
" 'If-Modified-Since': _datetime_to_utc_string(if_modified_since),\r\n",
" 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),\r\n",
" 'If-Match': _to_str(if_match),\r\n",
" 'If-None-Match': _to_str(if_none_match),\r\n",
" }\r\n",
" if start_range is not None:\r\n",
" _validate_and_format_range_headers(\r\n",
" request,\r\n",
" start_range,\r\n",
" end_range,\r\n",
" start_range_required=False,\r\n",
" end_range_required=False,\r\n",
" align_to_page=True)\r\n",
"\r\n",
" return self._perform_request(request, _convert_xml_to_page_ranges)\r\n",
"\r\n",
" def set_sequence_number(\r\n",
" self, container_name, blob_name, sequence_number_action, sequence_number=None,\r\n",
" lease_id=None, if_modified_since=None, if_unmodified_since=None,\r\n",
" if_match=None, if_none_match=None, timeout=None):\r\n",
" \r\n",
" '''\r\n",
" Sets the blob sequence number.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of existing blob.\r\n",
" :param str sequence_number_action:\r\n",
" This property indicates how the service should modify the blob's sequence\r\n",
" number. See :class:`.SequenceNumberAction` for more information.\r\n",
" :param str sequence_number:\r\n",
" This property sets the blob's sequence number. The sequence number is a\r\n",
" user-controlled property that you can use to track requests and manage\r\n",
" concurrency issues.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header to perform\r\n",
" the operation only if the resource's ETag matches the value specified.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header\r\n",
" to perform the operation only if the resource's ETag does not match\r\n",
" the value specified. Specify the wildcard character (*) to perform\r\n",
" the operation only if the resource does not exist, and fail the\r\n",
" operation if it does exist.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds.\r\n",
" :return: ETag and last modified properties for the updated Page Blob\r\n",
" :rtype: :class:`~azure.storage.blob.models.ResourceProperties`\r\n",
" '''\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
" _validate_not_none('sequence_number_action', sequence_number_action)\r\n",
" request = HTTPRequest()\r\n",
" request.method = 'PUT'\r\n",
" request.host_locations = self._get_host_locations()\r\n",
" request.path = _get_path(container_name, blob_name)\r\n",
" request.query = {\r\n",
" 'comp': 'properties',\r\n",
" 'timeout': _int_to_str(timeout),\r\n",
" }\r\n",
" request.headers = {\r\n",
" 'x-ms-blob-sequence-number': _to_str(sequence_number),\r\n",
" 'x-ms-sequence-number-action': _to_str(sequence_number_action),\r\n",
" 'x-ms-lease-id': _to_str(lease_id),\r\n",
" 'If-Modified-Since': _datetime_to_utc_string(if_modified_since),\r\n",
" 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),\r\n",
" 'If-Match': _to_str(if_match),\r\n",
" 'If-None-Match': _to_str(if_none_match),\r\n",
" }\r\n",
"\r\n",
" return self._perform_request(request, _parse_page_properties)\r\n",
"\r\n",
" def resize_blob(\r\n",
" self, container_name, blob_name, content_length,\r\n",
" lease_id=None, if_modified_since=None, if_unmodified_since=None,\r\n",
" if_match=None, if_none_match=None, timeout=None):\r\n",
" \r\n",
" '''\r\n",
" Resizes a page blob to the specified size. If the specified value is less\r\n",
" than the current size of the blob, then all pages above the specified value\r\n",
" are cleared.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of existing blob.\r\n",
" :param int content_length:\r\n",
" Size to resize blob to.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header to perform\r\n",
" the operation only if the resource's ETag matches the value specified.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header\r\n",
" to perform the operation only if the resource's ETag does not match\r\n",
" the value specified. Specify the wildcard character (*) to perform\r\n",
" the operation only if the resource does not exist, and fail the\r\n",
" operation if it does exist.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds.\r\n",
" :return: ETag and last modified properties for the updated Page Blob\r\n",
" :rtype: :class:`~azure.storage.blob.models.ResourceProperties`\r\n",
" '''\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
" _validate_not_none('content_length', content_length)\r\n",
" request = HTTPRequest()\r\n",
" request.method = 'PUT'\r\n",
" request.host_locations = self._get_host_locations()\r\n",
" request.path = _get_path(container_name, blob_name)\r\n",
" request.query = {\r\n",
" 'comp': 'properties',\r\n",
" 'timeout': _int_to_str(timeout),\r\n",
" }\r\n",
" request.headers = {\r\n",
" 'x-ms-blob-content-length': _to_str(content_length),\r\n",
" 'x-ms-lease-id': _to_str(lease_id),\r\n",
" 'If-Modified-Since': _datetime_to_utc_string(if_modified_since),\r\n",
" 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),\r\n",
" 'If-Match': _to_str(if_match),\r\n",
" 'If-None-Match': _to_str(if_none_match),\r\n",
" }\r\n",
"\r\n",
" return self._perform_request(request, _parse_page_properties)\r\n",
"\r\n",
" #----Convenience APIs-----------------------------------------------------\r\n",
"\r\n",
" def create_blob_from_path(\r\n",
" self, container_name, blob_name, file_path, content_settings=None,\r\n",
" metadata=None, validate_content=False, progress_callback=None, max_connections=2,\r\n",
" lease_id=None, if_modified_since=None, if_unmodified_since=None, \r\n",
" if_match=None, if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" Creates a new blob from a file path, or updates the content of an\r\n",
" existing blob, with automatic chunking and progress notifications.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of blob to create or update.\r\n",
" :param str file_path:\r\n",
" Path of the file to upload as the blob content.\r\n",
" :param ~azure.storage.blob.models.ContentSettings content_settings:\r\n",
" ContentSettings object used to set blob properties.\r\n",
" :param metadata:\r\n",
" Name-value pairs associated with the blob as metadata.\r\n",
" :type metadata: a dict mapping str to str\r\n",
" :param bool validate_content:\r\n",
" If true, calculates an MD5 hash for each page of the blob. The storage \r\n",
" service checks the hash of the content that has arrived with the hash \r\n",
" that was sent. This is primarily valuable for detecting bitflips on \r\n",
" the wire if using http instead of https as https (the default) will \r\n",
" already validate. Note that this MD5 hash is not stored with the \r\n",
" blob.\r\n",
" :param progress_callback:\r\n",
" Callback for progress with signature function(current, total) where\r\n",
" current is the number of bytes transfered so far, and total is the\r\n",
" size of the blob, or None if the total size is unknown.\r\n",
" :type progress_callback: callback function in format of func(current, total)\r\n",
" :param int max_connections:\r\n",
" Maximum number of parallel connections to use.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header to perform\r\n",
" the operation only if the resource's ETag matches the value specified.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header\r\n",
" to perform the operation only if the resource's ETag does not match\r\n",
" the value specified. Specify the wildcard character (*) to perform\r\n",
" the operation only if the resource does not exist, and fail the\r\n",
" operation if it does exist.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds. This method may make \r\n",
" multiple calls to the Azure service and the timeout will apply to \r\n",
" each call individually.\r\n",
" '''\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
" _validate_not_none('file_path', file_path)\r\n",
"\r\n",
" count = path.getsize(file_path)\r\n",
" with open(file_path, 'rb') as stream:\r\n",
" self.create_blob_from_stream(\r\n",
" container_name=container_name,\r\n",
" blob_name=blob_name,\r\n",
" stream=stream,\r\n",
" count=count,\r\n",
" content_settings=content_settings,\r\n",
" metadata=metadata,\r\n",
" validate_content=validate_content,\r\n",
" progress_callback=progress_callback,\r\n",
" max_connections=max_connections,\r\n",
" lease_id=lease_id,\r\n",
" if_modified_since=if_modified_since,\r\n",
" if_unmodified_since=if_unmodified_since,\r\n",
" if_match=if_match,\r\n",
" if_none_match=if_none_match,\r\n",
" timeout=timeout)\r\n",
"\r\n",
"\r\n",
" def create_blob_from_stream(\r\n",
" self, container_name, blob_name, stream, count, content_settings=None,\r\n",
" metadata=None, validate_content=False, progress_callback=None,\r\n",
" max_connections=2, lease_id=None, if_modified_since=None,\r\n",
" if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" Creates a new blob from a file/stream, or updates the content of an\r\n",
" existing blob, with automatic chunking and progress notifications.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of blob to create or update.\r\n",
" :param io.IOBase stream:\r\n",
" Opened file/stream to upload as the blob content.\r\n",
" :param int count:\r\n",
" Number of bytes to read from the stream. This is required, a page\r\n",
" blob cannot be created if the count is unknown.\r\n",
" :param ~azure.storage.blob.models.ContentSettings content_settings:\r\n",
" ContentSettings object used to set the blob properties.\r\n",
" :param metadata:\r\n",
" Name-value pairs associated with the blob as metadata.\r\n",
" :type metadata: a dict mapping str to str\r\n",
" :param bool validate_content:\r\n",
" If true, calculates an MD5 hash for each page of the blob. The storage \r\n",
" service checks the hash of the content that has arrived with the hash \r\n",
" that was sent. This is primarily valuable for detecting bitflips on \r\n",
" the wire if using http instead of https as https (the default) will \r\n",
" already validate. Note that this MD5 hash is not stored with the \r\n",
" blob.\r\n",
" :param progress_callback:\r\n",
" Callback for progress with signature function(current, total) where\r\n",
" current is the number of bytes transfered so far, and total is the\r\n",
" size of the blob, or None if the total size is unknown.\r\n",
" :type progress_callback: callback function in format of func(current, total)\r\n",
" :param int max_connections:\r\n",
" Maximum number of parallel connections to use. Note that parallel upload \r\n",
" requires the stream to be seekable.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header to perform\r\n",
" the operation only if the resource's ETag matches the value specified.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header\r\n",
" to perform the operation only if the resource's ETag does not match\r\n",
" the value specified. Specify the wildcard character (*) to perform\r\n",
" the operation only if the resource does not exist, and fail the\r\n",
" operation if it does exist.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds. This method may make \r\n",
" multiple calls to the Azure service and the timeout will apply to \r\n",
" each call individually.\r\n",
" '''\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
" _validate_not_none('stream', stream)\r\n",
" _validate_not_none('count', count)\r\n",
" _validate_encryption_required(self.require_encryption, self.key_encryption_key)\r\n",
"\r\n",
" if count < 0:\r\n",
" raise ValueError(_ERROR_VALUE_NEGATIVE.format('count'))\r\n",
"\r\n",
" if count % _PAGE_ALIGNMENT != 0:\r\n",
" raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))\r\n",
"\r\n",
" cek, iv, encryption_data = None, None, None\r\n",
" if self.key_encryption_key is not None:\r\n",
" cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)\r\n",
"\r\n",
" response = self._create_blob(\r\n",
" container_name=container_name,\r\n",
" blob_name=blob_name,\r\n",
" content_length=count,\r\n",
" content_settings=content_settings,\r\n",
" metadata=metadata,\r\n",
" lease_id=lease_id,\r\n",
" if_modified_since=if_modified_since,\r\n",
" if_unmodified_since=if_unmodified_since,\r\n",
" if_match=if_match,\r\n",
" if_none_match=if_none_match,\r\n",
" timeout=timeout,\r\n",
" encryption_data=encryption_data\r\n",
" )\r\n",
"\r\n",
" _upload_blob_chunks(\r\n",
" blob_service=self,\r\n",
" container_name=container_name,\r\n",
" blob_name=blob_name,\r\n",
" blob_size=count,\r\n",
" block_size=self.MAX_PAGE_SIZE,\r\n",
" stream=stream,\r\n",
" max_connections=max_connections,\r\n",
" progress_callback=progress_callback,\r\n",
" validate_content=validate_content,\r\n",
" lease_id=lease_id,\r\n",
" uploader_class=_PageBlobChunkUploader,\r\n",
" if_match=response.etag,\r\n",
" timeout=timeout,\r\n",
" content_encryption_key=cek,\r\n",
" initialization_vector=iv\r\n",
" )\r\n",
"\r\n",
" def create_blob_from_bytes(\r\n",
" self, container_name, blob_name, blob, index=0, count=None,\r\n",
" content_settings=None, metadata=None, validate_content=False, \r\n",
" progress_callback=None, max_connections=2, lease_id=None, \r\n",
" if_modified_since=None, if_unmodified_since=None, if_match=None, \r\n",
" if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" Creates a new blob from an array of bytes, or updates the content\r\n",
" of an existing blob, with automatic chunking and progress\r\n",
" notifications.\r\n",
"\r\n",
" :param str container_name:\r\n",
" Name of existing container.\r\n",
" :param str blob_name:\r\n",
" Name of blob to create or update.\r\n",
" :param bytes blob:\r\n",
" Content of blob as an array of bytes.\r\n",
" :param int index:\r\n",
" Start index in the byte array.\r\n",
" :param int count:\r\n",
" Number of bytes to upload. Set to None or negative value to upload\r\n",
" all bytes starting from index.\r\n",
" :param ~azure.storage.blob.models.ContentSettings content_settings:\r\n",
" ContentSettings object used to set blob properties.\r\n",
" :param metadata:\r\n",
" Name-value pairs associated with the blob as metadata.\r\n",
" :type metadata: a dict mapping str to str\r\n",
" :param bool validate_content:\r\n",
" If true, calculates an MD5 hash for each page of the blob. The storage \r\n",
" service checks the hash of the content that has arrived with the hash \r\n",
" that was sent. This is primarily valuable for detecting bitflips on \r\n",
" the wire if using http instead of https as https (the default) will \r\n",
" already validate. Note that this MD5 hash is not stored with the \r\n",
" blob.\r\n",
" :param progress_callback:\r\n",
" Callback for progress with signature function(current, total) where\r\n",
" current is the number of bytes transfered so far, and total is the\r\n",
" size of the blob, or None if the total size is unknown.\r\n",
" :type progress_callback: callback function in format of func(current, total)\r\n",
" :param int max_connections:\r\n",
" Maximum number of parallel connections to use.\r\n",
" :param str lease_id:\r\n",
" Required if the blob has an active lease.\r\n",
" :param datetime if_modified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC. \r\n",
" Specify this header to perform the operation only\r\n",
" if the resource has been modified since the specified time.\r\n",
" :param datetime if_unmodified_since:\r\n",
" A DateTime value. Azure expects the date value passed in to be UTC.\r\n",
" If timezone is included, any non-UTC datetimes will be converted to UTC.\r\n",
" If a date is passed in without timezone info, it is assumed to be UTC.\r\n",
" Specify this header to perform the operation only if\r\n",
" the resource has not been modified since the specified date/time.\r\n",
" :param str if_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header to perform\r\n",
" the operation only if the resource's ETag matches the value specified.\r\n",
" :param str if_none_match:\r\n",
" An ETag value, or the wildcard character (*). Specify this header\r\n",
" to perform the operation only if the resource's ETag does not match\r\n",
" the value specified. Specify the wildcard character (*) to perform\r\n",
" the operation only if the resource does not exist, and fail the\r\n",
" operation if it does exist.\r\n",
" :param int timeout:\r\n",
" The timeout parameter is expressed in seconds. This method may make \r\n",
" multiple calls to the Azure service and the timeout will apply to \r\n",
" each call individually.\r\n",
" '''\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
" _validate_not_none('blob', blob)\r\n",
" _validate_type_bytes('blob', blob)\r\n",
"\r\n",
" if index < 0:\r\n",
" raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))\r\n",
"\r\n",
" if count is None or count < 0:\r\n",
" count = len(blob) - index\r\n",
"\r\n",
" stream = BytesIO(blob)\r\n",
" stream.seek(index)\r\n",
"\r\n",
" self.create_blob_from_stream(\r\n",
" container_name=container_name,\r\n",
" blob_name=blob_name,\r\n",
" stream=stream,\r\n",
" count=count,\r\n",
" content_settings=content_settings,\r\n",
" metadata=metadata,\r\n",
" validate_content=validate_content,\r\n",
" lease_id=lease_id,\r\n",
" progress_callback=progress_callback,\r\n",
" max_connections=max_connections,\r\n",
" if_modified_since=if_modified_since,\r\n",
" if_unmodified_since=if_unmodified_since,\r\n",
" if_match=if_match,\r\n",
" if_none_match=if_none_match,\r\n",
" timeout=timeout)\r\n",
"\r\n",
" #-----Helper methods-----------------------------------------------------\r\n",
"\r\n",
" def _create_blob(\r\n",
" self, container_name, blob_name, content_length, content_settings=None,\r\n",
" sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,\r\n",
" if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,\r\n",
" encryption_data=None):\r\n",
" '''\r\n",
" See create_blob for more details. This helper method\r\n",
" allows for encryption or other such special behavior because\r\n",
" it is safely handled by the library. These behaviors are\r\n",
" prohibited in the public version of this function.\r\n",
" :param str _encryption_data:\r\n",
" The JSON formatted encryption metadata to upload as a part of the blob.\r\n",
" This should only be passed internally from other methods and only applied\r\n",
" when uploading entire blob contents immediately follows creation of the blob.\r\n",
" '''\r\n",
"\r\n",
" _validate_not_none('container_name', container_name)\r\n",
" _validate_not_none('blob_name', blob_name)\r\n",
" _validate_not_none('content_length', content_length)\r\n",
" request = HTTPRequest()\r\n",
" request.method = 'PUT'\r\n",
" request.host_locations = self._get_host_locations()\r\n",
" request.path = _get_path(container_name, blob_name)\r\n",
" request.query = {'timeout': _int_to_str(timeout)}\r\n",
" request.headers = {\r\n",
" 'x-ms-blob-type': _to_str(self.blob_type),\r\n",
" 'x-ms-blob-content-length': _to_str(content_length),\r\n",
" 'x-ms-lease-id': _to_str(lease_id),\r\n",
" 'x-ms-blob-sequence-number': _to_str(sequence_number),\r\n",
" 'If-Modified-Since': _datetime_to_utc_string(if_modified_since),\r\n",
" 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),\r\n",
" 'If-Match': _to_str(if_match),\r\n",
" 'If-None-Match': _to_str(if_none_match)\r\n",
" }\r\n",
" _add_metadata_headers(metadata, request)\r\n",
" if content_settings is not None:\r\n",
" request.headers.update(content_settings._to_headers())\r\n",
"\r\n",
" if encryption_data is not None:\r\n",
" request.headers['x-ms-meta-encryptiondata'] = encryption_data\r\n",
"\r\n",
" return self._perform_request(request, _parse_base_properties)\r\n",
"\r\n",
" def _update_page(\r\n",
" self, container_name, blob_name, page, start_range, end_range,\r\n",
" validate_content=False, lease_id=None, if_sequence_number_lte=None,\r\n",
" if_sequence_number_lt=None, if_sequence_number_eq=None,\r\n",
" if_modified_since=None, if_unmodified_since=None,\r\n",
" if_match=None, if_none_match=None, timeout=None):\r\n",
" '''\r\n",
" See update_page for more details. This helper method\r\n",
" allows for encryption or other such special behavior because\r\n",
" it is safely handled by the library. These behaviors are\r\n",
" prohibited in the public version of this function.\r\n",
" '''\r\n",
"\r\n",
" request = HTTPRequest()\r\n",
" request.method = 'PUT'\r\n",
" request.host_locations = self._get_host_locations()\r\n",
" request.path = _get_path(container_name, blob_name)\r\n",
" request.query = {\r\n",
" 'comp': 'page',\r\n",
" 'timeout': _int_to_str(timeout),\r\n",
" }\r\n",
" request.headers = {\r\n",
" 'x-ms-page-write': 'update',\r\n",
" 'x-ms-lease-id': _to_str(lease_id),\r\n",
" 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),\r\n",
" 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),\r\n",
" 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),\r\n",
" 'If-Modified-Since': _datetime_to_utc_string(if_modified_since),\r\n",
" 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),\r\n",
" 'If-Match': _to_str(if_match),\r\n",
" 'If-None-Match': _to_str(if_none_match)\r\n",
" }\r\n",
" _validate_and_format_range_headers(\r\n",
" request,\r\n",
" start_range,\r\n",
" end_range,\r\n",
" align_to_page=True)\r\n",
" request.body = _get_data_bytes_only('page', page)\r\n",
"\r\n",
" if validate_content:\r\n",
" computed_md5 = _get_content_md5(request.body)\r\n",
" request.headers['Content-MD5'] = _to_str(computed_md5)\r\n",
"\r\n",
" return self._perform_request(request, _parse_page_properties)"
] | [
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0.012345679012345678,
0.022988505747126436,
0,
0,
0,
0,
0,
0.01282051282051282,
0.010101010101010102,
0.011764705882352941,
0,
0,
0.01282051282051282,
0.024096385542168676,
0.01282051282051282,
0,
0,
0.024096385542168676,
0.012345679012345678,
0,
0,
0.012658227848101266,
0.012345679012345678,
0.013333333333333334,
0,
0,
0.012345679012345678,
0.012195121951219513,
0,
0,
0,
0,
0.024096385542168676,
0.0125,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0.011764705882352941,
0.023529411764705882,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.020618556701030927,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012658227848101266,
0,
0.023809523809523808,
0.02247191011235955,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.00980392156862745,
0,
0.012195121951219513,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.00980392156862745,
0,
0.012195121951219513,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023529411764705882,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.023529411764705882,
0,
0.011627906976744186,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0.01694915254237288,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0.011764705882352941,
0.011904761904761904,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0.1,
0,
0.012048192771084338,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0.0125,
0,
0,
0,
0.01098901098901099,
0.013333333333333334,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023529411764705882,
0.023809523809523808,
0.012195121951219513,
0.012195121951219513,
0.012658227848101266,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.0125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0.023529411764705882,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023529411764705882,
0.023809523809523808,
0.012195121951219513,
0.012195121951219513,
0.012658227848101266,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0.022988505747126436,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.0125,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013888888888888888,
0.014705882352941176,
0.013333333333333334,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023529411764705882,
0.023809523809523808,
0.012195121951219513,
0.012195121951219513,
0.012658227848101266,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.023529411764705882,
0,
0,
0,
0,
0.011627906976744186,
0.011904761904761904,
0,
0,
0,
0.011111111111111112,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.0125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012658227848101266,
0,
0,
0,
0.011764705882352941,
0.011904761904761904,
0.03125,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0.011494252873563218,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406
] | 1,094 | 0.002476 | false |
#!/usr/bin/env python
import os
import csv, sys, json
def _cuffdiff_sample_mounts(samples_host_dir, docker_mount):
    '''Build the docker mount arguments for one replicate group.
    INPUT:
    samples_host_dir = comma-separated list of host paths to .bam files
    docker_mount = mount point inside the container (must end with '/')
    OUTPUT:
    (mount_args, container_paths) where mount_args is a list of
    "-v host:container" fragments and container_paths is the list of
    corresponding in-container file paths.
    '''
    mount_args = []
    container_paths = []
    for sample in samples_host_dir.split(','):
        filename = sample.split('/')[-1]
        mount_args.append("-v " + sample + ":" + docker_mount + filename)
        container_paths.append(docker_mount + filename)
    return mount_args, container_paths

def run_cuffdiff_docker(samples_host_dir_1,samples_host_dir_2,samples_name_1,samples_name_2,
                organism_I,host_indexes_dir_I,
                host_dirname_O, threads = 1,
                library_norm_method = 'quartile', fdr = 0.05,
                library_type ='fr-firststrand',
                index_type_I = '.gtf',
                more_options=None):
    '''Process RNA sequencing data by running cuffdiff inside a docker container.
    INPUT:
    samples_host_dir_1 = comma-separated list of .bam paths, one per replicate of sample 1
    samples_host_dir_2 = comma-separated list of .bam paths, one per replicate of sample 2
    samples_name_1 = sample name for sample 1
    samples_name_2 = sample name for sample 2
    organism_I = name of index
    host_indexes_dir_I = directory for indexes
    index_type_I = string for index extension (e.g., '.gtf' or '.gff')
    host_dirname_O = location for output on the host
    EXAMPLE:
    samples_name_1 = 140818_11_OxicEvo04EcoliGlcM9_Broth-4
    samples_name_2 = 140716_0_OxicEvo04pgiEcoliGlcM9_Broth-1
    samples_host_dir_1 = /media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/140818_11_OxicEvo04EcoliGlcM9_Broth-4.bam (remote storage location)
    samples_host_dir_2 = /media/proline/dmccloskey/Resequencing_RNA/fastq/140716_0_OxicEvo04pgiEcoliGlcM9_Broth-1/140716_0_OxicEvo04pgiEcoliGlcM9_Broth-1.bam (remote storage location)
    organism_I = e_coli
    host_indexes_dir_I = /media/proline/dmccloskey/Resequencing_RNA/indexes/ (remote storage location)
    host_dirname_O = /media/proline/dmccloskey/Resequencing_RNA/fastq/ (remote storage location)
    '''
    # 1. create a container named 'cuffdiff' from the sequencing-utilities image
    # 2. mount the host .bam files and index directory into the container
    # 3. run cuffdiff, copy the results back to the host, remove the container
    docker_mount_1 = '/media/Sequencing/fastq/'
    docker_mount_2 = '/media/Sequencing/indexes/'
    samples_message = samples_name_1 + "_vs_" + samples_name_2
    user_output = '/home/user/'+samples_message
    container_name = 'cuffdiff'
    # make the samples mount for the container: both replicate groups share
    # the same fastq mount point inside the container
    mounts_1, docker_name_dir_1 = _cuffdiff_sample_mounts(samples_host_dir_1, docker_mount_1)
    mounts_2, docker_name_dir_2 = _cuffdiff_sample_mounts(samples_host_dir_2, docker_mount_1)
    samples_mount = " ".join(mounts_1 + mounts_2)
    docker_name_dir_1_str = ','.join(docker_name_dir_1)
    docker_name_dir_2_str = ','.join(docker_name_dir_2)
    if not more_options:
        # the string 'None' is interpolated verbatim into the python command
        # executed inside the container (it becomes the literal None argument)
        more_options = 'None'
    rnaseq_cmd = ("run_cuffdiff(['%s'],['%s'],'%s','%s','%s','%s',indexes_dir='%s',threads=%s,library_norm_method='%s',fdr=%s,library_type='%s',index_type='%s',more_options=%s);" \
        %(docker_name_dir_1_str,docker_name_dir_2_str,samples_name_1,samples_name_2,\
        organism_I,user_output,docker_mount_2,threads,library_norm_method,fdr,library_type,index_type_I,more_options))
    python_cmd = ("from sequencing_utilities.cuffdiff import run_cuffdiff;%s" %(rnaseq_cmd))
    # NOTE(review): these commands are built by plain string interpolation and
    # executed through the shell; inputs are assumed trusted (no shell quoting).
    docker_run = ('docker run -u=root --name=%s %s -v %s:%s dmccloskey/sequencing_utilities python3 -c "%s"' %(container_name,samples_mount,host_indexes_dir_I,docker_mount_2,python_cmd))
    os.system("echo %s" %(docker_run))
    os.system(docker_run)
    # copy the output directory file out of the docker container into the host dir
    docker_cp = ("docker cp %s:%s/ %s/%s" %(container_name,user_output,host_dirname_O,samples_message))
    os.system(docker_cp)
    # delete the container and the container content:
    cmd = ('docker rm -v %s' %(container_name))
    os.system(cmd)
def run_cuffdiff_docker_fromCsvOrFile(filename_csv_I = None,filename_list_I = None):
    '''Call run_cuffdiff_docker on a list of parameters
    INPUT:
    filename_csv_I = optional path to a .csv of parameter rows; when given it
        overrides filename_list_I
    filename_list_I = [{sample_name_1:...,sample_name_2:...,},...]
    '''
    # FIX: the original used a mutable default ([]) for filename_list_I, which
    # is shared across calls; None is the safe sentinel.
    if filename_list_I is None:
        filename_list_I = [];
    if filename_csv_I:
        filename_list_I = read_csv(filename_csv_I);
    for row_cnt,row in enumerate(filename_list_I):
        cmd = ("echo running cuffdiff for samples %s vs. %s" %(row['samples_name_1'],row['samples_name_2']));
        os.system(cmd);
        run_cuffdiff_docker(row['samples_host_dir_1'],row['samples_host_dir_2'],
                row['samples_name_1'],row['samples_name_2'],
                row['organism_I'],row['host_indexes_dir_I'],
                row['host_dirname_O'],
                row['threads'],row['library_norm_method'],
                row['fdr'],row['library_type'],
                row['index_type_I'],
                row['more_options']);
def read_csv(filename):
    """Read table data from csv file into a list of dicts (one per row).

    Exits the process with a diagnostic message when the file is missing or
    the csv is malformed (preserves the original sys.exit behavior).
    """
    data_O = [];
    try:
        # newline='' is the documented way to open files for the csv module
        with open(filename, 'r', newline='') as csvfile:
            reader = csv.DictReader(csvfile);
            try:
                # NOTE: the original bound reader.fieldnames to an unused
                # local `keys`; dropped.
                for row in reader:
                    data_O.append(row);
            except csv.Error as e:
                sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e));
    except IOError as e:
        sys.exit('%s does not exist' % e);
    return data_O;
def main_singleFile():
    """Run cuffdiff using docker
    e.g. python3 run_cuffdiff_docker.py ...
    """
    from argparse import ArgumentParser
    parser = ArgumentParser("process RNAseq data")
    # positional arguments, declared in the same order the CLI expects them
    positionals = (
        ("samples_host_dir_1", """list of .bam files for samples_1"""),
        ("samples_host_dir_2", """list of .bam files for samples_2"""),
        ("samples_name_1", """sample name for samples_1"""),
        ("samples_name_2", """sample name for samples_2"""),
        ("organism_I", """name of index"""),
        ("host_indexes_dir_I", """directory for indexes"""),
        ("host_dirname_O", """location for output on the host"""),
        ("threads", """number of processors to use"""),
        ("library_norm_method", """method for library normalization"""),
        ("fdr", """false discover rate"""),
        ("library_type", """the type of library used"""),
        ("index_type_I", """index file type (.gtf or .gff)"""),
        ("more_options", """string representation of additional cuffdiff options"""),
    )
    for arg_name, arg_help in positionals:
        parser.add_argument(arg_name, help=arg_help)
    args = parser.parse_args()
    # forward the parsed values to the worker in its expected order
    run_cuffdiff_docker(args.samples_host_dir_1, args.samples_host_dir_2,
                        args.samples_name_1, args.samples_name_2,
                        args.organism_I, args.host_indexes_dir_I,
                        args.host_dirname_O,
                        args.threads, args.library_norm_method,
                        args.fdr, args.library_type,
                        args.index_type_I, args.more_options)
def main_batchFile():
    """process RNAseq data using docker in batch
    e.g. python3 run_cuffdiff_docker.py '/media/proline/dmccloskey/Resequencing_RNA/cuffdiff_files.csv' []
    """
    from argparse import ArgumentParser
    cli = ArgumentParser("process RNAseq data")
    cli.add_argument("filename_csv_I",
                     help="""list of files and parameters in a .csv""")
    cli.add_argument("filename_list_I",
                     help="""list of files and parameters e.g. [{sample_name_1:...,sample_name_2:...,},...]""")
    parsed = cli.parse_args()
    run_cuffdiff_docker_fromCsvOrFile(parsed.filename_csv_I, parsed.filename_list_I)
if __name__ == "__main__":
    # single-file entry point kept for reference:
    # main_singleFile()
    main_batchFile()
"#!/usr/bin/env python\n",
"import os\n",
"import csv, sys, json\n",
"\n",
"def run_cuffdiff_docker(samples_host_dir_1,samples_host_dir_2,samples_name_1,samples_name_2,\n",
" organism_I,host_indexes_dir_I,\n",
" host_dirname_O, threads = 1,\n",
" library_norm_method = 'quartile', fdr = 0.05,\n",
" library_type ='fr-firststrand',\n",
" index_type_I = '.gtf',\n",
" more_options=None):\n",
" '''Process RNA sequencing data\n",
" INPUT:\n",
" samples_host_dir_1 = list of sample directories for each replicate in sample 1\n",
" samples_host_dir_2 = list of sample directories for each replicate in sample 2\n",
" samples_name_1 = sample name for sample 1\n",
" samples_name_2 = sample name for sample 2\n",
" organism_I = name of index\n",
" host_indexes_dir_I = directory for indexes\n",
" index_type_I = string for index extention (e.g., '.gtf' or '.gff')\n",
" host_dirname_O = location for output on the host\n",
"\n",
" EXAMPLE:\n",
" samples_name_1 = 140818_11_OxicEvo04EcoliGlcM9_Broth-4\n",
" samples_name_2 = 140716_0_OxicEvo04pgiEcoliGlcM9_Broth-1\n",
" samples_host_dir_1 = /media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/140818_11_OxicEvo04EcoliGlcM9_Broth-4.bam (remote storage location)\n",
" samples_host_dir_2 = /media/proline/dmccloskey/Resequencing_RNA/fastq/140716_0_OxicEvo04pgiEcoliGlcM9_Broth-1/140716_0_OxicEvo04pgiEcoliGlcM9_Broth-1.bam (remote storage location)\n",
" organism_I = e_coli\n",
" host_indexes_dir_I = /media/proline/dmccloskey/Resequencing_RNA/indexes/ (remote storage location)\n",
" host_dirname_O = /media/proline/dmccloskey/Resequencing_RNA/fastq/ (remote storage location)\n",
" '''\n",
" #1. create a container named rnaseq using sequencing utilities\n",
" #2. mount the host file\n",
" #3. run docker\n",
" docker_mount_1 = '/media/Sequencing/fastq/'\n",
" docker_mount_2 = '/media/Sequencing/indexes/'\n",
"\n",
" samples_message = samples_name_1 + \"_vs_\" + samples_name_2;\n",
"\n",
" user_output = '/home/user/'+samples_message;\n",
" container_name = 'cuffdiff';\n",
" \n",
" # make the samples mount for the container\n",
" samples_mount = \"\";\n",
" docker_name_dir_1 = [];\n",
" docker_name_dir_2 = [];\n",
" for sample in samples_host_dir_1.split(','):\n",
" filename = sample.split('/')[-1];\n",
" samples_mount += \"-v \" + sample + \":\" + docker_mount_1 + filename + \" \";\n",
" docker_name_dir_1.append(docker_mount_1 + sample.split('/')[-1])\n",
" for sample in samples_host_dir_2.split(','):\n",
" filename = sample.split('/')[-1];\n",
" samples_mount += \"-v \" + sample + \":\" + docker_mount_1 + filename + \" \";\n",
" docker_name_dir_2.append(docker_mount_1 + sample.split('/')[-1])\n",
" samples_mount = samples_mount[:-1];\n",
" docker_name_dir_1_str = ','.join(docker_name_dir_1)\n",
" docker_name_dir_2_str = ','.join(docker_name_dir_2)\n",
" if not more_options:\n",
" more_options = 'None';\n",
"\n",
" rnaseq_cmd = (\"run_cuffdiff(['%s'],['%s'],'%s','%s','%s','%s',indexes_dir='%s',threads=%s,library_norm_method='%s',fdr=%s,library_type='%s',index_type='%s',more_options=%s);\" \\\n",
" %(docker_name_dir_1_str,docker_name_dir_2_str,samples_name_1,samples_name_2,\\\n",
" organism_I,user_output,docker_mount_2,threads,library_norm_method,fdr,library_type,index_type_I,more_options));\n",
" python_cmd = (\"from sequencing_utilities.cuffdiff import run_cuffdiff;%s\" %(rnaseq_cmd));\n",
" docker_run = ('docker run -u=root --name=%s %s -v %s:%s dmccloskey/sequencing_utilities python3 -c \"%s\"' %(container_name,samples_mount,host_indexes_dir_I,docker_mount_2,python_cmd));\n",
" os.system(\"echo %s\" %(docker_run));\n",
" os.system(docker_run);\n",
" #copy the output directory file out of the docker container into the host dir\n",
" docker_cp = (\"docker cp %s:%s/ %s/%s\" %(container_name,user_output,host_dirname_O,samples_message));\n",
" os.system(docker_cp)\n",
" #delete the container and the container content:\n",
" cmd = ('docker rm -v %s' %(container_name));\n",
" os.system(cmd);\n",
" \n",
"def run_cuffdiff_docker_fromCsvOrFile(filename_csv_I = None,filename_list_I = []):\n",
" '''Call run_cuffdiff_docker on a list of parameters\n",
" INPUT:\n",
" filename_list_I = [{sample_name_1:...,sample_name_2:...,},...]\n",
" '''\n",
" if filename_csv_I:\n",
" filename_list_I = read_csv(filename_csv_I);\n",
" for row_cnt,row in enumerate(filename_list_I):\n",
" cmd = (\"echo running cuffdiff for samples %s vs. %s\" %(row['samples_name_1'],row['samples_name_2']));\n",
" os.system(cmd);\n",
" run_cuffdiff_docker(row['samples_host_dir_1'],row['samples_host_dir_2'],\n",
" row['samples_name_1'],row['samples_name_2'],\n",
" row['organism_I'],row['host_indexes_dir_I'],\n",
" row['host_dirname_O'],\n",
" row['threads'],row['library_norm_method'],\n",
" row['fdr'],row['library_type'],\n",
" row['index_type_I'],\n",
" row['more_options']);\n",
" \n",
"def read_csv(filename):\n",
" \"\"\"read table data from csv file\"\"\"\n",
" data_O = [];\n",
" try:\n",
" with open(filename, 'r') as csvfile:\n",
" reader = csv.DictReader(csvfile);\n",
" try:\n",
" keys = reader.fieldnames;\n",
" for row in reader:\n",
" data_O.append(row);\n",
" except csv.Error as e:\n",
" sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e));\n",
" except IOError as e:\n",
" sys.exit('%s does not exist' % e);\n",
" return data_O;\n",
"\n",
"def main_singleFile():\n",
" \"\"\"Run cuffdiff using docker\n",
" e.g. python3 run_cuffdiff_docker.py ...\n",
" \"\"\"\n",
" from argparse import ArgumentParser\n",
" parser = ArgumentParser(\"process RNAseq data\")\n",
" parser.add_argument(\"samples_host_dir_1\", help=\"\"\"list of .bam files for samples_1\"\"\")\n",
" parser.add_argument(\"samples_host_dir_2\", help=\"\"\"list of .bam files for samples_2\"\"\")\n",
" parser.add_argument(\"samples_name_1\", help=\"\"\"sample name for samples_1\"\"\")\n",
" parser.add_argument(\"samples_name_2\", help=\"\"\"sample name for samples_2\"\"\")\n",
" parser.add_argument(\"organism_I\", help=\"\"\"name of index\"\"\")\n",
" parser.add_argument(\"host_indexes_dir_I\", help=\"\"\"directory for indexes\"\"\")\n",
" parser.add_argument(\"host_dirname_O\", help=\"\"\"location for output on the host\"\"\")\n",
" parser.add_argument(\"threads\", help=\"\"\"number of processors to use\"\"\")\n",
" parser.add_argument(\"library_norm_method\", help=\"\"\"method for library normalization\"\"\")\n",
" parser.add_argument(\"fdr\", help=\"\"\"false discover rate\"\"\")\n",
" parser.add_argument(\"library_type\", help=\"\"\"the type of library used\"\"\")\n",
" parser.add_argument(\"index_type_I\", help=\"\"\"index file type (.gtf or .gff)\"\"\")\n",
" parser.add_argument(\"more_options\", help=\"\"\"string representation of additional cuffdiff options\"\"\")\n",
" args = parser.parse_args()\n",
" run_cuffdiff_docker(args.samples_host_dir_1,args.samples_host_dir_2,\n",
" args.samples_name_1,args.samples_name_2,\n",
" args.organism_I,args.host_indexes_dir_I,\n",
" args.host_dirname_O,\n",
" args.threads,args.library_norm_method,\n",
" args.fdr,args.library_type,\n",
" args.index_type_I,args.more_options);\n",
"\n",
"\n",
"def main_batchFile():\n",
" \"\"\"process RNAseq data using docker in batch\n",
" e.g. python3 run_cuffdiff_docker.py '/media/proline/dmccloskey/Resequencing_RNA/cuffdiff_files.csv' []\n",
" \"\"\"\n",
" from argparse import ArgumentParser\n",
" parser = ArgumentParser(\"process RNAseq data\")\n",
" parser.add_argument(\"filename_csv_I\", help=\"\"\"list of files and parameters in a .csv\"\"\")\n",
" parser.add_argument(\"filename_list_I\", help=\"\"\"list of files and parameters e.g. [{sample_name_1:...,sample_name_2:...,},...]\"\"\")\n",
" args = parser.parse_args()\n",
" run_cuffdiff_docker_fromCsvOrFile(args.filename_csv_I,args.filename_list_I);\n",
"\n",
"if __name__ == \"__main__\":\n",
" #main_singleFile();\n",
" main_batchFile();"
] | [
0,
0,
0.045454545454545456,
0,
0.053763440860215055,
0.0392156862745098,
0.061224489795918366,
0.07692307692307693,
0.0392156862745098,
0.0967741935483871,
0.02564102564102564,
0,
0,
0.012048192771084338,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.005555555555555556,
0.005434782608695652,
0,
0.009708737864077669,
0.010309278350515464,
0,
0.014925373134328358,
0.03571428571428571,
0.05263157894736842,
0,
0,
0,
0.015625,
0,
0.02040816326530612,
0.030303030303030304,
0.2,
0,
0.041666666666666664,
0.03571428571428571,
0.03571428571428571,
0,
0.023809523809523808,
0.024691358024691357,
0,
0,
0.023809523809523808,
0.024691358024691357,
0,
0.025,
0,
0,
0,
0.03225806451612903,
0,
0.011049723756906077,
0.08139534883720931,
0.09166666666666666,
0.031914893617021274,
0.03723404255319149,
0.05,
0.037037037037037035,
0.024390243902439025,
0.05714285714285714,
0,
0.018867924528301886,
0.04081632653061224,
0.05,
0.2,
0.08433734939759036,
0,
0,
0,
0,
0,
0.019230769230769232,
0.0196078431372549,
0.03636363636363636,
0.041666666666666664,
0.024691358024691357,
0.0136986301369863,
0.0136986301369863,
0,
0.014084507042253521,
0.016666666666666666,
0.02127659574468085,
0.02,
0.1,
0.041666666666666664,
0,
0.058823529411764705,
0,
0,
0.021739130434782608,
0,
0.023809523809523808,
0,
0.025,
0,
0.024096385542168676,
0,
0.023255813953488372,
0.05263157894736842,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0.01098901098901099,
0.01098901098901099,
0,
0,
0,
0,
0.011627906976744186,
0,
0.010869565217391304,
0,
0,
0.012048192771084338,
0.009523809523809525,
0,
0.0136986301369863,
0.031746031746031744,
0.031746031746031744,
0.023255813953488372,
0.03278688524590164,
0.04,
0.05,
0,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0.010752688172043012,
0.007462686567164179,
0,
0.037037037037037035,
0,
0.037037037037037035,
0.041666666666666664,
0.09523809523809523
] | 152 | 0.020588 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data.Market import *
from datetime import datetime, timedelta
### <summary>
### Algorithm used for regression tests purposes
### </summary>
### <meta name="tag" content="regression test" />
class RegressionAlgorithm(QCAlgorithm):
    """Algorithm used for regression test purposes."""

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        # backtest window and starting capital
        self.SetStartDate(2013,10,7)    #Set Start Date
        self.SetEndDate(2013,10,11)     #Set End Date
        self.SetCash(10000000)          #Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        for ticker, resolution in (("SPY", Resolution.Tick),
                                   ("BAC", Resolution.Minute),
                                   ("AIG", Resolution.Hour),
                                   ("IBM", Resolution.Daily)):
            self.AddEquity(ticker, resolution)

        # throttle state: trade at most once per minute
        self.__lastTradeTicks = self.StartDate
        self.__lastTradeTradeBars = self.__lastTradeTicks
        self.__tradeEvery = timedelta(minutes=1)

    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''
        # guard: skip the event until a full trade interval has elapsed
        if self.Time - self.__lastTradeTradeBars < self.__tradeEvery:
            return
        self.__lastTradeTradeBars = self.Time

        for kvp in data.Bars:
            bar_seconds = kvp.Value.Period.total_seconds()
            if self.roundTime(self.Time, bar_seconds) != self.Time:
                pass  # intentional no-op, kept from the original

            symbol = kvp.Key
            position = self.Portfolio[symbol]
            # flip between a small long position and flat on each pass
            if position.Invested:
                self.MarketOrder(symbol, -position.Quantity)
            else:
                self.MarketOrder(symbol, 10)

    def roundTime(self, dt=None, roundTo=60):
        """Round a datetime object to any time laps in seconds
        dt : datetime object, default now.
        roundTo : Closest number of seconds to round to, default 1 minute.
        """
        if dt is None:
            dt = datetime.now()
        seconds = (dt - dt.min).seconds
        # `//` here is floor division, not a comment
        rounding = (seconds + roundTo / 2) // roundTo * roundTo
        return dt + timedelta(0, rounding - seconds, -dt.microsecond)
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Algorithm import *\n",
"from QuantConnect.Data.Market import *\n",
"from datetime import datetime, timedelta\n",
"\n",
"### <summary>\n",
"### Algorithm used for regression tests purposes\n",
"### </summary>\n",
"### <meta name=\"tag\" content=\"regression test\" />\n",
"class RegressionAlgorithm(QCAlgorithm):\n",
"\n",
" def Initialize(self):\n",
" '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''\n",
"\n",
" self.SetStartDate(2013,10,7) #Set Start Date\n",
" self.SetEndDate(2013,10,11) #Set End Date\n",
" self.SetCash(10000000) #Set Strategy Cash\n",
" # Find more symbols here: http://quantconnect.com/data\n",
" self.AddEquity(\"SPY\", Resolution.Tick)\n",
" self.AddEquity(\"BAC\", Resolution.Minute)\n",
" self.AddEquity(\"AIG\", Resolution.Hour)\n",
" self.AddEquity(\"IBM\", Resolution.Daily)\n",
"\n",
" self.__lastTradeTicks = self.StartDate\n",
" self.__lastTradeTradeBars = self.__lastTradeTicks\n",
" self.__tradeEvery = timedelta(minutes=1)\n",
"\n",
"\n",
" def OnData(self, data):\n",
" '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''\n",
" if self.Time - self.__lastTradeTradeBars < self.__tradeEvery:\n",
" return\n",
" self.__lastTradeTradeBars = self.Time\n",
"\n",
" for kvp in data.Bars:\n",
" period = kvp.Value.Period.total_seconds()\n",
"\n",
" if self.roundTime(self.Time, period) != self.Time:\n",
" pass\n",
"\n",
" symbol = kvp.Key\n",
" holdings = self.Portfolio[symbol]\n",
"\n",
" if not holdings.Invested:\n",
" self.MarketOrder(symbol, 10)\n",
" else:\n",
" self.MarketOrder(symbol, -holdings.Quantity)\n",
"\n",
"\n",
" def roundTime(self, dt=None, roundTo=60):\n",
" \"\"\"Round a datetime object to any time laps in seconds\n",
" dt : datetime object, default now.\n",
" roundTo : Closest number of seconds to round to, default 1 minute.\n",
" \"\"\"\n",
" if dt == None : dt = datetime.now()\n",
" seconds = (dt - dt.min).seconds\n",
" # // is a floor division, not a comment on following line:\n",
" rounding = (seconds+roundTo/2) // roundTo * roundTo\n",
" return dt + timedelta(0,rounding-seconds,-dt.microsecond)"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.02702702702702703,
0.02564102564102564,
0.024390243902439025,
0,
0.07142857142857142,
0.02040816326530612,
0.06666666666666667,
0.02,
0.025,
0,
0,
0.006578947368421052,
0,
0.05454545454545454,
0.05660377358490566,
0.017241379310344827,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0.06818181818181818,
0,
0,
0,
0.046153846153846156
] | 78 | 0.009033 | false |
import os
from ctypes import cast, c_void_p
import pybgfx as bgfx
runtimeDir = "../runtime/"
def loadMem(filePath):
    """Read a runtime file fully and wrap it in a bgfx memory buffer.

    filePath is taken relative to runtimeDir; returns the handle produced
    by bgfx.copy.
    """
    filePath = runtimeDir + filePath
    with open(filePath, "rb") as f:
        read_data = f.read()
        size = len(read_data)
        # NOTE(review): casting a bytes object to c_void_p relies on ctypes
        # auto-conversion; confirm this matches pybgfx's expectations.
        memory = bgfx.copy(cast(read_data, c_void_p), size)
        return memory
    # FIX: the original ended with an unreachable `return None` after the
    # in-`with` return; removed.
def loadShader(shaderName):
    """Create a bgfx shader from the compiled binary for the active renderer."""
    # per-renderer directories of pre-compiled shader binaries;
    # dx9 is the fallback for any other backend
    renderer_paths = {
        bgfx.BGFX_RENDERER_TYPE_DIRECT3D11.value: "shaders/dx11/",
        bgfx.BGFX_RENDERER_TYPE_DIRECT3D12.value: "shaders/dx11/",
        bgfx.BGFX_RENDERER_TYPE_OPENGL.value: "shaders/glsl/",
        bgfx.BGFX_RENDERER_TYPE_METAL.value: "shaders/metal/",
        bgfx.BGFX_RENDERER_TYPE_OPENGLES.value: "shaders/gles/",
    }
    active_renderer = bgfx.get_renderer_type()
    binary_path = renderer_paths.get(active_renderer, "shaders/dx9/") + shaderName + ".bin"
    return bgfx.create_shader(loadMem(binary_path))
def loadProgram(vsName, fsName):
    """Build a bgfx program from a vertex shader and an optional fragment shader."""
    vertex = loadShader(vsName)
    # fsName may be None, in which case no fragment shader is attached
    fragment = bgfx.BGFX_INVALID_HANDLE if fsName is None else loadShader(fsName)
    return bgfx.create_program(vertex, fragment, True)
| [
"import os\n",
"\n",
"from ctypes import cast, c_void_p\n",
"\n",
"import pybgfx as bgfx\n",
"\n",
"runtimeDir = \"../runtime/\"\n",
"\n",
"\n",
"def loadMem(filePath):\n",
" filePath = runtimeDir + filePath\n",
" with open(filePath, \"rb\") as f:\n",
" read_data = f.read()\n",
" size = len(read_data)\n",
" memory = bgfx.copy(cast(read_data, c_void_p), size)\n",
" return memory\n",
" return None\n",
"\n",
"\n",
"def loadShader(shaderName):\n",
" shaderPath = \"shaders/dx9/\"\n",
" rendererType = bgfx.get_renderer_type()\n",
" shaderPath = {\n",
" bgfx.BGFX_RENDERER_TYPE_DIRECT3D11.value: \"shaders/dx11/\",\n",
" bgfx.BGFX_RENDERER_TYPE_DIRECT3D12.value: \"shaders/dx11/\",\n",
" bgfx.BGFX_RENDERER_TYPE_OPENGL.value: \"shaders/glsl/\",\n",
" bgfx.BGFX_RENDERER_TYPE_METAL.value: \"shaders/metal/\",\n",
" bgfx.BGFX_RENDERER_TYPE_OPENGLES.value: \"shaders/gles/\",\n",
" }.get(rendererType, \"shaders/dx9/\")\n",
" filePath = shaderPath + shaderName + \".bin\"\n",
" fileMemory = loadMem(filePath)\n",
" shader = bgfx.create_shader(fileMemory)\n",
" return shader\n",
"\n",
"\n",
"def loadProgram(vsName, fsName):\n",
" vsh = loadShader(vsName)\n",
" fsh = bgfx.BGFX_INVALID_HANDLE\n",
" if (fsName != None):\n",
" fsh = loadShader(fsName)\n",
" return bgfx.create_program(vsh, fsh, True)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0
] | 41 | 0.000976 | false |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# platformtools
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
# Herramientas responsables de adaptar los diferentes
# cuadros de dialogo a una plataforma en concreto,
# en este caso plex
# ------------------------------------------------------------
import os
from core import config
def dialog_ok(heading, line1, line2="", line3=""):
    """Plex stub: report the dialog as acknowledged without showing anything."""
    return True
def dialog_notification(heading, message, icon=0, time=5000, sound=True):
    """Plex stub: pretend the notification was shown."""
    return True
def dialog_yesno(heading, line1, line2="", line3="", nolabel="No", yeslabel="Si", autoclose=""):
    """Plex stub: answer every yes/no dialog with yes."""
    return True
def dialog_select(heading, list):
    """Plex stub: always select index 1."""
    # NOTE: the parameter name `list` shadows the builtin; kept for API compatibility
    return 1
def dialog_progress(heading, line1, line2="", line3=""):
    """Plex stub: return a minimal progress-dialog object.

    The object only tracks a canceled flag: update() always reports success
    and close() marks the dialog as canceled.
    """
    class Dialog(object):
        def __init__(self, heading, line1, line2="", line3=""):
            self.canceled = False

        def iscanceled(self):
            return self.canceled

        def update(self, percent, text):
            return True

        def close(self):
            self.canceled = True
            return True

    return Dialog(heading, line1, line2, line3)
def dialog_progress_bg(heading, message=""):
    """Plex stub: background progress dialogs are not supported; do nothing."""
    pass
def dialog_input(default="", heading="", hidden=False):
    """Plex stub: echo back the default value instead of prompting the user."""
    return default
def dialog_numeric(type, heading, default=""):
    """Plex stub: numeric input dialogs are not supported; do nothing."""
    pass
def itemlist_refresh():
    """Plex stub: no item list to refresh on this platform."""
    pass
def itemlist_update(item):
    """Plex stub: no item list to update on this platform."""
    pass
def render_items(itemlist, parentitem):
    """Plex stub: rendering is handled elsewhere; do nothing."""
    pass
def is_playing():
    """Plex stub: playback state is unavailable; report not playing."""
    return False
def play_video(item):
    """Plex stub: direct playback is not supported; do nothing."""
    pass
def stop_video():
    """Plex stub: direct playback is not supported; do nothing."""
    pass
def show_channel_settings(list_controls=None, dict_values=None, caption="", channel="", callback=None, item=None, custom_button = None, channelpath=None):
    '''
    Show a per-channel configuration window and persist its values on close.

    Parameters: see the description in plex_config_menu.SettingsWindow
    '''
    from platformcode import plex_config_menu
    # `channel` is accepted for API compatibility but not forwarded
    return plex_config_menu.show_channel_settings(
        list_controls=list_controls, dict_values=dict_values, caption=caption,
        callback=callback, item=item, custom_button=custom_button,
        channelpath=channelpath)
def show_recaptcha(key, referer):
    """Plex stub: captcha dialogs are not supported; report no answer."""
    return None
"# -*- coding: utf-8 -*-\n",
"#------------------------------------------------------------\n",
"# pelisalacarta - XBMC Plugin\n",
"# platformtools\n",
"# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n",
"#------------------------------------------------------------\n",
"# Herramientas responsables de adaptar los diferentes \n",
"# cuadros de dialogo a una plataforma en concreto,\n",
"# en este caso plex\n",
"# ------------------------------------------------------------\n",
"\n",
"import os\n",
"from core import config\n",
"\n",
"def dialog_ok(heading, line1, line2=\"\", line3=\"\"):\n",
" return True\n",
" \n",
"def dialog_notification(heading, message, icon=0, time=5000, sound=True):\n",
" return True\n",
"\n",
"def dialog_yesno(heading, line1, line2=\"\", line3=\"\", nolabel=\"No\", yeslabel=\"Si\", autoclose=\"\"):\n",
" return True\n",
" \n",
"def dialog_select(heading, list): \n",
" return 1\n",
" \n",
"def dialog_progress(heading, line1, line2=\"\", line3=\"\"):\n",
" class Dialog(object):\n",
" def __init__(self,heading, line1, line2=\"\", line3=\"\"):\n",
" self.canceled = False\n",
" pass\n",
" \n",
" def iscanceled(self):\n",
" return self.canceled\n",
" \n",
" def update(self,percent, text):\n",
" return True\n",
" \n",
" def close(self):\n",
" self.canceled = True\n",
" return True\n",
" return Dialog(heading, line1, line2, line3)\n",
"\n",
"def dialog_progress_bg(heading, message=\"\"):\n",
" pass\n",
"\n",
"def dialog_input(default=\"\", heading=\"\", hidden=False):\n",
" return default\n",
"\n",
"def dialog_numeric(type, heading, default=\"\"):\n",
" pass\n",
" \n",
"def itemlist_refresh():\n",
" pass\n",
"\n",
"def itemlist_update(item):\n",
" pass\n",
"\n",
"def render_items(itemlist, parentitem):\n",
" pass\n",
" \n",
"def is_playing():\n",
" return False\n",
"\n",
"def play_video(item):\n",
" pass\n",
"\n",
"def stop_video():\n",
" pass\n",
"\n",
"def show_channel_settings(list_controls=None, dict_values=None, caption=\"\", channel=\"\", callback=None, item=None, custom_button = None, channelpath=None):\n",
" '''\n",
" Muestra un cuadro de configuracion personalizado para cada canal y guarda los datos al cerrarlo.\n",
" \n",
" Parametros: ver descripcion en plex_config_menu.SettingsWindow\n",
" '''\n",
" from platformcode import plex_config_menu\n",
" return plex_config_menu.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption=caption, callback=callback, item=item, custom_button=custom_button, channelpath=channelpath)\n",
" \n",
"def show_recaptcha(key, referer):\n",
" return None"
] | [
0,
0.016129032258064516,
0,
0,
0,
0.016129032258064516,
0.01818181818181818,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0.2,
0.013513513513513514,
0,
0,
0.020618556701030927,
0,
0.3333333333333333,
0.05714285714285714,
0,
0.2,
0.017543859649122806,
0.041666666666666664,
0.01694915254237288,
0.03571428571428571,
0.09090909090909091,
0.14285714285714285,
0,
0.037037037037037035,
0.14285714285714285,
0.027777777777777776,
0.05555555555555555,
0.14285714285714285,
0,
0.037037037037037035,
0.05555555555555555,
0.021739130434782608,
0,
0.022222222222222223,
0,
0,
0.017857142857142856,
0,
0,
0.02127659574468085,
0,
0.1111111111111111,
0.041666666666666664,
0,
0,
0.037037037037037035,
0,
0,
0.025,
0,
0.2,
0.05555555555555555,
0,
0,
0.045454545454545456,
0,
0,
0.05555555555555555,
0,
0,
0.025806451612903226,
0,
0.009900990099009901,
0.2,
0,
0,
0,
0.004878048780487805,
0.5,
0.029411764705882353,
0.06666666666666667
] | 81 | 0.039878 | false |
# ============================================================
# KCleaner - Version 2.9 by D. Lanik (2017)
# ------------------------------------------------------------
# Clean up Kodi
# ------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# ============================================================
import time
import xbmc
import xbmcaddon
import os
from distutils.util import strtobool
from default import DeleteFiles
from default import CompactDatabases
from default import CleanTextures
from default import deleteAddonData
# ============================================================
# Define Settings Monitor Class
# ============================================================
class SettingMonitor(xbmc.Monitor):
    """Kodi monitor that re-reads the addon settings whenever they change."""

    def __init__(self, *args, **kwargs):
        xbmc.Monitor.__init__(self)

    def onSettingsChanged(self):
        # refresh the module-level settings cache
        GetSetting()
# ============================================================
# Get settings
# ============================================================
def GetSetting():
    """Re-read the addon settings into the module-level globals.

    Updates booBackgroundRun (auto-clean service enabled) and lastrundays
    (days between automatic runs; 0 disables the schedule).
    """
    global booBackgroundRun
    global lastrundays

    __addon__ = xbmcaddon.Addon(id='script.kcleaner')
    booBackgroundRun = bool(strtobool(str(__addon__.getSetting('autoclean').title())))
    auto_interval = int(__addon__.getSetting('auto_interval'))

    # map the interval setting index to a period in days; anything outside
    # 0-3 falls back to 0 (disabled), same as the original else branch
    lastrundays = {0: 1, 1: 7, 2: 30, 3: 90}.get(auto_interval, 0)

    xbmc.log('KCLEANER SERVICE >> SETTINGS CHANGED >> SERVICE RUN: ' + str(booBackgroundRun))
    xbmc.log('KCLEANER SERVICE >> SETTINGS CHANGED >> RUN EVERY DAYS: ' + str(lastrundays))
# ============================================================
# Run cleaning according to settings
# ============================================================
def AutoClean():
    """Run one automatic cleanup pass according to the addon settings.

    Reads the 'auto_*' settings to decide which cleanup steps to run
    (caches, packages, thumbnails, addon leftovers, texture DB, database
    compaction, addon userdata), executes them, and optionally shows a
    notification with the total megabytes reclaimed.
    """
    global __addon__
    global __addonname__
    intMbDel = 0
    intMbCom = 0
    intMbTxt = 0
    intMbAdn = 0
    # Which cleanup steps are enabled in the addon settings.
    auto_cache = bool(strtobool(str(__addon__.getSetting('auto_cache').title())))
    auto_packages = bool(strtobool(str(__addon__.getSetting('auto_packages').title())))
    auto_thumbnails = bool(strtobool(str(__addon__.getSetting('auto_thumbnails').title())))
    auto_addons = bool(strtobool(str(__addon__.getSetting('auto_addons').title())))
    auto_compact = bool(strtobool(str(__addon__.getSetting('auto_compact').title())))
    auto_textures = bool(strtobool(str(__addon__.getSetting('auto_textures').title())))
    auto_userdata = bool(strtobool(str(__addon__.getSetting('auto_userdata').title())))
    auto_notification = int(__addon__.getSetting('auto_notification'))
    # Map the notification setting to (progress display mode, notify flag).
    # Fall back to (1, 1) for unexpected values: the original if/elif chain
    # left a_progress/a_notif unbound (NameError) for values outside 0-3.
    notification_modes = {0: (1, 1), 1: (1, 0), 2: (2, 1), 3: (2, 0)}
    a_progress, a_notif = notification_modes.get(auto_notification, (1, 1))
    actionToken = []
    if auto_cache:
        actionToken.append("cache")
    if auto_packages:
        actionToken.append("packages")
    if auto_thumbnails:
        actionToken.append("thumbnails")
    if auto_addons:
        actionToken.append("addons")
    # Device-specific video cache path (Apple TV) — clean it when present.
    if os.path.exists('/private/var/mobile/Library/Caches/AppleTV/Video/Other'):
        actionToken.append("atv")
    intC, intMbDel = DeleteFiles(actionToken, a_progress)
    if auto_textures:
        intC, intMbTxt = CleanTextures(a_progress)
    if auto_compact:
        intC, intMbCom = CompactDatabases(a_progress)
    if auto_userdata:
        intC, intMbAdn = deleteAddonData(a_progress)
    intMbTot = intMbDel + intMbCom + intMbTxt + intMbAdn
    mess = __addon__.getLocalizedString(30112) # Mb
    mess2 = " (%0.2f %s)" % (intMbTot, mess,)
    strMess = __addon__.getLocalizedString(30031) + mess2 # Cleanup [COLOR red]done[/COLOR].
    if a_notif == 1:
        xbmc.executebuiltin("XBMC.Notification(%s,%s,5000,%s)" % (__addonname__.encode('utf8'), strMess, __addon__.getAddonInfo('icon')))
# ============================================================
# ------------------------------------------------------------
# Main
# ------------------------------------------------------------
# ============================================================
# Addon handle and metadata used throughout the service.
__addon__ = xbmcaddon.Addon(id='script.kcleaner')
__addonwd__ = xbmc.translatePath(__addon__.getAddonInfo('path').decode("utf-8"))
__addondir__ = xbmc.translatePath(__addon__.getAddonInfo('profile').decode('utf8'))
__addonname__ = __addon__.getAddonInfo('name')
__version__ = __addon__.getAddonInfo('version')
# Defaults until the settings are read below / via SettingMonitor.
booBackgroundRun = False
lastrundays = 0
# Clear a possibly stale concurrency lock left over from a previous run.
__addon__.setSetting('lock', 'false')
if __name__ == '__main__':
    xbmc.log("KCLEANER SERVICE >> STARTED VERSION %s" % (__version__))
    booBackgroundRun = bool(strtobool(str(__addon__.getSetting('autoclean').title())))
    auto_lastrun = __addon__.getSetting('auto_lastrun')
    date_now = int(round(time.time()))
    if auto_lastrun != "":
        date_auto_lastrun = int(auto_lastrun)
        time_difference = date_now - date_auto_lastrun
        time_difference_days = int(time_difference) / 86400
    else:
        # First ever run: pretend the last cleanup happened a year ago
        # (31536000 s) so an automatic run is due immediately.
        __addon__.setSetting('auto_lastrun', str(int(date_now - 31536000)))
        date_auto_lastrun = 365
        time_difference_days = 365
    auto_interval = int(__addon__.getSetting('auto_interval'))
    # Interval setting index -> days between runs (0 = run on every check).
    if auto_interval == 0:
        lastrundays = 1
    elif auto_interval == 1:
        lastrundays = 7
    elif auto_interval == 2:
        lastrundays = 30
    elif auto_interval == 3:
        lastrundays = 90
    else:
        lastrundays = 0
    autostart_delay = int(__addon__.getSetting('autostart_delay'))
    if booBackgroundRun:
        xbmc.log("KCLEANER SERVICE >> SERVICE INIT >> LAST RUN " + str(time_difference_days) + " DAYS AGO, SET TO RUN EVERY " + str(lastrundays) + " DAYS, WITH DELAY OF " + str(autostart_delay) + " MINUTE(S)")
        if time_difference_days > lastrundays or lastrundays == 0:
            xbmc.sleep(autostart_delay * 60000)
            # The 'lock' setting guards against two concurrent cleanup runs.
            if __addon__.getSetting('lock') != 'true':
                __addon__.setSetting('lock', 'true')
                xbmc.log('KCLEANER SERVICE >> RUNNING AUTO...')
                AutoClean()
                __addon__.setSetting('auto_lastrun', str(int(round(time.time()))))
                __addon__.setSetting('lock', 'false')
    else:
        xbmc.log("KCLEANER SERVICE >> SERVICE OFF")
    monitor = xbmc.Monitor()
    monsettings = SettingMonitor()
    iCounter = 0
    while True:
        if monitor.waitForAbort(2):  # Sleep/wait for abort
            xbmc.log('KCLEANER SERVICE >> EXIT')
            break  # Abort was requested while waiting. Exit the while loop.
        else:
            if booBackgroundRun:
                iCounter += 1
                # 1800 iterations x 2 s wait: re-check roughly once an hour.
                if iCounter > 1800:
                    iCounter = 0
                    date_now = int(round(time.time()))
                    time_difference = date_now - date_auto_lastrun
                    time_difference_days = int(time_difference) / 86400
                    xbmc.log("KCLEANER SERVICE >> LAST RUN " + str(time_difference_days) + " DAYS AGO, SET TO RUN EVERY " + str(lastrundays) + " DAYS (NOW: " + str(date_now) + ")")
                    if time_difference_days > lastrundays:
                        if __addon__.getSetting('lock') != 'true':
                            __addon__.setSetting('lock', 'true')
                            xbmc.log('KCLEANER SERVICE >> RUNNING AUTO...')
                            AutoClean()
                            date_auto_lastrun = int(round(time.time()))
                            __addon__.setSetting('auto_lastrun', str(date_auto_lastrun))
                            __addon__.setSetting('lock', 'false')
                            xbmc.log('KCLEANER SERVICE >> END AUTO...')
# ------------------------------------------------------------
# ------------------------------------------------------------
# ------------------------------------------------------------
# ------------------------------------------------------------
# ------------------------------------------------------------
# ------------------------------------------------------------
# ------------------------------------------------------------
# ------------------------------------------------------------
# ------------------------------------------------------------
# ------------------------------------------------------------
| [
"# ============================================================\n",
"# KCleaner - Version 2.9 by D. Lanik (2017)\n",
"# ------------------------------------------------------------\n",
"# Clean up Kodi\n",
"# ------------------------------------------------------------\n",
"# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)\n",
"# ============================================================\n",
"\n",
"import time\n",
"import xbmc\n",
"import xbmcaddon\n",
"import os\n",
"from distutils.util import strtobool\n",
"from default import DeleteFiles\n",
"from default import CompactDatabases\n",
"from default import CleanTextures\n",
"from default import deleteAddonData\n",
"\n",
"# ============================================================\n",
"# Define Settings Monitor Class\n",
"# ============================================================\n",
"\n",
"\n",
"class SettingMonitor(xbmc.Monitor):\n",
" def __init__(self, *args, **kwargs):\n",
" xbmc.Monitor.__init__(self)\n",
"\n",
" def onSettingsChanged(self):\n",
" GetSetting()\n",
"\n",
"# ============================================================\n",
"# Get settings\n",
"# ============================================================\n",
"\n",
"\n",
"def GetSetting():\n",
" global booBackgroundRun\n",
" global lastrundays\n",
"\n",
" __addon__ = xbmcaddon.Addon(id='script.kcleaner')\n",
"\n",
" booBackgroundRun = bool(strtobool(str(__addon__.getSetting('autoclean').title())))\n",
"\n",
" auto_interval = int(__addon__.getSetting('auto_interval'))\n",
"\n",
" if auto_interval == 0:\n",
" lastrundays = 1\n",
" elif auto_interval == 1:\n",
" lastrundays = 7\n",
" elif auto_interval == 2:\n",
" lastrundays = 30\n",
" elif auto_interval == 3:\n",
" lastrundays = 90\n",
" else:\n",
" lastrundays = 0\n",
"\n",
" xbmc.log('KCLEANER SERVICE >> SETTINGS CHANGED >> SERVICE RUN: ' + str(booBackgroundRun))\n",
" xbmc.log('KCLEANER SERVICE >> SETTINGS CHANGED >> RUN EVERY DAYS: ' + str(lastrundays))\n",
"\n",
"# ============================================================\n",
"# Run cleaning according to settings\n",
"# ============================================================\n",
"\n",
"\n",
"def AutoClean():\n",
" global __addon__\n",
" global __addonname__\n",
"\n",
" intMbDel = 0\n",
" intMbCom = 0\n",
" intMbTxt = 0\n",
" intMbAdn = 0\n",
"\n",
" auto_cache = bool(strtobool(str(__addon__.getSetting('auto_cache').title())))\n",
" auto_packages = bool(strtobool(str(__addon__.getSetting('auto_packages').title())))\n",
" auto_thumbnails = bool(strtobool(str(__addon__.getSetting('auto_thumbnails').title())))\n",
" auto_addons = bool(strtobool(str(__addon__.getSetting('auto_addons').title())))\n",
" auto_compact = bool(strtobool(str(__addon__.getSetting('auto_compact').title())))\n",
" auto_textures = bool(strtobool(str(__addon__.getSetting('auto_textures').title())))\n",
" auto_userdata = bool(strtobool(str(__addon__.getSetting('auto_userdata').title())))\n",
" auto_notification = int(__addon__.getSetting('auto_notification'))\n",
"\n",
" if auto_notification == 0:\n",
" a_progress = 1\n",
" a_notif = 1\n",
" elif auto_notification == 1:\n",
" a_progress = 1\n",
" a_notif = 0\n",
" elif auto_notification == 2:\n",
" a_progress = 2\n",
" a_notif = 1\n",
" elif auto_notification == 3:\n",
" a_progress = 2\n",
" a_notif = 0\n",
"\n",
" actionToken = []\n",
"\n",
" if auto_cache:\n",
" actionToken.append(\"cache\")\n",
" if auto_packages:\n",
" actionToken.append(\"packages\")\n",
" if auto_thumbnails:\n",
" actionToken.append(\"thumbnails\")\n",
" if auto_addons:\n",
" actionToken.append(\"addons\")\n",
"\n",
" if os.path.exists('/private/var/mobile/Library/Caches/AppleTV/Video/Other'):\n",
" actionToken.append(\"atv\")\n",
"\n",
" intC, intMbDel = DeleteFiles(actionToken, a_progress)\n",
"\n",
" if auto_textures:\n",
" intC, intMbTxt = CleanTextures(a_progress)\n",
"\n",
" if auto_compact:\n",
" intC, intMbCom = CompactDatabases(a_progress)\n",
"\n",
" if auto_userdata:\n",
" intC, intMbAdn = deleteAddonData(a_progress)\n",
"\n",
" intMbTot = intMbDel + intMbCom + intMbTxt + intMbAdn\n",
" mess = __addon__.getLocalizedString(30112) # Mb\n",
" mess2 = \" (%0.2f %s)\" % (intMbTot, mess,)\n",
" strMess = __addon__.getLocalizedString(30031) + mess2 # Cleanup [COLOR red]done[/COLOR].\n",
"\n",
" if a_notif == 1:\n",
" xbmc.executebuiltin(\"XBMC.Notification(%s,%s,5000,%s)\" % (__addonname__.encode('utf8'), strMess, __addon__.getAddonInfo('icon')))\n",
"\n",
"# ============================================================\n",
"# ------------------------------------------------------------\n",
"# Main\n",
"# ------------------------------------------------------------\n",
"# ============================================================\n",
"\n",
"\n",
"__addon__ = xbmcaddon.Addon(id='script.kcleaner')\n",
"__addonwd__ = xbmc.translatePath(__addon__.getAddonInfo('path').decode(\"utf-8\"))\n",
"__addondir__ = xbmc.translatePath(__addon__.getAddonInfo('profile').decode('utf8'))\n",
"__addonname__ = __addon__.getAddonInfo('name')\n",
"__version__ = __addon__.getAddonInfo('version')\n",
"\n",
"booBackgroundRun = False\n",
"lastrundays = 0\n",
"\n",
"__addon__.setSetting('lock', 'false')\n",
"\n",
"if __name__ == '__main__':\n",
" xbmc.log(\"KCLEANER SERVICE >> STARTED VERSION %s\" % (__version__))\n",
"\n",
" booBackgroundRun = bool(strtobool(str(__addon__.getSetting('autoclean').title())))\n",
"\n",
" auto_lastrun = __addon__.getSetting('auto_lastrun')\n",
" date_now = int(round(time.time()))\n",
"\n",
" if auto_lastrun != \"\":\n",
" date_auto_lastrun = int(auto_lastrun)\n",
" time_difference = date_now - date_auto_lastrun\n",
" time_difference_days = int(time_difference) / 86400\n",
" else:\n",
" __addon__.setSetting('auto_lastrun', str(int(date_now - 31536000)))\n",
" date_auto_lastrun = 365\n",
" time_difference_days = 365\n",
"\n",
" auto_interval = int(__addon__.getSetting('auto_interval'))\n",
"\n",
" if auto_interval == 0:\n",
" lastrundays = 1\n",
" elif auto_interval == 1:\n",
" lastrundays = 7\n",
" elif auto_interval == 2:\n",
" lastrundays = 30\n",
" elif auto_interval == 3:\n",
" lastrundays = 90\n",
" else:\n",
" lastrundays = 0\n",
"\n",
" autostart_delay = int(__addon__.getSetting('autostart_delay'))\n",
"\n",
" if booBackgroundRun:\n",
" xbmc.log(\"KCLEANER SERVICE >> SERVICE INIT >> LAST RUN \" + str(time_difference_days) + \" DAYS AGO, SET TO RUN EVERY \" + str(lastrundays) + \" DAYS, WITH DELAY OF \" + str(autostart_delay) + \" MINUTE(S)\")\n",
"\n",
" if time_difference_days > lastrundays or lastrundays == 0:\n",
" xbmc.sleep(autostart_delay * 60000)\n",
"\n",
" if __addon__.getSetting('lock') != 'true':\n",
" __addon__.setSetting('lock', 'true')\n",
" xbmc.log('KCLEANER SERVICE >> RUNNING AUTO...')\n",
" AutoClean()\n",
" __addon__.setSetting('auto_lastrun', str(int(round(time.time()))))\n",
" __addon__.setSetting('lock', 'false')\n",
" else:\n",
" xbmc.log(\"KCLEANER SERVICE >> SERVICE OFF\")\n",
"\n",
" monitor = xbmc.Monitor()\n",
" monsettings = SettingMonitor()\n",
"\n",
" iCounter = 0\n",
"\n",
" while True:\n",
" if monitor.waitForAbort(2): # Sleep/wait for abort\n",
" xbmc.log('KCLEANER SERVICE >> EXIT')\n",
" break # Abort was requested while waiting. Exit the while loop.\n",
" else:\n",
" if booBackgroundRun:\n",
" iCounter += 1\n",
"\n",
" if iCounter > 1800:\n",
" iCounter = 0\n",
" date_now = int(round(time.time()))\n",
" time_difference = date_now - date_auto_lastrun\n",
" time_difference_days = int(time_difference) / 86400\n",
"\n",
" xbmc.log(\"KCLEANER SERVICE >> LAST RUN \" + str(time_difference_days) + \" DAYS AGO, SET TO RUN EVERY \" + str(lastrundays) + \" DAYS (NOW: \" + str(date_now) + \")\")\n",
"\n",
" if time_difference_days > lastrundays:\n",
" if __addon__.getSetting('lock') != 'true':\n",
" __addon__.setSetting('lock', 'true')\n",
" xbmc.log('KCLEANER SERVICE >> RUNNING AUTO...')\n",
" AutoClean()\n",
" date_auto_lastrun = int(round(time.time()))\n",
" __addon__.setSetting('auto_lastrun', str(date_auto_lastrun))\n",
" __addon__.setSetting('lock', 'false')\n",
" xbmc.log('KCLEANER SERVICE >> END AUTO...')\n",
"\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n",
"# ------------------------------------------------------------\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.011363636363636364,
0.010869565217391304,
0.011904761904761904,
0.011627906976744186,
0.011363636363636364,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00909090909090909,
0,
0,
0.007246376811594203,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.004761904761904762,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0055248618784530384,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 234 | 0.000949 | false |
import copy
import random
import sys
import time
import Tkinter
from tkMessageBox import *
# Board dimensions in cells.
BOARD_WIDTH = 10 # 10
BOARD_HEIGHT = 24 # 24
# Size of one board cell in pixels.
UNIT_X = 18 # 18
UNIT_Y = 18
# Initial placement of the tetrominoes (i.e. the pieces); pieces spawn
# partly above the visible board (negative y).
PIECE_INIT_X = 3 # 3
PIECE_INIT_Y = -4 # -4
DIFFICULTY = 1
BACKGROUND_COLOR = '#0f0f0f'
#===============================================================================
# Piece type codes per difficulty; each code doubles as the key into
# PIECE_COLOR and PIECE_SHAPE.
I_PIECE_EASY = 1
L_PIECE_EASY = 3
O_PIECE_EASY = 4
I_PIECE_NORMAL = 1
J_PIECE_NORMAL = 2
L_PIECE_NORMAL = 3
O_PIECE_NORMAL = 4
S_PIECE_NORMAL = 8
T_PIECE_NORMAL = 9
Z_PIECE_NORMAL = 10
I_PIECE_HARD = 1
J_PIECE_HARD = 2
L_PIECE_HARD = 3
O_PIECE_HARD = 4
X_PIECE_HARD = 5
H_PIECE_HARD = 6
Y_PIECE_HARD = 7
# Difficulty level | 0-2 | 0 = easy, 1 = normal, 2 = hard
if DIFFICULTY == 0:
    ALL_PIECES = [
        I_PIECE_EASY,
        L_PIECE_EASY,
        O_PIECE_EASY
    ]
if DIFFICULTY == 1:
    ALL_PIECES = [
        I_PIECE_NORMAL,
        J_PIECE_NORMAL,
        L_PIECE_NORMAL,
        O_PIECE_NORMAL,
        S_PIECE_NORMAL,
        T_PIECE_NORMAL,
        Z_PIECE_NORMAL
    ]
if DIFFICULTY == 2:
    ALL_PIECES = [
        I_PIECE_HARD,
        J_PIECE_HARD,
        L_PIECE_HARD,
        O_PIECE_HARD,
        X_PIECE_HARD,
        H_PIECE_HARD,
        Y_PIECE_HARD
    ]
PIECE_COLOR = { # Color of every piece (hex RGB)
    I_PIECE_EASY: "#F95760",
    I_PIECE_NORMAL: "#F95760",
    I_PIECE_HARD: "#F95760",
    J_PIECE_NORMAL: "#FDB171",
    J_PIECE_HARD: "#FDB171",
    L_PIECE_EASY: "#F1C76D",
    L_PIECE_NORMAL: "#F1C76D",
    L_PIECE_HARD: "#F1C76D",
    O_PIECE_EASY: "#5DCFA2",
    O_PIECE_NORMAL: "#5DCFA2",
    O_PIECE_HARD: "#5DCFA2",
    X_PIECE_HARD: "#D85BAA",
    H_PIECE_HARD: "#42C3D7",
    Y_PIECE_HARD: "#80809C",
    S_PIECE_NORMAL: "#D85BAA",
    T_PIECE_NORMAL: "#42C3D7",
    Z_PIECE_NORMAL: "#80809C"
}
# Piece shapes as lists of (x, y) cell offsets within a small bounding box.
if DIFFICULTY == 0:
    PIECE_SHAPE = {
        I_PIECE_EASY: [(1, 0), (1, 1)],
        L_PIECE_EASY: [(1, 0), (1, 1), (2, 1)],
        O_PIECE_EASY: [(1, 0), (1, 1), (2, 0), (2, 1)]
    }
elif DIFFICULTY == 1:
    PIECE_SHAPE = {
        I_PIECE_NORMAL: [(1, 0), (1, 1), (1, 2), (1, 3)],
        J_PIECE_NORMAL: [(1, 1), (1, 2), (1, 3), (2, 1)],
        L_PIECE_NORMAL: [(1, 0), (1, 1), (1, 2), (2, 2)],
        O_PIECE_NORMAL: [(1, 1), (1, 2), (2, 1), (2, 2)],
        S_PIECE_NORMAL: [(1, 1), (1, 2), (2, 2), (2, 3)],
        T_PIECE_NORMAL: [(1, 0), (1, 1), (1, 2), (2, 1)],
        Z_PIECE_NORMAL: [(1, 2), (1, 3), (2, 1), (2, 2)]
}
elif DIFFICULTY == 2:
    PIECE_SHAPE = {
    I_PIECE_HARD: [(1, 0), (1, 1), (1, 2), (1, 3), (1, 4)],
    J_PIECE_HARD: [(1, 0), (1, 2), (2, 0), (2, 1), (2, 2)],
    L_PIECE_HARD: [(1, 0), (1, 1), (1, 2), (2, 2), (3, 2)],
    O_PIECE_HARD: [(1, 0), (1, 1), (1, 2), (2, 0), (2, 2), (3, 0), (3, 1), (3, 2)],
    X_PIECE_HARD: [(1, 0), (1, 2), (2, 1), (3, 0), (3, 2)],
    H_PIECE_HARD: [(1, 0), (1, 1), (1, 2), (2, 1), (3, 0), (3, 1), (3, 2)],
    Y_PIECE_HARD: [(1, 0), (2, 1), (2, 2), (3, 0)]
    }
"""
shape = lambda pc: [((z >> 2) + 1, z & 3) for z in range(16) if (pc >> z) & 1]
"""
def new_piece():
    """Pick a random piece type from ALL_PIECES; return (shape, type code)."""
    kind = random.choice(ALL_PIECES)
    shape = copy.deepcopy(PIECE_SHAPE[kind])
    return shape, kind
#===============================================================================
"""
npx = px + (-1 if keys == "Left" else (1 if keys == "Right" else 0))
npiece = [(j, 3 - i) for (i, j) in piece] if keys == "Up" else piece #rotate
if not collide(npiece, npx, py):
piece, px = npiece, npx
if keys == "Down":
py = (j for j in range(py, BOARD_HEIGHT) if collide(piece, px, j + 1)).next()
"""
def move_piece_left():
    """Shift the falling piece one column left unless it would collide."""
    global px
    candidate = px - 1
    if not collide(piece, candidate, py):
        px = candidate
def move_piece_right():
    """Shift the falling piece one column right unless it would collide."""
    global px
    candidate = px + 1
    if not collide(piece, candidate, py):
        px = candidate
def rotate_piece():
    """Rotate the falling piece 90 degrees if the rotated shape fits."""
    global piece
    rotated = [(c, 3 - r) for (r, c) in piece]
    if not collide(rotated, px, py):
        piece = rotated
def fall_piece():
    """Hard-drop: advance py row by row until the next row would collide."""
    global py
    row = py
    while row < BOARD_HEIGHT:
        py = row
        if collide(piece, px, row + 1):
            return
        row += 1
#===============================================================================
def map_to_ui_x(i):
    """Convert a board column index to a canvas x coordinate in pixels."""
    return UNIT_X * i
def map_to_ui_y(j):
    """Convert a board row index to a canvas y coordinate in pixels."""
    return UNIT_Y * j
def ui_create_rect(i, j, color):
    """Draw board cell (i, j) as a filled rectangle on the canvas."""
    assert isinstance(i, int), i
    assert isinstance(j, int), j
    left = map_to_ui_x(i)
    top = map_to_ui_y(j)
    right = map_to_ui_x(i + 1)
    bottom = map_to_ui_y(j + 1)
    scr.create_rectangle(left, top, right, bottom, fill=color)
def redraw_ui():
    """Repaint the whole board, drawing the falling piece over the grid."""
    occupied = [(px + i, py + j) for i, j in piece]
    scr.delete("all")
    for col in range(BOARD_WIDTH):
        for row in range(BOARD_HEIGHT):
            if (col, row) in occupied:
                cell_color = PIECE_COLOR[pc]
            else:
                cell_color = PIECE_COLOR.get(board[row][col], BACKGROUND_COLOR)
            ui_create_rect(col, row, cell_color)
#===============================================================================
def reset_score():
    """Reset the global score counter to zero."""
    global score
    score = 0
def get_score():
    """Return the current global score."""
    return score
def incr_score(value):
    """Add *value* (must be an int) to the global score."""
    global score
    assert isinstance(value, int), value
    score += value
#===============================================================================
"""
collide = lambda piece, px, py: [1 for (i, j) in piece if board[j + py][i + px]]
"""
def collide(piece, px, py):
    """Return True if the piece placed at (px, py) hits a wall, the floor
    or an occupied board cell.  Cells above the board (y < 0) are ignored.
    """
    assert isinstance(px, int), px
    assert isinstance(py, int), py
    for (di, dj) in piece:
        col = px + di
        row = py + dj
        if col < 0 or col >= BOARD_WIDTH or row >= BOARD_HEIGHT:
            return True
        if row >= 0 and board[row][col]:
            return True
    return False
#===============================================================================
def new_board_lines(num):
    """Return *num* fresh empty board rows (lists of zeros)."""
    assert isinstance(num, int), num
    return [[0 for _ in range(BOARD_WIDTH)] for _ in range(num)]
board = new_board_lines(BOARD_HEIGHT)
def place_piece(piece, px, py, pc):
    """Stamp the piece's cells into the board grid using color code *pc*.

    Cells that fall outside the board bounds are silently skipped.
    """
    for di, dj in piece:
        col = px + di
        row = py + dj
        if 0 <= col < BOARD_WIDTH and 0 <= row < BOARD_HEIGHT:
            board[row][col] = pc
def clear_complete_lines():
    """Drop full rows, pad with fresh empty rows on top; return rows cleared."""
    global board
    remaining = [row for row in board if 0 in row]
    cleared = len(board) - len(remaining)
    if cleared:
        board = new_board_lines(cleared) + remaining
    return cleared
def tjena():
    """Clear complete lines; kept for compatibility.

    NOTE(review): this was a line-for-line duplicate of
    clear_complete_lines() ("tjena" is Swedish for "hi"); it now simply
    delegates.  Its only reference is commented-out code in game_over().
    """
    return clear_complete_lines()
#===============================================================================
def game_over():
    """Show a modal "game over" dialog reporting the final score."""
    final = get_score()
    showerror("Answer", "GAME OVER: score %i" % final)
    #restore_tetris()
    #tjena()
#===============================================================================
def tick(e=None):
    """Advance the game one step.

    Called both as the Tkinter key-event handler (``e`` is the key event)
    and as the timer callback (``e`` is None).  Key events move/rotate the
    current piece; timer ticks apply gravity, lock a landed piece, clear
    complete lines, update the score and schedule the next tick.

    Fixes over the original: ``e is None`` instead of ``e == None`` and
    removal of a leftover ``print score`` debug statement.
    """
    global piece, px, py, pc
    keys = e.keysym if e else ""  # key symbol, or "" on a timer tick
    if keys == 'Left':
        move_piece_left()
    elif keys == 'Right':
        move_piece_right()
    elif keys == 'Up':
        rotate_piece()
    elif keys == 'Down':
        fall_piece()
    if e is None:  # timer tick: apply gravity
        if collide(piece, px, py + 1):
            if py < 0:
                # Piece locked above the visible board: the stack reached
                # the top, so the game is over and no next tick is scheduled.
                game_over()
                return
            place_piece(piece, px, py, pc)
            piece, pc = new_piece()
            px, py = PIECE_INIT_X, PIECE_INIT_Y
        else:
            py += 1
        s = clear_complete_lines()
        if s:
            incr_score(2 ** s)  # 2/4/8/16 points for 1-4 cleared lines
        scr.after(300, tick)  # schedule the next gravity tick
    redraw_ui()  # repaint after every event
#===============================================================================
# ---- Module-level game state (initialised for real in init_tetris) ----
board = None  # 2-D list [row][col] of piece color codes, 0 = empty
piece = None  # list of (dx, dy) cell offsets of the falling piece
pc = None  # type/color code of the falling piece
px = PIECE_INIT_X  # x position (column) of the falling piece
py = PIECE_INIT_Y  # y position (row) of the falling piece
score = 0  # current score
scr = None  # Tkinter canvas, created in init_tetris()
def ctime():
    """Return the current Unix timestamp as a string."""
    return str(time.time())
def init_tetris(): # Sets up everything needed to start playing: the board, the first piece and a score of 0
    """Initialise the game state, build the canvas and enter the Tk main loop."""
    global board, piece, pc, scr
    board = new_board_lines(BOARD_HEIGHT)
    piece, pc = new_piece()
    reset_score()
    # Canvas sized to the board in pixels; tick() serves as both the timer
    # callback and the key handler.
    scr = Tkinter.Canvas(width=map_to_ui_x(BOARD_WIDTH), height=map_to_ui_y(BOARD_HEIGHT), bg=BACKGROUND_COLOR)
    scr.after(300, tick)
    scr.bind_all("<Key>", tick)
    scr.pack()
    scr.mainloop()
# for line in board: print '\t'.join(str(v) for v in line)
# print len(board)
# print px,py
# Start the game when run as a script.
if __name__ == '__main__':
    init_tetris()
"import copy\n",
"import random\n",
"import sys\n",
"import time\n",
"import Tkinter\n",
"from tkMessageBox import *\n",
"\n",
"BOARD_WIDTH = 10 # 10\n",
"BOARD_HEIGHT = 24 # 24\n",
"\n",
"UNIT_X = 18 # 18\n",
"UNIT_Y = 18 \n",
"\n",
"# Centrering av tetrominoerna (alltså klossarna)\n",
"PIECE_INIT_X = 3 # 3\n",
"PIECE_INIT_Y = -4 # -4\n",
"\n",
"DIFFICULTY = 1\n",
"\n",
"BACKGROUND_COLOR = '#0f0f0f'\n",
"\n",
"#===============================================================================\n",
"\n",
"I_PIECE_EASY = 1\n",
"L_PIECE_EASY = 3\n",
"O_PIECE_EASY = 4\n",
"\n",
"I_PIECE_NORMAL = 1\n",
"J_PIECE_NORMAL = 2\n",
"L_PIECE_NORMAL = 3\n",
"O_PIECE_NORMAL = 4\n",
"S_PIECE_NORMAL = 8\n",
"T_PIECE_NORMAL = 9\n",
"Z_PIECE_NORMAL = 10\n",
"\n",
"I_PIECE_HARD = 1\n",
"J_PIECE_HARD = 2\n",
"L_PIECE_HARD = 3\n",
"O_PIECE_HARD = 4\n",
"X_PIECE_HARD = 5\n",
"H_PIECE_HARD = 6\n",
"Y_PIECE_HARD = 7\n",
"\n",
"# Svårighetsgrad | 0-2 | 0 = easy, 1 = normal, 2 = hard\n",
"\n",
"if DIFFICULTY == 0:\n",
"\n",
" ALL_PIECES = [\n",
" I_PIECE_EASY,\n",
" L_PIECE_EASY,\n",
" O_PIECE_EASY\n",
" ]\n",
"\n",
"if DIFFICULTY == 1:\n",
"\n",
" ALL_PIECES = [\n",
" I_PIECE_NORMAL,\n",
" J_PIECE_NORMAL,\n",
" L_PIECE_NORMAL,\n",
" O_PIECE_NORMAL,\n",
" S_PIECE_NORMAL,\n",
" T_PIECE_NORMAL,\n",
" Z_PIECE_NORMAL\n",
" ]\n",
"\n",
"if DIFFICULTY == 2:\n",
"\n",
" ALL_PIECES = [\n",
" I_PIECE_HARD,\n",
" J_PIECE_HARD,\n",
" L_PIECE_HARD,\n",
" O_PIECE_HARD,\n",
" X_PIECE_HARD,\n",
" H_PIECE_HARD,\n",
" Y_PIECE_HARD\n",
" ]\n",
"\n",
"PIECE_COLOR = { #Färg på alla bitar\n",
" I_PIECE_EASY: \"#F95760\",\n",
" I_PIECE_NORMAL: \"#F95760\",\n",
" I_PIECE_HARD: \"#F95760\",\n",
"\n",
" J_PIECE_NORMAL: \"#FDB171\",\n",
" J_PIECE_HARD: \"#FDB171\",\n",
"\n",
" L_PIECE_EASY: \"#F1C76D\",\n",
" L_PIECE_NORMAL: \"#F1C76D\",\n",
" L_PIECE_HARD: \"#F1C76D\",\n",
"\n",
" O_PIECE_EASY: \"#5DCFA2\",\n",
" O_PIECE_NORMAL: \"#5DCFA2\",\n",
" O_PIECE_HARD: \"#5DCFA2\",\n",
"\n",
" X_PIECE_HARD: \"#D85BAA\",\n",
" H_PIECE_HARD: \"#42C3D7\",\n",
" Y_PIECE_HARD: \"#80809C\",\n",
"\n",
" S_PIECE_NORMAL: \"#D85BAA\",\n",
" T_PIECE_NORMAL: \"#42C3D7\",\n",
" Z_PIECE_NORMAL: \"#80809C\"\n",
"}\n",
"\n",
"if DIFFICULTY == 0:\n",
"\n",
" PIECE_SHAPE = {\n",
" I_PIECE_EASY: [(1, 0), (1, 1)],\n",
" L_PIECE_EASY: [(1, 0), (1, 1), (2, 1)],\n",
" O_PIECE_EASY: [(1, 0), (1, 1), (2, 0), (2, 1)]\n",
" }\n",
"\n",
"elif DIFFICULTY == 1:\n",
" PIECE_SHAPE = {\n",
" I_PIECE_NORMAL: [(1, 0), (1, 1), (1, 2), (1, 3)],\n",
" J_PIECE_NORMAL: [(1, 1), (1, 2), (1, 3), (2, 1)],\n",
" L_PIECE_NORMAL: [(1, 0), (1, 1), (1, 2), (2, 2)],\n",
" O_PIECE_NORMAL: [(1, 1), (1, 2), (2, 1), (2, 2)],\n",
" S_PIECE_NORMAL: [(1, 1), (1, 2), (2, 2), (2, 3)],\n",
" T_PIECE_NORMAL: [(1, 0), (1, 1), (1, 2), (2, 1)],\n",
" Z_PIECE_NORMAL: [(1, 2), (1, 3), (2, 1), (2, 2)]\n",
"}\n",
"\n",
"elif DIFFICULTY == 2:\n",
" PIECE_SHAPE = {\n",
" I_PIECE_HARD: [(1, 0), (1, 1), (1, 2), (1, 3), (1, 4)],\n",
" J_PIECE_HARD: [(1, 0), (1, 2), (2, 0), (2, 1), (2, 2)],\n",
" L_PIECE_HARD: [(1, 0), (1, 1), (1, 2), (2, 2), (3, 2)],\n",
" O_PIECE_HARD: [(1, 0), (1, 1), (1, 2), (2, 0), (2, 2), (3, 0), (3, 1), (3, 2)],\n",
" X_PIECE_HARD: [(1, 0), (1, 2), (2, 1), (3, 0), (3, 2)],\n",
" H_PIECE_HARD: [(1, 0), (1, 1), (1, 2), (2, 1), (3, 0), (3, 1), (3, 2)],\n",
" Y_PIECE_HARD: [(1, 0), (2, 1), (2, 2), (3, 0)]\n",
" }\n",
"\n",
"\"\"\"\n",
"shape = lambda pc: [((z >> 2) + 1, z & 3) for z in range(16) if (pc >> z) & 1]\n",
"\"\"\"\n",
"\n",
"def new_piece(): #Gör att nästa bit som skapas är en slumpmässigt vald av bitarna i \"ALL_PIECES\"\n",
" p = random.choice(ALL_PIECES)\n",
" p_shape = copy.deepcopy(PIECE_SHAPE[p])\n",
" return p_shape, p \n",
"\n",
"\n",
"#===============================================================================\n",
"\"\"\"\n",
" npx = px + (-1 if keys == \"Left\" else (1 if keys == \"Right\" else 0)) \n",
" npiece = [(j, 3 - i) for (i, j) in piece] if keys == \"Up\" else piece #rotate\n",
" if not collide(npiece, npx, py):\n",
" piece, px = npiece, npx\n",
" if keys == \"Down\":\n",
" py = (j for j in range(py, BOARD_HEIGHT) if collide(piece, px, j + 1)).next()\n",
"\"\"\"\n",
"def move_piece_left(): #Flyttar biten till vänster om den inte blir blockerad\n",
" global px\n",
" npx = px - 1\n",
" if not collide(piece, npx, py):\n",
" px = npx \n",
"\n",
"\n",
"def move_piece_right(): #Flyttar biten till höger om den inte blir blockerad\n",
" global px\n",
" npx = px + 1\n",
" if not collide(piece, npx, py):\n",
" px = npx \n",
"\n",
"\n",
"def rotate_piece(): #Roterar biten om den kan roteras utan att krocka i något runtom den\n",
" global piece\n",
" npiece = [(j, 3 - i) for (i, j) in piece]\n",
" if not collide(npiece, px, py):\n",
" piece = npiece \n",
"\n",
"\n",
"def fall_piece(): #Gör att biten faller nedåt 1 ruta i taget tills den antingen landar på en bit eller på botten av spel-planen\n",
" global py\n",
" for j in range(py, BOARD_HEIGHT):\n",
" py = j\n",
" if collide(piece, px, j + 1):\n",
" return \n",
"\n",
"\n",
"#===============================================================================\n",
"\n",
"def map_to_ui_x(i): \n",
" return i * UNIT_X \n",
"\n",
"def map_to_ui_y(j):\n",
" return j * UNIT_Y \n",
"\n",
"def ui_create_rect(i, j, color):\n",
" assert isinstance(i, int), i\n",
" assert isinstance(j, int), j\n",
" x0 = map_to_ui_x(i)\n",
" y0 = map_to_ui_y(j)\n",
" x1 = map_to_ui_x(i + 1)\n",
" y1 = map_to_ui_y(j + 1)\n",
" scr.create_rectangle(x0, y0, x1, y1, fill=color)\n",
"\n",
"def redraw_ui():\n",
" piece_region = [(i + px, j + py) for i, j in piece]\n",
"\n",
" scr.delete(\"all\")\n",
" for i, j in [(i, j) for i in range(BOARD_WIDTH) for j in range(BOARD_HEIGHT)]:\n",
" if (i, j) in piece_region:\n",
" color = PIECE_COLOR[pc]\n",
" else:\n",
" color = PIECE_COLOR.get(board[j][i], BACKGROUND_COLOR)\n",
" ui_create_rect(i, j, color)\n",
"\n",
"#===============================================================================\n",
"\n",
"def reset_score():\n",
" global score\n",
" score = 0\n",
"\n",
"\n",
"def get_score():\n",
" return score \n",
"\n",
"\n",
"def incr_score(value):\n",
" global score\n",
" assert isinstance(value, int), value\n",
" score += value\n",
"\n",
"#===============================================================================\n",
"\n",
"\"\"\"\n",
"collide = lambda piece, px, py: [1 for (i, j) in piece if board[j + py][i + px]]\n",
"\"\"\"\n",
"def collide(piece, px, py): \n",
" assert isinstance(px, int), px\n",
" assert isinstance(py, int), py\n",
" for (i, j) in piece:\n",
" x = px + i\n",
" y = py + j\n",
" if not (0 <= x < BOARD_WIDTH):\n",
" return True\n",
" if y >= BOARD_HEIGHT:\n",
" return True\n",
" if y < 0:\n",
" continue\n",
" if board[y][x]:\n",
" return True\n",
" return False \n",
"\n",
"#===============================================================================\n",
"\n",
"def new_board_lines(num):\n",
" assert isinstance(num, int), num\n",
" return [[0] * BOARD_WIDTH for j in range(num)]\n",
"\n",
"board = new_board_lines(BOARD_HEIGHT)\n",
"\n",
"def place_piece(piece, px, py, pc): #Tillåter dig att flytta biten åt höger/vänster/nedåt så länge den inte blockeras av en annan bit eller planens kant/botten\n",
" \"\"\"\n",
" for i, j in piece:\n",
" board[j + py][i + px] = pc\n",
" \"\"\"\n",
" for i, j in piece:\n",
" x = px + i\n",
" y = py + j\n",
" if not (0 <= x < BOARD_WIDTH):\n",
" continue\n",
" if not (0 <= y < BOARD_HEIGHT):\n",
" continue\n",
" board[y][x] = pc \n",
"\n",
"def clear_complete_lines():\n",
" global board\n",
" nb = [l for l in board if 0 in l] # \n",
" s = len(board) - len(nb)\n",
" if s:\n",
" board = new_board_lines(s) + nb\n",
" return s\n",
"\n",
"def tjena():\n",
" global board\n",
" nb = [l for l in board if 0 in l] # \n",
" s = len(board) - len(nb)\n",
" if s:\n",
" board = new_board_lines(s) + nb\n",
" return s\n",
"\n",
"#===============================================================================\n",
"\n",
"def game_over():\n",
" showerror(\"Answer\", \"GAME OVER: score %i\" % get_score())\n",
" #restore_tetris()\n",
" #tjena()\n",
"\n",
"#===============================================================================\n",
"\n",
"def tick(e=None):\n",
" global piece, px, py, pc\n",
"\n",
" keys = e.keysym if e else \"\" # get key event\n",
"\n",
" if keys == 'Left':\n",
" move_piece_left()\n",
" elif keys == 'Right':\n",
" move_piece_right()\n",
" elif keys == 'Up':\n",
" rotate_piece()\n",
" elif keys == 'Down':\n",
" fall_piece()\n",
"\n",
"\n",
" if e == None:\n",
" if collide(piece, px, py + 1):\n",
" if py < 0:\n",
" game_over()\n",
" return #Ger gameover och anropar game_over funktionen när bitarna kommer för högt upp så att spelet inte kan fortsättas\n",
"\n",
" place_piece(piece, px, py, pc)\n",
"\n",
" piece, pc = new_piece()\n",
" px, py = PIECE_INIT_X, PIECE_INIT_Y\n",
"\n",
" else:\n",
" py += 1\n",
"\n",
" s = clear_complete_lines()\n",
" if s:\n",
" incr_score(2 ** s) \n",
" print score\n",
"\n",
" scr.after(300, tick)\n",
"\n",
" redraw_ui() #Uppdaterar UI hela tiden\n",
"\n",
"#===============================================================================\n",
"\n",
"board = None\n",
"piece = None\n",
"pc = None\n",
"px = PIECE_INIT_X\n",
"py = PIECE_INIT_Y\n",
"score = 0\n",
"scr = None\n",
"\n",
"def ctime():\n",
" ctime = str(time.time())\n",
" return ctime\n",
"\n",
"def init_tetris(): #Anropar alla funktioner som behövs för att börja spela, spelplan, bitarna som kommer användas och att scoren startar på 0\n",
" global board, piece, pc, scr\n",
" board = new_board_lines(BOARD_HEIGHT)\n",
" piece, pc = new_piece() \n",
" reset_score()\n",
"\n",
" scr = Tkinter.Canvas(width=map_to_ui_x(BOARD_WIDTH), height=map_to_ui_y(BOARD_HEIGHT), bg=BACKGROUND_COLOR)\n",
" scr.after(300, tick)\n",
" scr.bind_all(\"<Key>\", tick)\n",
" scr.pack()\n",
" scr.mainloop()\n",
"\n",
"# for line in board: print '\\t'.join(str(v) for v in line)\n",
"# print len(board)\n",
" # print px,py\n",
"\n",
"if __name__ == '__main__':\n",
"\n",
" init_tetris()"
] | [
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0.043478260869565216,
0,
0.058823529411764705,
0.07692307692307693,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.5,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041237113402061855,
0,
0,
0.04,
0,
0,
0.024691358024691357,
0,
0.013513513513513514,
0.012048192771084338,
0,
0,
0,
0.011627906976744186,
0,
0.038461538461538464,
0,
0,
0,
0.05555555555555555,
0,
0,
0.025974025974025976,
0,
0,
0,
0.05555555555555555,
0,
0,
0.033707865168539325,
0,
0,
0,
0.041666666666666664,
0,
0,
0.0234375,
0,
0,
0,
0,
0.05,
0,
0,
0.024691358024691357,
0,
0.047619047619047616,
0.043478260869565216,
0,
0.05,
0.043478260869565216,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0,
0.25,
0.012345679012345678,
0,
0.06896551724137931,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0.024691358024691357,
0,
0.038461538461538464,
0,
0,
0,
0.02631578947368421,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0.03571428571428571,
0,
0.07317073170731707,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0.07317073170731707,
0,
0,
0,
0,
0,
0.024691358024691357,
0,
0.058823529411764705,
0.015384615384615385,
0.038461538461538464,
0.058823529411764705,
0,
0.024691358024691357,
0,
0.05555555555555555,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.014598540145985401,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0.047619047619047616,
0,
0.024691358024691357,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.028169014084507043,
0,
0,
0.034482758620689655,
0,
0,
0.008928571428571428,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667,
0,
0.037037037037037035,
0,
0.058823529411764705
] | 363 | 0.00973 | false |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem299.py
#
# Three similar triangles
# =======================
# Published on Saturday, 3rd July 2010, 01:00 am
#
# Four points with integer coordinates are selected:A(a, 0), B(b, 0), C(0, c)
# and D(0, d), with 0 a b and 0 c d. Point P, also with integer
# coordinates, is chosen on the line AC so that the three triangles ABP, CDP
# and BDP are all similar. It is easy to prove that the three triangles can be
# similar, only if a=c. So, given that a=c, we are looking for triplets (a,b,d)
# such that at least one point P (with integer coordinates) exists on AC,
# making the three triangles ABP, CDP and BDP all similar. For example, if
# (a,b,d)=(2,3,4), it can be easily verified that point P(1,1) satisfies the
# above condition. Note that the triplets (2,3,4) and (2,4,3) are considered
# as distinct, although point P(1,1) is common for both. If b+d 100, there are
# 92 distinct triplets (a,b,d) such that point P exists. If b+d 100 000,
# there are 320471 distinct triplets (a,b,d) such that point P exists. If b+d
# 100 000 000, how many distinct triplets (a,b,d) are there such that point P
# exists?
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
| [
"# -*- coding: utf-8 -*-\n",
"# ProjectEuler/src/python/problem299.py\n",
"#\n",
"# Three similar triangles\n",
"# =======================\n",
"# Published on Saturday, 3rd July 2010, 01:00 am\n",
"#\n",
"# Four points with integer coordinates are selected:A(a, 0), B(b, 0), C(0, c)\n",
"# and D(0, d), with 0 a b and 0 c d. Point P, also with integer\n",
"# coordinates, is chosen on the line AC so that the three triangles ABP, CDP\n",
"# and BDP are all similar. It is easy to prove that the three triangles can be\n",
"# similar, only if a=c. So, given that a=c, we are looking for triplets (a,b,d)\n",
"# such that at least one point P (with integer coordinates) exists on AC,\n",
"# making the three triangles ABP, CDP and BDP all similar. For example, if\n",
"# (a,b,d)=(2,3,4), it can be easily verified that point P(1,1) satisfies the\n",
"# above condition. Note that the triplets (2,3,4) and (2,4,3) are considered\n",
"# as distinct, although point P(1,1) is common for both. If b+d 100, there are\n",
"# 92 distinct triplets (a,b,d) such that point P exists. If b+d 100 000,\n",
"# there are 320471 distinct triplets (a,b,d) such that point P exists. If b+d\n",
"# 100 000 000, how many distinct triplets (a,b,d) are there such that point P\n",
"# exists?\n",
"\n",
"import projecteuler as pe\n",
"\n",
"def main():\n",
" pass\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0.037037037037037035,
0
] | 29 | 0.004151 | false |
# coding:utf-8
'''
angle game augmented reality v0.1 with python
winxos 2016-03-12
'''
import cv2
import numpy as np
import time
from datetime import datetime
import math
WIN_NAME="ANGLE AR v1"
class board:
mask = None
board_field = None
showflag = True
solved = False
img_ans = None
sum = 0 #for fps
pt = 0 #for fps
img=None
def __init__(self):
self.cam = cv2.VideoCapture(0)
self.cam.set(3, 800)
self.cam.set(4, 600)
w = self.cam.get(3)
h = self.cam.get(4)
print w, h
def on_mouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print (x, y)
if x < 100 and y < 100: #
if self.img != None: #
cv2.imwrite(datetime.now().strftime("%m%d%H%M%S") + ".png", self.img_ans) #
print "save png file to:\n", datetime.now().strftime("%m%d%H%M%S") + ".png"
def exact_img(self, win, img, cnt):#warp exact
pass
def add_warp_img(self,src,sub,cnt):
r,c,_ = src.shape
pts = cnt.reshape(4, 2)
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
M = cv2.getPerspectiveTransform(np.array([[0,0],[599,0],[599,599],[0,599]],np.float32), rect)
return cv2.warpPerspective(sub, M,(c,r))
def get_state(self,img):
t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)
w = img.shape[1]
h = img.shape[0]
return ans
def find_roi(self, img):#find main board
img = cv2.GaussianBlur(img, (3, 3), 0) #
#t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)
img = cv2.Canny(img, 10, 100)
kernel = np.ones((3,3), np.uint8)
#img = cv2.erode(img, kernel)
#img = cv2.dilate(img, kernel)
#img = cv2.erode(img, kernel)
lines = cv2.HoughLinesP(img, 1, math.pi / 180,100,None,30,10)
circles = cv2.HoughCircles(img,cv2.cv.CV_HOUGH_GRADIENT,1,200)
if circles is not None:
# convert the (x, y) coordinates and radius of the circles to integers
circles = np.round(circles[0, :]).astype("int")
# loop over the (x, y) coordinates and radius of the circles
for (x, y, r) in circles:
# draw the circle in the output image, then draw a rectangle
# corresponding to the center of the circle
cv2.circle(self.img, (x, y), r, (0, 255, 0), 4)
cv2.rectangle(self.img, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
if len(lines)>0:
for x1,y1,x2,y2 in lines[0]:
cv2.line(self.img,(x1,y1),(x2,y2),(0,255,0),2)
#cnts, _ = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[1:]
cv2.imshow("img",img)
#cv2.drawContours(self.img, cnts,-1, (0,0,255),1)
def get_fps(self, t):
self.sum += t
self.pt += 1
if self.pt > 100:
self.pt = 1
self.sum = t
return int(self.pt / self.sum)
def run(self): #
while True:
st = time.clock()
ret, self.img = self.cam.read() #
try:
self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
self.find_roi(self.gray)
cv2.putText(self.img, "fps:" + str(self.get_fps((time.clock() - st))),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 250, 0), 1)
key = cv2.waitKey(20)
if(key == 27):
break #
if self.img_ans != None:
#ret, mask = cv2.threshold(cv2.cvtColor(self.img_ans,cv2.COLOR_BGR2GRAY), 1, 255, cv2.THRESH_BINARY)
#mask_inv = cv2.bitwise_not(mask) #
#img1_bg = cv2.bitwise_and(self.img,self.img,mask = mask_inv)#
#img2_fg = cv2.bitwise_and(self.img_ans,self.img_ans,mask = mask) #
#self.img = cv2.add(img1_bg,img2_fg)
self.img = cv2.add(self.img,self.img_ans)
self.solved = True
cv2.imshow(WIN_NAME, self.img)
cv2.setMouseCallback(WIN_NAME, self.on_mouse) #
except Exception,e:
print(e)
self.cam.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
b = board()
b.run()
| [
"# coding:utf-8\n",
"'''\n",
"angle game augmented reality v0.1 with python\n",
"winxos 2016-03-12\n",
"'''\n",
"import cv2\n",
"import numpy as np\n",
"import time\n",
"from datetime import datetime\n",
"import math\n",
"WIN_NAME=\"ANGLE AR v1\"\n",
"\n",
"class board:\n",
" mask = None\n",
" board_field = None\n",
" showflag = True\n",
" solved = False\n",
" img_ans = None\n",
" sum = 0 #for fps\n",
" pt = 0 #for fps\n",
" img=None\n",
" def __init__(self):\n",
" self.cam = cv2.VideoCapture(0)\n",
" self.cam.set(3, 800)\n",
" self.cam.set(4, 600)\n",
" w = self.cam.get(3)\n",
" h = self.cam.get(4)\n",
" print w, h\n",
"\n",
" def on_mouse(self, event, x, y, flags, param):\n",
" if event == cv2.EVENT_LBUTTONDOWN:\n",
" print (x, y)\n",
" if x < 100 and y < 100: #\n",
" if self.img != None: #\n",
" cv2.imwrite(datetime.now().strftime(\"%m%d%H%M%S\") + \".png\", self.img_ans) #\n",
" print \"save png file to:\\n\", datetime.now().strftime(\"%m%d%H%M%S\") + \".png\"\n",
" def exact_img(self, win, img, cnt):#warp exact\n",
" pass\n",
" def add_warp_img(self,src,sub,cnt):\n",
" r,c,_ = src.shape\n",
" pts = cnt.reshape(4, 2)\n",
" rect = np.zeros((4, 2), dtype=\"float32\")\n",
" s = pts.sum(axis=1)\n",
" rect[0] = pts[np.argmin(s)]\n",
" rect[2] = pts[np.argmax(s)]\n",
" diff = np.diff(pts, axis=1)\n",
" rect[1] = pts[np.argmin(diff)]\n",
" rect[3] = pts[np.argmax(diff)]\n",
" M = cv2.getPerspectiveTransform(np.array([[0,0],[599,0],[599,599],[0,599]],np.float32), rect)\n",
" return cv2.warpPerspective(sub, M,(c,r))\n",
" def get_state(self,img):\n",
" t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)\n",
" w = img.shape[1]\n",
" h = img.shape[0]\n",
"\n",
" return ans\n",
"\n",
" def find_roi(self, img):#find main board\n",
" img = cv2.GaussianBlur(img, (3, 3), 0) #\n",
" #t, img = cv2.threshold(img,0,255,cv2.THRESH_OTSU)\n",
" img = cv2.Canny(img, 10, 100)\n",
" kernel = np.ones((3,3), np.uint8)\n",
" #img = cv2.erode(img, kernel)\n",
" #img = cv2.dilate(img, kernel)\n",
" #img = cv2.erode(img, kernel)\n",
" lines = cv2.HoughLinesP(img, 1, math.pi / 180,100,None,30,10)\n",
" circles = cv2.HoughCircles(img,cv2.cv.CV_HOUGH_GRADIENT,1,200)\n",
" if circles is not None:\n",
"\t # convert the (x, y) coordinates and radius of the circles to integers\n",
"\t circles = np.round(circles[0, :]).astype(\"int\")\n",
"\t # loop over the (x, y) coordinates and radius of the circles\n",
"\t for (x, y, r) in circles:\n",
"\t\t # draw the circle in the output image, then draw a rectangle\n",
"\t\t # corresponding to the center of the circle\n",
"\t\t cv2.circle(self.img, (x, y), r, (0, 255, 0), 4)\n",
"\t\t cv2.rectangle(self.img, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)\n",
" \n",
" if len(lines)>0:\n",
" for x1,y1,x2,y2 in lines[0]: \n",
" cv2.line(self.img,(x1,y1),(x2,y2),(0,255,0),2) \n",
" #cnts, _ = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
" #cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[1:]\n",
" cv2.imshow(\"img\",img)\n",
" #cv2.drawContours(self.img, cnts,-1, (0,0,255),1)\n",
" def get_fps(self, t):\n",
" self.sum += t\n",
" self.pt += 1\n",
" if self.pt > 100:\n",
" self.pt = 1\n",
" self.sum = t\n",
" return int(self.pt / self.sum)\n",
"\n",
" def run(self): #\n",
" while True:\n",
" st = time.clock()\n",
" ret, self.img = self.cam.read() #\n",
" try:\n",
" self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n",
" self.find_roi(self.gray)\n",
" cv2.putText(self.img, \"fps:\" + str(self.get_fps((time.clock() - st))),\n",
" (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 250, 0), 1)\n",
" key = cv2.waitKey(20)\n",
" if(key == 27):\n",
" break #\n",
" if self.img_ans != None:\n",
" #ret, mask = cv2.threshold(cv2.cvtColor(self.img_ans,cv2.COLOR_BGR2GRAY), 1, 255, cv2.THRESH_BINARY)\n",
" #mask_inv = cv2.bitwise_not(mask) #\n",
" #img1_bg = cv2.bitwise_and(self.img,self.img,mask = mask_inv)#\n",
" #img2_fg = cv2.bitwise_and(self.img_ans,self.img_ans,mask = mask) #\n",
" #self.img = cv2.add(img1_bg,img2_fg)\n",
" self.img = cv2.add(self.img,self.img_ans)\n",
" self.solved = True\n",
" cv2.imshow(WIN_NAME, self.img)\n",
" cv2.setMouseCallback(WIN_NAME, self.on_mouse) #\n",
" except Exception,e:\n",
" print(e)\n",
" self.cam.release()\n",
" cv2.destroyAllWindows()\n",
"\n",
"if __name__ == '__main__':\n",
" b = board()\n",
" b.run()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0.09523809523809523,
0.1,
0.07692307692307693,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0.025,
0.010309278350515464,
0.010416666666666666,
0.058823529411764705,
0,
0.1,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0.08823529411764706,
0.04081632653061224,
0.06896551724137931,
0.05172413793103448,
0,
0,
0,
0,
0,
0.044444444444444446,
0,
0.01694915254237288,
0,
0.023809523809523808,
0.02631578947368421,
0.02564102564102564,
0.02631578947368421,
0.05714285714285714,
0.04225352112676056,
0,
0.025,
0.03508771929824561,
0.02857142857142857,
0.05714285714285714,
0.028169014084507043,
0.037037037037037035,
0.034482758620689655,
0.03529411764705882,
1,
0.04,
0.09302325581395349,
0.13846153846153847,
0.022727272727272728,
0.014705882352941176,
0.03333333333333333,
0.017241379310344827,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.012048192771084338,
0,
0,
0,
0.024390243902439025,
0.01652892561983471,
0.017857142857142856,
0.024096385542168676,
0.022727272727272728,
0.017543859649122806,
0.016129032258064516,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0.037037037037037035,
0,
0
] | 122 | 0.02605 | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2011-2015 German Aerospace Center DLR
(Deutsches Zentrum fuer Luft- und Raumfahrt e.V.),
Institute of System Dynamics and Control
All rights reserved.
This file is licensed under the "BSD New" license
(see also http://opensource.org/licenses/BSD-3-Clause):
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the German Aerospace Center nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import collections
import FMUError
import xml.etree.ElementTree as etree
def defaultNone(x, default):
if x is None:
return default
else:
return x
def getAttribute(root, attr):
a = root.get(attr)
if a is None:
pass
return a
class EnumerationItem:
def __init__(self):
self.value = None
self.description = None
class SimpleType:
''' Class for description of simple data types in FMI
Some values are optional, them not being defined is signaled by a value of None
'''
def __init__(self, x):
''' Populate data from x
@type x: ElemenTree element holding a type description
'''
self.basicType = str(x.tag)
self.description = x.get('description')
self.quantity = x.get('quantity')
self.unit = x.get('unit')
self.displayUnit = x.get('displayUnit')
self.relativeQuantity = defaultNone(x.get('relativeQuantity'), 'false')
self.min = x.get('min')
self.max = x.get('max')
self.nominal = x.get('nominal')
self.unbounded = defaultNone(x.get('unbounded'), 'false')
self.item = dict()
for item in x.findall('Item'):
name = item.get('name')
self.item[name] = EnumerationItem()
self.item[name].value = item.get('value')
self.item[name].description = item.get('description')
class ScalarVariableType(SimpleType):
''' Class for description of data types in FMI scalar variables
Some values are optional, them not being defined is signaled by a value of None
'''
def __init__(self, x):
''' Populate data from x
@type x: ElemenTree element holding a type description
'''
SimpleType.__init__(self, x)
self.declaredType = x.get('declaredType')
self.start = x.get('start')
self.derivative = x.get('derivative')
self.reinit = defaultNone(x.get('reinit'), 'false')
def updateDefaults(self, defaults):
''' Update some elements of the class by default values given in a SimpleType class
@type defaults: SimpleType class holding a type description
'''
if defaults.quantity is not None:
self.quantity = defaults.quantity
if defaults.unit is not None:
self.unit = defaults.unit
if defaults.displayUnit is not None:
self.displayUnit = defaults.displayUnit
if defaults.relativeQuantity is not None:
self.relativeQuantity = defaults.relativeQuantity
if defaults.min is not None:
self.min = defaults.min
if defaults.max is not None:
self.max = defaults.max
if defaults.nominal is not None:
self.nominal = defaults.nominal
if defaults.unbounded is not None:
self.unbounded = defaults.unbounded
class FMIScalarVariable:
''' Class for description of Scalar Variables
'''
def __init__(self, scalarVariableType=None, reference=None, description=None, causality=None, variability=None, initial=None, canHandleMultipleSetPerTimeInstant=None, annotations=None):
self.type = scalarVariableType
self.valueReference = reference
self.description = description
self.causality = causality
self.variability = variability
self.initial = initial
self.canHandleMultipleSetPerTimeInstant = canHandleMultipleSetPerTimeInstant
self.annotations = annotations
class FMITypeAttributes:
def __init__(self):
self.modelIdentifier = None
self.needsExecutionTool = 'false'
self.canBeInstantiatedOnlyOncePerProcess = 'false'
self.canNotUseMemoryManagementFunctions = 'false'
self.canGetAndSetFMUstate = 'false'
self.canSerializeFMUstate = 'false'
self.providesDirectionalDerivative = 'false'
self.sourceFile = []
class ModelExchange(FMITypeAttributes):
def __init__(self):
FMITypeAttributes.__init__(self)
self. completedIntegratorStepNotNeeded = 'false'
class CoSimulation(FMITypeAttributes):
def __init__(self):
FMITypeAttributes.__init__(self)
self.canHandleVariableCommunicationStepSize = 'false'
self.canInterpolateInputs = 'false'
self.maxOutputDerivativeOrder = '0'
self.canRunAsynchronuously = 'false'
class Unit:
def __init__(self):
self.kg = 0
self.m = 0
self.s = 0
self.A = 0
self.K = 0
self.mol = 0
self.cd = 0
self.rad = 0
self.factor = 0
self.offset = 0
self.displayUnit = dict()
def update(self, member, value):
if value is not None:
if member in ['factor', 'offset']:
setattr(self, member, float(value))
else:
setattr(self, member, int(value))
class DisplayUnit:
def __init__(self):
self.factor = 1.0
self.offset = 0.0
def update(self, member, value):
if value is not None:
setattr(self, member, float(value))
class DependencyStructure:
def __init__(self, index=None, dependencies=None, dependenciesKind=None):
self.index = index
self.dependencies = dependencies
self.dependenciesKind = dependenciesKind
class ModelStructure:
def __init__(self):
self.outputs = []
self.derivatives = []
self.initialUnknowns = []
class FMIDescription:
''' This object holds the description of an Functional Mock-up Interface for Model Exchange
It parses an XML-file description as defined by FMI Version 2.0
The model description (FMI) is usually part of a Functional Mock-Up Unit (FMU)
'''
def __init__(self, xmlFile):
''' Create FMIDescription from XML-file
@param xmlFile: File object of the describing XML-Document
'''
''' initialization of variables and more visible public interface '''
self.me = None
self.cs = None
self.units = {}
self.types = {}
self.logCategories = {}
self.defaultStartTime = None
self.defaultStopTime = None
self.defaultTolerance = None
self.defaultStepSize = None
self.vendorAnnotations = []
self.scalarVariables = collections.OrderedDict()
self.modelStructure = None
self.fmiVersion = None
self.modelName = None
self.guid = None
self.description = None
self.author = None
self.version = None
self.copyright = None
self.license = None
self.generationTool = None
self.generationDateAndTime = None
self.variableNamingConvention = 'flat'
self.numberOfEventIndicators = None
if xmlFile is None:
return
''' Parse the file '''
try:
_document = etree.parse(xmlFile)
except BaseException as e:
print 'Error when parsing FMU\'s xml-file. Error: ', e
raise FMUError.FMUError('Error when parsing FMU\'s xml-file.\n' + str(e) + '\n')
_docroot = _document.getroot()
if _docroot.tag != 'fmiModelDescription':
raise FMUError.FMUError('XML-File type not recognized!\n')
''' Parse the global FMI Model Description Attributes '''
self.fmiVersion = _docroot.get('fmiVersion')
self.modelName = _docroot.get('modelName')
self.guid = _docroot.get('guid')
self.description = _docroot.get('description')
self.author = _docroot.get('author')
self.version = _docroot.get('version')
self.copyright = _docroot.get('copyright')
self.license = _docroot.get('license')
self.generationTool = _docroot.get('generationTool')
self.generationDateAndTime = _docroot.get('generationDateAndTime')
self.variableNamingConvention = _docroot.get('variableNamingConvention')
self.numberOfEventIndicators = _docroot.get('numberOfEventIndicators')
''' Child nodes are each parsed by their own subroutine '''
for child in _docroot:
if child.tag == 'ModelExchange':
self._parseModelExchange(child)
elif child.tag == 'CoSimulation':
self._parseCoSimulation(child)
elif child.tag == 'UnitDefinitions':
self._parseUnitDefinitions(child)
elif child.tag == 'TypeDefinitions':
self._parseTypeDefinitions(child)
elif child.tag == 'LogCategories':
self._parseLogCategories(child)
elif child.tag == 'DefaultExperiment':
self._parseDefaultExperiment(child)
elif child.tag == 'VendorAnnotations':
self._parseVendorAnnotations(child)
elif child.tag == 'ModelVariables':
self._parseModelVariables(child)
elif child.tag == 'ModelStructure':
self._parseModelStructure(child)
else:
print('Unknown tag in FMI Model: %s\n' % child.tag)
''' Update type values in scalar variables - use defaults from simple type definitions '''
for var in self.scalarVariables.itervalues():
if var.type.declaredType is not None:
var.type.updateDefaults(self.types[var.type.declaredType])
self.numberOfContinuousStates = len(self.modelStructure.derivatives) if self.modelStructure is not None else 0
def _parseMEandCS(self, root, output):
output.modelIdentifier = getAttribute(root, 'modelIdentifier')
output.needsExecutionTool = defaultNone(getAttribute(root, 'needsExecutionTool'), 'false')
output.canBeInstantiatedOnlyOncePerProcess = defaultNone(getAttribute(root, 'canBeInstantiatedOnlyOncePerProcess'), 'false')
output.canNotUseMemoryManagementFunctions = defaultNone(getAttribute(root,'canNotUseMemoryManagementFunctions'), 'false')
output.canGetAndSetFMUstate = defaultNone(getAttribute(root,'canGetAndSetFMUstate'), 'false')
output.canSerializeFMUstate = defaultNone(getAttribute(root,'canSerializeFMUstate'), 'false')
output.providesDirectionalDerivative = defaultNone(getAttribute(root,'providesDirectionalDerivative'), 'false')
output.sourceFile = []
children = root._children
for child in children:
if child.tag == 'SourceFiles':
allFiles = child._children
for x in allFiles:
output.sourceFile.append(x.get('name'))
else:
print('Unknown tag in FMI model: %s\n' % child.tag)
def _parseModelExchange(self, root):
self.me = ModelExchange()
self._parseMEandCS(root, self.me)
self.me.completedIntegratorStepNotNeeded = defaultNone(getAttribute(root,'completedIntegratorStepNotNeeded'), 'false')
def _parseCoSimulation(self, root):
self.cs = CoSimulation()
self._parseMEandCS(root, self.cs)
self.cs.canHandleVariableCommunicationStepSize = defaultNone(getAttribute(root,'canHandleVariableCommunicationStepSize'), 'false')
self.cs.canInterpolateInputs = defaultNone(getAttribute(root,'canInterpolateInputs'), 'false')
self.cs.maxOutputDerivativeOrder = defaultNone(getAttribute(root,'maxOutputDerivativeOrder'), '0')
self.cs.canRunAsynchronuously = defaultNone(getAttribute(root,'canRunAsynchronuously'), 'false')
def _parseUnitDefinitions(self, root):
''' Parse Unit definitions.
@param root: ElemenTree element holding unit definitions
'''
for unit in root:
if unit.tag != 'Unit':
print('Unknown tag in unit definitions of FMI Model: %s\n' % unit.tag)
else:
unitName = unit.get('name')
self.units[unitName] = Unit()
children = unit._children
for child in children:
if child.tag == 'BaseUnit':
self.units[unitName].update('kg', child.get('kg'))
self.units[unitName].update('m', child.get('m'))
self.units[unitName].update('s', child.get('s'))
self.units[unitName].update('A', child.get('A'))
self.units[unitName].update('K', child.get('K'))
self.units[unitName].update('mol', child.get('mol'))
self.units[unitName].update('cd', child.get('cd'))
self.units[unitName].update('rad', child.get('rad'))
self.units[unitName].update('factor', child.get('factor'))
self.units[unitName].update('offset', child.get('offset'))
elif child.tag == 'DisplayUnit':
dUnitName = child.get('name')
self.units[unitName].displayUnit[dUnitName] = DisplayUnit()
self.units[unitName].displayUnit[dUnitName].update('factor', child.get('factor'))
self.units[unitName].displayUnit[dUnitName].update('offset', child.get('offset'))
else:
print('Unknown tag in unit definitions of FMI Model: %s\n' % child.tag)
def _parseTypeDefinitions(self, root):
''' Parse Type descriptions.
@type root: ElemenTree element holding type definitions
'''
''' Most functionality has be encapsulated in FMIType for Scalar Variables use a similar definition of types.
According to standard, type has one and only one child. It can therefore be accessed safely by type[0]
'''
for x in root:
if x.tag != 'SimpleType':
''' The current FMI definition only knows type SimpleType '''
raise FMUError.FMUError('TypeDefinitions defining non-type.\n')
if len(x) != 1:
raise FMUError.FMUError('Bad type description for: ' + x + '\n')
self.types[x.get('name')] = SimpleType(x[0])
def _parseLogCategories(self, root):
for child in root:
if child.tag == 'Category':
self.logCategories[child.get('name')] = child.get('description')
else:
print('Unknown tag in logCategories for FMI model: %s\n' % child.tag)
def _parseDefaultExperiment(self, child):
self.defaultStartTime = child.get('startTime')
self.defaultStopTime = child.get('stopTime')
self.defaultTolerance = child.get('tolerance')
self.defaultStepSize = child.get('stepSize')
def _parseVendorAnnotations(self, root):
# Only the tool names are read
for child in root:
if child.tag == 'Tool':
self.vendorAnnotations.append(child.get('name'))
else:
print('Unknown tag in VendorAnnotations for FMI model: %s\n' % child.tag)
def _parseModelVariables(self, root):
''' Parse Model Variables
@type root: ElemenTree element holding Model Variable definitions
'''
''' See documentation for: '_parseTypes' '''
for scalar in root:
if scalar.tag != 'ScalarVariable':
''' The current FMI definition only knows scalar values '''
raise FMUError.FMUError('ModelVariables definition unknown.\n')
annotations = []
for x in scalar:
if x.tag == 'Annotations':
# Only name of tools are read
for y in x:
annotations.append(y.get('name'))
else:
scalarVariableType = ScalarVariableType(x)
scalarName = scalar.get('name')
reference = scalar.get('valueReference')
description = scalar.get('description')
causality = defaultNone(scalar.get('causality'), 'local')
variability = defaultNone(scalar.get('variability'), 'continuous')
initial = scalar.get('initial')
canHandleMultipleSetPerTimeInstant = scalar.get('canHandleMultipleSetPerTimeInstant')
annotations = annotations
self.scalarVariables[scalarName] = FMIScalarVariable(scalarVariableType, reference, description, causality, variability, initial, canHandleMultipleSetPerTimeInstant, annotations)
def _parseModelStructure(self, root):
    ''' Parse the ModelStructure element into self.modelStructure.
    Outputs, Derivatives and InitialUnknowns all contain identical
    'Unknown' child elements, so they share a single parsing path.
    @type root: ElementTree element holding the ModelStructure definitions
    '''
    self.modelStructure = ModelStructure()
    # Map each section tag to the target list it fills.
    targets = {'Outputs': self.modelStructure.outputs,
               'Derivatives': self.modelStructure.derivatives,
               'InitialUnknowns': self.modelStructure.initialUnknowns}
    for child in root:
        target = targets.get(child.tag)
        if target is None:
            print('Unknown tag in ModelStructure for FMI model: %s\n' % child.tag)
            continue
        for x in child:
            if x.tag == 'Unknown':
                target.append(DependencyStructure(x.get('index'), x.get('dependencies'), x.get('dependenciesKind')))
            else:
                print('Unknown tag in ModelStructure for FMI model: %s\n' % x.tag)
if __name__ == '__main__':
''' This is for testing and development only! '''
''' Read FMI description file (directly from zip-file)'''
fmi = FMIDescription(open('d:/modelDescription_Rectifier.xml'))
print "Attributes"
print "*************"
print fmi.fmiVersion
print fmi.guid
print fmi.generationTool
print "Units"
print "*************"
print fmi.units.keys()
print fmi.units['K'].K, fmi.units['K'].A
print fmi.units['K'].displayUnit['degC'].factor, fmi.units['K'].displayUnit['degC'].offset
print "Types"
print "*************"
print fmi.types.keys()
print fmi.types['Modelica.SIunits.Voltage'].basicType
print fmi.types['Modelica.SIunits.Voltage'].description
print "Vendor Annotations"
print "*************"
print fmi.vendorAnnotations
print "ScalarVariables"
print "***************"
print fmi.scalarVariables.keys()
print fmi.scalarVariables['Capacitor1.p.v'].type
print fmi.scalarVariables['Capacitor1.p.v'].type.unit
print fmi.scalarVariables['Capacitor1.p.v'].valueReference
print fmi.scalarVariables['Capacitor1.p.v'].variability
print "ModelStructure"
print "***************"
print fmi.modelStructure.outputs[0].index
print fmi.modelStructure.outputs[0].dependencies
print fmi.modelStructure.outputs[0].dependenciesKind
| [
"#!/usr/bin/env python\n",
"# -*- coding: utf-8 -*-\n",
"\n",
"'''\n",
"Copyright (C) 2011-2015 German Aerospace Center DLR\n",
"(Deutsches Zentrum fuer Luft- und Raumfahrt e.V.),\n",
"Institute of System Dynamics and Control\n",
"All rights reserved.\n",
"\n",
"This file is licensed under the \"BSD New\" license\n",
"(see also http://opensource.org/licenses/BSD-3-Clause):\n",
"\n",
"Redistribution and use in source and binary forms, with or without modification,\n",
"are permitted provided that the following conditions are met:\n",
" - Redistributions of source code must retain the above copyright notice,\n",
" this list of conditions and the following disclaimer.\n",
" - Redistributions in binary form must reproduce the above copyright notice,\n",
" this list of conditions and the following disclaimer in the documentation\n",
" and/or other materials provided with the distribution.\n",
" - Neither the name of the German Aerospace Center nor the names of its contributors\n",
" may be used to endorse or promote products derived from this software\n",
" without specific prior written permission.\n",
"\n",
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n",
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n",
"THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n",
"IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n",
"INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n",
"(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n",
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n",
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n",
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n",
"EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n",
"'''\n",
"\n",
"\n",
"import collections\n",
"\n",
"import FMUError\n",
"import xml.etree.ElementTree as etree\n",
"\n",
"\n",
"def defaultNone(x, default):\n",
" if x is None:\n",
" return default\n",
" else:\n",
" return x\n",
" \n",
"def getAttribute(root, attr):\n",
" a = root.get(attr)\n",
" if a is None:\n",
" pass\n",
" return a\n",
"\n",
"class EnumerationItem:\n",
" def __init__(self):\n",
" self.value = None\n",
" self.description = None\n",
"\n",
"\n",
"\n",
"class SimpleType:\n",
" ''' Class for description of simple data types in FMI\n",
" Some values are optional, them not being defined is signaled by a value of None\n",
" '''\n",
" def __init__(self, x):\n",
" ''' Populate data from x\n",
" @type x: ElemenTree element holding a type description\n",
" '''\n",
" self.basicType = str(x.tag)\n",
" self.description = x.get('description')\n",
" self.quantity = x.get('quantity')\n",
" self.unit = x.get('unit')\n",
" self.displayUnit = x.get('displayUnit')\n",
" self.relativeQuantity = defaultNone(x.get('relativeQuantity'), 'false')\n",
" self.min = x.get('min')\n",
" self.max = x.get('max')\n",
" self.nominal = x.get('nominal') \n",
" self.unbounded = defaultNone(x.get('unbounded'), 'false')\n",
" self.item = dict()\n",
" for item in x.findall('Item'):\n",
" name = item.get('name')\n",
" self.item[name] = EnumerationItem()\n",
" self.item[name].value = item.get('value')\n",
" self.item[name].description = item.get('description')\n",
" \n",
" \n",
" \n",
"\n",
"class ScalarVariableType(SimpleType):\n",
" ''' Class for description of data types in FMI scalar variables\n",
" Some values are optional, them not being defined is signaled by a value of None\n",
" '''\n",
" def __init__(self, x):\n",
" ''' Populate data from x\n",
" @type x: ElemenTree element holding a type description\n",
" ''' \n",
" SimpleType.__init__(self, x)\n",
" self.declaredType = x.get('declaredType') \n",
" self.start = x.get('start')\n",
" self.derivative = x.get('derivative')\n",
" self.reinit = defaultNone(x.get('reinit'), 'false')\n",
" \n",
" \n",
" def updateDefaults(self, defaults):\n",
" ''' Update some elements of the class by default values given in a SimpleType class\n",
" @type defaults: SimpleType class holding a type description\n",
" ''' \n",
" if defaults.quantity is not None:\n",
" self.quantity = defaults.quantity\n",
" if defaults.unit is not None:\n",
" self.unit = defaults.unit\n",
" if defaults.displayUnit is not None:\n",
" self.displayUnit = defaults.displayUnit\n",
" if defaults.relativeQuantity is not None:\n",
" self.relativeQuantity = defaults.relativeQuantity\n",
" if defaults.min is not None:\n",
" self.min = defaults.min\n",
" if defaults.max is not None:\n",
" self.max = defaults.max\n",
" if defaults.nominal is not None:\n",
" self.nominal = defaults.nominal\n",
" if defaults.unbounded is not None:\n",
" self.unbounded = defaults.unbounded\n",
" \n",
"\n",
"class FMIScalarVariable:\n",
" ''' Class for description of Scalar Variables\n",
" '''\n",
" def __init__(self, scalarVariableType=None, reference=None, description=None, causality=None, variability=None, initial=None, canHandleMultipleSetPerTimeInstant=None, annotations=None):\n",
" self.type = scalarVariableType\n",
" self.valueReference = reference\n",
" self.description = description\n",
" self.causality = causality\n",
" self.variability = variability \n",
" self.initial = initial\n",
" self.canHandleMultipleSetPerTimeInstant = canHandleMultipleSetPerTimeInstant\n",
" self.annotations = annotations\n",
"\n",
"\n",
"class FMITypeAttributes:\n",
" def __init__(self):\n",
" self.modelIdentifier = None\n",
" self.needsExecutionTool = 'false' \n",
" self.canBeInstantiatedOnlyOncePerProcess = 'false'\n",
" self.canNotUseMemoryManagementFunctions = 'false'\n",
" self.canGetAndSetFMUstate = 'false'\n",
" self.canSerializeFMUstate = 'false'\n",
" self.providesDirectionalDerivative = 'false' \n",
" self.sourceFile = []\n",
"\n",
"class ModelExchange(FMITypeAttributes):\n",
" def __init__(self):\n",
" FMITypeAttributes.__init__(self)\n",
" self. completedIntegratorStepNotNeeded = 'false' \n",
"\n",
" \n",
"class CoSimulation(FMITypeAttributes):\n",
" def __init__(self):\n",
" FMITypeAttributes.__init__(self)\n",
" self.canHandleVariableCommunicationStepSize = 'false'\n",
" self.canInterpolateInputs = 'false'\n",
" self.maxOutputDerivativeOrder = '0'\n",
" self.canRunAsynchronuously = 'false' \n",
" \n",
"\n",
"class Unit:\n",
" def __init__(self):\n",
" self.kg = 0\n",
" self.m = 0\n",
" self.s = 0\n",
" self.A = 0\n",
" self.K = 0\n",
" self.mol = 0\n",
" self.cd = 0\n",
" self.rad = 0\n",
" self.factor = 0\n",
" self.offset = 0 \n",
" self.displayUnit = dict() \n",
" def update(self, member, value):\n",
" if value is not None:\n",
" if member in ['factor', 'offset']:\n",
" setattr(self, member, float(value))\n",
" else:\n",
" setattr(self, member, int(value))\n",
" \n",
"class DisplayUnit:\n",
" def __init__(self):\n",
" self.factor = 1.0\n",
" self.offset = 0.0\n",
" def update(self, member, value):\n",
" if value is not None:\n",
" setattr(self, member, float(value))\n",
" \n",
" \n",
"class DependencyStructure:\n",
" def __init__(self, index=None, dependencies=None, dependenciesKind=None):\n",
" self.index = index\n",
" self.dependencies = dependencies\n",
" self.dependenciesKind = dependenciesKind\n",
" \n",
"class ModelStructure:\n",
" def __init__(self):\n",
" self.outputs = []\n",
" self.derivatives = []\n",
" self.initialUnknowns = []\n",
"\n",
"\n",
"class FMIDescription:\n",
" ''' This object holds the description of an Functional Mock-up Interface for Model Exchange\n",
" It parses an XML-file description as defined by FMI Version 2.0\n",
" The model description (FMI) is usually part of a Functional Mock-Up Unit (FMU)\n",
" '''\n",
" def __init__(self, xmlFile):\n",
" ''' Create FMIDescription from XML-file\n",
" @param xmlFile: File object of the describing XML-Document\n",
" '''\n",
"\n",
" ''' initialization of variables and more visible public interface '''\n",
" self.me = None\n",
" self.cs = None \n",
" self.units = {}\n",
" self.types = {}\n",
" self.logCategories = {}\n",
" self.defaultStartTime = None\n",
" self.defaultStopTime = None\n",
" self.defaultTolerance = None\n",
" self.defaultStepSize = None\n",
" self.vendorAnnotations = []\n",
" self.scalarVariables = collections.OrderedDict()\n",
" self.modelStructure = None \n",
" \n",
" \n",
" self.fmiVersion = None\n",
" self.modelName = None \n",
" self.guid = None\n",
" self.description = None\n",
" self.author = None\n",
" self.version = None\n",
" self.copyright = None\n",
" self.license = None\n",
" self.generationTool = None\n",
" self.generationDateAndTime = None\n",
" self.variableNamingConvention = 'flat' \n",
" self.numberOfEventIndicators = None\n",
" \n",
" \n",
" if xmlFile is None:\n",
" return\n",
"\n",
" ''' Parse the file '''\n",
" try:\n",
" _document = etree.parse(xmlFile)\n",
" except BaseException as e:\n",
" print 'Error when parsing FMU\\'s xml-file. Error: ', e\n",
" raise FMUError.FMUError('Error when parsing FMU\\'s xml-file.\\n' + str(e) + '\\n')\n",
" _docroot = _document.getroot()\n",
" if _docroot.tag != 'fmiModelDescription':\n",
" raise FMUError.FMUError('XML-File type not recognized!\\n') \n",
" \n",
" ''' Parse the global FMI Model Description Attributes '''\n",
" self.fmiVersion = _docroot.get('fmiVersion')\n",
" self.modelName = _docroot.get('modelName') \n",
" self.guid = _docroot.get('guid')\n",
" self.description = _docroot.get('description')\n",
" self.author = _docroot.get('author')\n",
" self.version = _docroot.get('version')\n",
" self.copyright = _docroot.get('copyright')\n",
" self.license = _docroot.get('license') \n",
" self.generationTool = _docroot.get('generationTool')\n",
" self.generationDateAndTime = _docroot.get('generationDateAndTime')\n",
" self.variableNamingConvention = _docroot.get('variableNamingConvention') \n",
" self.numberOfEventIndicators = _docroot.get('numberOfEventIndicators')\n",
" \n",
" \n",
" ''' Child nodes are each parsed by their own subroutine '''\n",
" for child in _docroot:\n",
" if child.tag == 'ModelExchange':\n",
" self._parseModelExchange(child)\n",
" elif child.tag == 'CoSimulation':\n",
" self._parseCoSimulation(child) \n",
" elif child.tag == 'UnitDefinitions':\n",
" self._parseUnitDefinitions(child)\n",
" elif child.tag == 'TypeDefinitions':\n",
" self._parseTypeDefinitions(child)\n",
" elif child.tag == 'LogCategories':\n",
" self._parseLogCategories(child)\n",
" elif child.tag == 'DefaultExperiment':\n",
" self._parseDefaultExperiment(child) \n",
" elif child.tag == 'VendorAnnotations':\n",
" self._parseVendorAnnotations(child) \n",
" elif child.tag == 'ModelVariables':\n",
" self._parseModelVariables(child)\n",
" elif child.tag == 'ModelStructure':\n",
" self._parseModelStructure(child)\n",
" else:\n",
" print('Unknown tag in FMI Model: %s\\n' % child.tag)\n",
" \n",
" ''' Update type values in scalar variables - use defaults from simple type definitions '''\n",
" for var in self.scalarVariables.itervalues():\n",
" if var.type.declaredType is not None:\n",
" var.type.updateDefaults(self.types[var.type.declaredType])\n",
" \n",
" self.numberOfContinuousStates = len(self.modelStructure.derivatives) if self.modelStructure is not None else 0 \n",
"\n",
" def _parseMEandCS(self, root, output): \n",
" output.modelIdentifier = getAttribute(root, 'modelIdentifier')\n",
" output.needsExecutionTool = defaultNone(getAttribute(root, 'needsExecutionTool'), 'false')\n",
" output.canBeInstantiatedOnlyOncePerProcess = defaultNone(getAttribute(root, 'canBeInstantiatedOnlyOncePerProcess'), 'false')\n",
" output.canNotUseMemoryManagementFunctions = defaultNone(getAttribute(root,'canNotUseMemoryManagementFunctions'), 'false')\n",
" output.canGetAndSetFMUstate = defaultNone(getAttribute(root,'canGetAndSetFMUstate'), 'false')\n",
" output.canSerializeFMUstate = defaultNone(getAttribute(root,'canSerializeFMUstate'), 'false')\n",
" output.providesDirectionalDerivative = defaultNone(getAttribute(root,'providesDirectionalDerivative'), 'false')\n",
" \n",
" output.sourceFile = []\n",
" children = root._children\n",
" for child in children:\n",
" if child.tag == 'SourceFiles':\n",
" allFiles = child._children\n",
" for x in allFiles: \n",
" output.sourceFile.append(x.get('name'))\n",
" else:\n",
" print('Unknown tag in FMI model: %s\\n' % child.tag)\n",
" \n",
" def _parseModelExchange(self, root):\n",
" self.me = ModelExchange()\n",
" self._parseMEandCS(root, self.me)\n",
" self.me.completedIntegratorStepNotNeeded = defaultNone(getAttribute(root,'completedIntegratorStepNotNeeded'), 'false')\n",
" \n",
" def _parseCoSimulation(self, root):\n",
" self.cs = CoSimulation()\n",
" self._parseMEandCS(root, self.cs) \n",
" self.cs.canHandleVariableCommunicationStepSize = defaultNone(getAttribute(root,'canHandleVariableCommunicationStepSize'), 'false')\n",
" self.cs.canInterpolateInputs = defaultNone(getAttribute(root,'canInterpolateInputs'), 'false')\n",
" self.cs.maxOutputDerivativeOrder = defaultNone(getAttribute(root,'maxOutputDerivativeOrder'), '0')\n",
" self.cs.canRunAsynchronuously = defaultNone(getAttribute(root,'canRunAsynchronuously'), 'false')\n",
"\n",
" def _parseUnitDefinitions(self, root):\n",
" ''' Parse Unit definitions.\n",
" @param root: ElemenTree element holding unit definitions\n",
" '''\n",
" for unit in root:\n",
" if unit.tag != 'Unit':\n",
" print('Unknown tag in unit definitions of FMI Model: %s\\n' % unit.tag)\n",
" else:\n",
" unitName = unit.get('name')\n",
" self.units[unitName] = Unit() \n",
" children = unit._children\n",
" for child in children:\n",
" if child.tag == 'BaseUnit':\n",
" self.units[unitName].update('kg', child.get('kg'))\n",
" self.units[unitName].update('m', child.get('m'))\n",
" self.units[unitName].update('s', child.get('s'))\n",
" self.units[unitName].update('A', child.get('A'))\n",
" self.units[unitName].update('K', child.get('K'))\n",
" self.units[unitName].update('mol', child.get('mol'))\n",
" self.units[unitName].update('cd', child.get('cd'))\n",
" self.units[unitName].update('rad', child.get('rad')) \n",
" self.units[unitName].update('factor', child.get('factor')) \n",
" self.units[unitName].update('offset', child.get('offset')) \n",
" elif child.tag == 'DisplayUnit':\n",
" dUnitName = child.get('name')\n",
" self.units[unitName].displayUnit[dUnitName] = DisplayUnit()\n",
" self.units[unitName].displayUnit[dUnitName].update('factor', child.get('factor'))\n",
" self.units[unitName].displayUnit[dUnitName].update('offset', child.get('offset')) \n",
" else:\n",
" print('Unknown tag in unit definitions of FMI Model: %s\\n' % child.tag) \n",
" \n",
" \n",
" \n",
" def _parseTypeDefinitions(self, root):\n",
" ''' Parse Type descriptions.\n",
" @type root: ElemenTree element holding type definitions\n",
" '''\n",
" ''' Most functionality has be encapsulated in FMIType for Scalar Variables use a similar definition of types.\n",
" According to standard, type has one and only one child. It can therefore be accessed safely by type[0]\n",
" '''\n",
" for x in root:\n",
" if x.tag != 'SimpleType':\n",
" ''' The current FMI definition only knows type SimpleType '''\n",
" raise FMUError.FMUError('TypeDefinitions defining non-type.\\n')\n",
" if len(x) != 1:\n",
" raise FMUError.FMUError('Bad type description for: ' + x + '\\n')\n",
" self.types[x.get('name')] = SimpleType(x[0]) \n",
" \n",
" \n",
" def _parseLogCategories(self, root):\n",
" for child in root:\n",
" if child.tag == 'Category':\n",
" self.logCategories[child.get('name')] = child.get('description')\n",
" else:\n",
" print('Unknown tag in logCategories for FMI model: %s\\n' % child.tag)\n",
" \n",
" \n",
" def _parseDefaultExperiment(self, child):\n",
" self.defaultStartTime = child.get('startTime')\n",
" self.defaultStopTime = child.get('stopTime')\n",
" self.defaultTolerance = child.get('tolerance')\n",
" self.defaultStepSize = child.get('stepSize')\n",
" \n",
" def _parseVendorAnnotations(self, root):\n",
" # Only the tool names are read\n",
" for child in root:\n",
" if child.tag == 'Tool':\n",
" self.vendorAnnotations.append(child.get('name'))\n",
" else:\n",
" print('Unknown tag in VendorAnnotations for FMI model: %s\\n' % child.tag)\n",
" \n",
" def _parseModelVariables(self, root):\n",
" ''' Parse Model Variables\n",
" @type root: ElemenTree element holding Model Variable definitions\n",
" '''\n",
" ''' See documentation for: '_parseTypes' '''\n",
" for scalar in root:\n",
" if scalar.tag != 'ScalarVariable':\n",
" ''' The current FMI definition only knows scalar values '''\n",
" raise FMUError.FMUError('ModelVariables definition unknown.\\n')\n",
" annotations = []\n",
" for x in scalar:\n",
" if x.tag == 'Annotations':\n",
" # Only name of tools are read\n",
" for y in x:\n",
" annotations.append(y.get('name')) \n",
" else:\n",
" scalarVariableType = ScalarVariableType(x) \n",
" \n",
" scalarName = scalar.get('name')\n",
" reference = scalar.get('valueReference')\n",
" description = scalar.get('description')\n",
" causality = defaultNone(scalar.get('causality'), 'local')\n",
" variability = defaultNone(scalar.get('variability'), 'continuous') \n",
" initial = scalar.get('initial')\n",
" canHandleMultipleSetPerTimeInstant = scalar.get('canHandleMultipleSetPerTimeInstant') \n",
" annotations = annotations\n",
" self.scalarVariables[scalarName] = FMIScalarVariable(scalarVariableType, reference, description, causality, variability, initial, canHandleMultipleSetPerTimeInstant, annotations)\n",
"\n",
" \n",
" def _parseModelStructure(self, root):\n",
" self.modelStructure = ModelStructure() \n",
" for child in root:\n",
" if child.tag == 'Outputs':\n",
" for x in child:\n",
" if x.tag == 'Unknown':\n",
" self.modelStructure.outputs.append(DependencyStructure(x.get('index'), x.get('dependencies'), x.get('dependenciesKind'))) \n",
" else:\n",
" print('Unknown tag in ModelStructure for FMI model: %s\\n' % x.tag) \n",
" \n",
" elif child.tag == 'Derivatives':\n",
" for x in child:\n",
" if x.tag == 'Unknown':\n",
" self.modelStructure.derivatives.append(DependencyStructure(x.get('index'), x.get('dependencies'), x.get('dependenciesKind'))) \n",
" else:\n",
" print('Unknown tag in ModelStructure for FMI model: %s\\n' % x.tag)\n",
" \n",
" elif child.tag == 'InitialUnknowns':\n",
" for x in child:\n",
" if x.tag == 'Unknown':\n",
" self.modelStructure.initialUnknowns.append(DependencyStructure(x.get('index'), x.get('dependencies'), x.get('dependenciesKind'))) \n",
" else:\n",
" print('Unknown tag in ModelStructure for FMI model: %s\\n' % x.tag) \n",
" else:\n",
" print('Unknown tag in ModelStructure for FMI model: %s\\n' % child.tag)\n",
" \n",
" \n",
" \n",
" \n",
" \n",
" \n",
"\n",
" \n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" ''' This is for testing and development only! '''\n",
"\n",
" ''' Read FMI description file (directly from zip-file)''' \n",
" fmi = FMIDescription(open('d:/modelDescription_Rectifier.xml'))\n",
"\n",
" print \"Attributes\"\n",
" print \"*************\"\n",
" print fmi.fmiVersion\n",
" print fmi.guid\n",
" print fmi.generationTool\n",
"\n",
" print \"Units\"\n",
" print \"*************\"\n",
" print fmi.units.keys()\n",
" print fmi.units['K'].K, fmi.units['K'].A\n",
" print fmi.units['K'].displayUnit['degC'].factor, fmi.units['K'].displayUnit['degC'].offset\n",
" \n",
"\n",
" print \"Types\"\n",
" print \"*************\"\n",
" print fmi.types.keys()\n",
" print fmi.types['Modelica.SIunits.Voltage'].basicType\n",
" print fmi.types['Modelica.SIunits.Voltage'].description\n",
"\n",
" print \"Vendor Annotations\"\n",
" print \"*************\"\n",
" print fmi.vendorAnnotations\n",
"\n",
" print \"ScalarVariables\"\n",
" print \"***************\"\n",
" print fmi.scalarVariables.keys()\n",
" print fmi.scalarVariables['Capacitor1.p.v'].type\n",
" print fmi.scalarVariables['Capacitor1.p.v'].type.unit\n",
" print fmi.scalarVariables['Capacitor1.p.v'].valueReference\n",
" print fmi.scalarVariables['Capacitor1.p.v'].variability\n",
" \n",
" \n",
" print \"ModelStructure\"\n",
" print \"***************\"\n",
" print fmi.modelStructure.outputs[0].index\n",
" print fmi.modelStructure.outputs[0].dependencies \n",
" print fmi.modelStructure.outputs[0].dependenciesKind\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0.010526315789473684,
0.012345679012345678,
0,
0,
0.012048192771084338,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0.03333333333333333,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.1111111111111111,
0.2,
0,
0.02631578947368421,
0,
0.011363636363636364,
0,
0,
0,
0,
0.03571428571428571,
0,
0.017543859649122806,
0,
0,
0,
0.1111111111111111,
0.1111111111111111,
0.025,
0.010869565217391304,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0.005263157894736842,
0,
0,
0,
0,
0.02127659574468085,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0,
0.01639344262295082,
0,
0,
0.025,
0,
0,
0.015384615384615385,
0,
0.2,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0.023809523809523808,
0.02702702702702703,
0,
0,
0,
0,
0,
0.1111111111111111,
0.05263157894736842,
0,
0,
0,
0.02702702702702703,
0,
0,
0.14285714285714285,
0.14285714285714285,
0,
0,
0,
0,
0,
0.09090909090909091,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0.1111111111111111,
0.1111111111111111,
0.03225806451612903,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0.01818181818181818,
0,
0.1111111111111111,
0.1111111111111111,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0.012658227848101266,
0.1111111111111111,
0,
0,
0.017241379310344827,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0.021739130434782608,
0,
0.14285714285714285,
0.1111111111111111,
0.014705882352941176,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0,
0.016666666666666666,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.010101010101010102,
0,
0,
0,
0.058823529411764705,
0.016666666666666666,
0,
0.0196078431372549,
0,
0.010101010101010102,
0.007518796992481203,
0.015384615384615385,
0.0196078431372549,
0.0196078431372549,
0.016666666666666666,
0.1111111111111111,
0,
0,
0,
0,
0,
0.01818181818181818,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.015748031496062992,
0.2,
0,
0,
0.02,
0.014388489208633094,
0.019417475728155338,
0.018691588785046728,
0.01904761904761905,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0.018691588785046728,
0.018691588785046728,
0,
0.01818181818181818,
0.011904761904761904,
0.009433962264150943,
0.015384615384615385,
0,
0.01904761904761905,
0.07692307692307693,
0.07692307692307693,
0.2,
0.023255813953488372,
0,
0,
0,
0.00847457627118644,
0.008695652173913044,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.01639344262295082,
0.2,
0.2,
0.024390243902439025,
0,
0,
0.012345679012345678,
0,
0.011627906976744186,
0.058823529411764705,
0.2,
0.021739130434782608,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0.125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0.013333333333333334,
0.07692307692307693,
0,
0,
0,
0,
0.01098901098901099,
0,
0.01818181818181818,
0,
0.005235602094240838,
0,
0.2,
0.023809523809523808,
0.01818181818181818,
0,
0,
0,
0,
0.011764705882352941,
0,
0.017391304347826087,
0.058823529411764705,
0,
0,
0,
0.011494252873563218,
0,
0.01098901098901099,
0.058823529411764705,
0,
0,
0,
0.011235955056179775,
0,
0.018691588785046728,
0,
0.011494252873563218,
0.058823529411764705,
0.058823529411764705,
0.058823529411764705,
0.058823529411764705,
0.2,
0.16666666666666666,
0,
0.2,
0,
0,
0.037037037037037035,
0,
0,
0.015151515151515152,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0.3333333333333333,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0.2,
0.037037037037037035,
0,
0,
0.018518518518518517,
0
] | 515 | 0.016403 | false |
# coding: UTF-8
import socket
import urllib2
import traceback
import MySQLdb
import time
from bs4 import BeautifulSoup
from complainDetail import *
timeout = 10
socket.setdefaulttimeout(timeout)
def getPageNum():
pageNum = 0
tryNum = 3
tn = 0
while tn < tryNum:
try:
f = urllib2.urlopen("http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-1.shtml")
content = f.read()
soup = BeautifulSoup(content)
divList = soup.find_all(attrs={'class':'p_page'})[0]
aList = divList.find_all('a')
lastPage = aList[-1]['href']
pageNum = lastPage.split('.')[0].split('-')[-1]
break
except Exception,e:
print e
tn = tn + 1
print uri, " access error!"
print "try ", tn, "time"
time.sleep(5)
if tn==tryNum:
print "get Page Num Error!"
return -1
return int(pageNum)
def getMaxNumOfComplain():
    ''' Return the largest complaint number already stored in complainList,
    or 0 when the table is empty.
    '''
    # NOTE(review): credentials are hard-coded; move them to configuration.
    conn=MySQLdb.connect(host='localhost',user='root',passwd='fnst1234',port=3306, db="autocomplain")
    try:
        cur=conn.cursor()
        cur.execute("select max(num) from complainList")
        conn.commit()
        num = cur.fetchone()
        # max() yields a (None,) row for an empty table
        if num[0]:
            return int(num[0])
        else:
            return 0
    finally:
        # Previously the connection was never closed (leak per call)
        conn.close()
def exitsInDb(num):
conn=MySQLdb.connect(host='localhost',user='root',passwd='fnst1234',port=3306, db="autocomplain")
cur=conn.cursor()
sql = "select num from complainList where num=%s"%num
cur.execute(sql)
conn.commit();
num = cur.fetchone()
if num:
print "exits"
return 1 # exits
else:
return 0 # not exits
def insertDb(table, vlist):
    ''' Insert one complaint record into the given table.
    vlist order: num, brand, family, version, abstract, detailUrl,
    failure, published, status.  collectTime is set to now().
    '''
    conn=MySQLdb.connect(host='localhost',user='root',passwd='fnst1234',port=3306, db="autocomplain", charset="utf8")
    try:
        cur=conn.cursor()
        # The table name cannot be a bound parameter; it only ever comes
        # from internal callers ("complainList"), never from user input.
        sql = ("insert into " + table +
               "(num,brand,family,version,abstract,detailUrl,failure,published,status,collectTime) "
               "values (%s,%s,%s,%s,%s,%s,%s,%s,%s, now())")
        params = (int(vlist[0]), vlist[1], vlist[2], vlist[3], vlist[4],
                  vlist[5], vlist[6], vlist[7], vlist[8])
        # Parameterized execute handles escaping of the free-text fields;
        # the old "%"-formatted SQL broke on quotes and was injection-prone.
        cur.execute(sql, params)
        conn.commit()
    finally:
        conn.close()
def getFailureTye(str, failureType):
    ''' Map a comma-separated failure-code string like "A1,B3" onto
    human-readable categories.

    Each code is one letter (category, 'A'-based index into failureType)
    followed by a numeric item id.  Returns a dict mapping the GBK-decoded
    category name to the GBK-decoded item title.
    NOTE(review): the parameter shadows the builtin 'str'; kept for
    interface compatibility with existing callers.
    '''
    indexList = str.split(",")
    typeStr = {}
    baseIndex = ord('A')
    for i in indexList:
        if i != '':
            alpha = i[0]   # category letter
            num = i[1:]    # numeric item id
            for j in failureType[ord(alpha)-baseIndex]['items']:
                if j['id'] == int(num):
                    typeStr[failureType[ord(alpha)-baseIndex]['name'].decode("gbk")] = j['title'].decode("gbk")
                    break
    return typeStr
def fetchFailureTye(uri):
try:
f = urllib2.urlopen(uri)
content = f.read()
rawType = content.split("=")[1]
failureType = eval(rawType)
except Exception,e:
print uri, " access error!"
return -1
return failureType
def fetchComplainList(uri):
    """Scrape one complaint-list page and insert each new complaint row.

    Retries up to `tryNum` times on any failure (network, parsing, DB),
    sleeping 5 seconds between attempts.  Relies on the module-level
    `failureType` global set in __main__ and on `fetchDetail` from the
    `complainDetail` star import.

    Returns:
        0 on success, -1 if every attempt failed.
    """
    tryNum = 3
    tn = 0
    while tn < tryNum:
        try:
            f = urllib2.urlopen(uri)
            content = f.read()
            soup = BeautifulSoup(content)
            # Row 0 is the table header, so data rows start at index 1.
            trList = soup.table.find_all('tr')
            for i in range(1,len(trList)):
                typeDict = {}
                vlist = []
                tdList = trList[i].find_all('td')
                num = tdList[0].string
                if exitsInDb(num) == 0:
                    # Complaint not stored yet: build the value list in the
                    # column order expected by insertDb().
                    vlist.append(num) # num
                    vlist.append(tdList[1].string) # brand
                    vlist.append(tdList[2].string) # family
                    vlist.append(tdList[3].string) # version
                    vlist.append(tdList[4].string) # abstract
                    vlist.append("http://www.12365auto.com%s"%tdList[4].a['href']) # detailUrl
                    # Also fetch and store the complaint's detail page.
                    fetchDetail(int(tdList[0].string),"http://www.12365auto.com%s"%tdList[4].a['href'])
                    flist = []
                    if tdList[5].string != None:
                        typeDict = getFailureTye(tdList[5].string, failureType)
                    # NOTE(review): this inner `i` shadows the outer row
                    # index.  Harmless in Python (the outer for rebinds i
                    # from its iterator each pass) but worth renaming.
                    for i in typeDict.items():
                        flist.append(':'.join(i))
                    if tdList[2]['fw'] != '':
                        flist.append(tdList[2]['fw'])
                    vlist.append(','.join(flist)) # failure
                    vlist.append(tdList[6].string) # published
                    vlist.append(tdList[7].em.string) # status
                    insertDb("complainList", vlist)
                else:
                    continue
            break
        except Exception,e:
            print e
            tn = tn + 1
            print uri, " access error!"
            print "try ", tn, "time"
            time.sleep(5)
    if tn==tryNum:
        print "Cannot fetch page!"
        return -1
    return 0
if __name__ == "__main__":
    # Entry point: download the failure-type lookup table, then walk every
    # complaint-list page and store new complaints into MySQL.
    failureTypeUri = "http://www.12365auto.com/js/cTypeInfo.js"
    failureType = fetchFailureTye(failureTypeUri)
    # fetchFailureTye returns -1 on failure (a parsed structure on success).
    if failureType == -1: exit(-1)
    pageNum = getPageNum()
    pageBegin = 1
    if pageNum == -1: exit(-2)
    print "total page:", pageNum
    # Re-check the page count after each sweep: new complaints posted while
    # we were scraping append extra pages to the listing.
    while pageBegin < pageNum:
        for p in range(pageBegin, pageNum+1):
            print "page:", p
            complainListUri = "http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-%s.shtml"%p
            fetchComplainList(complainListUri)
        # NOTE(review): the next sweep re-starts at the old last page, so
        # that page is scraped twice; exitsInDb() de-duplicates the rows.
        pageBegin = pageNum
        pageNum = getPageNum()
        if pageNum == -1: exit(-2)
    print "Auto Complain Collection Updated"
| [
"# coding: UTF-8 \n",
"import socket\n",
"import urllib2\n",
"import traceback\n",
"import MySQLdb\n",
"import time\n",
"from bs4 import BeautifulSoup\n",
"from complainDetail import *\n",
"\n",
"timeout = 10\n",
"socket.setdefaulttimeout(timeout)\n",
"\n",
"def getPageNum():\n",
" pageNum = 0\n",
" tryNum = 3\n",
" tn = 0\n",
" while tn < tryNum:\n",
" try:\n",
" f = urllib2.urlopen(\"http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-1.shtml\")\n",
" content = f.read() \n",
" soup = BeautifulSoup(content)\n",
" divList = soup.find_all(attrs={'class':'p_page'})[0]\n",
" aList = divList.find_all('a')\n",
" lastPage = aList[-1]['href']\n",
" pageNum = lastPage.split('.')[0].split('-')[-1]\n",
" break\n",
" except Exception,e:\n",
" print e\n",
" tn = tn + 1\n",
" print uri, \" access error!\"\n",
" print \"try \", tn, \"time\"\n",
" time.sleep(5) \n",
" if tn==tryNum:\n",
" print \"get Page Num Error!\"\n",
" return -1\n",
" return int(pageNum)\n",
"\n",
"def getMaxNumOfComplain():\n",
" conn=MySQLdb.connect(host='localhost',user='root',passwd='fnst1234',port=3306, db=\"autocomplain\")\n",
" cur=conn.cursor()\n",
" sql = \"select max(num) from complainList\" \n",
" cur.execute(sql)\n",
" conn.commit();\n",
" num = cur.fetchone()\n",
" #print 1\n",
" if num[0]: \n",
" return int(num[0])\n",
" else:\n",
" return 0\n",
" \n",
"def exitsInDb(num):\n",
" conn=MySQLdb.connect(host='localhost',user='root',passwd='fnst1234',port=3306, db=\"autocomplain\")\n",
" cur=conn.cursor()\n",
" sql = \"select num from complainList where num=%s\"%num \n",
" cur.execute(sql)\n",
" conn.commit();\n",
" num = cur.fetchone() \n",
" if num: \n",
" print \"exits\" \n",
" return 1 # exits\n",
" else: \n",
" return 0 # not exits\n",
"\n",
"def insertDb(table, vlist):\n",
" conn=MySQLdb.connect(host='localhost',user='root',passwd='fnst1234',port=3306, db=\"autocomplain\", charset=\"utf8\")\n",
" cur=conn.cursor()\n",
" num=int(vlist[0])\n",
" brand=vlist[1]\t\n",
" family=vlist[2]\n",
" version=vlist[3]\n",
" abstract=vlist[4]\n",
" detailUrl=vlist[5]\n",
" failure=vlist[6]\n",
" published=vlist[7]\n",
" status=vlist[8]\t\n",
" sql = \"insert into \" + table + \"(num,brand,family,version,abstract,detailUrl,failure,published,status,collectTime) values \" + \"('%s','%s','%s','%s','%s','%s','%s','%s','%s', now())\"%(num,brand,family,version,abstract,detailUrl,failure,published,status) \n",
" #cur.execute(\"set names utf8\") \n",
" #cur.execute(sql.encode(\"gbk\"))\n",
" cur.execute(sql) \n",
" conn.commit()\n",
"\t \n",
"def getFailureTye(str, failureType):\n",
"\tindexList = str.split(\",\")\t\n",
"\ttypeStr = {}\n",
"\tbaseIndex = ord('A')\n",
"\tfor i in indexList:\n",
"\t\tif i != '':\n",
"\t\t\talpha = i[0]\n",
"\t\t\tnum = i[1:]\t\t\t\n",
"\t\t\tfor j in failureType[ord(alpha)-baseIndex]['items']:\n",
"\t\t\t\tif j['id'] == int(num):\n",
"\t\t\t\t\ttypeStr[failureType[ord(alpha)-baseIndex]['name'].decode(\"gbk\")] = j['title'].decode(\"gbk\")\n",
"\t\t\t\t\tbreak\t\t\t\n",
"\treturn typeStr\n",
"\n",
"def fetchFailureTye(uri):\n",
"\ttry:\n",
"\t f = urllib2.urlopen(uri)\n",
"\t content = f.read() \n",
"\t rawType = content.split(\"=\")[1]\n",
"\t failureType = eval(rawType) \n",
"\texcept Exception,e:\n",
"\t\tprint uri, \" access error!\"\n",
"\t\treturn -1\t\n",
"\treturn failureType\n",
"\n",
"\n",
"def fetchComplainList(uri):\n",
" tryNum = 3\n",
" tn = 0\n",
" while tn < tryNum:\n",
" try:\n",
" f = urllib2.urlopen(uri)\n",
" content = f.read()\n",
" soup = BeautifulSoup(content)\n",
" trList = soup.table.find_all('tr') \n",
" for i in range(1,len(trList)):\n",
" typeDict = {}\n",
" vlist = []\n",
" tdList = trList[i].find_all('td')\n",
" num = tdList[0].string \n",
" if exitsInDb(num) == 0: \n",
" #continue \n",
" vlist.append(num) # num \n",
" vlist.append(tdList[1].string) # brand \n",
" vlist.append(tdList[2].string) # family \n",
" vlist.append(tdList[3].string) # version \n",
" vlist.append(tdList[4].string) # abstract\n",
" vlist.append(\"http://www.12365auto.com%s\"%tdList[4].a['href']) # detailUrl \n",
" fetchDetail(int(tdList[0].string),\"http://www.12365auto.com%s\"%tdList[4].a['href']) \n",
" flist = []\n",
" if tdList[5].string != None:\n",
" typeDict = getFailureTye(tdList[5].string, failureType)\t\t\t\n",
" for i in typeDict.items():\t\t\t\t\n",
" flist.append(':'.join(i)) \n",
" if tdList[2]['fw'] != '':\t\t\t\t\n",
" flist.append(tdList[2]['fw']) \n",
" vlist.append(','.join(flist)) # \n",
" vlist.append(tdList[6].string) # published \n",
" vlist.append(tdList[7].em.string) # status \n",
" insertDb(\"complainList\", vlist)\n",
" else:\n",
" continue\n",
" break\n",
" except Exception,e:\n",
" print e\n",
" tn = tn + 1\n",
" print uri, \" access error!\"\n",
" print \"try \", tn, \"time\"\n",
" time.sleep(5) \n",
" if tn==tryNum:\n",
" print \"Cannot fetch page!\"\n",
" return -1\n",
" return 0\n",
"\n",
"if __name__ == \"__main__\":\n",
" failureTypeUri = \"http://www.12365auto.com/js/cTypeInfo.js\"\n",
" failureType = fetchFailureTye(failureTypeUri)\n",
" if failureType == -1: exit(-1)\t\n",
" pageNum = getPageNum()\n",
" pageBegin = 1 \n",
" if pageNum == -1: exit(-2)\n",
" print \"total page:\", pageNum \n",
" while pageBegin < pageNum:\n",
" for p in range(pageBegin, pageNum+1):\n",
" print \"page:\", p\n",
" complainListUri = \"http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-%s.shtml\"%p\n",
" fetchComplainList(complainListUri)\n",
" pageBegin = pageNum\n",
" pageNum = getPageNum()\n",
" if pageNum == -1: exit(-2) \n",
" print \"Auto Complain Collection Updated\"\n"
] | [
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0.011235955056179775,
0.03125,
0,
0.015384615384615385,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0.03333333333333333,
0.05263157894736842,
0,
0,
0,
0,
0.037037037037037035,
0.049019607843137254,
0.045454545454545456,
0.02,
0,
0.05263157894736842,
0,
0.07692307692307693,
0.043478260869565216,
0,
0,
0,
0.5,
0.05,
0.049019607843137254,
0.045454545454545456,
0.03225806451612903,
0,
0.05263157894736842,
0.02702702702702703,
0.07692307692307693,
0.038461538461538464,
0.04,
0.09090909090909091,
0.034482758620689655,
0,
0.03571428571428571,
0.0423728813559322,
0.045454545454545456,
0.045454545454545456,
0.1,
0.05,
0.047619047619047616,
0.045454545454545456,
0.043478260869565216,
0.047619047619047616,
0.043478260869565216,
0.09523809523809523,
0.04247104247104247,
0.05263157894736842,
0.027777777777777776,
0.04,
0,
1,
0.02702702702702703,
0.06896551724137931,
0.07142857142857142,
0.045454545454545456,
0.047619047619047616,
0.07142857142857142,
0.0625,
0.1111111111111111,
0.017857142857142856,
0.03571428571428571,
0.020618556701030927,
0.14285714285714285,
0.0625,
0,
0.038461538461538464,
0.16666666666666666,
0.06666666666666667,
0.12,
0.05405405405405406,
0.08108108108108109,
0.09523809523809523,
0.03333333333333333,
0.15384615384615385,
0.05,
0,
0,
0,
0.06666666666666667,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0.023255813953488372,
0,
0,
0,
0.03571428571428571,
0.016666666666666666,
0.043478260869565216,
0.03571428571428571,
0.029411764705882353,
0.028985507246376812,
0.028169014084507043,
0.016129032258064516,
0.037383177570093455,
0.034782608695652174,
0,
0.02040816326530612,
0.012048192771084338,
0.0196078431372549,
0.016129032258064516,
0.02,
0.015151515151515152,
0.031746031746031744,
0.0273972602739726,
0.013333333333333334,
0,
0,
0,
0,
0.03571428571428571,
0,
0.07692307692307693,
0,
0,
0.02631578947368421,
0.05263157894736842,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0.05555555555555555,
0,
0.045454545454545456,
0.03225806451612903,
0.02702702702702703,
0,
0,
0,
0.02247191011235955,
0,
0,
0,
0.047619047619047616,
0
] | 172 | 0.035882 | false |
# This file is part of Tautulli.
#
# Tautulli is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tautulli is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tautulli. If not, see <http://www.gnu.org/licenses/>.
import httpagentparser
import time
import plexpy
import common
import database
import datatables
import helpers
import libraries
import logger
import plextv
import session
def refresh_users():
logger.info(u"Tautulli Users :: Requesting users list refresh...")
result = plextv.PlexTV().get_full_users_list()
if result:
monitor_db = database.MonitorDatabase()
for item in result:
if item.get('shared_libraries'):
item['shared_libraries'] = ';'.join(item['shared_libraries'])
elif item.get('server_token'):
libs = libraries.Libraries().get_sections()
item['shared_libraries'] = ';'.join([str(l['section_id']) for l in libs])
keys_dict = {"user_id": item.pop('user_id')}
# Check if we've set a custom avatar if so don't overwrite it.
if keys_dict['user_id']:
avatar_urls = monitor_db.select('SELECT thumb, custom_avatar_url '
'FROM users WHERE user_id = ?',
[keys_dict['user_id']])
if avatar_urls:
if not avatar_urls[0]['custom_avatar_url'] or \
avatar_urls[0]['custom_avatar_url'] == avatar_urls[0]['thumb']:
item['custom_avatar_url'] = item['thumb']
else:
item['custom_avatar_url'] = item['thumb']
monitor_db.upsert('users', item, keys_dict)
logger.info(u"Tautulli Users :: Users list refreshed.")
return True
else:
logger.warn(u"Tautulli Users :: Unable to refresh users list.")
return False
class Users(object):
def __init__(self):
pass
def get_datatables_list(self, kwargs=None, grouping=None):
default_return = {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': 'null',
'error': 'Unable to execute database query.'}
data_tables = datatables.DataTables()
custom_where = [['users.deleted_user', 0]]
if grouping is None:
grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES
if session.get_session_user_id():
custom_where.append(['users.user_id', session.get_session_user_id()])
if kwargs.get('user_id'):
custom_where.append(['users.user_id', kwargs.get('user_id')])
group_by = 'session_history.reference_id' if grouping else 'session_history.id'
columns = ['users.user_id',
'(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
THEN users.username ELSE users.friendly_name END) AS friendly_name',
'users.thumb AS user_thumb',
'users.custom_avatar_url AS custom_thumb',
'COUNT(DISTINCT %s) AS plays' % group_by,
'SUM(CASE WHEN session_history.stopped > 0 THEN (session_history.stopped - session_history.started) \
ELSE 0 END) - SUM(CASE WHEN session_history.paused_counter IS NULL THEN 0 ELSE \
session_history.paused_counter END) AS duration',
'MAX(session_history.started) AS last_seen',
'MAX(session_history.id) AS id',
'session_history_metadata.full_title AS last_played',
'session_history.ip_address',
'session_history.platform',
'session_history.player',
'session_history.rating_key',
'session_history_metadata.media_type',
'session_history_metadata.thumb',
'session_history_metadata.parent_thumb',
'session_history_metadata.grandparent_thumb',
'session_history_metadata.parent_title',
'session_history_metadata.year',
'session_history_metadata.media_index',
'session_history_metadata.parent_media_index',
'session_history_metadata.live',
'session_history_metadata.added_at',
'session_history_metadata.originally_available_at',
'session_history_metadata.guid',
'session_history_media_info.transcode_decision',
'users.do_notify as do_notify',
'users.keep_history as keep_history',
'users.allow_guest as allow_guest'
]
try:
query = data_tables.ssp_query(table_name='users',
columns=columns,
custom_where=custom_where,
group_by=['users.user_id'],
join_types=['LEFT OUTER JOIN',
'LEFT OUTER JOIN',
'LEFT OUTER JOIN'],
join_tables=['session_history',
'session_history_metadata',
'session_history_media_info'],
join_evals=[['session_history.user_id', 'users.user_id'],
['session_history.id', 'session_history_metadata.id'],
['session_history.id', 'session_history_media_info.id']],
kwargs=kwargs)
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for get_list: %s." % e)
return default_return
users = query['result']
rows = []
for item in users:
if item['media_type'] == 'episode' and item['parent_thumb']:
thumb = item['parent_thumb']
elif item['media_type'] == 'episode':
thumb = item['grandparent_thumb']
else:
thumb = item['thumb']
if item['custom_thumb'] and item['custom_thumb'] != item['user_thumb']:
user_thumb = item['custom_thumb']
elif item['user_thumb']:
user_thumb = item['user_thumb']
else:
user_thumb = common.DEFAULT_USER_THUMB
# Rename Mystery platform names
platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])
row = {'user_id': item['user_id'],
'friendly_name': item['friendly_name'],
'user_thumb': user_thumb,
'plays': item['plays'],
'duration': item['duration'],
'last_seen': item['last_seen'],
'last_played': item['last_played'],
'id': item['id'],
'ip_address': item['ip_address'],
'platform': platform,
'player': item['player'],
'rating_key': item['rating_key'],
'media_type': item['media_type'],
'thumb': thumb,
'parent_title': item['parent_title'],
'year': item['year'],
'media_index': item['media_index'],
'parent_media_index': item['parent_media_index'],
'live': item['live'],
'originally_available_at': item['originally_available_at'],
'guid': item['guid'],
'transcode_decision': item['transcode_decision'],
'do_notify': helpers.checked(item['do_notify']),
'keep_history': helpers.checked(item['keep_history']),
'allow_guest': helpers.checked(item['allow_guest'])
}
rows.append(row)
dict = {'recordsFiltered': query['filteredCount'],
'recordsTotal': query['totalCount'],
'data': session.friendly_name_to_username(rows),
'draw': query['draw']
}
return dict
def get_datatables_unique_ips(self, user_id=None, kwargs=None):
default_return = {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': 'null',
'error': 'Unable to execute database query.'}
if not session.allow_session_user(user_id):
return default_return
data_tables = datatables.DataTables()
custom_where = ['users.user_id', user_id]
columns = ['session_history.id',
'MAX(session_history.started) AS last_seen',
'session_history.ip_address',
'COUNT(session_history.id) AS play_count',
'session_history.platform',
'session_history.player',
'session_history.rating_key',
'session_history_metadata.full_title AS last_played',
'session_history_metadata.thumb',
'session_history_metadata.parent_thumb',
'session_history_metadata.grandparent_thumb',
'session_history_metadata.media_type',
'session_history_metadata.parent_title',
'session_history_metadata.year',
'session_history_metadata.media_index',
'session_history_metadata.parent_media_index',
'session_history_metadata.live',
'session_history_metadata.added_at',
'session_history_metadata.originally_available_at',
'session_history_metadata.guid',
'session_history_media_info.transcode_decision',
'session_history.user',
'session_history.user_id as custom_user_id',
'(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
THEN users.username ELSE users.friendly_name END) AS friendly_name'
]
try:
query = data_tables.ssp_query(table_name='session_history',
columns=columns,
custom_where=[custom_where],
group_by=['ip_address'],
join_types=['JOIN',
'JOIN',
'JOIN'],
join_tables=['users',
'session_history_metadata',
'session_history_media_info'],
join_evals=[['session_history.user_id', 'users.user_id'],
['session_history.id', 'session_history_metadata.id'],
['session_history.id', 'session_history_media_info.id']],
kwargs=kwargs)
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for get_unique_ips: %s." % e)
return default_return
results = query['result']
rows = []
for item in results:
if item["media_type"] == 'episode' and item["parent_thumb"]:
thumb = item["parent_thumb"]
elif item["media_type"] == 'episode':
thumb = item["grandparent_thumb"]
else:
thumb = item["thumb"]
# Rename Mystery platform names
platform = common.PLATFORM_NAME_OVERRIDES.get(item["platform"], item["platform"])
row = {'id': item['id'],
'last_seen': item['last_seen'],
'ip_address': item['ip_address'],
'play_count': item['play_count'],
'platform': platform,
'player': item['player'],
'last_played': item['last_played'],
'rating_key': item['rating_key'],
'thumb': thumb,
'media_type': item['media_type'],
'parent_title': item['parent_title'],
'year': item['year'],
'media_index': item['media_index'],
'parent_media_index': item['parent_media_index'],
'live': item['live'],
'originally_available_at': item['originally_available_at'],
'guid': item['guid'],
'transcode_decision': item['transcode_decision'],
'friendly_name': item['friendly_name'],
'user_id': item['custom_user_id']
}
rows.append(row)
dict = {'recordsFiltered': query['filteredCount'],
'recordsTotal': query['totalCount'],
'data': session.friendly_name_to_username(rows),
'draw': query['draw']
}
return dict
def set_config(self, user_id=None, friendly_name='', custom_thumb='', do_notify=1, keep_history=1, allow_guest=1):
if str(user_id).isdigit():
monitor_db = database.MonitorDatabase()
key_dict = {'user_id': user_id}
value_dict = {'friendly_name': friendly_name,
'custom_avatar_url': custom_thumb,
'do_notify': do_notify,
'keep_history': keep_history,
'allow_guest': allow_guest
}
try:
monitor_db.upsert('users', value_dict, key_dict)
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for set_config: %s." % e)
def get_details(self, user_id=None, user=None, email=None):
default_return = {'user_id': 0,
'username': 'Local',
'friendly_name': 'Local',
'user_thumb': common.DEFAULT_USER_THUMB,
'email': '',
'is_admin': '',
'is_home_user': 0,
'is_allow_sync': 0,
'is_restricted': 0,
'do_notify': 0,
'keep_history': 1,
'allow_guest': 0,
'deleted_user': 0,
'shared_libraries': ()
}
if user_id is None and not user and not email:
return default_return
def get_user_details(user_id=user_id, user=user, email=email):
monitor_db = database.MonitorDatabase()
try:
if str(user_id).isdigit():
query = 'SELECT user_id, username, friendly_name, thumb AS user_thumb, custom_avatar_url AS custom_thumb, ' \
'email, is_admin, is_home_user, is_allow_sync, is_restricted, do_notify, keep_history, deleted_user, ' \
'allow_guest, shared_libraries ' \
'FROM users ' \
'WHERE user_id = ? '
result = monitor_db.select(query, args=[user_id])
elif user:
query = 'SELECT user_id, username, friendly_name, thumb AS user_thumb, custom_avatar_url AS custom_thumb, ' \
'email, is_admin, is_home_user, is_allow_sync, is_restricted, do_notify, keep_history, deleted_user, ' \
'allow_guest, shared_libraries ' \
'FROM users ' \
'WHERE username = ? COLLATE NOCASE '
result = monitor_db.select(query, args=[user])
elif email:
query = 'SELECT user_id, username, friendly_name, thumb AS user_thumb, custom_avatar_url AS custom_thumb, ' \
'email, is_admin, is_home_user, is_allow_sync, is_restricted, do_notify, keep_history, deleted_user, ' \
'allow_guest, shared_libraries ' \
'FROM users ' \
'WHERE email = ? COLLATE NOCASE '
result = monitor_db.select(query, args=[email])
else:
result = []
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for get_details: %s." % e)
result = []
user_details = {}
if result:
for item in result:
if session.get_session_user_id():
friendly_name = session.get_session_user()
elif item['friendly_name']:
friendly_name = item['friendly_name']
else:
friendly_name = item['username']
if item['custom_thumb'] and item['custom_thumb'] != item['user_thumb']:
user_thumb = item['custom_thumb']
elif item['user_thumb']:
user_thumb = item['user_thumb']
else:
user_thumb = common.DEFAULT_USER_THUMB
shared_libraries = tuple(item['shared_libraries'].split(';')) if item['shared_libraries'] else ()
user_details = {'user_id': item['user_id'],
'username': item['username'],
'friendly_name': friendly_name,
'user_thumb': user_thumb,
'email': item['email'],
'is_admin': item['is_admin'],
'is_home_user': item['is_home_user'],
'is_allow_sync': item['is_allow_sync'],
'is_restricted': item['is_restricted'],
'do_notify': item['do_notify'],
'keep_history': item['keep_history'],
'deleted_user': item['deleted_user'],
'allow_guest': item['allow_guest'],
'shared_libraries': shared_libraries
}
return user_details
user_details = get_user_details(user_id=user_id, user=user)
if user_details:
return user_details
else:
logger.warn(u"Tautulli Users :: Unable to retrieve user %s from database. Requesting user list refresh."
% user_id if user_id else user)
# Let's first refresh the user list to make sure the user isn't newly added and not in the db yet
refresh_users()
user_details = get_user_details(user_id=user_id, user=user)
if user_details:
return user_details
else:
logger.warn(u"Tautulli Users :: Unable to retrieve user %s from database. Returning 'Local' user."
% user_id if user_id else user)
# If there is no user data we must return something
# Use "Local" user to retain compatibility with PlexWatch database value
return default_return
def get_watch_time_stats(self, user_id=None, grouping=None):
if not session.allow_session_user(user_id):
return []
if grouping is None:
grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES
monitor_db = database.MonitorDatabase()
time_queries = [1, 7, 30, 0]
user_watch_time_stats = []
group_by = 'reference_id' if grouping else 'id'
for days in time_queries:
try:
if days > 0:
if str(user_id).isdigit():
query = 'SELECT (SUM(stopped - started) - ' \
' SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END)) AS total_time, ' \
'COUNT(DISTINCT %s) AS total_plays ' \
'FROM session_history ' \
'WHERE datetime(stopped, "unixepoch", "localtime") >= datetime("now", "-%s days", "localtime") ' \
'AND user_id = ? ' % (group_by, days)
result = monitor_db.select(query, args=[user_id])
else:
result = []
else:
if str(user_id).isdigit():
query = 'SELECT (SUM(stopped - started) - ' \
' SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END)) AS total_time, ' \
'COUNT(DISTINCT %s) AS total_plays ' \
'FROM session_history ' \
'WHERE user_id = ? ' % group_by
result = monitor_db.select(query, args=[user_id])
else:
result = []
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for get_watch_time_stats: %s." % e)
result = []
for item in result:
if item['total_time']:
total_time = item['total_time']
total_plays = item['total_plays']
else:
total_time = 0
total_plays = 0
row = {'query_days': days,
'total_time': total_time,
'total_plays': total_plays
}
user_watch_time_stats.append(row)
return user_watch_time_stats
def get_player_stats(self, user_id=None, grouping=None):
if not session.allow_session_user(user_id):
return []
if grouping is None:
grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES
monitor_db = database.MonitorDatabase()
player_stats = []
result_id = 0
group_by = 'reference_id' if grouping else 'id'
try:
if str(user_id).isdigit():
query = 'SELECT player, COUNT(DISTINCT %s) as player_count, platform ' \
'FROM session_history ' \
'WHERE user_id = ? ' \
'GROUP BY player ' \
'ORDER BY player_count DESC' % group_by
result = monitor_db.select(query, args=[user_id])
else:
result = []
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for get_player_stats: %s." % e)
result = []
for item in result:
# Rename Mystery platform names
platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])
platform_name = next((v for k, v in common.PLATFORM_NAMES.iteritems() if k in platform.lower()), 'default')
row = {'player_name': item['player'],
'platform': platform,
'platform_name': platform_name,
'total_plays': item['player_count'],
'result_id': result_id
}
player_stats.append(row)
result_id += 1
return player_stats
def get_recently_watched(self, user_id=None, limit='10'):
if not session.allow_session_user(user_id):
return []
monitor_db = database.MonitorDatabase()
recently_watched = []
if not limit.isdigit():
limit = '10'
try:
if str(user_id).isdigit():
query = 'SELECT session_history.id, session_history.media_type, guid, ' \
'session_history.rating_key, session_history.parent_rating_key, session_history.grandparent_rating_key, ' \
'title, parent_title, grandparent_title, original_title, ' \
'thumb, parent_thumb, grandparent_thumb, media_index, parent_media_index, ' \
'year, originally_available_at, added_at, live, started, user ' \
'FROM session_history_metadata ' \
'JOIN session_history ON session_history_metadata.id = session_history.id ' \
'WHERE user_id = ? ' \
'GROUP BY (CASE WHEN session_history.media_type = "track" THEN session_history.parent_rating_key ' \
' ELSE session_history.rating_key END) ' \
'ORDER BY started DESC LIMIT ?'
result = monitor_db.select(query, args=[user_id, limit])
else:
result = []
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for get_recently_watched: %s." % e)
result = []
for row in result:
if row['media_type'] == 'episode' and row['parent_thumb']:
thumb = row['parent_thumb']
elif row['media_type'] == 'episode':
thumb = row['grandparent_thumb']
else:
thumb = row['thumb']
recent_output = {'row_id': row['id'],
'media_type': row['media_type'],
'rating_key': row['rating_key'],
'parent_rating_key': row['parent_rating_key'],
'grandparent_rating_key': row['grandparent_rating_key'],
'title': row['title'],
'parent_title': row['parent_title'],
'grandparent_title': row['grandparent_title'],
'original_title': row['original_title'],
'thumb': thumb,
'media_index': row['media_index'],
'parent_media_index': row['parent_media_index'],
'year': row['year'],
'originally_available_at': row['originally_available_at'],
'live': row['live'],
'guid': row['guid'],
'time': row['started'],
'user': row['user']
}
recently_watched.append(recent_output)
return recently_watched
def get_users(self):
monitor_db = database.MonitorDatabase()
try:
query = 'SELECT user_id, username, friendly_name, thumb, custom_avatar_url, email, ' \
'is_admin, is_home_user, is_allow_sync, is_restricted, ' \
'do_notify, keep_history, allow_guest, server_token, shared_libraries, ' \
'filter_all, filter_movies, filter_tv, filter_music, filter_photos ' \
'FROM users WHERE deleted_user = 0'
result = monitor_db.select(query=query)
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for get_users: %s." % e)
return None
users = []
for item in result:
user = {'user_id': item['user_id'],
'username': item['username'],
'friendly_name': item['friendly_name'] or item['username'],
'thumb': item['custom_avatar_url'] or item['thumb'],
'email': item['email'],
'is_admin': item['is_admin'],
'is_home_user': item['is_home_user'],
'is_allow_sync': item['is_allow_sync'],
'is_restricted': item['is_restricted'],
'do_notify': item['do_notify'],
'keep_history': item['keep_history'],
'allow_guest': item['allow_guest'],
'server_token': item['server_token'],
'shared_libraries': item['shared_libraries'],
'filter_all': item['filter_all'],
'filter_movies': item['filter_movies'],
'filter_tv': item['filter_tv'],
'filter_music': item['filter_music'],
'filter_photos': item['filter_photos'],
}
users.append(user)
return users
def delete_all_history(self, user_id=None):
monitor_db = database.MonitorDatabase()
try:
if str(user_id).isdigit():
logger.info(u"Tautulli Users :: Deleting all history for user id %s from database." % user_id)
session_history_media_info_del = \
monitor_db.action('DELETE FROM '
'session_history_media_info '
'WHERE session_history_media_info.id IN (SELECT session_history_media_info.id '
'FROM session_history_media_info '
'JOIN session_history ON session_history_media_info.id = session_history.id '
'WHERE session_history.user_id = ?)', [user_id])
session_history_metadata_del = \
monitor_db.action('DELETE FROM '
'session_history_metadata '
'WHERE session_history_metadata.id IN (SELECT session_history_metadata.id '
'FROM session_history_metadata '
'JOIN session_history ON session_history_metadata.id = session_history.id '
'WHERE session_history.user_id = ?)', [user_id])
session_history_del = \
monitor_db.action('DELETE FROM '
'session_history '
'WHERE session_history.user_id = ?', [user_id])
return 'Deleted all items for user_id %s.' % user_id
else:
return 'Unable to delete items. Input user_id not valid.'
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for delete_all_history: %s." % e)
def delete(self, user_id=None):
monitor_db = database.MonitorDatabase()
try:
if str(user_id).isdigit():
self.delete_all_history(user_id)
logger.info(u"Tautulli Users :: Deleting user with id %s from database." % user_id)
monitor_db.action('UPDATE users SET deleted_user = 1 WHERE user_id = ?', [user_id])
monitor_db.action('UPDATE users SET keep_history = 0 WHERE user_id = ?', [user_id])
monitor_db.action('UPDATE users SET do_notify = 0 WHERE user_id = ?', [user_id])
return 'Deleted user with id %s.' % user_id
else:
return 'Unable to delete user, user_id not valid.'
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for delete: %s." % e)
def undelete(self, user_id=None, username=None):
monitor_db = database.MonitorDatabase()
try:
if user_id and str(user_id).isdigit():
query = 'SELECT * FROM users WHERE user_id = ?'
result = monitor_db.select(query=query, args=[user_id])
if result:
logger.info(u"Tautulli Users :: Re-adding user with id %s to database." % user_id)
monitor_db.action('UPDATE users SET deleted_user = 0 WHERE user_id = ?', [user_id])
monitor_db.action('UPDATE users SET keep_history = 1 WHERE user_id = ?', [user_id])
monitor_db.action('UPDATE users SET do_notify = 1 WHERE user_id = ?', [user_id])
return True
else:
return False
elif username:
query = 'SELECT * FROM users WHERE username = ?'
result = monitor_db.select(query=query, args=[username])
if result:
logger.info(u"Tautulli Users :: Re-adding user with username %s to database." % username)
monitor_db.action('UPDATE users SET deleted_user = 0 WHERE username = ?', [username])
monitor_db.action('UPDATE users SET keep_history = 1 WHERE username = ?', [username])
monitor_db.action('UPDATE users SET do_notify = 1 WHERE username = ?', [username])
return True
else:
return False
except Exception as e:
logger.warn(u"Tautulli Users :: Unable to execute database query for undelete: %s." % e)
# Keep method for PlexWatch/Plexivity import
def get_user_id(self, user=None):
    """Look up the user_id for a username; None when unknown or on error."""
    if not user:
        return None

    try:
        monitor_db = database.MonitorDatabase()
        row = monitor_db.select_single('SELECT user_id FROM users WHERE username = ?',
                                       args=[user])
    except:
        return None

    return row['user_id'] if row else None
def get_user_names(self, kwargs=None):
    """Return active (non-deleted) users as friendly-name mappings.

    When a session user is logged in, the result is restricted to that
    user only. Returns None if the database query fails.
    """
    monitor_db = database.MonitorDatabase()

    user_cond = ''
    args = []
    session_user_id = session.get_session_user_id()
    if session_user_id:
        # Use a bound parameter instead of string interpolation, matching
        # every other query in this class and avoiding SQL injection risk.
        user_cond = 'AND user_id = ? '
        args.append(session_user_id)

    try:
        query = 'SELECT user_id, ' \
                '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
                THEN users.username ELSE users.friendly_name END) AS friendly_name ' \
                'FROM users ' \
                'WHERE deleted_user = 0 %s' % user_cond

        result = monitor_db.select(query, args=args)
    except Exception as e:
        logger.warn(u"Tautulli Users :: Unable to execute database query for get_user_names: %s." % e)
        return None

    return session.friendly_name_to_username(result)
def get_tokens(self, user_id=None):
    """Return guest-access flag and Plex tokens for an active user.

    Returns a dict with 'allow_guest', 'user_token' and 'server_token',
    or None when user_id is missing, unknown, deleted, or the query fails.
    """
    if not user_id:
        return None

    try:
        monitor_db = database.MonitorDatabase()
        query = 'SELECT allow_guest, user_token, server_token FROM users ' \
                'WHERE user_id = ? AND deleted_user = 0'
        result = monitor_db.select_single(query, args=[user_id])
    except Exception as e:
        # Was a bare `except:` that swallowed all errors silently; log it
        # like every other query failure in this class. Return value for
        # callers is unchanged (None on failure).
        logger.warn(u"Tautulli Users :: Unable to execute database query for get_tokens: %s." % e)
        return None

    if result:
        return {'allow_guest': result['allow_guest'],
                'user_token': result['user_token'],
                'server_token': result['server_token']
                }
    return None
def get_filters(self, user_id=None):
    """Return per-media-type library content filters for a user.

    Each of the filter_* columns holds '|'-separated query fragments;
    fragments containing contentRating= or label= are parsed into a dict,
    then split into 'content_rating' and 'labels' tuples.

    Returns a dict keyed by filter column name; {} when user_id is falsy.
    """
    import urlparse  # Python 2 stdlib (this module uses iteritems below)

    if not user_id:
        return {}

    try:
        monitor_db = database.MonitorDatabase()
        query = 'SELECT filter_all, filter_movies, filter_tv, filter_music, filter_photos FROM users ' \
                'WHERE user_id = ?'
        result = monitor_db.select_single(query, args=[user_id])
    except Exception as e:
        logger.warn(u"Tautulli Users :: Unable to execute database query for get_filters: %s." % e)
        result = {}

    filters_list = {}
    # Guard: select_single may yield no mapping for an unknown user.
    for k, v in (result or {}).iteritems():
        filters = {}

        # Guard: a NULL column comes back as None; treat it as "no filter"
        # instead of raising AttributeError on None.split().
        for f in (v or '').split('|'):
            if 'contentRating=' in f or 'label=' in f:
                filters.update(dict(urlparse.parse_qsl(f)))

        filters['content_rating'] = tuple(f for f in filters.pop('contentRating', '').split(',') if f)
        filters['labels'] = tuple(f for f in filters.pop('label', '').split(',') if f)

        filters_list[k] = filters

    return filters_list
def set_user_login(self, user_id=None, user=None, user_group=None, ip_address=None, host=None, user_agent=None, success=0):
    """Record a login attempt in the user_login table."""
    # Reject only ids that are present but non-numeric (None is allowed).
    if user_id is not None and not str(user_id).isdigit():
        return

    monitor_db = database.MonitorDatabase()

    keys = {'timestamp': int(time.time()),
            'user_id': user_id}

    values = {'user': user,
              'user_group': user_group,
              'ip_address': ip_address,
              'host': host,
              'user_agent': user_agent,
              'success': success}

    try:
        monitor_db.upsert(table_name='user_login', key_dict=keys, value_dict=values)
    except Exception as e:
        logger.warn(u"Tautulli Users :: Unable to execute database query for set_login_log: %s." % e)
def get_datatables_user_login(self, user_id=None, kwargs=None):
    """Build the datatables server-side result for the user login log.

    Parameters:
        user_id: optional id to filter on; session users are always
            restricted to their own rows regardless of this value.
        kwargs: datatables request parameters passed through to ssp_query.

    Returns a dict with 'recordsFiltered', 'recordsTotal', 'data' and
    'draw', or a default error payload on failure / denied access.
    """
    default_return = {'recordsFiltered': 0,
                      'recordsTotal': 0,
                      'draw': 0,
                      'data': 'null',
                      'error': 'Unable to execute database query.'}

    if not session.allow_session_user(user_id):
        return default_return

    data_tables = datatables.DataTables()

    if session.get_session_user_id():
        custom_where = [['user_login.user_id', session.get_session_user_id()]]
    else:
        custom_where = [['user_login.user_id', user_id]] if user_id else []

    columns = ['user_login.timestamp',
               'user_login.user_id',
               'user_login.user',
               'user_login.user_group',
               'user_login.ip_address',
               'user_login.host',
               'user_login.user_agent',
               'user_login.success',
               '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
                THEN users.username ELSE users.friendly_name END) AS friendly_name'
               ]
    try:
        query = data_tables.ssp_query(table_name='user_login',
                                      columns=columns,
                                      custom_where=custom_where,
                                      group_by=[],
                                      join_types=['LEFT OUTER JOIN'],
                                      join_tables=['users'],
                                      join_evals=[['user_login.user_id', 'users.user_id']],
                                      kwargs=kwargs)
    except Exception as e:
        logger.warn(u"Tautulli Users :: Unable to execute database query for get_datatables_user_login: %s." % e)
        return default_return

    results = query['result']

    rows = []
    for item in results:
        # Renamed from (os, browser) so the local no longer shadows the
        # `os` module name.
        (agent_os, agent_browser) = httpagentparser.simple_detect(item['user_agent'])

        row = {'timestamp': item['timestamp'],
               'user_id': item['user_id'],
               'user_group': item['user_group'],
               'ip_address': item['ip_address'],
               'host': item['host'],
               'user_agent': item['user_agent'],
               'os': agent_os,
               'browser': agent_browser,
               'success': item['success'],
               'friendly_name': item['friendly_name'] or item['user']
               }

        rows.append(row)

    # Renamed from `dict`, which shadowed the builtin.
    output = {'recordsFiltered': query['filteredCount'],
              'recordsTotal': query['totalCount'],
              'data': session.friendly_name_to_username(rows),
              'draw': query['draw']
              }

    return output
def delete_login_log(self):
    """Remove every row from the login log and compact the database.

    Returns True on success, False when the queries fail.
    """
    monitor_db = database.MonitorDatabase()

    try:
        logger.info(u"Tautulli Users :: Clearing login logs from database.")
        monitor_db.action('DELETE FROM user_login')
        monitor_db.action('VACUUM')
    except Exception as e:
        logger.warn(u"Tautulli Users :: Unable to execute database query for delete_login_log: %s." % e)
        return False

    return True
"# This file is part of Tautulli.\n",
"#\n",
"# Tautulli is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# Tautulli is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with Tautulli. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"import httpagentparser\n",
"import time\n",
"\n",
"import plexpy\n",
"import common\n",
"import database\n",
"import datatables\n",
"import helpers\n",
"import libraries\n",
"import logger\n",
"import plextv\n",
"import session\n",
"\n",
"\n",
"def refresh_users():\n",
" logger.info(u\"Tautulli Users :: Requesting users list refresh...\")\n",
" result = plextv.PlexTV().get_full_users_list()\n",
"\n",
" if result:\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" for item in result:\n",
"\n",
" if item.get('shared_libraries'):\n",
" item['shared_libraries'] = ';'.join(item['shared_libraries'])\n",
" elif item.get('server_token'):\n",
" libs = libraries.Libraries().get_sections()\n",
" item['shared_libraries'] = ';'.join([str(l['section_id']) for l in libs])\n",
"\n",
" keys_dict = {\"user_id\": item.pop('user_id')}\n",
"\n",
" # Check if we've set a custom avatar if so don't overwrite it.\n",
" if keys_dict['user_id']:\n",
" avatar_urls = monitor_db.select('SELECT thumb, custom_avatar_url '\n",
" 'FROM users WHERE user_id = ?',\n",
" [keys_dict['user_id']])\n",
" if avatar_urls:\n",
" if not avatar_urls[0]['custom_avatar_url'] or \\\n",
" avatar_urls[0]['custom_avatar_url'] == avatar_urls[0]['thumb']:\n",
" item['custom_avatar_url'] = item['thumb']\n",
" else:\n",
" item['custom_avatar_url'] = item['thumb']\n",
"\n",
" monitor_db.upsert('users', item, keys_dict)\n",
"\n",
" logger.info(u\"Tautulli Users :: Users list refreshed.\")\n",
" return True\n",
" else:\n",
" logger.warn(u\"Tautulli Users :: Unable to refresh users list.\")\n",
" return False\n",
"\n",
"\n",
"class Users(object):\n",
"\n",
" def __init__(self):\n",
" pass\n",
"\n",
" def get_datatables_list(self, kwargs=None, grouping=None):\n",
" default_return = {'recordsFiltered': 0,\n",
" 'recordsTotal': 0,\n",
" 'draw': 0,\n",
" 'data': 'null',\n",
" 'error': 'Unable to execute database query.'}\n",
"\n",
" data_tables = datatables.DataTables()\n",
"\n",
" custom_where = [['users.deleted_user', 0]]\n",
"\n",
" if grouping is None:\n",
" grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES\n",
"\n",
" if session.get_session_user_id():\n",
" custom_where.append(['users.user_id', session.get_session_user_id()])\n",
"\n",
" if kwargs.get('user_id'):\n",
" custom_where.append(['users.user_id', kwargs.get('user_id')])\n",
"\n",
" group_by = 'session_history.reference_id' if grouping else 'session_history.id'\n",
"\n",
" columns = ['users.user_id',\n",
" '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = \"\" \\\n",
" THEN users.username ELSE users.friendly_name END) AS friendly_name',\n",
" 'users.thumb AS user_thumb',\n",
" 'users.custom_avatar_url AS custom_thumb',\n",
" 'COUNT(DISTINCT %s) AS plays' % group_by,\n",
" 'SUM(CASE WHEN session_history.stopped > 0 THEN (session_history.stopped - session_history.started) \\\n",
" ELSE 0 END) - SUM(CASE WHEN session_history.paused_counter IS NULL THEN 0 ELSE \\\n",
" session_history.paused_counter END) AS duration',\n",
" 'MAX(session_history.started) AS last_seen',\n",
" 'MAX(session_history.id) AS id',\n",
" 'session_history_metadata.full_title AS last_played',\n",
" 'session_history.ip_address',\n",
" 'session_history.platform',\n",
" 'session_history.player',\n",
" 'session_history.rating_key',\n",
" 'session_history_metadata.media_type',\n",
" 'session_history_metadata.thumb',\n",
" 'session_history_metadata.parent_thumb',\n",
" 'session_history_metadata.grandparent_thumb',\n",
" 'session_history_metadata.parent_title',\n",
" 'session_history_metadata.year',\n",
" 'session_history_metadata.media_index',\n",
" 'session_history_metadata.parent_media_index',\n",
" 'session_history_metadata.live',\n",
" 'session_history_metadata.added_at',\n",
" 'session_history_metadata.originally_available_at',\n",
" 'session_history_metadata.guid',\n",
" 'session_history_media_info.transcode_decision',\n",
" 'users.do_notify as do_notify',\n",
" 'users.keep_history as keep_history',\n",
" 'users.allow_guest as allow_guest'\n",
" ]\n",
" try:\n",
" query = data_tables.ssp_query(table_name='users',\n",
" columns=columns,\n",
" custom_where=custom_where,\n",
" group_by=['users.user_id'],\n",
" join_types=['LEFT OUTER JOIN',\n",
" 'LEFT OUTER JOIN',\n",
" 'LEFT OUTER JOIN'],\n",
" join_tables=['session_history',\n",
" 'session_history_metadata',\n",
" 'session_history_media_info'],\n",
" join_evals=[['session_history.user_id', 'users.user_id'],\n",
" ['session_history.id', 'session_history_metadata.id'],\n",
" ['session_history.id', 'session_history_media_info.id']],\n",
" kwargs=kwargs)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_list: %s.\" % e)\n",
" return default_return\n",
"\n",
" users = query['result']\n",
"\n",
" rows = []\n",
" for item in users:\n",
" if item['media_type'] == 'episode' and item['parent_thumb']:\n",
" thumb = item['parent_thumb']\n",
" elif item['media_type'] == 'episode':\n",
" thumb = item['grandparent_thumb']\n",
" else:\n",
" thumb = item['thumb']\n",
"\n",
" if item['custom_thumb'] and item['custom_thumb'] != item['user_thumb']:\n",
" user_thumb = item['custom_thumb']\n",
" elif item['user_thumb']:\n",
" user_thumb = item['user_thumb']\n",
" else:\n",
" user_thumb = common.DEFAULT_USER_THUMB\n",
"\n",
" # Rename Mystery platform names\n",
" platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])\n",
"\n",
" row = {'user_id': item['user_id'],\n",
" 'friendly_name': item['friendly_name'],\n",
" 'user_thumb': user_thumb,\n",
" 'plays': item['plays'],\n",
" 'duration': item['duration'],\n",
" 'last_seen': item['last_seen'],\n",
" 'last_played': item['last_played'],\n",
" 'id': item['id'],\n",
" 'ip_address': item['ip_address'],\n",
" 'platform': platform,\n",
" 'player': item['player'],\n",
" 'rating_key': item['rating_key'],\n",
" 'media_type': item['media_type'],\n",
" 'thumb': thumb,\n",
" 'parent_title': item['parent_title'],\n",
" 'year': item['year'],\n",
" 'media_index': item['media_index'],\n",
" 'parent_media_index': item['parent_media_index'],\n",
" 'live': item['live'],\n",
" 'originally_available_at': item['originally_available_at'],\n",
" 'guid': item['guid'],\n",
" 'transcode_decision': item['transcode_decision'],\n",
" 'do_notify': helpers.checked(item['do_notify']),\n",
" 'keep_history': helpers.checked(item['keep_history']),\n",
" 'allow_guest': helpers.checked(item['allow_guest'])\n",
" }\n",
"\n",
" rows.append(row)\n",
"\n",
" dict = {'recordsFiltered': query['filteredCount'],\n",
" 'recordsTotal': query['totalCount'],\n",
" 'data': session.friendly_name_to_username(rows),\n",
" 'draw': query['draw']\n",
" }\n",
"\n",
" return dict\n",
"\n",
" def get_datatables_unique_ips(self, user_id=None, kwargs=None):\n",
" default_return = {'recordsFiltered': 0,\n",
" 'recordsTotal': 0,\n",
" 'draw': 0,\n",
" 'data': 'null',\n",
" 'error': 'Unable to execute database query.'}\n",
"\n",
" if not session.allow_session_user(user_id):\n",
" return default_return\n",
"\n",
" data_tables = datatables.DataTables()\n",
"\n",
" custom_where = ['users.user_id', user_id]\n",
"\n",
" columns = ['session_history.id',\n",
" 'MAX(session_history.started) AS last_seen',\n",
" 'session_history.ip_address',\n",
" 'COUNT(session_history.id) AS play_count',\n",
" 'session_history.platform',\n",
" 'session_history.player',\n",
" 'session_history.rating_key',\n",
" 'session_history_metadata.full_title AS last_played',\n",
" 'session_history_metadata.thumb',\n",
" 'session_history_metadata.parent_thumb',\n",
" 'session_history_metadata.grandparent_thumb',\n",
" 'session_history_metadata.media_type',\n",
" 'session_history_metadata.parent_title',\n",
" 'session_history_metadata.year',\n",
" 'session_history_metadata.media_index',\n",
" 'session_history_metadata.parent_media_index',\n",
" 'session_history_metadata.live',\n",
" 'session_history_metadata.added_at',\n",
" 'session_history_metadata.originally_available_at',\n",
" 'session_history_metadata.guid',\n",
" 'session_history_media_info.transcode_decision',\n",
" 'session_history.user',\n",
" 'session_history.user_id as custom_user_id',\n",
" '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = \"\" \\\n",
" THEN users.username ELSE users.friendly_name END) AS friendly_name'\n",
" ]\n",
"\n",
" try:\n",
" query = data_tables.ssp_query(table_name='session_history',\n",
" columns=columns,\n",
" custom_where=[custom_where],\n",
" group_by=['ip_address'],\n",
" join_types=['JOIN',\n",
" 'JOIN',\n",
" 'JOIN'],\n",
" join_tables=['users',\n",
" 'session_history_metadata',\n",
" 'session_history_media_info'],\n",
" join_evals=[['session_history.user_id', 'users.user_id'],\n",
" ['session_history.id', 'session_history_metadata.id'],\n",
" ['session_history.id', 'session_history_media_info.id']],\n",
" kwargs=kwargs)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_unique_ips: %s.\" % e)\n",
" return default_return\n",
"\n",
" results = query['result']\n",
"\n",
" rows = []\n",
" for item in results:\n",
" if item[\"media_type\"] == 'episode' and item[\"parent_thumb\"]:\n",
" thumb = item[\"parent_thumb\"]\n",
" elif item[\"media_type\"] == 'episode':\n",
" thumb = item[\"grandparent_thumb\"]\n",
" else:\n",
" thumb = item[\"thumb\"]\n",
"\n",
" # Rename Mystery platform names\n",
" platform = common.PLATFORM_NAME_OVERRIDES.get(item[\"platform\"], item[\"platform\"])\n",
"\n",
" row = {'id': item['id'],\n",
" 'last_seen': item['last_seen'],\n",
" 'ip_address': item['ip_address'],\n",
" 'play_count': item['play_count'],\n",
" 'platform': platform,\n",
" 'player': item['player'],\n",
" 'last_played': item['last_played'],\n",
" 'rating_key': item['rating_key'],\n",
" 'thumb': thumb,\n",
" 'media_type': item['media_type'],\n",
" 'parent_title': item['parent_title'],\n",
" 'year': item['year'],\n",
" 'media_index': item['media_index'],\n",
" 'parent_media_index': item['parent_media_index'],\n",
" 'live': item['live'],\n",
" 'originally_available_at': item['originally_available_at'],\n",
" 'guid': item['guid'],\n",
" 'transcode_decision': item['transcode_decision'],\n",
" 'friendly_name': item['friendly_name'],\n",
" 'user_id': item['custom_user_id']\n",
" }\n",
"\n",
" rows.append(row)\n",
"\n",
" dict = {'recordsFiltered': query['filteredCount'],\n",
" 'recordsTotal': query['totalCount'],\n",
" 'data': session.friendly_name_to_username(rows),\n",
" 'draw': query['draw']\n",
" }\n",
"\n",
" return dict\n",
"\n",
" def set_config(self, user_id=None, friendly_name='', custom_thumb='', do_notify=1, keep_history=1, allow_guest=1):\n",
" if str(user_id).isdigit():\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" key_dict = {'user_id': user_id}\n",
" value_dict = {'friendly_name': friendly_name,\n",
" 'custom_avatar_url': custom_thumb,\n",
" 'do_notify': do_notify,\n",
" 'keep_history': keep_history,\n",
" 'allow_guest': allow_guest\n",
" }\n",
" try:\n",
" monitor_db.upsert('users', value_dict, key_dict)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for set_config: %s.\" % e)\n",
"\n",
" def get_details(self, user_id=None, user=None, email=None):\n",
" default_return = {'user_id': 0,\n",
" 'username': 'Local',\n",
" 'friendly_name': 'Local',\n",
" 'user_thumb': common.DEFAULT_USER_THUMB,\n",
" 'email': '',\n",
" 'is_admin': '',\n",
" 'is_home_user': 0,\n",
" 'is_allow_sync': 0,\n",
" 'is_restricted': 0,\n",
" 'do_notify': 0,\n",
" 'keep_history': 1,\n",
" 'allow_guest': 0,\n",
" 'deleted_user': 0,\n",
" 'shared_libraries': ()\n",
" }\n",
"\n",
" if user_id is None and not user and not email:\n",
" return default_return\n",
"\n",
" def get_user_details(user_id=user_id, user=user, email=email):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" try:\n",
" if str(user_id).isdigit():\n",
" query = 'SELECT user_id, username, friendly_name, thumb AS user_thumb, custom_avatar_url AS custom_thumb, ' \\\n",
" 'email, is_admin, is_home_user, is_allow_sync, is_restricted, do_notify, keep_history, deleted_user, ' \\\n",
" 'allow_guest, shared_libraries ' \\\n",
" 'FROM users ' \\\n",
" 'WHERE user_id = ? '\n",
" result = monitor_db.select(query, args=[user_id])\n",
" elif user:\n",
" query = 'SELECT user_id, username, friendly_name, thumb AS user_thumb, custom_avatar_url AS custom_thumb, ' \\\n",
" 'email, is_admin, is_home_user, is_allow_sync, is_restricted, do_notify, keep_history, deleted_user, ' \\\n",
" 'allow_guest, shared_libraries ' \\\n",
" 'FROM users ' \\\n",
" 'WHERE username = ? COLLATE NOCASE '\n",
" result = monitor_db.select(query, args=[user])\n",
" elif email:\n",
" query = 'SELECT user_id, username, friendly_name, thumb AS user_thumb, custom_avatar_url AS custom_thumb, ' \\\n",
" 'email, is_admin, is_home_user, is_allow_sync, is_restricted, do_notify, keep_history, deleted_user, ' \\\n",
" 'allow_guest, shared_libraries ' \\\n",
" 'FROM users ' \\\n",
" 'WHERE email = ? COLLATE NOCASE '\n",
" result = monitor_db.select(query, args=[email])\n",
" else:\n",
" result = []\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_details: %s.\" % e)\n",
" result = []\n",
"\n",
" user_details = {}\n",
" if result:\n",
" for item in result:\n",
" if session.get_session_user_id():\n",
" friendly_name = session.get_session_user()\n",
" elif item['friendly_name']:\n",
" friendly_name = item['friendly_name']\n",
" else:\n",
" friendly_name = item['username']\n",
"\n",
" if item['custom_thumb'] and item['custom_thumb'] != item['user_thumb']:\n",
" user_thumb = item['custom_thumb']\n",
" elif item['user_thumb']:\n",
" user_thumb = item['user_thumb']\n",
" else:\n",
" user_thumb = common.DEFAULT_USER_THUMB\n",
"\n",
" shared_libraries = tuple(item['shared_libraries'].split(';')) if item['shared_libraries'] else ()\n",
"\n",
" user_details = {'user_id': item['user_id'],\n",
" 'username': item['username'],\n",
" 'friendly_name': friendly_name,\n",
" 'user_thumb': user_thumb,\n",
" 'email': item['email'],\n",
" 'is_admin': item['is_admin'],\n",
" 'is_home_user': item['is_home_user'],\n",
" 'is_allow_sync': item['is_allow_sync'],\n",
" 'is_restricted': item['is_restricted'],\n",
" 'do_notify': item['do_notify'],\n",
" 'keep_history': item['keep_history'],\n",
" 'deleted_user': item['deleted_user'],\n",
" 'allow_guest': item['allow_guest'],\n",
" 'shared_libraries': shared_libraries\n",
" }\n",
" return user_details\n",
"\n",
" user_details = get_user_details(user_id=user_id, user=user)\n",
"\n",
" if user_details:\n",
" return user_details\n",
"\n",
" else:\n",
" logger.warn(u\"Tautulli Users :: Unable to retrieve user %s from database. Requesting user list refresh.\"\n",
" % user_id if user_id else user)\n",
" # Let's first refresh the user list to make sure the user isn't newly added and not in the db yet\n",
" refresh_users()\n",
"\n",
" user_details = get_user_details(user_id=user_id, user=user)\n",
"\n",
" if user_details:\n",
" return user_details\n",
"\n",
" else:\n",
" logger.warn(u\"Tautulli Users :: Unable to retrieve user %s from database. Returning 'Local' user.\"\n",
" % user_id if user_id else user)\n",
" # If there is no user data we must return something\n",
" # Use \"Local\" user to retain compatibility with PlexWatch database value\n",
" return default_return\n",
"\n",
" def get_watch_time_stats(self, user_id=None, grouping=None):\n",
" if not session.allow_session_user(user_id):\n",
" return []\n",
"\n",
" if grouping is None:\n",
" grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES\n",
"\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" time_queries = [1, 7, 30, 0]\n",
" user_watch_time_stats = []\n",
"\n",
" group_by = 'reference_id' if grouping else 'id'\n",
"\n",
" for days in time_queries:\n",
" try:\n",
" if days > 0:\n",
" if str(user_id).isdigit():\n",
" query = 'SELECT (SUM(stopped - started) - ' \\\n",
" ' SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END)) AS total_time, ' \\\n",
" 'COUNT(DISTINCT %s) AS total_plays ' \\\n",
" 'FROM session_history ' \\\n",
" 'WHERE datetime(stopped, \"unixepoch\", \"localtime\") >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" 'AND user_id = ? ' % (group_by, days)\n",
" result = monitor_db.select(query, args=[user_id])\n",
" else:\n",
" result = []\n",
" else:\n",
" if str(user_id).isdigit():\n",
" query = 'SELECT (SUM(stopped - started) - ' \\\n",
" ' SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END)) AS total_time, ' \\\n",
" 'COUNT(DISTINCT %s) AS total_plays ' \\\n",
" 'FROM session_history ' \\\n",
" 'WHERE user_id = ? ' % group_by\n",
" result = monitor_db.select(query, args=[user_id])\n",
" else:\n",
" result = []\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_watch_time_stats: %s.\" % e)\n",
" result = []\n",
"\n",
" for item in result:\n",
" if item['total_time']:\n",
" total_time = item['total_time']\n",
" total_plays = item['total_plays']\n",
" else:\n",
" total_time = 0\n",
" total_plays = 0\n",
"\n",
" row = {'query_days': days,\n",
" 'total_time': total_time,\n",
" 'total_plays': total_plays\n",
" }\n",
"\n",
" user_watch_time_stats.append(row)\n",
"\n",
" return user_watch_time_stats\n",
"\n",
" def get_player_stats(self, user_id=None, grouping=None):\n",
" if not session.allow_session_user(user_id):\n",
" return []\n",
"\n",
" if grouping is None:\n",
" grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES\n",
"\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" player_stats = []\n",
" result_id = 0\n",
"\n",
" group_by = 'reference_id' if grouping else 'id'\n",
"\n",
" try:\n",
" if str(user_id).isdigit():\n",
" query = 'SELECT player, COUNT(DISTINCT %s) as player_count, platform ' \\\n",
" 'FROM session_history ' \\\n",
" 'WHERE user_id = ? ' \\\n",
" 'GROUP BY player ' \\\n",
" 'ORDER BY player_count DESC' % group_by\n",
" result = monitor_db.select(query, args=[user_id])\n",
" else:\n",
" result = []\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_player_stats: %s.\" % e)\n",
" result = []\n",
"\n",
" for item in result:\n",
" # Rename Mystery platform names\n",
" platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])\n",
" platform_name = next((v for k, v in common.PLATFORM_NAMES.iteritems() if k in platform.lower()), 'default')\n",
"\n",
" row = {'player_name': item['player'],\n",
" 'platform': platform,\n",
" 'platform_name': platform_name,\n",
" 'total_plays': item['player_count'],\n",
" 'result_id': result_id\n",
" }\n",
" player_stats.append(row)\n",
" result_id += 1\n",
"\n",
" return player_stats\n",
"\n",
" def get_recently_watched(self, user_id=None, limit='10'):\n",
" if not session.allow_session_user(user_id):\n",
" return []\n",
"\n",
" monitor_db = database.MonitorDatabase()\n",
" recently_watched = []\n",
"\n",
" if not limit.isdigit():\n",
" limit = '10'\n",
"\n",
" try:\n",
" if str(user_id).isdigit():\n",
" query = 'SELECT session_history.id, session_history.media_type, guid, ' \\\n",
" 'session_history.rating_key, session_history.parent_rating_key, session_history.grandparent_rating_key, ' \\\n",
" 'title, parent_title, grandparent_title, original_title, ' \\\n",
" 'thumb, parent_thumb, grandparent_thumb, media_index, parent_media_index, ' \\\n",
" 'year, originally_available_at, added_at, live, started, user ' \\\n",
" 'FROM session_history_metadata ' \\\n",
" 'JOIN session_history ON session_history_metadata.id = session_history.id ' \\\n",
" 'WHERE user_id = ? ' \\\n",
" 'GROUP BY (CASE WHEN session_history.media_type = \"track\" THEN session_history.parent_rating_key ' \\\n",
" ' ELSE session_history.rating_key END) ' \\\n",
" 'ORDER BY started DESC LIMIT ?'\n",
" result = monitor_db.select(query, args=[user_id, limit])\n",
" else:\n",
" result = []\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_recently_watched: %s.\" % e)\n",
" result = []\n",
"\n",
" for row in result:\n",
" if row['media_type'] == 'episode' and row['parent_thumb']:\n",
" thumb = row['parent_thumb']\n",
" elif row['media_type'] == 'episode':\n",
" thumb = row['grandparent_thumb']\n",
" else:\n",
" thumb = row['thumb']\n",
"\n",
" recent_output = {'row_id': row['id'],\n",
" 'media_type': row['media_type'],\n",
" 'rating_key': row['rating_key'],\n",
" 'parent_rating_key': row['parent_rating_key'],\n",
" 'grandparent_rating_key': row['grandparent_rating_key'],\n",
" 'title': row['title'],\n",
" 'parent_title': row['parent_title'],\n",
" 'grandparent_title': row['grandparent_title'],\n",
" 'original_title': row['original_title'],\n",
" 'thumb': thumb,\n",
" 'media_index': row['media_index'],\n",
" 'parent_media_index': row['parent_media_index'],\n",
" 'year': row['year'],\n",
" 'originally_available_at': row['originally_available_at'],\n",
" 'live': row['live'],\n",
" 'guid': row['guid'],\n",
" 'time': row['started'],\n",
" 'user': row['user']\n",
" }\n",
" recently_watched.append(recent_output)\n",
"\n",
" return recently_watched\n",
"\n",
" def get_users(self):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" try:\n",
" query = 'SELECT user_id, username, friendly_name, thumb, custom_avatar_url, email, ' \\\n",
" 'is_admin, is_home_user, is_allow_sync, is_restricted, ' \\\n",
" 'do_notify, keep_history, allow_guest, server_token, shared_libraries, ' \\\n",
" 'filter_all, filter_movies, filter_tv, filter_music, filter_photos ' \\\n",
" 'FROM users WHERE deleted_user = 0'\n",
" result = monitor_db.select(query=query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_users: %s.\" % e)\n",
" return None\n",
"\n",
" users = []\n",
" for item in result:\n",
" user = {'user_id': item['user_id'],\n",
" 'username': item['username'],\n",
" 'friendly_name': item['friendly_name'] or item['username'],\n",
" 'thumb': item['custom_avatar_url'] or item['thumb'],\n",
" 'email': item['email'],\n",
" 'is_admin': item['is_admin'],\n",
" 'is_home_user': item['is_home_user'],\n",
" 'is_allow_sync': item['is_allow_sync'],\n",
" 'is_restricted': item['is_restricted'],\n",
" 'do_notify': item['do_notify'],\n",
" 'keep_history': item['keep_history'],\n",
" 'allow_guest': item['allow_guest'],\n",
" 'server_token': item['server_token'],\n",
" 'shared_libraries': item['shared_libraries'],\n",
" 'filter_all': item['filter_all'],\n",
" 'filter_movies': item['filter_movies'],\n",
" 'filter_tv': item['filter_tv'],\n",
" 'filter_music': item['filter_music'],\n",
" 'filter_photos': item['filter_photos'],\n",
" }\n",
" users.append(user)\n",
"\n",
" return users\n",
"\n",
" def delete_all_history(self, user_id=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" try:\n",
" if str(user_id).isdigit():\n",
" logger.info(u\"Tautulli Users :: Deleting all history for user id %s from database.\" % user_id)\n",
" session_history_media_info_del = \\\n",
" monitor_db.action('DELETE FROM '\n",
" 'session_history_media_info '\n",
" 'WHERE session_history_media_info.id IN (SELECT session_history_media_info.id '\n",
" 'FROM session_history_media_info '\n",
" 'JOIN session_history ON session_history_media_info.id = session_history.id '\n",
" 'WHERE session_history.user_id = ?)', [user_id])\n",
" session_history_metadata_del = \\\n",
" monitor_db.action('DELETE FROM '\n",
" 'session_history_metadata '\n",
" 'WHERE session_history_metadata.id IN (SELECT session_history_metadata.id '\n",
" 'FROM session_history_metadata '\n",
" 'JOIN session_history ON session_history_metadata.id = session_history.id '\n",
" 'WHERE session_history.user_id = ?)', [user_id])\n",
" session_history_del = \\\n",
" monitor_db.action('DELETE FROM '\n",
" 'session_history '\n",
" 'WHERE session_history.user_id = ?', [user_id])\n",
"\n",
" return 'Deleted all items for user_id %s.' % user_id\n",
" else:\n",
" return 'Unable to delete items. Input user_id not valid.'\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for delete_all_history: %s.\" % e)\n",
"\n",
" def delete(self, user_id=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" try:\n",
" if str(user_id).isdigit():\n",
" self.delete_all_history(user_id)\n",
" logger.info(u\"Tautulli Users :: Deleting user with id %s from database.\" % user_id)\n",
" monitor_db.action('UPDATE users SET deleted_user = 1 WHERE user_id = ?', [user_id])\n",
" monitor_db.action('UPDATE users SET keep_history = 0 WHERE user_id = ?', [user_id])\n",
" monitor_db.action('UPDATE users SET do_notify = 0 WHERE user_id = ?', [user_id])\n",
"\n",
" return 'Deleted user with id %s.' % user_id\n",
" else:\n",
" return 'Unable to delete user, user_id not valid.'\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for delete: %s.\" % e)\n",
"\n",
" def undelete(self, user_id=None, username=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" try:\n",
" if user_id and str(user_id).isdigit():\n",
" query = 'SELECT * FROM users WHERE user_id = ?'\n",
" result = monitor_db.select(query=query, args=[user_id])\n",
" if result:\n",
" logger.info(u\"Tautulli Users :: Re-adding user with id %s to database.\" % user_id)\n",
" monitor_db.action('UPDATE users SET deleted_user = 0 WHERE user_id = ?', [user_id])\n",
" monitor_db.action('UPDATE users SET keep_history = 1 WHERE user_id = ?', [user_id])\n",
" monitor_db.action('UPDATE users SET do_notify = 1 WHERE user_id = ?', [user_id])\n",
" return True\n",
" else:\n",
" return False\n",
"\n",
" elif username:\n",
" query = 'SELECT * FROM users WHERE username = ?'\n",
" result = monitor_db.select(query=query, args=[username])\n",
" if result:\n",
" logger.info(u\"Tautulli Users :: Re-adding user with username %s to database.\" % username)\n",
" monitor_db.action('UPDATE users SET deleted_user = 0 WHERE username = ?', [username])\n",
" monitor_db.action('UPDATE users SET keep_history = 1 WHERE username = ?', [username])\n",
" monitor_db.action('UPDATE users SET do_notify = 1 WHERE username = ?', [username])\n",
" return True\n",
" else:\n",
" return False\n",
"\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for undelete: %s.\" % e)\n",
"\n",
" # Keep method for PlexWatch/Plexivity import\n",
" def get_user_id(self, user=None):\n",
" if user:\n",
" try:\n",
" monitor_db = database.MonitorDatabase()\n",
" query = 'SELECT user_id FROM users WHERE username = ?'\n",
" result = monitor_db.select_single(query, args=[user])\n",
" if result:\n",
" return result['user_id']\n",
" else:\n",
" return None\n",
" except:\n",
" return None\n",
"\n",
" return None\n",
"\n",
" def get_user_names(self, kwargs=None):\n",
" monitor_db = database.MonitorDatabase()\n",
" \n",
" user_cond = ''\n",
" if session.get_session_user_id():\n",
" user_cond = 'AND user_id = %s ' % session.get_session_user_id()\n",
"\n",
" try:\n",
" query = 'SELECT user_id, ' \\\n",
" '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = \"\" \\\n",
" THEN users.username ELSE users.friendly_name END) AS friendly_name ' \\\n",
" 'FROM users ' \\\n",
" 'WHERE deleted_user = 0 %s' % user_cond\n",
"\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_user_names: %s.\" % e)\n",
" return None\n",
" \n",
" return session.friendly_name_to_username(result)\n",
" \n",
" def get_tokens(self, user_id=None):\n",
" if user_id:\n",
" try:\n",
" monitor_db = database.MonitorDatabase()\n",
" query = 'SELECT allow_guest, user_token, server_token FROM users ' \\\n",
" 'WHERE user_id = ? AND deleted_user = 0'\n",
" result = monitor_db.select_single(query, args=[user_id])\n",
" if result:\n",
" tokens = {'allow_guest': result['allow_guest'],\n",
" 'user_token': result['user_token'],\n",
" 'server_token': result['server_token']\n",
" }\n",
" return tokens\n",
" else:\n",
" return None\n",
" except:\n",
" return None\n",
"\n",
" return None\n",
"\n",
" def get_filters(self, user_id=None):\n",
" import urlparse\n",
"\n",
" if not user_id:\n",
" return {}\n",
"\n",
" try:\n",
" monitor_db = database.MonitorDatabase()\n",
" query = 'SELECT filter_all, filter_movies, filter_tv, filter_music, filter_photos FROM users ' \\\n",
" 'WHERE user_id = ?'\n",
" result = monitor_db.select_single(query, args=[user_id])\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_filters: %s.\" % e)\n",
" result = {}\n",
"\n",
" filters_list = {}\n",
" for k, v in result.iteritems():\n",
" filters = {}\n",
" \n",
" for f in v.split('|'):\n",
" if 'contentRating=' in f or 'label=' in f:\n",
" filters.update(dict(urlparse.parse_qsl(f)))\n",
" \n",
" filters['content_rating'] = tuple(f for f in filters.pop('contentRating', '').split(',') if f)\n",
" filters['labels'] = tuple(f for f in filters.pop('label', '').split(',') if f)\n",
"\n",
" filters_list[k] = filters\n",
"\n",
" return filters_list\n",
"\n",
" def set_user_login(self, user_id=None, user=None, user_group=None, ip_address=None, host=None, user_agent=None, success=0):\n",
"\n",
" if user_id is None or str(user_id).isdigit():\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" keys = {'timestamp': int(time.time()),\n",
" 'user_id': user_id}\n",
"\n",
" values = {'user': user,\n",
" 'user_group': user_group,\n",
" 'ip_address': ip_address,\n",
" 'host': host,\n",
" 'user_agent': user_agent,\n",
" 'success': success}\n",
"\n",
" try:\n",
" monitor_db.upsert(table_name='user_login', key_dict=keys, value_dict=values)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for set_login_log: %s.\" % e)\n",
"\n",
" def get_datatables_user_login(self, user_id=None, kwargs=None):\n",
" default_return = {'recordsFiltered': 0,\n",
" 'recordsTotal': 0,\n",
" 'draw': 0,\n",
" 'data': 'null',\n",
" 'error': 'Unable to execute database query.'}\n",
"\n",
" if not session.allow_session_user(user_id):\n",
" return default_return\n",
"\n",
" data_tables = datatables.DataTables()\n",
"\n",
" if session.get_session_user_id():\n",
" custom_where = [['user_login.user_id', session.get_session_user_id()]]\n",
" else:\n",
" custom_where = [['user_login.user_id', user_id]] if user_id else []\n",
"\n",
" columns = ['user_login.timestamp',\n",
" 'user_login.user_id',\n",
" 'user_login.user',\n",
" 'user_login.user_group',\n",
" 'user_login.ip_address',\n",
" 'user_login.host',\n",
" 'user_login.user_agent',\n",
" 'user_login.success',\n",
" '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = \"\" \\\n",
" THEN users.username ELSE users.friendly_name END) AS friendly_name'\n",
" ]\n",
"\n",
" try:\n",
" query = data_tables.ssp_query(table_name='user_login',\n",
" columns=columns,\n",
" custom_where=custom_where,\n",
" group_by=[],\n",
" join_types=['LEFT OUTER JOIN'],\n",
" join_tables=['users'],\n",
" join_evals=[['user_login.user_id', 'users.user_id']],\n",
" kwargs=kwargs)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for get_datatables_user_login: %s.\" % e)\n",
" return default_return\n",
"\n",
" results = query['result']\n",
"\n",
" rows = []\n",
" for item in results:\n",
" (os, browser) = httpagentparser.simple_detect(item['user_agent'])\n",
"\n",
" row = {'timestamp': item['timestamp'],\n",
" 'user_id': item['user_id'],\n",
" 'user_group': item['user_group'],\n",
" 'ip_address': item['ip_address'],\n",
" 'host': item['host'],\n",
" 'user_agent': item['user_agent'],\n",
" 'os': os,\n",
" 'browser': browser,\n",
" 'success': item['success'],\n",
" 'friendly_name': item['friendly_name'] or item['user']\n",
" }\n",
"\n",
" rows.append(row)\n",
"\n",
" dict = {'recordsFiltered': query['filteredCount'],\n",
" 'recordsTotal': query['totalCount'],\n",
" 'data': session.friendly_name_to_username(rows),\n",
" 'draw': query['draw']\n",
" }\n",
"\n",
" return dict\n",
"\n",
" def delete_login_log(self):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" try:\n",
" logger.info(u\"Tautulli Users :: Clearing login logs from database.\")\n",
" monitor_db.action('DELETE FROM user_login')\n",
" monitor_db.action('VACUUM')\n",
" return True\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli Users :: Unable to execute database query for delete_login_log: %s.\" % e)\n",
" return False"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0.010526315789473684,
0.011235955056179775,
0,
0,
0,
0.008264462809917356,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.011627906976744186,
0.01,
0.009174311926605505,
0.008928571428571428,
0,
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.011627906976744186,
0.01,
0.009174311926605505,
0.008928571428571428,
0,
0,
0.009345794392523364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008403361344537815,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007692307692307693,
0.007518796992481203,
0,
0,
0,
0,
0,
0.007692307692307693,
0.007518796992481203,
0,
0,
0,
0,
0,
0.007692307692307693,
0.007518796992481203,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008547008547008548,
0,
0.00909090909090909,
0,
0,
0,
0,
0,
0,
0,
0,
0.008695652173913044,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008,
0,
0,
0.007633587786259542,
0,
0,
0,
0,
0,
0,
0,
0.008,
0,
0,
0,
0,
0,
0,
0,
0.008547008547008548,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0,
0,
0,
0,
0.010638297872340425,
0.008333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0.007575757575757576,
0.011764705882352941,
0.00980392156862745,
0.011111111111111112,
0,
0.00980392156862745,
0,
0.008,
0,
0,
0,
0,
0,
0,
0.008849557522123894,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0.010526315789473684,
0.01098901098901099,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0.00847457627118644,
0,
0.008620689655172414,
0.011494252873563218,
0,
0,
0,
0.008771929824561403,
0,
0.008771929824561403,
0.011494252873563218,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0,
0,
0,
0,
0.01,
0.01,
0.01,
0.010309278350515464,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0.009615384615384616,
0.009615384615384616,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0.00909090909090909,
0.009433962264150943,
0.009433962264150943,
0.009708737864077669,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0,
0.1111111111111111,
0,
0.2,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0,
0,
0,
0.009615384615384616,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0.04,
0.009345794392523364,
0.01098901098901099,
0,
0,
0,
0,
0,
0.0078125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0.00909090909090909,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0.009174311926605505,
0.041666666666666664
] | 906 | 0.001865 | false |
"""
examples that show how to obtain information about an individual event
"""
from eventregistry import *
er = EventRegistry()
#
# NOTE: if you don't have access to historical data, you have to change the event URI
# to some recent event that you can access in order to run the example
#
eventUri = "eng-2940883"
# iterate over all articles that belong to a particular event with a given URI
iter = QueryEventArticlesIter(eventUri)
for art in iter.execQuery(er):
print(art)
# iterate over the articles in the event, but only return those that mention Obama in the title
iter = QueryEventArticlesIter(eventUri,
keywords = "Obama",
keywordsLoc = "title")
for art in iter.execQuery(er):
print(art)
# iterate over the articles in the event, but only select those that are in English or German language
iter = QueryEventArticlesIter(eventUri,
lang = ["eng", "deu"])
for art in iter.execQuery(er):
print(art)
# iterate over the articles in the event, but only select those that are from sources located in United States
iter = QueryEventArticlesIter(eventUri,
sourceLocationUri = er.getLocationUri("United States"))
for art in iter.execQuery(er):
print(art)
# get event info (title, summary, concepts, location, date, ...) about event with a particular uri
q = QueryEvent(eventUri)
q.setRequestedResult(RequestEventInfo(
returnInfo = ReturnInfo(conceptInfo = ConceptInfoFlags(lang = ["eng", "spa", "slv"]))))
eventRes = er.execQuery(q)
# get list of 10 articles about the event
q.setRequestedResult(RequestEventArticles(page = 1, count = 10)) # get 10 articles about the event (any language is ok) that are closest to the center of the event
eventRes = er.execQuery(q)
#
# OTHER AGGREGATES ABOUT THE EVENT
#
# get information about how reporting about the event was trending over time
q.setRequestedResult(RequestEventArticleTrend())
eventRes = er.execQuery(q)
# get the tag cloud of top words for the event
q.setRequestedResult(RequestEventKeywordAggr())
eventRes = er.execQuery(q)
# get information about the news sources that reported about the event
q.setRequestedResult(RequestEventSourceAggr())
eventRes = er.execQuery(q)
# return the
q.setRequestedResult(RequestEventSimilarEvents(
conceptInfoList=[
{"uri": er.getConceptUri("Trump"), "wgt": 100},
{"uri": er.getConceptUri("Obama"), "wgt": 100},
{"uri": er.getConceptUri("Richard Nixon"), "wgt": 30},
{"uri": er.getConceptUri("republican party"), "wgt": 30},
{"uri": er.getConceptUri("democrat party"), "wgt": 30}
]
))
eventRes = er.execQuery(q)
| [
"\"\"\"\n",
"examples that show how to obtain information about an individual event\n",
"\"\"\"\n",
"from eventregistry import *\n",
"\n",
"er = EventRegistry()\n",
"\n",
"#\n",
"# NOTE: if you don't have access to historical data, you have to change the event URI\n",
"# to some recent event that you can access in order to run the example\n",
"#\n",
"\n",
"eventUri = \"eng-2940883\"\n",
"\n",
"# iterate over all articles that belong to a particular event with a given URI\n",
"iter = QueryEventArticlesIter(eventUri)\n",
"for art in iter.execQuery(er):\n",
" print(art)\n",
"\n",
"# iterate over the articles in the event, but only return those that mention Obama in the title\n",
"iter = QueryEventArticlesIter(eventUri,\n",
" keywords = \"Obama\",\n",
" keywordsLoc = \"title\")\n",
"for art in iter.execQuery(er):\n",
" print(art)\n",
"\n",
"\n",
"# iterate over the articles in the event, but only select those that are in English or German language\n",
"iter = QueryEventArticlesIter(eventUri,\n",
" lang = [\"eng\", \"deu\"])\n",
"for art in iter.execQuery(er):\n",
" print(art)\n",
"\n",
"\n",
"# iterate over the articles in the event, but only select those that are from sources located in United States\n",
"iter = QueryEventArticlesIter(eventUri,\n",
" sourceLocationUri = er.getLocationUri(\"United States\"))\n",
"for art in iter.execQuery(er):\n",
" print(art)\n",
"\n",
"\n",
"# get event info (title, summary, concepts, location, date, ...) about event with a particular uri\n",
"q = QueryEvent(eventUri)\n",
"q.setRequestedResult(RequestEventInfo(\n",
" returnInfo = ReturnInfo(conceptInfo = ConceptInfoFlags(lang = [\"eng\", \"spa\", \"slv\"]))))\n",
"eventRes = er.execQuery(q)\n",
"\n",
"# get list of 10 articles about the event\n",
"q.setRequestedResult(RequestEventArticles(page = 1, count = 10)) # get 10 articles about the event (any language is ok) that are closest to the center of the event\n",
"eventRes = er.execQuery(q)\n",
"\n",
"#\n",
"# OTHER AGGREGATES ABOUT THE EVENT\n",
"#\n",
"\n",
"# get information about how reporting about the event was trending over time\n",
"q.setRequestedResult(RequestEventArticleTrend())\n",
"eventRes = er.execQuery(q)\n",
"\n",
"# get the tag cloud of top words for the event\n",
"q.setRequestedResult(RequestEventKeywordAggr())\n",
"eventRes = er.execQuery(q)\n",
"\n",
"# get information about the news sources that reported about the event\n",
"q.setRequestedResult(RequestEventSourceAggr())\n",
"eventRes = er.execQuery(q)\n",
"\n",
"# return the\n",
"q.setRequestedResult(RequestEventSimilarEvents(\n",
" conceptInfoList=[\n",
" {\"uri\": er.getConceptUri(\"Trump\"), \"wgt\": 100},\n",
" {\"uri\": er.getConceptUri(\"Obama\"), \"wgt\": 100},\n",
" {\"uri\": er.getConceptUri(\"Richard Nixon\"), \"wgt\": 30},\n",
" {\"uri\": er.getConceptUri(\"republican party\"), \"wgt\": 30},\n",
" {\"uri\": er.getConceptUri(\"democrat party\"), \"wgt\": 30}\n",
" ]\n",
"))\n",
"eventRes = er.execQuery(q)\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0.125,
0.1111111111111111,
0,
0,
0,
0,
0.009708737864077669,
0,
0.1111111111111111,
0,
0,
0,
0,
0.009009009009009009,
0,
0.05,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0.07608695652173914,
0,
0,
0,
0.029239766081871343,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1
] | 79 | 0.019663 | false |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from time import time
from wsgiref.handlers import format_date_time
from .._error import (
_validate_not_none,
_ERROR_START_END_NEEDED_FOR_MD5,
_ERROR_RANGE_TOO_LARGE_FOR_MD5,
)
from .._common_conversion import _str
def _get_path(share_name=None, directory_name=None, file_name=None):
'''
Creates the path to access a file resource.
share_name:
Name of share.
directory_name:
The path to the directory.
file_name:
Name of file.
'''
if share_name and directory_name and file_name:
return '/{0}/{1}/{2}'.format(
_str(share_name),
_str(directory_name),
_str(file_name))
elif share_name and directory_name:
return '/{0}/{1}'.format(
_str(share_name),
_str(directory_name))
elif share_name and file_name:
return '/{0}/{1}'.format(
_str(share_name),
_str(file_name))
elif share_name:
return '/{0}'.format(_str(share_name))
else:
return '/'
def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, end_range_required=True, check_content_md5=False):
# If end range is provided, start range must be provided
if start_range_required == True or end_range is not None:
_validate_not_none('start_range', start_range)
if end_range_required == True:
_validate_not_none('end_range', end_range)
# Format based on whether end_range is present
request.headers = request.headers or {}
if end_range is not None:
request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
elif start_range is not None:
request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range)
# Content MD5 can only be provided for a complete range less than 4MB in size
if check_content_md5 == True:
if start_range is None or end_range is None:
raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
if end_range - start_range > 4 * 1024 * 1024:
raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)
request.headers['x-ms-range-get-content-md5'] = 'true' | [
"#-------------------------------------------------------------------------\r\n",
"# Copyright (c) Microsoft. All rights reserved.\r\n",
"#\r\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n",
"# you may not use this file except in compliance with the License.\r\n",
"# You may obtain a copy of the License at\r\n",
"# http://www.apache.org/licenses/LICENSE-2.0\r\n",
"#\r\n",
"# Unless required by applicable law or agreed to in writing, software\r\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n",
"# See the License for the specific language governing permissions and\r\n",
"# limitations under the License.\r\n",
"#--------------------------------------------------------------------------\r\n",
"from time import time\r\n",
"from wsgiref.handlers import format_date_time\r\n",
"from .._error import (\r\n",
" _validate_not_none,\r\n",
" _ERROR_START_END_NEEDED_FOR_MD5,\r\n",
" _ERROR_RANGE_TOO_LARGE_FOR_MD5,\r\n",
")\r\n",
"from .._common_conversion import _str\r\n",
"\r\n",
"def _get_path(share_name=None, directory_name=None, file_name=None):\r\n",
" '''\r\n",
" Creates the path to access a file resource.\r\n",
"\r\n",
" share_name:\r\n",
" Name of share.\r\n",
" directory_name:\r\n",
" The path to the directory.\r\n",
" file_name:\r\n",
" Name of file.\r\n",
" '''\r\n",
" if share_name and directory_name and file_name:\r\n",
" return '/{0}/{1}/{2}'.format(\r\n",
" _str(share_name),\r\n",
" _str(directory_name),\r\n",
" _str(file_name))\r\n",
" elif share_name and directory_name:\r\n",
" return '/{0}/{1}'.format(\r\n",
" _str(share_name),\r\n",
" _str(directory_name))\r\n",
" elif share_name and file_name:\r\n",
" return '/{0}/{1}'.format(\r\n",
" _str(share_name),\r\n",
" _str(file_name))\r\n",
" elif share_name:\r\n",
" return '/{0}'.format(_str(share_name))\r\n",
" else:\r\n",
" return '/'\r\n",
"\r\n",
"def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, end_range_required=True, check_content_md5=False):\r\n",
" # If end range is provided, start range must be provided\r\n",
" if start_range_required == True or end_range is not None:\r\n",
" _validate_not_none('start_range', start_range)\r\n",
" if end_range_required == True:\r\n",
" _validate_not_none('end_range', end_range)\r\n",
"\r\n",
" # Format based on whether end_range is present\r\n",
" request.headers = request.headers or {}\r\n",
" if end_range is not None:\r\n",
" request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)\r\n",
" elif start_range is not None:\r\n",
" request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range)\r\n",
"\r\n",
" # Content MD5 can only be provided for a complete range less than 4MB in size\r\n",
" if check_content_md5 == True:\r\n",
" if start_range is None or end_range is None:\r\n",
" raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)\r\n",
" if end_range - start_range > 4 * 1024 * 1024:\r\n",
" raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)\r\n",
"\r\n",
" request.headers['x-ms-range-get-content-md5'] = 'true'"
] | [
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013245033112582781,
0,
0.015873015873015872,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0.012048192771084338,
0.02857142857142857,
0,
0,
0,
0,
0,
0.016129032258064516
] | 74 | 0.002236 | false |
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
from intern.service.boss.httperrorlist import HTTPErrorList
import numpy
import random
import requests
from requests import Session, HTTPError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import unittest
import time
API_VER = 'v1'
class VolumeServiceTest_v1(unittest.TestCase):
"""Integration tests of the Boss volume service API.
Because setup and teardown involves many REST calls, tests are only
divided into tests of the different types of data model resources. All
operations are performed within a single test of each resource.
"""
    @classmethod
    def setUpClass(cls):
        """Do an initial DB clean up in case something went wrong the last time.

        If a test failed really badly, the DB might be in a bad state despite
        attempts to clean up during tearDown().

        Builds the full data-model hierarchy used by every test:
        collection -> coordinate frame -> experiment -> channels.
        Creation order matters: parents must exist before children.
        """
        cls.rmt = BossRemote('test.cfg', API_VER)

        # Turn off SSL cert verification.  This is necessary for interacting with
        # developer instances of the Boss.
        cls.rmt.project_service.session_send_opts = {'verify': False}
        cls.rmt.metadata_service.session_send_opts = {'verify': False}
        cls.rmt.volume_service.session_send_opts = {'verify': False}
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

        # Random suffixes keep collection/frame names unique across runs so a
        # previous run's leftovers don't collide with this one.
        coll_name = 'collection2323{}'.format(random.randint(0, 9999))
        cls.coll = CollectionResource(coll_name, 'bar')

        cf_name = 'BestFrame{}'.format(random.randint(0, 9999))
        cls.coord = CoordinateFrameResource(
            cf_name, 'Test coordinate frame.', 0, 2048, 0, 2048, 0, 100,
            1, 1, 1, 'nanometers', 0, 'nanoseconds')

        # cls.exp.coord_frame must be set with valid id before creating.
        cls.exp = ExperimentResource(
            'exp2323x2', cls.coll.name, cls.coord.name, 'my experiment',
            1, 'isotropic', 10)

        # 8-bit image channel used as the source for the annotation channels.
        cls.chan = ChannelResource(
            'myVolChan', cls.coll.name, cls.exp.name, 'image', 'test channel',
            0, 'uint8', 0)

        cls.chan16 = ChannelResource(
            'myVol16bitChan', cls.coll.name, cls.exp.name, 'image',
            '16 bit test channel', 0, 'uint16', 0)

        cls.ann_chan = ChannelResource(
            'annVolChan2', cls.coll.name, cls.exp.name, 'annotation',
            'annotation test channel', 0, 'uint64', 0, sources=[cls.chan.name])

        # This channel reserved for testing get_ids_in_region().  This is a
        # separate channel so we don't have to worry about ids written by
        # other tests.
        cls.ann_region_chan = ChannelResource(
            'annRegionChan2', cls.coll.name, cls.exp.name, 'annotation',
            'annotation ids in region test channel', 0, 'uint64', 0,
            sources=[cls.chan.name])

        # This channel reserved for testing tight bounding boxes.
        cls.ann_bounding_chan = ChannelResource(
            'annRegionChan3', cls.coll.name, cls.exp.name, 'annotation',
            'annotation ids in bounding box test channel', 0, 'uint64', 0,
            sources=[cls.chan.name])

        # Create parents before children (collection -> frame -> experiment
        # -> channels); channels with sources= depend on cls.chan existing.
        cls.rmt.create_project(cls.coll)
        cls.rmt.create_project(cls.coord)
        cls.rmt.create_project(cls.exp)
        cls.rmt.create_project(cls.chan16)
        cls.rmt.create_project(cls.chan)
        cls.rmt.create_project(cls.ann_chan)
        cls.rmt.create_project(cls.ann_region_chan)
        cls.rmt.create_project(cls.ann_bounding_chan)
@classmethod
def tearDownClass(cls):
"""Clean up the data model objects used by this test case.
This method is used by both tearDownClass() and setUpClass().
"""
try:
cls.rmt.delete_project(cls.ann_bounding_chan)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.ann_region_chan)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.ann_chan)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.chan16)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.chan)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.exp)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.coord)
except HTTPError:
pass
try:
cls.rmt.delete_project(cls.coll)
except HTTPError:
pass
    def setUp(self):
        """Create a fresh remote for each test.

        Unlike setUpClass(), no explicit API version is passed here, so the
        remote uses whatever version 'test.cfg' / the library default selects.
        """
        self.rmt = BossRemote('test.cfg')
    def tearDown(self):
        """No per-test cleanup; all resources are removed in tearDownClass()."""
        pass
def test_reserve_ids(self):
first_id = self.rmt.reserve_ids(self.ann_chan, 20)
self.assertTrue(first_id > 0)
def test_get_bounding_box_id_doesnt_exist(self):
resolution = 0
id = 12345678
with self.assertRaises(HTTPError) as err:
self.rmt.get_bounding_box(self.ann_chan, resolution, id, 'loose')
expected_msg_prefix = 'Reserve ids failed'
self.assertTrue(err.message.startwswith(expected_msg_prefix))
@unittest.skip('Skipping - currently indexing disabled')
def test_get_bounding_box_spans_cuboids_in_x(self):
x_rng = [511, 515]
y_rng = [0, 8]
z_rng = [0, 5]
t_rng = [0, 1]
id = 77555
data = numpy.zeros((5, 8, 4), dtype='uint64')
data[1][0][0] = id
data[2][1][1] = id
data[3][2][3] = id
resolution = 0
self.rmt.create_cutout(
self.ann_chan, resolution, x_rng, y_rng, z_rng, data)
# Get cutout to make sure data is done writing and indices updated.
actual = self.rmt.get_cutout(self.ann_chan, resolution, x_rng, y_rng, z_rng)
numpy.testing.assert_array_equal(data, actual)
expected = {
'x_range': [0, 1024],
'y_range': [0, 512],
'z_range': [0, 16],
't_range': [0, 1]
}
actual = self.rmt.get_bounding_box(self.ann_chan, resolution, id, 'loose')
self.assertEqual(expected, actual)
@unittest.skip('Skipping - currently indexing disabled')
def test_get_bounding_box_spans_cuboids_in_y(self):
x_rng = [0, 8]
y_rng = [511, 515]
z_rng = [0, 5]
t_rng = [0, 1]
id = 77666
data = numpy.zeros((5, 4, 8), dtype='uint64')
data[1][0][0] = id
data[2][1][0] = id
data[3][2][0] = id
resolution = 0
self.rmt.create_cutout(
self.ann_chan, resolution, x_rng, y_rng, z_rng, data)
# Get cutout to make sure data is done writing and indices updated.
actual = self.rmt.get_cutout(self.ann_chan, resolution, x_rng, y_rng, z_rng)
numpy.testing.assert_array_equal(data, actual)
expected = {
'x_range': [0, 512],
'y_range': [0, 1024],
'z_range': [0, 16],
't_range': [0, 1]
}
actual = self.rmt.get_bounding_box(self.ann_chan, resolution, id, 'loose')
self.assertEqual(expected, actual)
@unittest.skip('Skipping - currently indexing disabled')
def test_get_bounding_box_spans_cuboids_in_z(self):
x_rng = [0, 8]
y_rng = [0, 4]
z_rng = [30, 35]
t_rng = [0, 1]
id = 77888
data = numpy.zeros((5, 4, 8), dtype='uint64')
data[1][0][0] = id
data[2][1][0] = id
data[3][2][0] = id
resolution = 0
self.rmt.create_cutout(
self.ann_chan, resolution, x_rng, y_rng, z_rng, data)
# Get cutout to make sure data is done writing and indices updated.
actual = self.rmt.get_cutout(self.ann_chan, resolution, x_rng, y_rng, z_rng)
numpy.testing.assert_array_equal(data, actual)
expected = {
'x_range': [0, 512],
'y_range': [0, 512],
'z_range': [16, 48],
't_range': [0, 1]
}
actual = self.rmt.get_bounding_box(self.ann_chan, resolution, id, 'loose')
self.assertEqual(expected, actual)
@unittest.skip('Skipping - currently indexing disabled')
def test_tight_bounding_box_x_axis(self):
"""Test tight bounding box with ids that span three cuboids along the x axis."""
resolution = 0
x_rng = [511, 1025]
y_rng = [512, 1024]
z_rng = [16, 32]
t_rng = [0, 1]
data = numpy.zeros((16, 512, 514), dtype='uint64')
x_id = 123
y_id = 127
z_id = 500000000000000000
# Id in partial region on x axis closest to origin.
data[1][1][0] = x_id
# Id in partial region on x axis furthest from origin.
data[1][1][513] = x_id
# Id in cuboid aligned region.
data[2][2][21] = x_id
data[2][1][22] = y_id
data[4][24][72] = z_id
expected = {'x_range': [511, 1025], 'y_range': [513, 515], 'z_range': [17, 19]}
self.rmt.create_cutout(
self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng, data)
# Get cutout to make sure data is done writing and indices updated.
actual_data = self.rmt.get_cutout(
self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng)
numpy.testing.assert_array_equal(data, actual_data)
# Method under test.
actual = self.rmt.get_bounding_box(
self.ann_bounding_chan, resolution, x_id, bb_type='tight')
@unittest.skip('Skipping - currently indexing disabled')
def test_tight_bounding_box_y_axis(self):
"""Test tight bounding box with ids that span three cuboids along the x axis."""
resolution = 0
x_rng = [512, 1024]
y_rng = [511, 1025]
z_rng = [16, 32]
t_rng = [0, 1]
data = numpy.zeros((16, 514, 512), dtype='uint64')
x_id = 123
y_id = 127
z_id = 500000000000000000
# Id in partial region on y axis closest to origin.
data[1][0][10] = y_id
# Id in partial region on y axis furthest from origin.
data[1][513][13] = y_id
# Id in cuboid aligned region.
data[2][2][21] = y_id
data[2][3][20] = x_id
data[4][25][71] = z_id
expected = {'x_range': [522, 526], 'y_range': [511, 1025], 'z_range': [17, 19]}
self.rmt.create_cutout(
self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng, data)
# Get cutout to make sure data is done writing and indices updated.
actual_data = self.rmt.get_cutout(
self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng)
numpy.testing.assert_array_equal(data, actual_data)
# Method under test.
actual = self.rmt.get_bounding_box(
self.ann_bounding_chan, resolution, y_id, bb_type='tight')
@unittest.skip('Skipping - currently indexing disabled')
def test_tight_bounding_box_z_axis(self):
    """Test tight bounding box with ids that span three cuboids along the z axis."""
    resolution = 0
    x_rng = [512, 1024]
    y_rng = [512, 1024]
    z_rng = [15, 33]
    t_rng = [0, 1]
    # Volume is (z, y, x); z extent is 18 because z_rng is 2 slices wider
    # than a cuboid-aligned span.
    data = numpy.zeros((18, 512, 512), dtype='uint64')
    x_id = 123
    y_id = 127
    z_id = 500000000000000000
    # Id in partial region on z axis closest to origin.
    data[0][22][60] = z_id
    # Id in partial region on z axis furthest from origin.
    data[17][23][63] = z_id
    # Id in cuboid aligned region.
    data[5][24][71] = z_id
    data[3][2][20] = x_id
    data[3][1][21] = y_id
    expected = {'x_range': [572, 583], 'y_range': [534, 537], 'z_range': [15, 33]}
    self.rmt.create_cutout(
        self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng, data)
    # Get cutout to make sure data is done writing and indices updated.
    actual_data = self.rmt.get_cutout(
        self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng)
    numpy.testing.assert_array_equal(data, actual_data)
    # Method under test.
    # NOTE(review): `expected` is never compared against `actual`, so the
    # bounding-box result is currently unverified — confirm whether an
    # assertEqual was intended here before re-enabling this test.
    actual = self.rmt.get_bounding_box(
        self.ann_bounding_chan, resolution, z_id, bb_type='tight')
def test_get_ids_in_region_none(self):
    """Run on region that hasn't been written with ids, yet.

    An untouched region of the ids-in-region channel should read back as
    zeros and contain no annotation ids.
    """
    resolution = 0
    x_rng = [1536, 1540]
    y_rng = [1536, 1540]
    z_rng = [48, 56]

    data = numpy.zeros((8, 4, 4), dtype='uint64')
    expected = []

    # Get cutout to make sure data is done writing and indices updated.
    # Bug fix: this sanity read previously targeted self.ann_bounding_chan
    # even though the id query below runs against self.ann_region_chan.
    actual_data = self.rmt.get_cutout(
        self.ann_region_chan, resolution, x_rng, y_rng, z_rng)
    numpy.testing.assert_array_equal(data, actual_data)

    # Method under test.
    actual = self.rmt.get_ids_in_region(
        self.ann_region_chan, resolution, x_rng, y_rng, z_rng)
    self.assertEqual(expected, actual)
def test_filtered_cutout(self):
    """Test filtered cutout using same data written for get_ids_in_region_x_axis."""
    resolution = 0
    rx, ry, rz = [511, 1025], [512, 1024], [16, 32]

    seeded = numpy.zeros((16, 512, 514), dtype='uint64')
    # Id in partial region on x axis closest to origin.
    seeded[1, 1, 0] = 123
    # Id in partial region on x axis furthest from origin.
    seeded[1, 1, 513] = 321
    # Id in cuboid aligned region.
    seeded[10, 20, 21] = 55555

    self.rmt.create_cutout(
        self.ann_region_chan, resolution, rx, ry, rz, seeded)

    # Re-read the cutout so we know the write finished and indices updated.
    written = self.rmt.get_cutout(
        self.ann_region_chan, resolution, rx, ry, rz)
    numpy.testing.assert_array_equal(seeded, written)

    # Filtering on every id present must return the data unchanged.
    everything = self.rmt.get_cutout(
        self.ann_region_chan, resolution, rx, ry, rz,
        id_list=[123, 321, 55555])
    numpy.testing.assert_array_equal(seeded, everything)

    # Filter on id 123.
    only_123 = numpy.zeros((16, 512, 514), dtype='uint64')
    only_123[1, 1, 0] = 123
    got_123 = self.rmt.get_cutout(
        self.ann_region_chan, resolution, rx, ry, rz, id_list=[123])
    numpy.testing.assert_array_equal(only_123, got_123)

    # Filter on id 321.
    only_321 = numpy.zeros((16, 512, 514), dtype='uint64')
    only_321[1, 1, 513] = 321
    got_321 = self.rmt.get_cutout(
        self.ann_region_chan, resolution, rx, ry, rz, id_list=[321])
    numpy.testing.assert_array_equal(only_321, got_321)

    # Filter on ids 123 and 55555 together.
    pair = numpy.zeros((16, 512, 514), dtype='uint64')
    pair[1, 1, 0] = 123
    pair[10, 20, 21] = 55555
    got_pair = self.rmt.get_cutout(
        self.ann_region_chan, resolution, rx, ry, rz,
        id_list=[123, 55555])
    numpy.testing.assert_array_equal(pair, got_pair)
@unittest.skip('Skipping - currently indexing disabled')
def test_get_ids_in_region_x_axis(self):
    """Test using a region that's cuboid aligned except for the x axis."""
    resolution = 0
    rx, ry, rz = [511, 1025], [512, 1024], [16, 32]

    seeded = numpy.zeros((16, 512, 514), dtype='uint64')
    # Id in partial region on x axis closest to origin.
    seeded[1, 1, 0] = 123
    # Id in partial region on x axis furthest from origin.
    seeded[1, 1, 513] = 321
    # Id in cuboid aligned region.
    seeded[10, 20, 21] = 55555

    self.rmt.create_cutout(
        self.ann_region_chan, resolution, rx, ry, rz, seeded)

    # Re-read the cutout so we know the write finished and indices updated.
    written = self.rmt.get_cutout(
        self.ann_region_chan, resolution, rx, ry, rz)
    numpy.testing.assert_array_equal(seeded, written)

    # Method under test.
    found = self.rmt.get_ids_in_region(
        self.ann_region_chan, resolution, rx, ry, rz)
    self.assertEqual([123, 321, 55555], found)
@unittest.skip('Skipping - currently indexing disabled')
def test_get_ids_in_region_y_axis(self):
    """Test using a region that's cuboid aligned except for the y axis."""
    resolution = 0
    rx, ry, rz = [512, 1024], [511, 1025], [16, 32]

    seeded = numpy.zeros((16, 514, 512), dtype='uint64')
    # Id in partial region on y axis closest to origin.
    seeded[1, 0, 1] = 456
    # Id in partial region on y axis furthest from origin.
    seeded[1, 513, 1] = 654
    # Id in cuboid aligned region.
    seeded[10, 21, 20] = 55555

    self.rmt.create_cutout(
        self.ann_region_chan, resolution, rx, ry, rz, seeded)

    # Re-read the cutout so we know the write finished and indices updated.
    written = self.rmt.get_cutout(
        self.ann_region_chan, resolution, rx, ry, rz)
    numpy.testing.assert_array_equal(seeded, written)

    # Method under test.
    found = self.rmt.get_ids_in_region(
        self.ann_region_chan, resolution, rx, ry, rz)
    self.assertEqual([456, 654, 55555], found)
@unittest.skip('Skipping - currently indexing disabled')
def test_get_ids_in_region_z_axis(self):
    """Test using a region that's cuboid aligned except for the z axis."""
    resolution = 0
    rx, ry, rz = [512, 1024], [512, 1024], [15, 33]

    seeded = numpy.zeros((18, 512, 512), dtype='uint64')
    # Id in partial region on z axis closest to origin.
    seeded[0, 1, 1] = 789
    # Id in partial region on z axis furthest from origin.
    seeded[17, 1, 1] = 987
    # Id in cuboid aligned region.
    seeded[11, 20, 20] = 55555

    self.rmt.create_cutout(
        self.ann_region_chan, resolution, rx, ry, rz, seeded)

    # Re-read the cutout so we know the write finished and indices updated.
    written = self.rmt.get_cutout(
        self.ann_region_chan, resolution, rx, ry, rz)
    numpy.testing.assert_array_equal(seeded, written)

    # Method under test.
    found = self.rmt.get_ids_in_region(
        self.ann_region_chan, resolution, rx, ry, rz)
    self.assertEqual([789, 987, 55555], found)
def test_upload_and_download_to_channel(self):
    """Round-trip a small uint8 cutout through the image channel."""
    rx, ry, rz = [0, 8], [0, 4], [0, 5]
    payload = numpy.random.randint(1, 254, (5, 4, 8)).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
    fetched = self.rmt.get_cutout(self.chan, 0, rx, ry, rz)
    numpy.testing.assert_array_equal(payload, fetched)
def test_upload_and_download_to_channel_with_time(self):
    """Round-trip a uint8 time-series cutout through the image channel."""
    rx, ry, rz, rt = [0, 8], [0, 4], [0, 5], [3, 6]
    payload = numpy.random.randint(1, 254, (3, 5, 4, 8)).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload, time_range=rt)
    fetched = self.rmt.get_cutout(self.chan, 0, rx, ry, rz, time_range=rt)
    numpy.testing.assert_array_equal(payload, fetched)
def test_upload_and_download_subsection_to_channel(self):
    """Upload a region, then download only an interior sub-window of it."""
    rx, ry, rz = [10, 20], [5, 10], [10, 19]
    sub_x, sub_y, sub_z = [12, 14], [7, 10], [12, 17]
    payload = numpy.random.randint(1, 10, (9, 5, 10)).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
    fetched = self.rmt.get_cutout(self.chan, 0, sub_x, sub_y, sub_z)
    # Slice offsets are relative to the uploaded region's origin.
    numpy.testing.assert_array_equal(payload[2:7, 2:5, 2:4], fetched)
def test_upload_to_x_edge_of_channel(self):
    """Upload flush against the channel's x extent; must not raise."""
    rx, ry, rz = [10, 2048], [5, 10], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
def test_upload_to_y_edge_of_channel(self):
    """Upload flush against the channel's y extent; must not raise."""
    rx, ry, rz = [10, 20], [5, 2048], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
def test_upload_to_z_edge_of_channel(self):
    """Upload flush against the channel's z extent; must not raise."""
    rx, ry, rz = [10, 20], [5, 10], [10, 100]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
def test_upload_past_x_edge_of_channel(self):
    """Uploading one voxel beyond the x extent must raise HTTPError."""
    rx, ry, rz = [10, 2049], [5, 10], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint8)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
def test_upload_past_y_edge_of_channel(self):
    """Uploading one voxel beyond the y extent must raise HTTPError."""
    rx, ry, rz = [10, 20], [5, 2049], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint8)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
def test_upload_past_z_edge_of_channel(self):
    """Uploading one slice beyond the z extent must raise HTTPError.

    Bug fix: the payload was previously built as uint16, which could make
    the request fail on a dtype mismatch with the uint8 channel instead of
    on the out-of-bounds z range this test targets. The sibling past_x /
    past_y tests use uint8, matching self.chan.
    """
    x_rng = [10, 20]
    y_rng = [5, 10]
    z_rng = [10, 101]
    shape = (z_rng[1] - z_rng[0], y_rng[1] - y_rng[0], x_rng[1] - x_rng[0])
    data = numpy.random.randint(1, 10, shape).astype(numpy.uint8)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)
def test_upload_and_download_to_channel_16bit(self):
    """Round-trip a small uint16 cutout through the 16-bit channel."""
    rx, ry, rz = [0, 8], [0, 4], [0, 5]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint16)
    self.rmt.create_cutout(self.chan16, 0, rx, ry, rz, payload)
    fetched = self.rmt.get_cutout(self.chan16, 0, rx, ry, rz)
    numpy.testing.assert_array_equal(payload, fetched)
def test_upload_and_download_subsection_to_channel_16bit(self):
    """Upload a uint16 region, then download an interior sub-window."""
    rx, ry, rz = [10, 20], [5, 10], [10, 19]
    sub_x, sub_y, sub_z = [12, 14], [7, 10], [12, 17]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint16)
    self.rmt.create_cutout(self.chan16, 0, rx, ry, rz, payload)
    fetched = self.rmt.get_cutout(self.chan16, 0, sub_x, sub_y, sub_z)
    # Slice offsets are relative to the uploaded region's origin.
    numpy.testing.assert_array_equal(payload[2:7, 2:5, 2:4], fetched)
def test_upload_to_x_edge_of_channel_16bit(self):
    """Upload flush against the 16-bit channel's x extent; must not raise."""
    rx, ry, rz = [2000, 2048], [5, 10], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint16)
    self.rmt.create_cutout(self.chan16, 0, rx, ry, rz, payload)
def test_upload_to_y_edge_of_channel_16bit(self):
    """Upload flush against the 16-bit channel's y extent; must not raise."""
    rx, ry, rz = [10, 20], [2000, 2048], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint16)
    self.rmt.create_cutout(self.chan16, 0, rx, ry, rz, payload)
def test_upload_to_z_edge_of_channel_16bit(self):
    """Upload flush against the 16-bit channel's z extent; must not raise."""
    rx, ry, rz = [10, 20], [5, 10], [10, 100]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint16)
    self.rmt.create_cutout(self.chan16, 0, rx, ry, rz, payload)
def test_upload_past_x_edge_of_channel_16bit(self):
    """Uploading one voxel beyond the x extent must raise HTTPError."""
    rx, ry, rz = [2000, 2049], [5, 10], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint16)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.chan16, 0, rx, ry, rz, payload)
def test_upload_past_y_edge_of_channel_16bit(self):
    """Uploading one voxel beyond the y extent must raise HTTPError."""
    rx, ry, rz = [10, 20], [2000, 2049], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint16)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.chan16, 0, rx, ry, rz, payload)
def test_upload_past_z_edge_of_channel_16bit(self):
    """Uploading one slice beyond the z extent must raise HTTPError."""
    rx, ry, rz = [10, 20], [5, 10], [10, 101]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint16)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.chan16, 0, rx, ry, rz, payload)
def test_upload_and_download_to_anno_chan(self):
    """Round-trip a small uint64 cutout through the annotation channel."""
    rx, ry, rz = [0, 8], [0, 4], [0, 5]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint64)
    self.rmt.create_cutout(self.ann_chan, 0, rx, ry, rz, payload)
    fetched = self.rmt.get_cutout(self.ann_chan, 0, rx, ry, rz)
    numpy.testing.assert_array_equal(payload, fetched)
def test_upload_and_download_subsection_to_anno_chan(self):
    """Upload an annotation region, then download an interior sub-window."""
    rx, ry, rz = [10, 20], [5, 10], [10, 19]
    sub_x, sub_y, sub_z = [12, 14], [7, 10], [12, 17]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint64)
    self.rmt.create_cutout(self.ann_chan, 0, rx, ry, rz, payload)
    fetched = self.rmt.get_cutout(self.ann_chan, 0, sub_x, sub_y, sub_z)
    # Slice offsets are relative to the uploaded region's origin.
    numpy.testing.assert_array_equal(payload[2:7, 2:5, 2:4], fetched)
def test_upload_to_x_edge_of_anno_chan(self):
    """Upload flush against the annotation channel's x extent; must not raise."""
    rx, ry, rz = [2000, 2048], [5, 10], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint64)
    self.rmt.create_cutout(self.ann_chan, 0, rx, ry, rz, payload)
def test_upload_to_y_edge_of_anno_chan(self):
    """Upload flush against the annotation channel's y extent; must not raise."""
    rx, ry, rz = [10, 20], [2000, 2048], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint64)
    self.rmt.create_cutout(self.ann_chan, 0, rx, ry, rz, payload)
def test_upload_to_z_edge_of_anno_chan(self):
    """Upload flush against the annotation channel's z extent; must not raise."""
    rx, ry, rz = [10, 100], [5, 10], [10, 100]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint64)
    self.rmt.create_cutout(self.ann_chan, 0, rx, ry, rz, payload)
def test_upload_past_x_edge_of_anno_chan(self):
    """Uploading one voxel beyond the x extent must raise HTTPError."""
    rx, ry, rz = [10, 2049], [5, 10], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint64)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.ann_chan, 0, rx, ry, rz, payload)
def test_upload_past_y_edge_of_anno_chan(self):
    """Uploading one voxel beyond the y extent must raise HTTPError."""
    rx, ry, rz = [10, 991], [5, 2049], [10, 19]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint64)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.ann_chan, 0, rx, ry, rz, payload)
def test_upload_past_z_edge_of_anno_chan(self):
    """Uploading one slice beyond the z extent must raise HTTPError."""
    rx, ry, rz = [10, 20], [5, 10], [10, 101]
    extent = (rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint64)
    with self.assertRaises(HTTPError):
        self.rmt.create_cutout(self.ann_chan, 0, rx, ry, rz, payload)
def test_upload_and_download_to_channel_4D(self):
    """Round-trip a 4D cutout (one time sample) through the image channel."""
    rx, ry, rz, rt = [600, 680], [600, 640], [50, 55], [0, 1]
    extent = (rt[1] - rt[0], rz[1] - rz[0], ry[1] - ry[0], rx[1] - rx[0])
    payload = numpy.random.randint(1, 10, extent).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload, time_range=rt)
    fetched = self.rmt.get_cutout(self.chan, 0, rx, ry, rz, time_range=rt)
    numpy.testing.assert_array_equal(payload, fetched)
def test_upload_and_cutout_to_black(self):
    """Writing then blacking out a region must read back as all zeros."""
    rx, ry, rz = [0, 8], [0, 4], [0, 5]
    payload = numpy.random.randint(1, 254, (5, 4, 8)).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
    self.rmt.create_cutout_to_black(self.chan, 0, rx, ry, rz)
    fetched = self.rmt.get_cutout(self.chan, 0, rx, ry, rz)
    numpy.testing.assert_array_equal(numpy.zeros((5, 4, 8)), fetched)
def test_upload_and_cutout_to_black_with_time(self):
    """Blacking out a time-series region must read back as all zeros."""
    rx, ry, rz, rt = [0, 8], [0, 4], [0, 5], [3, 6]
    payload = numpy.random.randint(1, 254, (3, 5, 4, 8)).astype(numpy.uint8)
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload, time_range=rt)
    self.rmt.create_cutout_to_black(self.chan, 0, rx, ry, rz, time_range=rt)
    fetched = self.rmt.get_cutout(self.chan, 0, rx, ry, rz, time_range=rt)
    numpy.testing.assert_array_equal(numpy.zeros((3, 5, 4, 8)), fetched)
def test_upload_and_cutout_to_black_partial(self):
    """Blacking out a sub-region zeroes only that sub-region."""
    rx, ry, rz = [0, 1024], [0, 1024], [0, 5]
    black_x, black_y, black_z = [0, 256], [0, 512], [2, 3]
    payload = numpy.random.randint(1, 254, (5, 1024, 1024)).astype(numpy.uint8)
    # Expected result: original data with the blacked-out window zeroed.
    wanted = numpy.copy(payload)
    wanted[2:3, 0:512, 0:256] = 0
    self.rmt.create_cutout(self.chan, 0, rx, ry, rz, payload)
    self.rmt.create_cutout_to_black(self.chan, 0, black_x, black_y, black_z)
    fetched = self.rmt.get_cutout(self.chan, 0, rx, ry, rz)
    numpy.testing.assert_array_equal(wanted, fetched)
if __name__ == '__main__':
    # Allow running this integration-test module directly as a script.
    unittest.main()
| [
"# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from intern.remote.boss import BossRemote\n",
"from intern.resource.boss.resource import *\n",
"from intern.service.boss.httperrorlist import HTTPErrorList\n",
"import numpy\n",
"\n",
"import random\n",
"import requests\n",
"from requests import Session, HTTPError\n",
"from requests.packages.urllib3.exceptions import InsecureRequestWarning\n",
"\n",
"import unittest\n",
"import time\n",
"\n",
"API_VER = 'v1'\n",
"\n",
"\n",
"class VolumeServiceTest_v1(unittest.TestCase):\n",
" \"\"\"Integration tests of the Boss volume service API.\n",
"\n",
" Because setup and teardown involves many REST calls, tests are only\n",
" divided into tests of the different types of data model resources. All\n",
" operations are performed within a single test of each resource.\n",
" \"\"\"\n",
"\n",
" @classmethod\n",
" def setUpClass(cls):\n",
" \"\"\"Do an initial DB clean up in case something went wrong the last time.\n",
"\n",
" If a test failed really badly, the DB might be in a bad state despite\n",
" attempts to clean up during tearDown().\n",
" \"\"\"\n",
" cls.rmt = BossRemote('test.cfg', API_VER)\n",
"\n",
" # Turn off SSL cert verification. This is necessary for interacting with\n",
" # developer instances of the Boss.\n",
" cls.rmt.project_service.session_send_opts = {'verify': False}\n",
" cls.rmt.metadata_service.session_send_opts = {'verify': False}\n",
" cls.rmt.volume_service.session_send_opts = {'verify': False}\n",
" requests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n",
"\n",
" coll_name = 'collection2323{}'.format(random.randint(0, 9999))\n",
" cls.coll = CollectionResource(coll_name, 'bar')\n",
"\n",
" cf_name = 'BestFrame{}'.format(random.randint(0, 9999))\n",
" cls.coord = CoordinateFrameResource(\n",
" cf_name, 'Test coordinate frame.', 0, 2048, 0, 2048, 0, 100,\n",
" 1, 1, 1, 'nanometers', 0, 'nanoseconds')\n",
"\n",
" # cls.exp.coord_frame must be set with valid id before creating.\n",
" cls.exp = ExperimentResource(\n",
" 'exp2323x2', cls.coll.name, cls.coord.name, 'my experiment',\n",
" 1, 'isotropic', 10)\n",
"\n",
" cls.chan = ChannelResource(\n",
" 'myVolChan', cls.coll.name, cls.exp.name, 'image', 'test channel',\n",
" 0, 'uint8', 0)\n",
"\n",
" cls.chan16 = ChannelResource(\n",
" 'myVol16bitChan', cls.coll.name, cls.exp.name, 'image',\n",
" '16 bit test channel', 0, 'uint16', 0)\n",
"\n",
" cls.ann_chan = ChannelResource(\n",
" 'annVolChan2', cls.coll.name, cls.exp.name, 'annotation',\n",
" 'annotation test channel', 0, 'uint64', 0, sources=[cls.chan.name])\n",
"\n",
" # This channel reserved for testing get_ids_in_region(). This is a\n",
" # separate channel so we don't have to worry about ids written by\n",
" # other tests.\n",
" cls.ann_region_chan = ChannelResource(\n",
" 'annRegionChan2', cls.coll.name, cls.exp.name, 'annotation',\n",
" 'annotation ids in region test channel', 0, 'uint64', 0,\n",
" sources=[cls.chan.name])\n",
"\n",
"        # This channel reserved for testing tight bounding boxes.\n",
" cls.ann_bounding_chan = ChannelResource(\n",
" 'annRegionChan3', cls.coll.name, cls.exp.name, 'annotation',\n",
" 'annotation ids in bounding box test channel', 0, 'uint64', 0,\n",
" sources=[cls.chan.name])\n",
"\n",
" cls.rmt.create_project(cls.coll)\n",
" cls.rmt.create_project(cls.coord)\n",
" cls.rmt.create_project(cls.exp)\n",
" cls.rmt.create_project(cls.chan16)\n",
" cls.rmt.create_project(cls.chan)\n",
" cls.rmt.create_project(cls.ann_chan)\n",
" cls.rmt.create_project(cls.ann_region_chan)\n",
" cls.rmt.create_project(cls.ann_bounding_chan)\n",
"\n",
" @classmethod\n",
" def tearDownClass(cls):\n",
" \"\"\"Clean up the data model objects used by this test case.\n",
"\n",
" This method is used by both tearDownClass() and setUpClass().\n",
" \"\"\"\n",
" try:\n",
" cls.rmt.delete_project(cls.ann_bounding_chan)\n",
" except HTTPError:\n",
" pass\n",
" try:\n",
" cls.rmt.delete_project(cls.ann_region_chan)\n",
" except HTTPError:\n",
" pass\n",
" try:\n",
" cls.rmt.delete_project(cls.ann_chan)\n",
" except HTTPError:\n",
" pass\n",
" try:\n",
" cls.rmt.delete_project(cls.chan16)\n",
" except HTTPError:\n",
" pass\n",
" try:\n",
" cls.rmt.delete_project(cls.chan)\n",
" except HTTPError:\n",
" pass\n",
" try:\n",
" cls.rmt.delete_project(cls.exp)\n",
" except HTTPError:\n",
" pass\n",
" try:\n",
" cls.rmt.delete_project(cls.coord)\n",
" except HTTPError:\n",
" pass\n",
" try:\n",
" cls.rmt.delete_project(cls.coll)\n",
" except HTTPError:\n",
" pass\n",
"\n",
" def setUp(self):\n",
" self.rmt = BossRemote('test.cfg')\n",
"\n",
" def tearDown(self):\n",
" pass\n",
"\n",
" def test_reserve_ids(self):\n",
" first_id = self.rmt.reserve_ids(self.ann_chan, 20)\n",
" self.assertTrue(first_id > 0)\n",
"\n",
" def test_get_bounding_box_id_doesnt_exist(self):\n",
" resolution = 0\n",
" id = 12345678\n",
" with self.assertRaises(HTTPError) as err:\n",
" self.rmt.get_bounding_box(self.ann_chan, resolution, id, 'loose')\n",
" expected_msg_prefix = 'Reserve ids failed'\n",
"        self.assertTrue(err.message.startswith(expected_msg_prefix))\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_get_bounding_box_spans_cuboids_in_x(self):\n",
" x_rng = [511, 515]\n",
" y_rng = [0, 8]\n",
" z_rng = [0, 5]\n",
" t_rng = [0, 1]\n",
"\n",
" id = 77555\n",
"\n",
" data = numpy.zeros((5, 8, 4), dtype='uint64')\n",
" data[1][0][0] = id\n",
" data[2][1][1] = id\n",
" data[3][2][3] = id\n",
"\n",
" resolution = 0\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual = self.rmt.get_cutout(self.ann_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual)\n",
"\n",
" expected = {\n",
" 'x_range': [0, 1024],\n",
" 'y_range': [0, 512],\n",
" 'z_range': [0, 16],\n",
" 't_range': [0, 1]\n",
" }\n",
"\n",
" actual = self.rmt.get_bounding_box(self.ann_chan, resolution, id, 'loose')\n",
"\n",
" self.assertEqual(expected, actual)\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_get_bounding_box_spans_cuboids_in_y(self):\n",
" x_rng = [0, 8]\n",
" y_rng = [511, 515]\n",
" z_rng = [0, 5]\n",
" t_rng = [0, 1]\n",
"\n",
" id = 77666\n",
"\n",
" data = numpy.zeros((5, 4, 8), dtype='uint64')\n",
" data[1][0][0] = id\n",
" data[2][1][0] = id\n",
" data[3][2][0] = id\n",
"\n",
" resolution = 0\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual = self.rmt.get_cutout(self.ann_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual)\n",
"\n",
" expected = {\n",
" 'x_range': [0, 512],\n",
" 'y_range': [0, 1024],\n",
" 'z_range': [0, 16],\n",
" 't_range': [0, 1]\n",
" }\n",
"\n",
" actual = self.rmt.get_bounding_box(self.ann_chan, resolution, id, 'loose')\n",
"\n",
" self.assertEqual(expected, actual)\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_get_bounding_box_spans_cuboids_in_z(self):\n",
" x_rng = [0, 8]\n",
" y_rng = [0, 4]\n",
" z_rng = [30, 35]\n",
" t_rng = [0, 1]\n",
"\n",
" id = 77888\n",
"\n",
" data = numpy.zeros((5, 4, 8), dtype='uint64')\n",
" data[1][0][0] = id\n",
" data[2][1][0] = id\n",
" data[3][2][0] = id\n",
"\n",
" resolution = 0\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual = self.rmt.get_cutout(self.ann_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual)\n",
"\n",
" expected = {\n",
" 'x_range': [0, 512],\n",
" 'y_range': [0, 512],\n",
" 'z_range': [16, 48],\n",
" 't_range': [0, 1]\n",
" }\n",
"\n",
" actual = self.rmt.get_bounding_box(self.ann_chan, resolution, id, 'loose')\n",
"\n",
" self.assertEqual(expected, actual)\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_tight_bounding_box_x_axis(self):\n",
" \"\"\"Test tight bounding box with ids that span three cuboids along the x axis.\"\"\"\n",
" resolution = 0\n",
" x_rng = [511, 1025]\n",
" y_rng = [512, 1024]\n",
" z_rng = [16, 32]\n",
" t_rng = [0, 1]\n",
"\n",
" data = numpy.zeros((16, 512, 514), dtype='uint64')\n",
"\n",
" x_id = 123\n",
" y_id = 127\n",
" z_id = 500000000000000000\n",
"\n",
" # Id in partial region on x axis closest to origin.\n",
" data[1][1][0] = x_id\n",
" # Id in partial region on x axis furthest from origin.\n",
" data[1][1][513] = x_id\n",
"\n",
" # Id in cuboid aligned region.\n",
" data[2][2][21] = x_id\n",
" data[2][1][22] = y_id\n",
" data[4][24][72] = z_id\n",
"\n",
" expected = {'x_range': [511, 1025], 'y_range': [513, 515], 'z_range': [17, 19]}\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual_data = self.rmt.get_cutout(\n",
" self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual_data)\n",
"\n",
" # Method under test.\n",
" actual = self.rmt.get_bounding_box(\n",
" self.ann_bounding_chan, resolution, x_id, bb_type='tight')\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_tight_bounding_box_y_axis(self):\n",
" \"\"\"Test tight bounding box with ids that span three cuboids along the x axis.\"\"\"\n",
" resolution = 0\n",
" x_rng = [512, 1024]\n",
" y_rng = [511, 1025]\n",
" z_rng = [16, 32]\n",
" t_rng = [0, 1]\n",
"\n",
" data = numpy.zeros((16, 514, 512), dtype='uint64')\n",
"\n",
" x_id = 123\n",
" y_id = 127\n",
" z_id = 500000000000000000\n",
"\n",
" # Id in partial region on y axis closest to origin.\n",
" data[1][0][10] = y_id\n",
" # Id in partial region on y axis furthest from origin.\n",
" data[1][513][13] = y_id\n",
"\n",
" # Id in cuboid aligned region.\n",
" data[2][2][21] = y_id\n",
" data[2][3][20] = x_id\n",
" data[4][25][71] = z_id\n",
"\n",
" expected = {'x_range': [522, 526], 'y_range': [511, 1025], 'z_range': [17, 19]}\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual_data = self.rmt.get_cutout(\n",
" self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual_data)\n",
"\n",
" # Method under test.\n",
" actual = self.rmt.get_bounding_box(\n",
" self.ann_bounding_chan, resolution, y_id, bb_type='tight')\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_tight_bounding_box_z_axis(self):\n",
" \"\"\"Test tight bounding box with ids that span three cuboids along the x axis.\"\"\"\n",
" resolution = 0\n",
" x_rng = [512, 1024]\n",
" y_rng = [512, 1024]\n",
" z_rng = [15, 33]\n",
" t_rng = [0, 1]\n",
"\n",
" data = numpy.zeros((18, 512, 512), dtype='uint64')\n",
"\n",
" x_id = 123\n",
" y_id = 127\n",
" z_id = 500000000000000000\n",
"\n",
" # Id in partial region on z axis closest to origin.\n",
" data[0][22][60] = z_id\n",
" # Id in partial region on z axis furthest from origin.\n",
" data[17][23][63] = z_id\n",
"\n",
" # Id in cuboid aligned region.\n",
" data[5][24][71] = z_id\n",
" data[3][2][20] = x_id\n",
" data[3][1][21] = y_id\n",
"\n",
" expected = {'x_range': [572, 583], 'y_range': [534, 537], 'z_range': [15, 33]}\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual_data = self.rmt.get_cutout(\n",
" self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual_data)\n",
"\n",
" # Method under test.\n",
" actual = self.rmt.get_bounding_box(\n",
" self.ann_bounding_chan, resolution, z_id, bb_type='tight')\n",
"\n",
" def test_get_ids_in_region_none(self):\n",
" \"\"\"Run on region that hasn't been written with ids, yet.\"\"\"\n",
" resolution = 0\n",
" x_rng = [1536, 1540]\n",
" y_rng = [1536, 1540]\n",
" z_rng = [48, 56]\n",
" t_rng = [0, 1]\n",
"\n",
" data = numpy.zeros((8, 4, 4), dtype='uint64')\n",
"\n",
" expected = []\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual_data = self.rmt.get_cutout(\n",
" self.ann_bounding_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual_data)\n",
"\n",
" # Method under test.\n",
" actual = self.rmt.get_ids_in_region(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng)\n",
"\n",
" self.assertEqual(expected, actual)\n",
"\n",
" def test_filtered_cutout(self):\n",
" \"\"\"Test filtered cutout using same data written for get_ids_in_region_x_axis.\"\"\"\n",
" resolution = 0\n",
" x_rng = [511, 1025]\n",
" y_rng = [512, 1024]\n",
" z_rng = [16, 32]\n",
" t_rng = [0, 1]\n",
"\n",
" data = numpy.zeros((16, 512, 514), dtype='uint64')\n",
"\n",
" # Id in partial region on x axis closest to origin.\n",
" data[1][1][0] = 123\n",
" # Id in partial region on x axis furthest from origin.\n",
" data[1][1][513] = 321\n",
"\n",
" # Id in cuboid aligned region.\n",
" data[10][20][21] = 55555\n",
"\n",
" expected = [123, 321, 55555]\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual_data = self.rmt.get_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual_data)\n",
"\n",
" # Should get back the exact data given in create_cutout().\n",
" filtered_data1 = self.rmt.get_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng,\n",
" id_list=[123, 321, 55555])\n",
" numpy.testing.assert_array_equal(data, filtered_data1)\n",
"\n",
" # Filter on id 123.\n",
" expected_data_123 = numpy.zeros((16, 512, 514), dtype='uint64')\n",
" expected_data_123[1][1][0] = 123\n",
"\n",
" filtered_data_123 = self.rmt.get_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng, id_list=[123])\n",
" numpy.testing.assert_array_equal(expected_data_123, filtered_data_123)\n",
"\n",
" # Filter on id 321.\n",
" expected_data_321 = numpy.zeros((16, 512, 514), dtype='uint64')\n",
" expected_data_321[1][1][513] = 321\n",
"\n",
" filtered_data_321 = self.rmt.get_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng, id_list=[321])\n",
" numpy.testing.assert_array_equal(expected_data_321, filtered_data_321)\n",
"\n",
" # Filter on ids 123 and 55555.\n",
" expected_data_123_55555 = numpy.zeros((16, 512, 514), dtype='uint64')\n",
" expected_data_123_55555[1][1][0] = 123\n",
" expected_data_123_55555[10][20][21] = 55555\n",
"\n",
" filtered_data_123_55555 = self.rmt.get_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng,\n",
" id_list=[123, 55555])\n",
" numpy.testing.assert_array_equal(\n",
" expected_data_123_55555, filtered_data_123_55555)\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_get_ids_in_region_x_axis(self):\n",
" \"\"\"Test using a region that's cuboid aligned except for the x axis.\"\"\"\n",
" resolution = 0\n",
" x_rng = [511, 1025]\n",
" y_rng = [512, 1024]\n",
" z_rng = [16, 32]\n",
" t_rng = [0, 1]\n",
"\n",
" data = numpy.zeros((16, 512, 514), dtype='uint64')\n",
"\n",
" # Id in partial region on x axis closest to origin.\n",
" data[1][1][0] = 123\n",
" # Id in partial region on x axis furthest from origin.\n",
" data[1][1][513] = 321\n",
"\n",
" # Id in cuboid aligned region.\n",
" data[10][20][21] = 55555\n",
"\n",
" expected = [123, 321, 55555]\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual_data = self.rmt.get_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual_data)\n",
"\n",
" # Method under test.\n",
" actual = self.rmt.get_ids_in_region(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng)\n",
"\n",
" self.assertEqual(expected, actual)\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_get_ids_in_region_y_axis(self):\n",
" \"\"\"Test using a region that's cuboid aligned except for the y axis.\"\"\"\n",
" resolution = 0\n",
" x_rng = [512, 1024]\n",
" y_rng = [511, 1025]\n",
" z_rng = [16, 32]\n",
" t_rng = [0, 1]\n",
"\n",
" data = numpy.zeros((16, 514, 512), dtype='uint64')\n",
"\n",
" # Id in partial region on y axis closest to origin.\n",
" data[1][0][1] = 456\n",
" # Id in partial region on y axis furthest from origin.\n",
" data[1][513][1] = 654\n",
"\n",
" # Id in cuboid aligned region.\n",
" data[10][21][20] = 55555\n",
"\n",
" # expected = [123, 321, 456, 654, 789, 987, 55555]\n",
" expected = [456, 654, 55555]\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual_data = self.rmt.get_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual_data)\n",
"\n",
" # Method under test.\n",
" actual = self.rmt.get_ids_in_region(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng)\n",
"\n",
" self.assertEqual(expected, actual)\n",
"\n",
" @unittest.skip('Skipping - currently indexing disabled')\n",
" def test_get_ids_in_region_z_axis(self):\n",
" \"\"\"Test using a region that's cuboid aligned except for the z axis.\"\"\"\n",
" resolution = 0\n",
" x_rng = [512, 1024]\n",
" y_rng = [512, 1024]\n",
" z_rng = [15, 33]\n",
" t_rng = [0, 1]\n",
"\n",
" data = numpy.zeros((18, 512, 512), dtype='uint64')\n",
"\n",
" # Id in partial region on z axis closest to origin.\n",
" data[0][1][1] = 789\n",
" # Id in partial region on z axis furthest from origin.\n",
" data[17][1][1] = 987\n",
"\n",
" # Id in cuboid aligned region.\n",
" data[11][20][20] = 55555\n",
"\n",
" expected = [789, 987, 55555]\n",
"\n",
" self.rmt.create_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng, data)\n",
"\n",
" # Get cutout to make sure data is done writing and indices updated.\n",
" actual_data = self.rmt.get_cutout(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual_data)\n",
"\n",
" # Method under test.\n",
" actual = self.rmt.get_ids_in_region(\n",
" self.ann_region_chan, resolution, x_rng, y_rng, z_rng)\n",
"\n",
" self.assertEqual(expected, actual)\n",
"\n",
"\n",
" def test_upload_and_download_to_channel(self):\n",
" x_rng = [0, 8]\n",
" y_rng = [0, 4]\n",
" z_rng = [0, 5]\n",
"\n",
" data = numpy.random.randint(1, 254, (5, 4, 8))\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
" actual = self.rmt.get_cutout(self.chan, 0, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual)\n",
"\n",
" def test_upload_and_download_to_channel_with_time(self):\n",
" x_rng = [0, 8]\n",
" y_rng = [0, 4]\n",
" z_rng = [0, 5]\n",
" t_rng = [3, 6]\n",
"\n",
" data = numpy.random.randint(1, 254, (3, 5, 4, 8))\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data, time_range=t_rng)\n",
" actual = self.rmt.get_cutout(self.chan, 0, x_rng, y_rng, z_rng, time_range=t_rng)\n",
" numpy.testing.assert_array_equal(data, actual)\n",
"\n",
" def test_upload_and_download_subsection_to_channel(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" sub_x = [12, 14]\n",
" sub_y = [7, 10]\n",
" sub_z = [12, 17]\n",
"\n",
" data = numpy.random.randint(1, 10, (9, 5, 10))\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
" actual = self.rmt.get_cutout(self.chan, 0, sub_x, sub_y, sub_z)\n",
" numpy.testing.assert_array_equal(data[2:7, 2:5, 2:4], actual)\n",
"\n",
" def test_upload_to_x_edge_of_channel(self):\n",
" x_rng = [10, 2048]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_to_y_edge_of_channel(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 2048]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_to_z_edge_of_channel(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 100]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_x_edge_of_channel(self):\n",
" x_rng = [10, 2049]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_y_edge_of_channel(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 2049]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_z_edge_of_channel(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 101]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_and_download_to_channel_16bit(self):\n",
" x_rng = [0, 8]\n",
" y_rng = [0, 4]\n",
" z_rng = [0, 5]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" self.rmt.create_cutout(self.chan16, 0, x_rng, y_rng, z_rng, data)\n",
" actual = self.rmt.get_cutout(self.chan16, 0, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual)\n",
"\n",
" def test_upload_and_download_subsection_to_channel_16bit(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" sub_x = [12, 14]\n",
" sub_y = [7, 10]\n",
" sub_z = [12, 17]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" self.rmt.create_cutout(self.chan16, 0, x_rng, y_rng, z_rng, data)\n",
" actual = self.rmt.get_cutout(self.chan16, 0, sub_x, sub_y, sub_z)\n",
" numpy.testing.assert_array_equal(data[2:7, 2:5, 2:4], actual)\n",
"\n",
" def test_upload_to_x_edge_of_channel_16bit(self):\n",
" x_rng = [2000, 2048]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" self.rmt.create_cutout(self.chan16, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_to_y_edge_of_channel_16bit(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [2000, 2048]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" self.rmt.create_cutout(self.chan16, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_to_z_edge_of_channel_16bit(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 100]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" self.rmt.create_cutout(self.chan16, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_x_edge_of_channel_16bit(self):\n",
" x_rng = [2000, 2049]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.chan16, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_y_edge_of_channel_16bit(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [2000, 2049]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.chan16, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_z_edge_of_channel_16bit(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 101]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint16)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.chan16, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_and_download_to_anno_chan(self):\n",
" x_rng = [0, 8]\n",
" y_rng = [0, 4]\n",
" z_rng = [0, 5]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint64)\n",
"\n",
" self.rmt.create_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng, data)\n",
" actual = self.rmt.get_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(data, actual)\n",
"\n",
" def test_upload_and_download_subsection_to_anno_chan(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" sub_x = [12, 14]\n",
" sub_y = [7, 10]\n",
" sub_z = [12, 17]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint64)\n",
"\n",
" self.rmt.create_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng, data)\n",
" actual = self.rmt.get_cutout(self.ann_chan, 0, sub_x, sub_y, sub_z)\n",
" numpy.testing.assert_array_equal(data[2:7, 2:5, 2:4], actual)\n",
"\n",
" def test_upload_to_x_edge_of_anno_chan(self):\n",
" x_rng = [2000, 2048]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint64)\n",
"\n",
" self.rmt.create_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_to_y_edge_of_anno_chan(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [2000, 2048]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint64)\n",
"\n",
" self.rmt.create_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_to_z_edge_of_anno_chan(self):\n",
" x_rng = [10, 100]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 100]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint64)\n",
"\n",
" self.rmt.create_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_x_edge_of_anno_chan(self):\n",
" x_rng = [10, 2049]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint64)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_y_edge_of_anno_chan(self):\n",
" x_rng = [10, 991]\n",
" y_rng = [5, 2049]\n",
" z_rng = [10, 19]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint64)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_past_z_edge_of_anno_chan(self):\n",
" x_rng = [10, 20]\n",
" y_rng = [5, 10]\n",
" z_rng = [10, 101]\n",
"\n",
" shape = (z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint64)\n",
"\n",
" with self.assertRaises(HTTPError):\n",
" self.rmt.create_cutout(self.ann_chan, 0, x_rng, y_rng, z_rng, data)\n",
"\n",
" def test_upload_and_download_to_channel_4D(self):\n",
" x_rng = [600, 680]\n",
" y_rng = [600, 640]\n",
" z_rng = [50, 55]\n",
" t_rng = [0, 1]\n",
"\n",
" shape = (t_rng[1]-t_rng[0], z_rng[1]-z_rng[0], y_rng[1]-y_rng[0], x_rng[1]-x_rng[0])\n",
"\n",
" data = numpy.random.randint(1, 10, shape)\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data, time_range=t_rng)\n",
" actual = self.rmt.get_cutout(self.chan, 0, x_rng, y_rng, z_rng, time_range=t_rng)\n",
" numpy.testing.assert_array_equal(data, actual)\n",
"\n",
" def test_upload_and_cutout_to_black(self):\n",
" x_rng = [0, 8]\n",
" y_rng = [0, 4]\n",
" z_rng = [0, 5]\n",
"\n",
" data = numpy.random.randint(1, 254, (5, 4, 8))\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
" self.rmt.create_cutout_to_black(self.chan, 0, x_rng, y_rng, z_rng)\n",
" actual = self.rmt.get_cutout(self.chan, 0, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(numpy.zeros((5,4,8)), actual)\n",
"\n",
" def test_upload_and_cutout_to_black_with_time(self):\n",
" x_rng = [0, 8]\n",
" y_rng = [0, 4]\n",
" z_rng = [0, 5]\n",
" t_rng = [3, 6]\n",
"\n",
" data = numpy.random.randint(1, 254, (3, 5, 4, 8))\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data, time_range=t_rng)\n",
" self.rmt.create_cutout_to_black(self.chan, 0, x_rng, y_rng, z_rng, time_range=t_rng)\n",
" actual = self.rmt.get_cutout(self.chan, 0, x_rng, y_rng, z_rng, time_range=t_rng)\n",
" numpy.testing.assert_array_equal(numpy.zeros((3, 5, 4, 8)), actual)\n",
"\n",
" def test_upload_and_cutout_to_black_partial(self):\n",
" x_rng = [0, 1024]\n",
" y_rng = [0, 1024]\n",
" z_rng = [0, 5]\n",
"\n",
" x_rng_black = [0, 256]\n",
" y_rng_black = [0, 512]\n",
" z_rng_black = [2,3] \n",
"\n",
" data = numpy.random.randint(1, 254, (5, 1024, 1024))\n",
" data = data.astype(numpy.uint8)\n",
"\n",
" expected = numpy.copy(data)\n",
" expected[2:3, 0:512, 0:256] = 0\n",
"\n",
" self.rmt.create_cutout(self.chan, 0, x_rng, y_rng, z_rng, data)\n",
" self.rmt.create_cutout_to_black(self.chan, 0, x_rng_black, y_rng_black, z_rng_black)\n",
" actual = self.rmt.get_cutout(self.chan, 0, x_rng, y_rng, z_rng)\n",
" numpy.testing.assert_array_equal(expected, actual)\n",
"\n",
"if __name__ == '__main__':\n",
" unittest.main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0.011111111111111112,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.028169014084507043,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0.010752688172043012,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06896551724137931,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0.037037037037037035,
0
] | 963 | 0.00047 | false |
# Copyright 2015 Altova GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__copyright__ = 'Copyright 2015 Altova GmbH'
__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'
# Calculates financial statements and ratios from SEC filings in the given RSS feed and adds them to a database.
#
# Usage:
# raptorxmlxbrl script scripts/build_secdb.py feeds/xbrlrss-2015-*.xml --db=sec2015.db3
import feed_tools
import re,csv,json,glob,enum,datetime,argparse,logging,itertools,os.path,urllib,threading,concurrent.futures,timeit,calendar
from altova_api.v2 import xml, xsd, xbrl
class Summations(dict):
    """Accumulator dict: reading an absent key yields 0 without inserting it.

    Unlike collections.defaultdict(int), a lookup miss does not grow the
    dict — only explicit assignments store entries.
    """
    def __missing__(self, absent_key):
        # Additive identity so `totals[k] += v` works on the first touch.
        return 0
# Repository root: two directory levels up from this script file.
gsRootDir = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])
# Module-wide report metadata: data/reports.json declares the line items per
# statement; each statement entry is augmented here with its kind, display
# name and the concept->line-item mapping table loaded from data/.
reports = json.load(open(os.path.join(gsRootDir,'data','reports.json')))
reports['balance'].update({'kind': 'balance', 'name': 'Balance Sheet', 'mappings': json.load(open(os.path.join(gsRootDir,'data','balance_mappings.json')))})
reports['income'].update({'kind': 'income', 'name': 'Income Statement', 'mappings': json.load(open(os.path.join(gsRootDir,'data','income_mappings.json')))})
reports['cashflow'].update({'kind': 'cashflow', 'name': 'Cashflow Statement', 'mappings': json.load(open(os.path.join(gsRootDir,'data','cashflow_mappings.json')))})
def setup_db_connect(driver,name):
    """Returns a function object that can be used to connect to the DB.

    The function doesn't require any additional parameters as it stores the
    DB name/connection string using closure.

    Args:
        driver: 'sqlite' or 'odbc'.
        name: DB file name (sqlite) or DSN/connection string (odbc).

    Raises:
        ValueError: if *driver* is not a supported driver name (previously an
            unknown driver silently returned None and failed later).
    """
    logger.info('Using %s DB with DSN=%s',driver,name)
    def connect_sqlite():
        # isolation_level=None puts sqlite3 in autocommit mode; the WAL
        # journal allows concurrent readers while a writer is active.
        con = sqlite3.connect(name,isolation_level=None)
        con.execute('PRAGMA journal_mode=WAL')
        return con
    def connect_odbc():
        return pyodbc.connect(name)
    # Import lazily so only the driver actually in use must be installed.
    if driver == 'sqlite':
        import sqlite3
        return connect_sqlite
    elif driver == 'odbc':
        import pyodbc
        return connect_odbc
    raise ValueError('Unsupported DB driver: %r' % (driver,))
def create_db_tables():
    """Create all the necessary DB tables.

    Builds the tickers, filings, facts, balance_sheet, income_statement,
    cashflow_statement and ratios tables.  The statement tables get one
    BIGINT (or REAL, for ratios) column per line item declared in the
    module-level ``reports`` metadata.

    Raises:
        RuntimeError: if any CREATE TABLE statement fails (original error is
            logged, not chained).
    """
    logger.info('Creating DB tables')
    try:
        with db_connect() as con:
            cur = con.cursor()
            # Ticker symbol -> CIK lookup table.
            cur.execute("""
            CREATE TABLE tickers (
                symbol VARCHAR(10) PRIMARY KEY,
                cikNumber INTEGER
            );""")
            # One row per SEC filing, keyed by its EDGAR accession number.
            cur.execute("""
            CREATE TABLE filings (
                accessionNumber CHAR(20) PRIMARY KEY,
                cikNumber INTEGER,
                companyName TEXT,
                formType TEXT,
                filingDate DATETIME,
                fileNumber TEXT,
                acceptanceDatetime DATETIME,
                period DATE,
                assistantDirector TEXT,
                assignedSic INTEGER,
                otherCikNumbers TEXT,
                fiscalYearEnd INTEGER,
                instanceUrl TEXT,
                errors TEXT
            );""")
            # Presentation-ordered facts per report; `pos` preserves the
            # display order within each (filing, report) pair.
            cur.execute("""
            CREATE TABLE facts (
                accessionNumber CHAR(20),
                report TEXT,
                pos SMALLINT,
                lineitem TEXT,
                label TEXT,
                namespace TEXT,
                name TEXT,
                value TEXT,
                level SMALLINT,
                is_abstract BOOLEAN,
                is_total BOOLEAN,
                is_negated BOOLEAN,
                PRIMARY KEY (accessionNumber,report,pos)
            );""")
            # Balance sheet is an instant report: one row per filing.
            cur.execute("""
            CREATE TABLE balance_sheet (
                accessionNumber CHAR(20) PRIMARY KEY,
                cikNumber INTEGER,
                endDate DATE,
                currencyCode CHAR(3),
                %s
            );""" % ','.join(key+' BIGINT' for key in reports['balance']['lineitems']))
            # Income/cashflow are duration reports: one row per filing AND
            # reporting duration (quarterly vs. annual), hence the composite key.
            cur.execute("""
            CREATE TABLE income_statement (
                accessionNumber CHAR(20),
                cikNumber INTEGER,
                endDate DATE,
                duration INTEGER,
                currencyCode CHAR(3),
                %s,
                PRIMARY KEY (accessionNumber,duration)
            );""" % ','.join(key+' BIGINT' for key in reports['income']['lineitems']))
            cur.execute("""
            CREATE TABLE cashflow_statement (
                accessionNumber CHAR(20),
                cikNumber INTEGER,
                endDate DATE,
                duration INTEGER,
                currencyCode CHAR(3),
                %s,
                PRIMARY KEY (accessionNumber,duration)
            );""" % ','.join(key+' BIGINT' for key in reports['cashflow']['lineitems']))
            # Derived financial ratios, one row per filing and ratio kind.
            cur.execute("""
            CREATE TABLE ratios (
                accessionNumber CHAR(20),
                cikNumber INTEGER,
                endDate DATE,
                kind CHAR(3),
                %s,
                PRIMARY KEY (accessionNumber,kind)
            );""" % ','.join(key+' REAL' for key in reports['ratios']['lineitems']))
            con.commit()
    except:
        # NOTE(review): bare except — any failure (incl. KeyboardInterrupt) is
        # logged and converted to RuntimeError without chaining the cause.
        logger.exception('Failed creating DB tables')
        raise RuntimeError('Failed creating DB tables')
def create_db_indices():
    """Create all the necessary DB indices.

    Adds CIK lookup indices on every per-filing table plus a company-name
    index on filings.  Raises RuntimeError on any failure.
    """
    logger.info('Creating DB indices')
    index_statements = (
        'CREATE INDEX income_cik ON income_statement (cikNumber);',
        'CREATE INDEX balance_cik ON balance_sheet (cikNumber);',
        'CREATE INDEX cashflow_cik ON cashflow_statement (cikNumber);',
        'CREATE INDEX ratios_cik ON ratios (cikNumber);',
        'CREATE INDEX filings_cik ON filings (cikNumber);',
        'CREATE INDEX filings_company ON filings (companyName);',
    )
    try:
        with db_connect() as con:
            cur = con.cursor()
            for statement in index_statements:
                cur.execute(statement)
            con.commit()
    except:
        logger.exception('Failed creating DB indices')
        raise RuntimeError('Failed creating DB indices')
def load_ticker_symbols():
    """Returns a dict of CIK to ticker symbol."""
    logger.info('Loading ticker file %s','tickers.csv')
    ticker_path = os.path.join(gsRootDir,'data','tickers.csv')
    with open(ticker_path,'r') as csv_file:
        # Column 0 is the symbol (drop any '^...' suffix), column 1 the CIK.
        return {int(row[1]): row[0].split('^')[0] for row in csv.reader(csv_file)}
def insert_ticker_symbols(tickers):
    """Writes ticker symbol and CIK pairs to the DB.

    Args:
        tickers: mapping of CIK number (int) -> ticker symbol (str), as
            returned by load_ticker_symbols().
    """
    with db_connect() as con:
        cur = con.cursor()
        # Batch all rows through a single executemany() call instead of
        # issuing one execute() round-trip per ticker.
        cur.executemany('INSERT INTO tickers VALUES (?,?)',
                        [(symbol, cik) for cik, symbol in tickers.items()])
re_usgaap = re.compile('^http://[^/]+/us-gaap/')
re_dei = re.compile('^http://xbrl.us/dei/|^http://xbrl.sec.gov/dei/')
def find_std_namespaces(dts):
    """Returns a tuple with the us-gaap and dei namespaces imported in this company extension taxonomy."""
    usgaap_ns = None
    dei_ns = None
    # Recognise the standard taxonomies by their target-namespace patterns;
    # a later match overrides an earlier one, as in the original scan order.
    for schema in dts.taxonomy_schemas:
        namespace = schema.target_namespace
        if re_usgaap.match(namespace):
            usgaap_ns = namespace
        elif re_dei.match(namespace):
            dei_ns = namespace
    return (usgaap_ns, dei_ns)
parts_empty_re = re.compile(r"[`,'.]")
parts_space_re = re.compile(r"[\][{}():/&-]")
def classify_linkrole(definition):
    """Returns the type of report based on the roleType definition string.

    Args:
        definition: roleType definition text; per EDGAR Filer Manual rule
            6.7.12 it has the form "{SortCode} - {Type} - {Title}".

    Returns:
        One of 'balance', 'income', 'cashflow' or 'other'.
    """
    definition_parts = definition.split(' - ')
    # Only "Statement" roleTypes describe financial statements.
    if len(definition_parts) < 3 or definition_parts[1] != 'Statement':
        return 'other'
    # Normalise the title: uppercase, punctuation stripped, then a set of
    # words so the many membership tests below are O(1) instead of O(n).
    title = ' '.join(definition_parts[2:]).upper()
    words = set(parts_empty_re.sub('', parts_space_re.sub(' ', title)).split())
    # Skip parenthetical and other supporting statements (incl. the common
    # misspellings seen in real filings).
    if words & {'PARENTHETHICAL', 'PARENTHETCIAL', 'PARATHETICAL', 'PARATHENTICALS', 'PARENTHETIC', 'PARENTHETICAL', 'PARENTHETICALS', 'PARANTHETICAL', 'PARANTHETICALS', 'PARENTHICAL', 'PARENTHICALS', 'PARENTHTICAL', 'NOTE', 'DISCLOSURE', 'FOOTNOTES', 'DETAILS'}:
        return 'other'
    # Check for cash flow statement.
    if words & {'CASHFLOW', 'CASHFLOWS'}:
        return 'cashflow'
    if 'CASH' in words and words & {'FLOW', 'FLOWN', 'FLOWS', 'RECEIPTS'}:
        return 'cashflow'
    # Check for income statement.
    if (('INCOME' in words and not words & {'CHANGES', 'TAX', 'TAXES'})
            or words & {'PROFIT', 'EARNINGS', 'REVENUES', 'OPERATION', 'OPERATIONS', 'EXPENSES', 'LOSS', 'LOSSES'}):
        return 'income'
    # Other naming alternatives for a cash flow statement.
    if 'CHANGES' in words and (('NET' in words and 'ASSETS' in words) or words & {'CAPITAL', 'TRUST'}):
        return 'cashflow'
    # Check for balance sheet statement.
    if words & {'BALANCE', 'BALANCES'} and words & {'SHEET', 'SHEETS', 'SHEEETS'}:
        return 'balance'
    if words & {'FINANCIAL', 'POSITION', 'POSITIONS', 'CONDITION', 'CONDITIONS'}:
        return 'balance'
    if words & {'ASSETS', 'LIABILITIES'}:
        return 'balance'
    return 'other'
def definition_string(dts,linkrole):
    """Return the human-readable definition text of *linkrole*, or None.

    Looks up the roleType for the given linkrole in the DTS and returns None
    when either the roleType or its definition element is missing.
    """
    role_type = dts.role_type(linkrole)
    if not role_type:
        return None
    definition = role_type.definition
    return definition.value if definition else None
def classify_presentation_link_roles(dts):
    """Returns a dict containing a list of linkroles for each kind of financial statement.

    Keys are 'balance', 'income', 'cashflow' and 'other'.  When several
    linkroles map to the same statement kind, comprehensive-income and
    supplemental variants are dropped and the rest sorted by EDGAR sort code.
    """
    buckets = {kind: [] for kind in ('balance','income','cashflow','other')}
    for role in dts.presentation_link_roles():
        text = definition_string(dts,role)
        if text:
            buckets[classify_linkrole(text)].append(role)
    for kind in ('balance','income','cashflow'):
        if len(buckets[kind]) > 1:
            keep = []
            for role in buckets[kind]:
                # Title part of the definition, uppercased for matching.
                title = ' '.join(definition_string(dts,role).split(' - ')[2:]).upper()
                if not ('COMPREHESIVE' in title or 'COMPREHENSIVE' in title or 'SUPPLEMENTAL' in title):
                    keep.append(role)
            # EDGAR definitions begin with a numeric sort code.
            keep.sort(key=lambda role: int(definition_string(dts,role).split(' - ')[0]))
            buckets[kind] = keep
    return buckets
def find_required_context(instance,dei_ns):
    """Returns the required context for the main reporting period.

    EDGAR Filer Manual rule 6.5.20: Required Document Information elements
    must be reported at least in the Required Context, which is recognisable
    by an absent xbrli:segment element.  Returns None if no such context.
    """
    for fact in instance.facts.filter(xml.QName('DocumentPeriodEndDate',dei_ns)):
        if not fact.context.entity.segment:
            return fact.context
    return None
def find_required_instant_context(instance,instant):
    """Returns the required instant context (with absent xbrli:segment element) for the given date."""
    candidates = (
        ctx for ctx in instance.contexts
        if ctx.period.is_instant()
        and ctx.period.instant.value == instant
        and not ctx.entity.segment
    )
    return next(candidates, None)
def find_dimension_contexts(instance, context, dimensions):
    """Returns a list of contexts containing the given dimension values and having the same period as the given context."""
    contexts = []
    for candidate in instance.contexts:
        # Only consider contexts for the same period and entity.
        if candidate.period_aspect_value != context.period_aspect_value:
            continue
        if candidate.entity_identifier_aspect_value != context.entity_identifier_aspect_value:
            continue
        dim_values = list(candidate.dimension_aspect_values)
        if not dim_values:
            continue
        # Every dimension value must be one of the requested domain members.
        if all(dim.dimension in dimensions and dim.value in dimensions[dim.dimension] for dim in dim_values):
            contexts.append(candidate)
    return contexts
def find_fact_value(instance, concept, context):
    """Returns the fact value found for the given concept and context."""
    if not context:
        return None
    for fact in instance.facts.filter(concept, context):
        # Skip xsi:nil facts and report the first real value found.
        if not fact.xsi_nil:
            return fact.normalized_value
    return None
def find_numeric_value(instance, concept, context):
    """Returns the fact numeric value found for the given concept and context."""
    # Only numeric concepts reported in a known context can yield a value.
    if not (concept.is_numeric() and context):
        return None
    for fact in instance.facts.filter(concept, context):
        # Skip xsi:nil facts and report the first real value found.
        if not fact.xsi_nil:
            return fact.effective_numeric_value
    return None
def find_monetary_value(instance, concept, context, currency):
    """Returns the fact value found for the given concept, context and currency."""
    # Only monetary concepts reported in a known context can yield a value.
    if not (concept.is_monetary() and context):
        return None
    for fact in instance.facts.filter(concept, context):
        # Skip xsi:nil facts.
        if fact.xsi_nil:
            continue
        # Skip facts reported with any other currency unit.
        if fact.unit_aspect_value.iso4217_currency == currency:
            return int(fact.effective_numeric_value)
    return None
def descendants(network, root, include_self=False):
    """Returns a list of all descendant concepts from the given root (depth-first, pre-order)."""
    def _collect(concept, acc):
        acc.append(concept)
        for rel in network.relationships_from(concept):
            _collect(rel.target, acc)

    result = [root] if include_self else []
    for rel in network.relationships_from(root):
        _collect(rel.target, result)
    return result
def presentation_concepts(dts, linkrole):
    """Returns a tuple with a list of all primary items and a dict of dimension domain values featured in the network of presentation relationships for the given linkrole."""
    concepts = []
    dimensions = {}
    network = dts.presentation_base_set(linkrole).network_of_relationships()

    def _walk(concept, preferred_label_role, level):
        if isinstance(concept, xbrl.xdt.Dimension):
            # Record the whole domain of the dimension and stop descending.
            dimensions[concept] = set(descendants(network, concept))
            return
        if isinstance(concept, xbrl.xdt.Hypercube):
            # Hypercubes are structural only: not reported, and their
            # children keep the hypercube's own indentation level.
            level -= 1
        else:
            concepts.append((concept, preferred_label_role, level))
        for rel in network.relationships_from(concept):
            _walk(rel.target, rel.preferred_label, level + 1)

    for root in network.roots:
        _walk(root, None, 0)
    return concepts, dimensions
def concept_label(concept, label_role):
    """Returns the text of the first label with the given role, or None if the concept has none."""
    for label in concept.labels(label_role=label_role):
        return label.text
    return None
def is_total_role(preferred_label_role):
    """True if the preferred label role marks a total line item."""
    if not preferred_label_role:
        return False
    return 'total' in preferred_label_role.lower()
def is_negated_role(preferred_label_role):
    """True if the preferred label role marks a sign-negated line item."""
    if not preferred_label_role:
        return False
    return 'negated' in preferred_label_role.lower()
def is_start_role(preferred_label_role):
    """True if the preferred label role marks a period-start (opening balance) line item."""
    if not preferred_label_role:
        return False
    return 'periodstart' in preferred_label_role.lower()
def is_end_role(preferred_label_role):
    """True if the preferred label role marks a period-end (closing balance) line item."""
    if not preferred_label_role:
        return False
    return 'periodend' in preferred_label_role.lower()
def find_presentation_linkbase_values(filing, report, instance, linkrole, context, currency):
    """Returns a dict from concept name to fact value for all monetary concepts appearing in the presentation linkbase for the given linkrole.

    Each entry maps the concept name to a dict with keys 'pos' (index of the
    concept in presentation order), 'concept' (the concept object) and
    'value' (summed monetary value, int).  When args.store_fact_mappings is
    set, a row for every presented concept is also inserted into the 'facts'
    DB table.
    """
    dim_contexts = []
    dim_contexts_stock = []
    # Get all concepts and dimensions in the presentation linkbase for the given linkrole
    concepts, dimensions = presentation_concepts(instance.dts,linkrole)
    if dimensions:
        # Contexts broken down by any axis except legal entity / class of stock ...
        dim_contexts = find_dimension_contexts(instance,context,{dim: dimensions[dim] for dim in dimensions if dim.name not in ('LegalEntityAxis','StatementClassOfStockAxis')})
        # ... and contexts broken down by class of stock (handled separately below).
        dim_contexts_stock = find_dimension_contexts(instance,context,{dim: dimensions[dim] for dim in dimensions if dim.name == 'StatementClassOfStockAxis'})
    fact_values = {}
    for i, (concept, preferred_label_role, level) in enumerate(concepts):
        # Skip abstract and non-monetary concepts
        if concept.abstract:
            value = None
        elif concept.is_monetary():
            values = []
            # Try to find a value with the main required context
            value = find_monetary_value(instance, concept, context, currency)
            if value is not None:
                values.append(value)
            else:
                # If the concept is reported only with a dimensional breakdown, sum over all dimension domain members
                for dim_context in dim_contexts:
                    value = find_monetary_value(instance, concept, dim_context, currency)
                    if value is not None:
                        values.append(value)
            # Exception for StatementClassOfStockAxis dimension: Add the sum over all dimension domain members to the value reported without dimensions
            for dim_context in dim_contexts_stock:
                value = find_monetary_value(instance, concept, dim_context, currency)
                if value is not None:
                    values.append(value)
            value = sum(values) if values else None
            if value:
                fact_values[concept.name] = {'pos': i, 'concept': concept, 'value': value}
        else:
            # Non-monetary, non-abstract concepts: the value is only used for the
            # DB fact log below, never added to fact_values.
            # NOTE(review): concept.is_monetary() is False in this branch, so the
            # find_monetary_value calls below always return None — verify intent.
            if is_start_role(preferred_label_role) and context.period.is_duration():
                value = find_monetary_value(instance, concept, find_required_instant_context(instance, context.period.start_date.value), currency)
            elif is_end_role(preferred_label_role) and context.period.is_duration():
                value = find_monetary_value(instance, concept, find_required_instant_context(instance, context.period.end_date.value), currency)
            elif concept.is_numeric():
                value = find_numeric_value(instance, concept, context)
            else:
                value = find_fact_value(instance, concept, context)
        # Insert fact value to DB
        if args.store_fact_mappings:
            with db_connect() as con:
                con.execute('INSERT INTO facts VALUES(?,?,?,?,?,?,?,?,?,?,?,?)',(filing['accessionNumber'],report['kind'],i,None,concept_label(concept,preferred_label_role),concept.target_namespace,concept.name,str(value),level,concept.abstract,is_total_role(preferred_label_role),is_negated_role(preferred_label_role)))
                con.commit()
    return fact_values
def walk_calc_tree(filing,report,instance,network,concept,weight,fact_values,lineitem_values,allowed_lineitems,other_lineitem,visited_concepts):
    """Iterates over the concepts in the calculation tree and adds them to the appropriate report line items. If an unknown concept is encountered, it is added to the "other" line item of the current breakdown.

    Arguments:
        weight -- accumulated calculation weight (sign) applied to values.
        allowed_lineitems -- set of line items that may legally occur within
            the current breakdown; narrowed while descending the tree.
        other_lineitem -- catch-all line item for unmapped leaf concepts.
        visited_concepts -- set of already processed concepts, used to avoid
            double-counting concepts reachable through several subtrees.
    """
    # Each concept contributes at most once, even if reachable repeatedly.
    if concept in visited_concepts:
        visited_concepts.update(descendants(network,concept))
        return
    visited_concepts.add(concept)
    lineitem = None
    child_rels = list(network.relationships_from(concept))
    value = fact_values.get(concept.name)
    current_mapping = report['mappings'].get(concept.name)
    if current_mapping:
        # Pick the mapped line item, preferring one allowed in this breakdown.
        if 'add-to' in current_mapping:
            lineitem = current_mapping['add-to'][0]
            for x in current_mapping['add-to']:
                if x in allowed_lineitems:
                    lineitem = x
                    break
        elif 'total' in current_mapping:
            lineitem = current_mapping['total']
        if lineitem and lineitem not in allowed_lineitems:
            # log error
            filing_logger.warning('%s: Concept %s is not expected to occur within breakdown of %s',report['name'],concept.qname,next(network.relationships_to(concept)).source.qname)
            lineitem = None
        # Narrow the allowed set for the subtree below this concept.
        allowed_lineitems = allowed_lineitems & set(current_mapping['allowed'] if 'allowed' in current_mapping else [lineitem])
        if len(allowed_lineitems) == 0:
            # log error
            filing_logger.warning('%s: Concept %s is not expected to occur within breakdown of %s',report['name'],concept.qname,next(network.relationships_to(concept)).source.qname)
        if 'other' in current_mapping:
            other_lineitem = current_mapping.get('other')
        if value:
            # Unmapped leaf values fall back to the current catch-all item.
            if not lineitem and not child_rels:
                lineitem = other_lineitem
            if lineitem:
                # Insert mapping to DB
                if args.store_fact_mappings:
                    with db_connect() as con:
                        con.execute('UPDATE facts SET lineitem = ? WHERE accessionNumber = ? AND report = ? AND pos = ?',(lineitem,filing['accessionNumber'],report['kind'],value['pos']))
                        con.commit()
                # Totals overwrite the line item; regular items accumulate.
                if 'total' in current_mapping:
                    if lineitem in lineitem_values:
                        # error if already set
                        filing_logger.error('%s: Overwriting already set total value of concept %s',report['name'],concept.qname)
                    lineitem_values[lineitem] = weight * value['value']
                else:
                    lineitem_values[lineitem] += weight * value['value']
                # The subtree is already represented by this value; skip it.
                visited_concepts.update(descendants(network,concept))
                return
            elif not child_rels:
                # log error
                filing_logger.error('%s: Ignored value of inconsistent concept %s',report['name'],concept.qname)
    else:
        # Concept has no mapping in this report.
        if value and not child_rels:
            if other_lineitem:
                # Insert mapping to DB
                if args.store_fact_mappings:
                    with db_connect() as con:
                        con.execute('UPDATE facts SET lineitem = ? WHERE accessionNumber = ? AND report = ? AND pos = ?',(other_lineitem,filing['accessionNumber'],report['kind'],value['pos']))
                        con.commit()
                # log unknown concept
                filing_logger.warning('%s: Added value of unknown concept %s to %s',report['name'],concept.qname,other_lineitem)
                lineitem_values[other_lineitem] += weight * value['value']
            else:
                # log error
                filing_logger.error('%s: Ignored value of unknown concept %s',report['name'],concept.qname)
            visited_concepts.update(descendants(network,concept))
            return
    # Special-cased breakdowns: within Assets/Liabilities the children up to
    # and including the "current" subtree map to current line items, the
    # remaining children to non-current line items.
    if concept.name == 'Assets':
        rel_current = None
        for rel in child_rels:
            if rel.target.name == 'AssetsCurrent':
                rel_current = rel
                break
        if rel_current:
            allowed_lineitems = set(['cashAndCashEquivalents','shortTermInvestments','cashAndShortTermInvestments','receivablesNet','inventory','currentAssetsOther','currentAssetsTotal'])
            other_lineitem = 'currentAssetsOther'
        for rel in child_rels:
            walk_calc_tree(filing,report,instance,network,rel.target,weight*int(rel.weight),fact_values,lineitem_values,allowed_lineitems,other_lineitem,visited_concepts)
            if rel == rel_current:
                # Everything after the current-assets subtree is non-current.
                allowed_lineitems = set(['longTermInvestments','propertyPlantAndEquipmentGross','accumulatedDepreciation','propertyPlantAndEquipmentNet','goodwill','intangibleAssets','nonCurrrentAssetsOther','deferredLongTermAssetCharges','nonCurrentAssetsTotal'])
                other_lineitem = 'nonCurrrentAssetsOther'
        return
    if concept.name == 'Liabilities':
        rel_current = None
        for rel in child_rels:
            if rel.target.name == 'LiabilitiesCurrent':
                rel_current = rel
                break
        if rel_current:
            allowed_lineitems = set(['accountsPayable','shortTermDebt', 'currentLiabilitiesOther', 'currentLiabilitiesTotal'])
            other_lineitem = 'currentLiabilitiesOther'
        for rel in child_rels:
            walk_calc_tree(filing,report,instance,network,rel.target,weight*int(rel.weight),fact_values,lineitem_values,allowed_lineitems,other_lineitem,visited_concepts)
            if rel == rel_current:
                # Everything after the current-liabilities subtree is non-current.
                allowed_lineitems = set(['longTermDebt','capitalLeaseObligations', 'longTermDebtTotal', 'deferredLongTermLiabilityCharges', 'nonCurrentLiabilitiesOther', 'nonCurrentLiabilitiesTotal'])
                other_lineitem = 'nonCurrentLiabilitiesOther'
        return
    # Generic case: recurse into all children with accumulated weight.
    for rel in child_rels:
        walk_calc_tree(filing,report,instance,network,rel.target,weight*int(rel.weight),fact_values,lineitem_values,allowed_lineitems,other_lineitem,visited_concepts)
def calc_total_values(total_rules, lineitem_values, lineitem):
    """Calculates any missing (not directly reported) total values.

    Evaluates the summation rule for *lineitem*: summands listed after a
    '-' marker are subtracted, and summands that are themselves totals are
    resolved recursively.  Already present non-None values are kept as-is;
    if no summand has a value the total is set to None.
    """
    if lineitem in lineitem_values and lineitem_values[lineitem] is not None:
        # Directly reported value wins over the computed total.
        return
    total = None
    sign = 1
    for summand in total_rules[lineitem]:
        if summand == '-':
            # All summands after the marker are subtracted.
            sign = -1
            continue
        if summand in total_rules:
            # Summand is itself a computed total; resolve it first.
            calc_total_values(total_rules, lineitem_values, summand)
        if summand in lineitem_values and lineitem_values[summand] is not None:
            total = (total or 0) + sign * lineitem_values[summand]
    lineitem_values[lineitem] = total
def calc_report_values(filing,report,instance,linkrole,fact_values):
    """Returns a dict with the calculated values for each lineitem of the report.

    First walks the calculation linkbase via walk_calc_tree, then maps any
    leftover fact values that never appeared in the calculation tree, fills
    unreported line items with None and finally derives missing totals from
    the report's summation rules.
    """
    lineitem_values = Summations()
    visited_concepts = set()
    network = calculation_network(instance.dts,linkrole)
    if network:
        for root in network.roots:
            walk_calc_tree(filing,report,instance,network,root,1,fact_values,lineitem_values,set(report['lineitems']),None,visited_concepts)
    # Handle facts that were presented but never visited in the calculation tree.
    visited_concept_names = set(concept.name for concept in visited_concepts)
    for concept_name, value in fact_values.items():
        if concept_name not in visited_concept_names:
            lineitem = None
            current_mapping = report['mappings'].get(concept_name)
            if current_mapping:
                if 'add-to' in current_mapping:
                    lineitem = current_mapping['add-to'][0]
                elif 'total' in current_mapping:
                    lineitem = current_mapping['total']
                if lineitem:
                    # Only add when the line item has no value yet, so totals
                    # computed from the calculation tree are not distorted.
                    if lineitem not in lineitem_values:
                        # Insert mapping to DB
                        if args.store_fact_mappings:
                            with db_connect() as con:
                                con.execute('UPDATE facts SET lineitem = ? WHERE accessionNumber = ? AND report = ? AND pos = ?',(lineitem,filing['accessionNumber'],report['kind'],value['pos']))
                                con.commit()
                        # Treasury stock is stored with negative sign.
                        if lineitem == 'treasuryStockValue' and value['value'] > 0:
                            value['value'] *= -1
                        lineitem_values[lineitem] += value['value']
                    else:
                        # log error
                        filing_logger.warning('%s: Ignored value of concept %s outside of calculation tree to preserve totals',report['name'],value['concept'].qname)
                else:
                    # log unknown concept
                    filing_logger.warning('%s: Ignored value of unknown concept %s outside of calculation tree',report['name'],value['concept'].qname)
    # Set missing/not reported values to None
    for lineitem in report['lineitems']:
        if lineitem not in lineitem_values:
            lineitem_values[lineitem] = None
    # Derive any totals that were not reported directly.
    for lineitem in report['totals']:
        calc_total_values(report['totals'],lineitem_values,lineitem)
    return lineitem_values
def calculation_network(dts, linkrole):
    """Returns an object representing the network of calculation relationships for the given linkrole."""
    baseset = dts.calculation_base_set(linkrole)
    if not baseset:
        filing_logger.warning('No calculation linkbase found for linkrole %s',linkrole)
        return None
    return baseset.network_of_relationships()
def end_date(context):
    """Returns the end date specified in the context as used in financial statements (e.g. ending Dec. 31, 2015 instead of Jan. 1)."""
    period = context.period_aspect_value
    one_day = datetime.timedelta(days=1)
    if period.period_type == xbrl.PeriodType.INSTANT:
        return period.instant.date() - one_day
    if period.period_type == xbrl.PeriodType.START_END:
        return period.end.date() - one_day
    # Forever periods have no meaningful end date.
    return datetime.date.max
def calc_balance_sheet(filing, instance, context, linkroles):
    """Calculate balance sheet line items from XBRL instance and store in DB."""
    report = reports['balance']
    filing_logger.info('Calculate %s', report['name'])
    if not context:
        filing_logger.error('Skipped %s: No required context found', report['name'])
        return
    if not linkroles:
        filing_logger.error('Skipped %s: No linkrole found', report['name'])
        return
    if len(linkroles) > 1:
        filing_logger.warning('%s: Multiple linkroles found: %s', report['name'], ','.join(linkroles))
    linkrole = linkroles[0]
    # Collect reported fact values and aggregate them into report line items.
    fact_values = find_presentation_linkbase_values(filing, report, instance, linkrole, context, 'USD')
    values = calc_report_values(filing, report, instance, linkrole, fact_values)
    values.update({
        'accessionNumber': filing['accessionNumber'],
        'cikNumber': filing['cikNumber'],
        'endDate': end_date(context),
        'currencyCode': 'USD',
    })
    # Insert balance sheet into DB
    with db_connect() as con:
        db_fields = ['accessionNumber', 'cikNumber', 'endDate', 'currencyCode'] + report['lineitems']
        con.execute('INSERT INTO balance_sheet VALUES(%s)' % ','.join(['?'] * len(db_fields)), [values[key] for key in db_fields])
        con.commit()
def calc_income_statement(filing,instance,context,linkroles):
    """Calculate income line items from XBRL instance and store in DB.

    For 10-K filings an additional 3-month row is derived by subtracting the
    three preceding 10-Q quarters from the annual values.
    """
    filing_logger.info('Calculate %s',reports['income']['name'])
    if not context:
        filing_logger.error('Skipped %s: No required context found',reports['income']['name'])
        return
    if len(linkroles) == 0:
        filing_logger.error('Skipped %s: No linkrole found',reports['income']['name'])
        return
    elif len(linkroles) > 1:
        filing_logger.warning('%s: Multiple linkroles found: %s',reports['income']['name'],','.join(linkroles))
    linkrole = linkroles[0]
    # 10-K filings report a full year, 10-Q filings a single quarter.
    duration = 12 if filing['formType'] == '10-K' else 3
    # Find a segment-free duration context with the expected length in months
    # ending on the same date as the required context.
    # NOTE(review): other helpers access context.period_aspect_value; confirm
    # context2.period.aspect_value refers to the same aspect value object.
    contexts = [context2 for context2 in instance.contexts if context2.period.is_start_end() and context2.period.aspect_value.end == context.period.aspect_value.end and round((context2.period.aspect_value.end-context2.period.aspect_value.start).days/30) == duration and not context2.entity.segment]
    if not contexts:
        filing_logger.error('%s: No required context found with %d month duration',reports['income']['name'],duration)
        return
    context = contexts[0]
    fact_values = find_presentation_linkbase_values(filing,reports['income'],instance,linkrole,context,'USD')
    values = calc_report_values(filing,reports['income'],instance,linkrole,fact_values)
    values.update({'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'duration': duration, 'endDate': end_date(context), 'currencyCode': 'USD'})
    # Expense line items are stored with negative sign.
    for lineitem in ('costOfRevenue','researchAndDevelopment','sellingGeneralAndAdministrative','nonRecurring','operatingExpensesOther','operatingExpensesTotal','interestExpense','incomeTaxExpense','minorityInterest','preferredStockAndOtherAdjustments'):
        if values[lineitem]:
            values[lineitem] *= -1
    # Insert income statement into DB
    with db_connect() as con:
        db_fields = ['accessionNumber','cikNumber','endDate','duration','currencyCode'] + reports['income']['lineitems']
        con.execute('INSERT INTO income_statement VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])
        con.commit()
        # Calculate data for the last quarter from the annual report
        if filing['formType'] == '10-K':
            previous_year_date = datetime.date(filing['period'].year-1,filing['period'].month,calendar.monthrange(filing['period'].year-1,filing['period'].month)[1])
            previous_quarters = con.execute('SELECT * FROM income_statement WHERE duration = 3 AND accessionNumber IN (SELECT accessionNumber FROM filings WHERE cikNumber = ? AND formType = "10-Q" AND period BETWEEN ? AND ?)',(filing['cikNumber'],previous_year_date,filing['period'])).fetchall()
            previous_quarters = {previous_quarter[2]: previous_quarter for previous_quarter in previous_quarters} # ignore duplicate filings
            # Only derivable when all three preceding quarters are available.
            if len(previous_quarters) == 3:
                field_offset = len(db_fields)-len(reports['income']['lineitems'])
                for i, lineitem in enumerate(reports['income']['lineitems']):
                    if values[lineitem] is not None:
                        for previous_quarter in previous_quarters.values():
                            if previous_quarter[i+field_offset]:
                                values[lineitem] -= previous_quarter[i+field_offset]
                values['duration'] = 3
                con.execute('INSERT INTO income_statement VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])
                con.commit()
def calc_cashflow_statement(filing,instance,context,linkroles):
    """Calculate cashflow line items from XBRL instance and store in DB.

    Besides the row for the full reported period, a single-quarter (3 month)
    row is derived by subtracting previously stored quarterly rows: for a
    cumulative 10-Q (6/9 months) the preceding quarters of the same year,
    for a 10-K the three preceding 10-Q quarters.
    """
    filing_logger.info('Calculate %s',reports['cashflow']['name'])
    if not context:
        filing_logger.error('Skipped %s: No required context found',reports['cashflow']['name'])
        return
    if len(linkroles) == 0:
        filing_logger.error('Skipped %s: No linkrole found',reports['cashflow']['name'])
        return
    elif len(linkroles) > 1:
        filing_logger.warning('%s: Multiple linkroles found: %s',reports['cashflow']['name'],','.join(linkroles))
    linkrole = linkroles[0]
    # Reported period length in months (cash flow is cumulative in 10-Qs).
    duration = round((context.period_aspect_value.end.date()-context.period_aspect_value.start.date()).days/30)
    fact_values = find_presentation_linkbase_values(filing,reports['cashflow'],instance,linkrole,context,'USD')
    values = calc_report_values(filing,reports['cashflow'],instance,linkrole,fact_values)
    values.update({'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'duration': duration, 'endDate': end_date(context), 'currencyCode': 'USD'})
    # Insert cash flow statement into DB
    with db_connect() as con:
        db_fields = ['accessionNumber','cikNumber','endDate','duration','currencyCode'] + reports['cashflow']['lineitems']
        con.execute('INSERT INTO cashflow_statement VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])
        con.commit()
        previous_quarters = None
        # Calculate data for the current quarter
        if filing['formType'] == '10-Q' and duration > 3:
            # Start of the cumulative period: step back `duration` months.
            month, year = filing['period'].month, filing['period'].year
            month -= duration
            if month < 1:
                month += 12
                year -= 1
            previous_year_date = datetime.date(year,month,calendar.monthrange(year,month)[1])
            previous_quarters = con.execute('SELECT * FROM cashflow_statement WHERE duration = 3 AND accessionNumber IN (SELECT accessionNumber FROM filings WHERE cikNumber = ? AND formType = "10-Q" AND period BETWEEN ? and ?)',(filing['cikNumber'],previous_year_date,filing['period'])).fetchall()
            if len(previous_quarters) != (duration/3 - 1):
                filing_logger.error('%s: Missing previous quarterly reports to calculate quarterly data from compounded quarterly report',reports['cashflow']['name'])
                previous_quarters = None
        # Calculate data for the last quarter of the financial year from the annual report
        elif filing['formType'] == '10-K':
            previous_year_date = datetime.date(filing['period'].year-1,filing['period'].month,calendar.monthrange(filing['period'].year-1,filing['period'].month)[1])
            previous_quarters = con.execute('SELECT * FROM cashflow_statement WHERE duration = 3 AND accessionNumber IN (SELECT accessionNumber FROM filings WHERE cikNumber = ? AND formType = "10-Q" AND period BETWEEN ? and ?)',(filing['cikNumber'],previous_year_date,filing['period'])).fetchall()
            if len(previous_quarters) != 3:
                filing_logger.error('%s: Missing previous quarterly reports to calculate quarterly data from annual report',reports['cashflow']['name'])
                previous_quarters = None
        if previous_quarters:
            # Subtract the already stored quarters to isolate the last quarter.
            field_offset = len(db_fields)-len(reports['cashflow']['lineitems'])
            for i, lineitem in enumerate(reports['cashflow']['lineitems']):
                if values[lineitem] is not None:
                    for previous_quarter in previous_quarters:
                        if previous_quarter[i+field_offset]:
                            values[lineitem] -= previous_quarter[i+field_offset]
            values['duration'] = 3
            con.execute('INSERT INTO cashflow_statement VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])
            con.commit()
def dbvalue(dbvalues, report, lineitem, avg_over_duration):
    """Returns a single line-item value from the fetched DB values.

    A leading '-' on *lineitem* negates the value.  For balance-sheet items
    with avg_over_duration set, the average of the opening and closing
    balance is returned.
    """
    weight = -1 if lineitem[0] == '-' else 1
    if weight < 0:
        lineitem = lineitem[1:]
    if report != 'balance':
        return weight * dbvalues[report][lineitem]
    if avg_over_duration:
        return weight * (dbvalues['previous_balance'][lineitem] + dbvalues['balance'][lineitem]) / 2
    return weight * dbvalues['balance'][lineitem]
def calc_ratios_mrq(filing):
    """Computes the ratios for the most recent quarter (mrq), annualized.

    Flow values (income/cashflow) from a 10-Q are multiplied by 4 to
    annualize them; balance values are taken as-is, optionally averaged over
    opening and closing balance (see dbvalue).  Results are stored in the
    'ratios' table with kind 'mrq'.
    """
    dbvalues = {
        'previous_balance': Summations(),
        'balance': Summations(),
        'income': Summations(),
        'cashflow': Summations()
    }
    with db_connect() as con:
        # Factor 4 annualizes quarterly income/cashflow values.
        factor = 4 if filing['formType'] == '10-Q' else 1
        # Fetch end balance sheet values from DB
        # (line items start after the 4 metadata columns of balance_sheet).
        for row in con.execute('SELECT * FROM balance_sheet WHERE accessionNumber = ?',(filing['accessionNumber'],)):
            for i, lineitem in enumerate(reports['balance']['lineitems']):
                dbvalues['balance'][lineitem] += row[i+4] if row[i+4] else 0
        # Fetch start balance sheet values from DB
        previous_filing = con.execute('SELECT accessionNumber FROM filings WHERE cikNumber = ? AND period < ? ORDER BY period DESC',(filing['cikNumber'],filing['period'])).fetchone()
        if previous_filing:
            for row in con.execute('SELECT * FROM balance_sheet WHERE accessionNumber = ?',(previous_filing[0],)):
                for i, lineitem in enumerate(reports['balance']['lineitems']):
                    dbvalues['previous_balance'][lineitem] += row[i+4] if row[i+4] else 0
        # Fetch income statement values from DB
        # (line items start after the 5 metadata columns of income_statement).
        for row in con.execute('SELECT * FROM income_statement WHERE accessionNumber = ?',(filing['accessionNumber'],)):
            for i, lineitem in enumerate(reports['income']['lineitems']):
                dbvalues['income'][lineitem] += factor*row[i+5] if row[i+5] else 0
        # Fetch cashflow statement values from DB
        for row in con.execute('SELECT * FROM cashflow_statement WHERE accessionNumber = ?',(filing['accessionNumber'],)):
            for i, lineitem in enumerate(reports['cashflow']['lineitems']):
                dbvalues['cashflow'][lineitem] += factor*row[i+5] if row[i+5] else 0
        values = {'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'endDate': filing['period'], 'kind': 'mrq'}
        for lineitem, ratio in reports['ratios']['formulas'].items():
            # Check if the average in assets/liabilities over the whole period should be used
            referenced_reports = set(op['report'] for op in itertools.chain(ratio['numerator'],ratio['denominator']))
            avg_over_duration = len(referenced_reports) > 1 and 'balance' in referenced_reports
            # Calculate the ratio
            numerator = sum(dbvalue(dbvalues,op['report'],op['lineitem'],avg_over_duration) for op in ratio['numerator'])
            denominator = sum(dbvalue(dbvalues,op['report'],op['lineitem'],avg_over_duration) for op in ratio['denominator'])
            values[lineitem] = numerator / denominator if denominator else None
        # Insert ratios into DB
        db_fields = ['accessionNumber','cikNumber','endDate','kind'] + reports['ratios']['lineitems']
        con.execute('INSERT INTO ratios VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])
        con.commit()
def calc_ratios_ttm(filing):
    """Computes the ratios for the trailing twelve months (ttm).

    Sums the quarterly income/cashflow rows of all filings from the last
    twelve months and averages the balance values over the same filings.
    Results are stored in the 'ratios' table with kind 'ttm'.
    """
    dbvalues = {
        'balance': Summations(),
        'income': Summations(),
        'cashflow': Summations()
    }
    previous_year_date = datetime.date(filing['period'].year-1,filing['period'].month,calendar.monthrange(filing['period'].year-1,filing['period'].month)[1])
    with db_connect() as con:
        # Fetch filings for the last year
        previous_filings = con.execute('SELECT * FROM filings WHERE cikNumber = ? AND period > ? AND period <= ?',(filing['cikNumber'],previous_year_date,filing['period'])).fetchall()
        for previous_filing in previous_filings:
            # Fetch balance sheet values from DB (averaged over the 4 quarters).
            for row in con.execute('SELECT * FROM balance_sheet WHERE accessionNumber = ?',(previous_filing[0],)):
                for i, lineitem in enumerate(reports['balance']['lineitems']):
                    dbvalues['balance'][lineitem] += row[i+4]/4 if row[i+4] else 0
            # Fetch income statement values from DB (quarterly rows only).
            for row in con.execute('SELECT * FROM income_statement WHERE accessionNumber = ? AND duration = 3',(previous_filing[0],)):
                for i, lineitem in enumerate(reports['income']['lineitems']):
                    dbvalues['income'][lineitem] += row[i+5] if row[i+5] else 0
            # Fetch cashflow statement values from DB (quarterly rows only).
            for row in con.execute('SELECT * FROM cashflow_statement WHERE accessionNumber = ? AND duration = 3',(previous_filing[0],)):
                for i, lineitem in enumerate(reports['cashflow']['lineitems']):
                    dbvalues['cashflow'][lineitem] += row[i+5] if row[i+5] else 0
        values = {'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'endDate': filing['period'], 'kind': 'ttm'}
        for lineitem, ratio in reports['ratios']['formulas'].items():
            # Calculate the ratio
            numerator = sum(dbvalue(dbvalues,op['report'],op['lineitem'],False) for op in ratio['numerator'])
            denominator = sum(dbvalue(dbvalues,op['report'],op['lineitem'],False) for op in ratio['denominator'])
            values[lineitem] = numerator / denominator if denominator else None
        # Insert ratios into DB
        db_fields = ['accessionNumber','cikNumber','endDate','kind'] + reports['ratios']['lineitems']
        con.execute('INSERT INTO ratios VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])
        con.commit()
def process_filing(filing):
    """Load XBRL instance and store extracted data to DB.

    Skips filings already present in the DB (unless --recompute is set),
    replaces amended filings, then derives balance sheet, income statement,
    cash flow statement and ratios from the XBRL instance.
    """
    # Store current filing in thread-local storage
    # (FilingLogAdapter reads it to prefix log messages).
    tls.filing = filing
    filing_logger.info('Start processing filing')
    with db_connect() as con:
        # Check if the filing was already processed
        if con.execute('SELECT accessionNumber FROM filings WHERE accessionNumber = ?',(filing['accessionNumber'],)).fetchone():
            if not args.recompute:
                filing_logger.info('Skipped already processed filing')
                return
            # --recompute: drop all previously derived rows before re-processing.
            filing_logger.info('Deleting existing filing %s',filing['accessionNumber'])
            con.execute('DELETE FROM filings WHERE accessionNumber = ?',(filing['accessionNumber'],))
            con.execute('DELETE FROM facts WHERE accessionNumber = ?',(filing['accessionNumber'],))
            con.execute('DELETE FROM balance_sheet WHERE accessionNumber = ?',(filing['accessionNumber'],))
            con.execute('DELETE FROM income_statement WHERE accessionNumber = ?',(filing['accessionNumber'],))
            con.execute('DELETE FROM cashflow_statement WHERE accessionNumber = ?',(filing['accessionNumber'],))
            con.execute('DELETE FROM ratios WHERE accessionNumber = ?',(filing['accessionNumber'],))
            con.commit()
        # Handle amendment filings
        if filing['formType'].endswith('/A'):
            # Strip the '/A' suffix so the filing is stored under the base form type.
            filing['formType'] = filing['formType'][:-2]
            # Delete the previous amended filing
            for row in con.execute('SELECT accessionNumber FROM filings WHERE cikNumber = ? and period = ?',(filing['cikNumber'],filing['period'])):
                filing_logger.info('Deleting amended filing %s',row[0])
                con.execute('DELETE FROM filings WHERE accessionNumber = ?',(row[0],))
                con.execute('DELETE FROM facts WHERE accessionNumber = ?',(row[0],))
                con.execute('DELETE FROM balance_sheet WHERE accessionNumber = ?',(row[0],))
                con.execute('DELETE FROM income_statement WHERE accessionNumber = ?',(row[0],))
                con.execute('DELETE FROM cashflow_statement WHERE accessionNumber = ?',(row[0],))
                con.execute('DELETE FROM ratios WHERE accessionNumber = ?',(row[0],))
                con.commit()
    # Load XBRL instance from zip archive
    instance, log = feed_tools.load_instance(filing)
    filing['errors'] = '\n'.join(error.text for error in log.errors) if log.has_errors() else None
    #filing['warnings'] = '\n'.join(error.text for error in itertools.chain(log.warnings, log.inconsistencies)) if log.has_warnings() or log.has_inconsistencies() else None
    # Write filing metadata into DB
    with db_connect() as con:
        con.execute('INSERT INTO filings VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',[filing[key] for key in ('accessionNumber','cikNumber','companyName','formType','filingDate','fileNumber','acceptanceDatetime','period','assistantDirector','assignedSic','otherCikNumbers','fiscalYearEnd','instanceUrl','errors')])
        con.commit()
    if instance:
        # Find the appropriate linkroles for the main financial statements
        linkroles = classify_presentation_link_roles(instance.dts)
        # Find the required contexts for the main reporting period
        usgaap_ns, dei_ns = find_std_namespaces(instance.dts)
        required_context = find_required_context(instance,dei_ns)
        if required_context and required_context.period.is_start_end():
            # Check duration of required context
            duration = round((required_context.period_aspect_value.end.date()-required_context.period_aspect_value.start.date()).days/30)
            if filing['formType'] == '10-K' and duration != 12:
                filing_logger.warning('10-K Required Context has duration of %d months',duration)
            elif filing['formType'] == '10-Q' and (duration != 3 and duration != 6 and duration != 9):
                filing_logger.warning('10-Q Required Context has duration of %d months',duration)
            # Find an instant context for the period end date
            required_instant_context = find_required_instant_context(instance,required_context.period.end_date.value)
            # Calculate and store values for the main financial statements to DB
            calc_balance_sheet(filing,instance,required_instant_context,linkroles['balance'])
            calc_income_statement(filing,instance,required_context,linkroles['income'])
            calc_cashflow_statement(filing,instance,required_context,linkroles['cashflow'])
            # Calculate and store ratios to DB
            calc_ratios_mrq(filing)
            calc_ratios_ttm(filing)
        else:
            filing_logger.error('Missing or non-duration required context encountered')
    else:
        filing_logger.error('Invalid XBRL instance:\n%s',filing['errors'])
    filing_logger.info('Finished processing filing')
def process_filings_for_cik(cik, filings):
    """Process all filings of a single company (CIK), isolating failures so
    one broken filing does not abort the remaining ones."""
    #if filings[0]['companyName'] not in ('JOHNSON & JOHNSON','INTERNATIONAL BUSINESS MACHINES CORP','EXXON MOBIL CORP','CARNIVAL CORP','Google Inc.','AMAZON COM INC','APPLE INC','MICROSOFT CORP','ORACLE CORP','General Motors Co','GENERAL ELECTRIC CO','WAL MART STORES INC'):
    #    return
    for filing in filings:
        try:
            process_filing(filing)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt/
            # SystemExit and prevented a clean shutdown of worker threads.
            logger.exception('Failed processing filing %s', filing['accessionNumber'])
def process_filings(filings):
    """Distribute processing of filings over multiple threads/cores.

    *filings* maps a CIK number to the list of filing metadata dicts for
    that company; each CIK is submitted as one task to a thread pool of
    ``args.max_threads`` workers.
    """
    logger.info('Start processing 10-K/10-Q filings (count=%d)',sum(len(x) for x in filings.values()))
    with concurrent.futures.ThreadPoolExecutor(max_workers=args.max_threads) as executor:
        futures = [executor.submit(process_filings_for_cik,cik,filings[cik]) for cik in filings]
        for future in concurrent.futures.as_completed(futures):
            try:
                future.result()
            except Exception:
                # Catch Exception (not a bare except) so Ctrl-C still interrupts the run.
                logger.exception('Exception occurred')
    logger.info('Finished processing 10-K/10-Q filings')
class FilingLogAdapter(logging.LoggerAdapter):
    """Logger adapter that prefixes every message with identifying data
    (ticker, CIK, accession number) of the filing currently processed on
    this thread, read from the module's thread-local storage ``tls``."""
    def process(self, msg, kwargs):
        current = tls.filing
        prefix = '[{} {} {}] '.format(current['ticker'], current['cikNumber'], current['accessionNumber'])
        return prefix + msg, kwargs
def setup_logging(log_file):
    """Setup the Python logging infrastructure.

    When *log_file* is given, log records are written (truncating) to that
    file; otherwise they go to the console. Also initializes the module
    globals ``tls`` (thread-local storage), ``logger`` and ``filing_logger``.
    """
    global tls,logger,filing_logger
    tls = threading.local()
    config = {'format': '%(asctime)s %(levelname)s %(message)s', 'level': logging.INFO}
    if log_file:
        config.update(filename=log_file, filemode='w')
    logging.basicConfig(**config)
    logger = logging.getLogger('default')
    filing_logger = FilingLogAdapter(logger,{})
def parse_args(daily_update=False, argv=None):
    """Returns the arguments and options passed to the script.

    daily_update -- when True, omit the RSS feed positional argument and add
                    the options specific to the daily update mode.
    argv         -- optional explicit list of argument strings; defaults to
                    ``sys.argv[1:]`` (passing a list makes this function
                    testable without patching ``sys.argv``).
    """
    parser = argparse.ArgumentParser(description='Process XBRL filings and extract financial data and ratios into a DB.')
    if not daily_update:
        parser.add_argument('rss_feeds', metavar='RSS', nargs='*', help='EDGAR RSS feed file')
        parser.add_argument('--create-tables', default=False, action='store_true', help='specify very first time to create empty DB tables')
    parser.add_argument('--db', metavar='DSN', default='sec.db3', dest='db_name', help='specify the target DB datasource name or file')
    parser.add_argument('--db-driver', default='sqlite', choices=['sqlite','odbc'], help='specify the DB driver to use')
    parser.add_argument('--log', metavar='LOGFILE', dest='log_file', help='specify output log file')
    parser.add_argument('--threads', metavar='MAXTHREADS', type=int, default=8, dest='max_threads', help='specify max number of threads')
    parser.add_argument('--cik', metavar='CIK', type=int, nargs='*', help='limit processing to only the specified CIK number')
    parser.add_argument('--recompute', default=False, action='store_true', help='recompute and replace filings already present in DB')
    parser.add_argument('--store-fact-mappings', default=False, action='store_true', help='stores original XBRL fact values and mappings to line items in DB')
    if daily_update:
        parser.add_argument('--retries', type=int, default=3, dest='max_retries', help='specify max number of retries to download a specific filing')
        parser.add_argument('--update-tickers', default=False, action='store_true', help='updates Ticker/CIK in --db')
    return parser.parse_args(argv)
def build_secdb(feeds):
    """Drive the whole DB build: set up logging and the DB connection,
    optionally create the schema, then process every 10-K/10-Q filing
    found in the given RSS feed files."""
    # Set up the Python logging framework first so everything below can log.
    setup_logging(args.log_file)
    tickers = load_ticker_symbols()
    # Set up the DB connection factory (shared module-wide).
    global db_connect
    db_connect = setup_db_connect(args.db_driver,args.db_name)
    # Create all required DB tables (only if the option exists and was given).
    if getattr(args, 'create_tables', False):
        create_db_tables()
        create_db_indices()
        insert_ticker_symbols(tickers)
    # Process all filings in the given RSS feeds one month after another.
    for filepath in feeds:
        # Load EDGAR filing metadata from the RSS feed, keeping only 10-K/10-Q
        # filings of companies with an assigned ticker symbol.
        filings = {}
        for filing in feed_tools.read_feed(filepath):
            cik = filing['cikNumber']
            # Google to Alphabet reorganization
            if cik == 1288776:
                cik = filing['cikNumber'] = 1652044
            if args.cik is not None and cik not in args.cik:
                continue
            if filing['formType'] not in ('10-K','10-K/A','10-Q','10-Q/A') or cik not in tickers:
                continue
            filing['ticker'] = tickers[cik]
            filings.setdefault(cik,[]).append(filing)
        # Process the selected XBRL filings
        process_filings(filings)
def collect_feeds(args):
    """Returns a generator of the resolved, absolute RSS file paths.

    Each entry of ``args.rss_feeds`` is treated as a glob pattern and
    expanded after being made absolute.
    """
    for pattern in args.rss_feeds:
        yield from glob.iglob(os.path.abspath(pattern))
def main():
    """Script entry point: parse the command line and build the SEC DB."""
    # Store parsed options module-wide; many helpers read the global ``args``.
    global args
    args = parse_args()
    feeds = collect_feeds(args)
    build_secdb(feeds)
if __name__ == '__main__':
    # Time the complete run and report the elapsed wall-clock seconds.
    elapsed = timeit.timeit(main, number=1)
    logger.info('Finished in %fs', elapsed)
| [
"# Copyright 2015 Altova GmbH\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"__copyright__ = 'Copyright 2015 Altova GmbH'\n",
"__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'\n",
"\n",
"# Calculates financial statements and ratios from SEC filings in the given RSS feed and adds them to a database.\n",
"#\n",
"# Usage:\n",
"# raptorxmlxbrl script scripts/build_secdb.py feeds/xbrlrss-2015-*.xml --db=sec2015.db3\n",
"\n",
"import feed_tools\n",
"import re,csv,json,glob,enum,datetime,argparse,logging,itertools,os.path,urllib,threading,concurrent.futures,timeit,calendar\n",
"from altova_api.v2 import xml, xsd, xbrl\n",
"\n",
"class Summations(dict):\n",
" def __missing__(self, key):\n",
" return 0\n",
"\n",
"gsRootDir = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])\n",
"\n",
"reports = json.load(open(os.path.join(gsRootDir,'data','reports.json')))\n",
"reports['balance'].update({'kind': 'balance', 'name': 'Balance Sheet', 'mappings': json.load(open(os.path.join(gsRootDir,'data','balance_mappings.json')))})\n",
"reports['income'].update({'kind': 'income', 'name': 'Income Statement', 'mappings': json.load(open(os.path.join(gsRootDir,'data','income_mappings.json')))})\n",
"reports['cashflow'].update({'kind': 'cashflow', 'name': 'Cashflow Statement', 'mappings': json.load(open(os.path.join(gsRootDir,'data','cashflow_mappings.json')))})\n",
"\n",
"\n",
"def setup_db_connect(driver,name):\n",
" \"\"\"Returns a function object that can be used to connect to the DB. The function doesn't require any additional parameters as it stores the DB name/connection string using closure.\"\"\"\n",
" logger.info('Using %s DB with DSN=%s',driver,name)\n",
" def connect_sqlite():\n",
" con = sqlite3.connect(name,isolation_level=None)\n",
" con.execute('PRAGMA journal_mode=WAL')\n",
" return con\n",
" def connect_odbc():\n",
" return pyodbc.connect(name)\n",
" if driver == 'sqlite':\n",
" import sqlite3\n",
" return connect_sqlite\n",
" elif driver == 'odbc':\n",
" import pyodbc\n",
" return connect_odbc\n",
"\n",
"def create_db_tables():\n",
" \"\"\"Create all the necessary DB tables.\"\"\"\n",
" logger.info('Creating DB tables')\n",
"\n",
" try:\n",
" with db_connect() as con:\n",
" cur = con.cursor()\n",
"\n",
" cur.execute(\"\"\"\n",
"CREATE TABLE tickers (\n",
" symbol VARCHAR(10) PRIMARY KEY,\n",
" cikNumber INTEGER\n",
");\"\"\")\n",
"\n",
" cur.execute(\"\"\"\n",
"CREATE TABLE filings (\n",
" accessionNumber CHAR(20) PRIMARY KEY,\n",
" cikNumber INTEGER,\n",
" companyName TEXT,\n",
" formType TEXT,\n",
" filingDate DATETIME,\n",
" fileNumber TEXT,\n",
" acceptanceDatetime DATETIME,\n",
" period DATE,\n",
" assistantDirector TEXT,\n",
" assignedSic INTEGER,\n",
" otherCikNumbers TEXT,\n",
" fiscalYearEnd INTEGER,\n",
" instanceUrl TEXT,\n",
" errors TEXT\n",
");\"\"\")\n",
"\n",
" cur.execute(\"\"\"\n",
"CREATE TABLE facts (\n",
" accessionNumber CHAR(20),\n",
" report TEXT,\n",
" pos SMALLINT,\n",
" lineitem TEXT,\n",
" label TEXT,\n",
" namespace TEXT,\n",
" name TEXT,\n",
" value TEXT,\n",
" level SMALLINT,\n",
" is_abstract BOOLEAN,\n",
" is_total BOOLEAN,\n",
" is_negated BOOLEAN,\n",
" PRIMARY KEY (accessionNumber,report,pos)\n",
");\"\"\")\n",
"\n",
" cur.execute(\"\"\"\n",
"CREATE TABLE balance_sheet (\n",
" accessionNumber CHAR(20) PRIMARY KEY,\n",
" cikNumber INTEGER,\n",
" endDate DATE,\n",
" currencyCode CHAR(3),\n",
" %s\n",
");\"\"\" % ','.join(key+' BIGINT' for key in reports['balance']['lineitems']))\n",
"\n",
" cur.execute(\"\"\"\n",
"CREATE TABLE income_statement (\n",
" accessionNumber CHAR(20),\n",
" cikNumber INTEGER,\n",
" endDate DATE,\n",
" duration INTEGER,\n",
" currencyCode CHAR(3),\n",
" %s,\n",
" PRIMARY KEY (accessionNumber,duration)\n",
");\"\"\" % ','.join(key+' BIGINT' for key in reports['income']['lineitems']))\n",
"\n",
" cur.execute(\"\"\"\n",
"CREATE TABLE cashflow_statement (\n",
" accessionNumber CHAR(20),\n",
" cikNumber INTEGER,\n",
" endDate DATE,\n",
" duration INTEGER,\n",
" currencyCode CHAR(3),\n",
" %s,\n",
" PRIMARY KEY (accessionNumber,duration)\n",
");\"\"\" % ','.join(key+' BIGINT' for key in reports['cashflow']['lineitems']))\n",
"\n",
" cur.execute(\"\"\"\n",
"CREATE TABLE ratios (\n",
" accessionNumber CHAR(20),\n",
" cikNumber INTEGER,\n",
" endDate DATE,\n",
" kind CHAR(3),\n",
" %s,\n",
" PRIMARY KEY (accessionNumber,kind)\n",
");\"\"\" % ','.join(key+' REAL' for key in reports['ratios']['lineitems']))\n",
"\n",
" con.commit()\n",
" except:\n",
" logger.exception('Failed creating DB tables')\n",
" raise RuntimeError('Failed creating DB tables')\n",
"\n",
"def create_db_indices():\n",
" \"\"\"Create all the necessary DB indices.\"\"\"\n",
" logger.info('Creating DB indices')\n",
"\n",
" try:\n",
" with db_connect() as con:\n",
" cur = con.cursor()\n",
"\n",
" # Create indices\n",
" cur.execute('CREATE INDEX income_cik ON income_statement (cikNumber);')\n",
" cur.execute('CREATE INDEX balance_cik ON balance_sheet (cikNumber);')\n",
" cur.execute('CREATE INDEX cashflow_cik ON cashflow_statement (cikNumber);')\n",
" cur.execute('CREATE INDEX ratios_cik ON ratios (cikNumber);')\n",
" cur.execute('CREATE INDEX filings_cik ON filings (cikNumber);')\n",
" cur.execute('CREATE INDEX filings_company ON filings (companyName);')\n",
"\n",
" con.commit()\n",
" except:\n",
" logger.exception('Failed creating DB indices')\n",
" raise RuntimeError('Failed creating DB indices')\n",
"\n",
"def load_ticker_symbols():\n",
" \"\"\"Returns a dict of CIK to ticker symbol.\"\"\"\n",
" logger.info('Loading ticker file %s','tickers.csv')\n",
"\n",
" tickers = {}\n",
" with open(os.path.join(gsRootDir,'data','tickers.csv'),'r') as f:\n",
" reader = csv.reader(f)\n",
" for row in reader:\n",
" tickers[int(row[1])] = row[0].split('^')[0]\n",
" return tickers\n",
"\n",
"def insert_ticker_symbols(tickers):\n",
" \"\"\"Writes ticker symbol and CIK pairs to the DB.\"\"\"\n",
" with db_connect() as con:\n",
" for cik, symbol in tickers.items():\n",
" con.execute('INSERT INTO tickers VALUES (?,?)',(symbol,cik))\n",
"\n",
"re_usgaap = re.compile('^http://[^/]+/us-gaap/')\n",
"re_dei = re.compile('^http://xbrl.us/dei/|^http://xbrl.sec.gov/dei/')\n",
"\n",
"def find_std_namespaces(dts):\n",
" \"\"\"Returns a tuple with the us-gaap and dei namespaces imported in this company extension taxonomy.\"\"\"\n",
" usgaap_ns, dei_ns = None, None\n",
" # Iterate through all taxonomy schemas within the DTS and compare the target namespaces\n",
" for taxonomy in dts.taxonomy_schemas:\n",
" if re_usgaap.match(taxonomy.target_namespace):\n",
" usgaap_ns = taxonomy.target_namespace\n",
" elif re_dei.match(taxonomy.target_namespace):\n",
" dei_ns = taxonomy.target_namespace\n",
" return (usgaap_ns, dei_ns)\n",
"\n",
"parts_empty_re = re.compile(r\"[`,'.]\")\n",
"parts_space_re = re.compile(r\"[\\][{}():/&-]\")\n",
"\n",
"def classify_linkrole(definition):\n",
" \"\"\"Returns the type of report based on the roleType definition string.\"\"\"\n",
" # According to EDGAR Filer Manual rule 6.7.12 definition strings must follow the following syntax:\n",
" # {SortCode} - {Type} - {Title}\n",
" # where {Type} is one of Disclosure, Document, Schedule or Statement\n",
" definition_parts = definition.split(' - ')\n",
" if len(definition_parts) >= 3 and definition_parts[1] == 'Statement':\n",
" # Remove any punctuation signs\n",
" words = parts_empty_re.sub('', parts_space_re.sub(' ', ' '.join(definition_parts[2:]).upper())).split()\n",
"\n",
" # Skip any parenthetical and other supporting statements\n",
" if any(word in words for word in ('PARENTHETHICAL', 'PARENTHETCIAL', 'PARATHETICAL', 'PARATHENTICALS', 'PARENTHETIC', 'PARENTHETICAL', 'PARENTHETICALS', 'PARANTHETICAL', 'PARANTHETICALS', 'PARENTHICAL', 'PARENTHICALS', 'PARENTHTICAL', 'NOTE', 'DISCLOSURE', 'FOOTNOTES', 'DETAILS')):\n",
" return 'other'\n",
"\n",
" # Check for cash flow statement\n",
" if 'CASHFLOW' in words or 'CASHFLOWS' in words:\n",
" return 'cashflow'\n",
" if 'CASH' in words and ('FLOW' in words or 'FLOWN' in words or 'FLOWS' in words or 'RECEIPTS' in words):\n",
" return 'cashflow'\n",
"\n",
" # Check for income statement\n",
" if ('INCOME' in words and 'CHANGES' not in words and 'TAX' not in words and 'TAXES' not in words) or 'PROFIT' in words or 'EARNINGS' in words or 'REVENUES' in words or 'OPERATION' in words or 'OPERATIONS' in words or 'EXPENSES' in words or 'LOSS' in words or 'LOSSES' in words:\n",
" return 'income'\n",
"\n",
" # Check for other naming alternatives for cash flow statement\n",
" if 'CHANGES' in words and (('NET' in words and 'ASSETS' in words) or 'CAPITAL' in words or 'TRUST' in words):\n",
" return 'cashflow'\n",
"\n",
" # Check for balance sheet statement\n",
" if ('BALANCE' in words or 'BALANCES' in words) and ('SHEET' in words or 'SHEETS' in words or 'SHEEETS' in words):\n",
" return 'balance'\n",
" if 'FINANCIAL' in words or 'POSITION' in words or 'POSITIONS' in words or 'CONDITION' in words or 'CONDITIONS' in words:\n",
" return 'balance'\n",
" if 'ASSETS' in words or 'LIABILITIES' in words:\n",
" return 'balance'\n",
"\n",
" return 'other'\n",
"\n",
"def definition_string(dts,linkrole):\n",
" role_type = dts.role_type(linkrole)\n",
" if role_type:\n",
" definition = role_type.definition\n",
" if definition:\n",
" return definition.value\n",
" return None\n",
"\n",
"def classify_presentation_link_roles(dts):\n",
" \"\"\"Returns a dict containing a list of linkroles for each kind of financial statement.\"\"\"\n",
"\n",
" linkroles = {kind: [] for kind in ('balance','income','cashflow','other')}\n",
" for linkrole in dts.presentation_link_roles():\n",
" definition = definition_string(dts,linkrole)\n",
" if definition:\n",
" kind = classify_linkrole(definition)\n",
" linkroles[kind].append(linkrole)\n",
"\n",
" for kind in ('balance','income','cashflow'):\n",
" if len(linkroles[kind]) > 1:\n",
" filtered = []\n",
" for linkrole in linkroles[kind]:\n",
" definition = ' '.join(definition_string(dts,linkrole).split(' - ')[2:]).upper()\n",
" if 'COMPREHESIVE' not in definition and 'COMPREHENSIVE' not in definition and 'SUPPLEMENTAL' not in definition:\n",
" filtered.append(linkrole)\n",
" filtered.sort(key=lambda linkrole:int(definition_string(dts,linkrole).split(' - ')[0]))\n",
" linkroles[kind] = filtered\n",
" return linkroles\n",
"\n",
"def find_required_context(instance,dei_ns):\n",
" \"\"\"Returns the required context for the main reporting period.\"\"\"\n",
"    # According to EDGAR Filer Manual rule 6.5.20 the Required Document Information elements must be reported at least with the Required Context.\n",
" # Required contexts can be distinguished by an absent xbrli:segment element.\n",
" documentPeriodEndDates = instance.facts.filter(xml.QName('DocumentPeriodEndDate',dei_ns))\n",
" for fact in documentPeriodEndDates:\n",
" if not fact.context.entity.segment:\n",
" return fact.context\n",
" return None\n",
"\n",
"def find_required_instant_context(instance,instant):\n",
" \"\"\"Returns the required instant context (with absent xbrli:segment element) for the given date.\"\"\"\n",
" for context in instance.contexts:\n",
" if context.period.is_instant() and context.period.instant.value == instant and not context.entity.segment:\n",
" return context\n",
" return None\n",
"\n",
"def find_dimension_contexts(instance,context,dimensions):\n",
" \"\"\"Returns a list of contexts containing the given dimension values and having the same period as the given context.\"\"\"\n",
" contexts = []\n",
" for dimcontext in instance.contexts:\n",
" if dimcontext.period_aspect_value == context.period_aspect_value and dimcontext.entity_identifier_aspect_value == context.entity_identifier_aspect_value:\n",
" dim_values = list(dimcontext.dimension_aspect_values)\n",
" if dim_values:\n",
" matching_context = True\n",
" for dim in dim_values:\n",
" if dim.dimension not in dimensions or dim.value not in dimensions[dim.dimension]:\n",
" matching_context = False\n",
" break\n",
" if matching_context:\n",
" contexts.append(dimcontext)\n",
" return contexts\n",
"\n",
"def find_fact_value(instance, concept, context):\n",
" \"\"\"Returns the fact value found for the given concept and context.\"\"\"\n",
" if context:\n",
" facts = instance.facts.filter(concept, context)\n",
" for fact in facts:\n",
" # Check for xsi:nil facts\n",
" if fact.xsi_nil:\n",
" continue\n",
" return fact.normalized_value\n",
" return None\n",
"\n",
"def find_numeric_value(instance, concept, context):\n",
" \"\"\"Returns the fact numeric value found for the given concept and context.\"\"\"\n",
" # Ignore non-numeric facts\n",
" if concept.is_numeric() and context:\n",
" facts = instance.facts.filter(concept, context)\n",
" for fact in facts:\n",
" # Check for xsi:nil facts\n",
" if fact.xsi_nil:\n",
" continue\n",
" return fact.effective_numeric_value\n",
" return None\n",
"\n",
"def find_monetary_value(instance, concept, context, currency):\n",
" \"\"\"Returns the fact value found for the given concept, context and currency.\"\"\"\n",
" # Ignore non-monetary facts\n",
" if concept.is_monetary() and context:\n",
" facts = instance.facts.filter(concept, context)\n",
" for fact in facts:\n",
" # Check for xsi:nil facts\n",
" if fact.xsi_nil:\n",
" continue\n",
" # Ignore facts reported with other currency units\n",
" unit = fact.unit_aspect_value\n",
" if unit.iso4217_currency == currency:\n",
" return int(fact.effective_numeric_value)\n",
" return None\n",
"\n",
"\n",
"def descendants(network,root,include_self=False):\n",
"    \"\"\"Returns a list of all descendant concepts from the given root.\"\"\"\n",
" def _descendants(network,root,concepts):\n",
" concepts.append(root)\n",
" for rel in network.relationships_from(root):\n",
" _descendants(network,rel.target,concepts)\n",
"\n",
" concepts = []\n",
" if include_self:\n",
" concepts.append(root)\n",
" for rel in network.relationships_from(root):\n",
" _descendants(network,rel.target,concepts)\n",
" return concepts\n",
"\n",
"\n",
"def presentation_concepts(dts,linkrole):\n",
" \"\"\"Returns a tuple with a list of all primary items and a dict of dimension domain values featured in the network of presentation relationships for the given linkrole.\"\"\"\n",
" def _presentation_concepts(network,concept,preferred_label_role,level,concepts,dimensions):\n",
" if isinstance(concept,xbrl.xdt.Dimension):\n",
" dimensions[concept] = set(descendants(network,concept))\n",
" return\n",
"\n",
" if isinstance(concept,xbrl.xdt.Hypercube):\n",
" level -= 1\n",
" else:\n",
" concepts.append((concept,preferred_label_role,level))\n",
" for rel in network.relationships_from(concept):\n",
" _presentation_concepts(network,rel.target,rel.preferred_label,level+1,concepts,dimensions)\n",
"\n",
" concepts = []\n",
" dimensions = {}\n",
" network = dts.presentation_base_set(linkrole).network_of_relationships()\n",
" for root in network.roots:\n",
" _presentation_concepts(network,root,None,0,concepts,dimensions)\n",
" return concepts, dimensions\n",
"\n",
"def concept_label(concept,label_role):\n",
" labels = list(concept.labels(label_role=label_role))\n",
" if not labels:\n",
" return None\n",
" return labels[0].text\n",
"\n",
"def is_total_role(preferred_label_role):\n",
" if preferred_label_role:\n",
" return 'total' in preferred_label_role.lower()\n",
" return False\n",
"\n",
"def is_negated_role(preferred_label_role):\n",
" if preferred_label_role:\n",
" return 'negated' in preferred_label_role.lower()\n",
" return False\n",
"\n",
"def is_start_role(preferred_label_role):\n",
" if preferred_label_role:\n",
" return 'periodstart' in preferred_label_role.lower()\n",
" return False\n",
"\n",
"def is_end_role(preferred_label_role):\n",
" if preferred_label_role:\n",
" return 'periodend' in preferred_label_role.lower()\n",
" return False\n",
"\n",
"def find_presentation_linkbase_values(filing, report, instance, linkrole, context, currency):\n",
" \"\"\"Returns a dict from concept name to fact value for all monetary concepts appearing in the presentation linkbase for the given linkrole.\"\"\"\n",
"\n",
" dim_contexts = []\n",
" dim_contexts_stock = []\n",
" # Get all concepts and dimensions in the presentation linkbase for the given linkrole\n",
" concepts, dimensions = presentation_concepts(instance.dts,linkrole)\n",
" if dimensions:\n",
" dim_contexts = find_dimension_contexts(instance,context,{dim: dimensions[dim] for dim in dimensions if dim.name not in ('LegalEntityAxis','StatementClassOfStockAxis')})\n",
" dim_contexts_stock = find_dimension_contexts(instance,context,{dim: dimensions[dim] for dim in dimensions if dim.name == 'StatementClassOfStockAxis'})\n",
"\n",
" fact_values = {}\n",
" for i, (concept, preferred_label_role, level) in enumerate(concepts):\n",
" # Skip abstract and non-monetary concepts\n",
" if concept.abstract:\n",
" value = None\n",
" elif concept.is_monetary():\n",
" values = []\n",
"\n",
" # Try to find a value with the main required context\n",
" value = find_monetary_value(instance, concept, context, currency)\n",
" if value is not None:\n",
" values.append(value)\n",
" else:\n",
" # If the concept is reported only with a dimensional breakdown, sum over all dimension domain members\n",
" for dim_context in dim_contexts:\n",
" value = find_monetary_value(instance, concept, dim_context, currency)\n",
" if value is not None:\n",
" values.append(value)\n",
"\n",
" # Exception for StatementClassOfStockAxis dimension: Add the sum over all dimension domain members to the value reported without dimensions\n",
" for dim_context in dim_contexts_stock:\n",
" value = find_monetary_value(instance, concept, dim_context, currency)\n",
" if value is not None:\n",
" values.append(value)\n",
"\n",
" value = sum(values) if values else None\n",
" if value:\n",
" fact_values[concept.name] = {'pos': i, 'concept': concept, 'value': value}\n",
" else:\n",
" if is_start_role(preferred_label_role) and context.period.is_duration():\n",
" value = find_monetary_value(instance, concept, find_required_instant_context(instance, context.period.start_date.value), currency)\n",
" elif is_end_role(preferred_label_role) and context.period.is_duration():\n",
" value = find_monetary_value(instance, concept, find_required_instant_context(instance, context.period.end_date.value), currency)\n",
"\n",
" elif concept.is_numeric():\n",
" value = find_numeric_value(instance, concept, context)\n",
" else:\n",
" value = find_fact_value(instance, concept, context)\n",
"\n",
" # Insert fact value to DB\n",
" if args.store_fact_mappings:\n",
" with db_connect() as con:\n",
" con.execute('INSERT INTO facts VALUES(?,?,?,?,?,?,?,?,?,?,?,?)',(filing['accessionNumber'],report['kind'],i,None,concept_label(concept,preferred_label_role),concept.target_namespace,concept.name,str(value),level,concept.abstract,is_total_role(preferred_label_role),is_negated_role(preferred_label_role)))\n",
" con.commit()\n",
"\n",
" return fact_values\n",
"\n",
"def walk_calc_tree(filing,report,instance,network,concept,weight,fact_values,lineitem_values,allowed_lineitems,other_lineitem,visited_concepts):\n",
" \"\"\"Iterates over the concepts in the calculation tree and adds them to the appropriate report line items. If an unknown concept is encountered, it is added to the \"other\" line item of the current breakdown.\"\"\"\n",
"\n",
" if concept in visited_concepts:\n",
" visited_concepts.update(descendants(network,concept))\n",
" return\n",
" visited_concepts.add(concept)\n",
"\n",
" lineitem = None\n",
" child_rels = list(network.relationships_from(concept))\n",
"\n",
" value = fact_values.get(concept.name)\n",
"\n",
" current_mapping = report['mappings'].get(concept.name)\n",
" if current_mapping:\n",
"\n",
" if 'add-to' in current_mapping:\n",
" lineitem = current_mapping['add-to'][0]\n",
" for x in current_mapping['add-to']:\n",
" if x in allowed_lineitems:\n",
" lineitem = x\n",
" break\n",
" elif 'total' in current_mapping:\n",
" lineitem = current_mapping['total']\n",
"\n",
" if lineitem and lineitem not in allowed_lineitems:\n",
" # log error\n",
" filing_logger.warning('%s: Concept %s is not expected to occur within breakdown of %s',report['name'],concept.qname,next(network.relationships_to(concept)).source.qname)\n",
" lineitem = None\n",
"\n",
" allowed_lineitems = allowed_lineitems & set(current_mapping['allowed'] if 'allowed' in current_mapping else [lineitem])\n",
" if len(allowed_lineitems) == 0:\n",
" # log error\n",
" filing_logger.warning('%s: Concept %s is not expected to occur within breakdown of %s',report['name'],concept.qname,next(network.relationships_to(concept)).source.qname)\n",
"\n",
" if 'other' in current_mapping:\n",
" other_lineitem = current_mapping.get('other')\n",
"\n",
" if value:\n",
" if not lineitem and not child_rels:\n",
" lineitem = other_lineitem\n",
" if lineitem:\n",
" # Insert mapping to DB\n",
" if args.store_fact_mappings:\n",
" with db_connect() as con:\n",
" con.execute('UPDATE facts SET lineitem = ? WHERE accessionNumber = ? AND report = ? AND pos = ?',(lineitem,filing['accessionNumber'],report['kind'],value['pos']))\n",
" con.commit()\n",
"\n",
" if 'total' in current_mapping:\n",
" if lineitem in lineitem_values:\n",
" # error if already set\n",
" filing_logger.error('%s: Overwriting already set total value of concept %s',report['name'],concept.qname)\n",
" lineitem_values[lineitem] = weight * value['value']\n",
" else:\n",
" lineitem_values[lineitem] += weight * value['value']\n",
" visited_concepts.update(descendants(network,concept))\n",
" return\n",
" elif not child_rels:\n",
" # log error\n",
" filing_logger.error('%s: Ignored value of inconsistent concept %s',report['name'],concept.qname)\n",
" else:\n",
"\n",
" if value and not child_rels:\n",
" if other_lineitem:\n",
" # Insert mapping to DB\n",
" if args.store_fact_mappings:\n",
" with db_connect() as con:\n",
" con.execute('UPDATE facts SET lineitem = ? WHERE accessionNumber = ? AND report = ? AND pos = ?',(other_lineitem,filing['accessionNumber'],report['kind'],value['pos']))\n",
" con.commit()\n",
"\n",
" # log unknown concept\n",
" filing_logger.warning('%s: Added value of unknown concept %s to %s',report['name'],concept.qname,other_lineitem)\n",
" lineitem_values[other_lineitem] += weight * value['value']\n",
" else:\n",
" # log error\n",
" filing_logger.error('%s: Ignored value of unknown concept %s',report['name'],concept.qname)\n",
" visited_concepts.update(descendants(network,concept))\n",
" return\n",
"\n",
" if concept.name == 'Assets':\n",
" rel_current = None\n",
" for rel in child_rels:\n",
" if rel.target.name == 'AssetsCurrent':\n",
" rel_current = rel\n",
" break\n",
" if rel_current:\n",
" allowed_lineitems = set(['cashAndCashEquivalents','shortTermInvestments','cashAndShortTermInvestments','receivablesNet','inventory','currentAssetsOther','currentAssetsTotal'])\n",
" other_lineitem = 'currentAssetsOther'\n",
" for rel in child_rels:\n",
" walk_calc_tree(filing,report,instance,network,rel.target,weight*int(rel.weight),fact_values,lineitem_values,allowed_lineitems,other_lineitem,visited_concepts)\n",
" if rel == rel_current:\n",
" allowed_lineitems = set(['longTermInvestments','propertyPlantAndEquipmentGross','accumulatedDepreciation','propertyPlantAndEquipmentNet','goodwill','intangibleAssets','nonCurrrentAssetsOther','deferredLongTermAssetCharges','nonCurrentAssetsTotal'])\n",
" other_lineitem = 'nonCurrrentAssetsOther'\n",
" return\n",
" if concept.name == 'Liabilities':\n",
" rel_current = None\n",
" for rel in child_rels:\n",
" if rel.target.name == 'LiabilitiesCurrent':\n",
" rel_current = rel\n",
" break\n",
" if rel_current:\n",
" allowed_lineitems = set(['accountsPayable','shortTermDebt', 'currentLiabilitiesOther', 'currentLiabilitiesTotal'])\n",
" other_lineitem = 'currentLiabilitiesOther'\n",
" for rel in child_rels:\n",
" walk_calc_tree(filing,report,instance,network,rel.target,weight*int(rel.weight),fact_values,lineitem_values,allowed_lineitems,other_lineitem,visited_concepts)\n",
" if rel == rel_current:\n",
" allowed_lineitems = set(['longTermDebt','capitalLeaseObligations', 'longTermDebtTotal', 'deferredLongTermLiabilityCharges', 'nonCurrentLiabilitiesOther', 'nonCurrentLiabilitiesTotal'])\n",
" other_lineitem = 'nonCurrentLiabilitiesOther'\n",
" return\n",
"\n",
" for rel in child_rels:\n",
" walk_calc_tree(filing,report,instance,network,rel.target,weight*int(rel.weight),fact_values,lineitem_values,allowed_lineitems,other_lineitem,visited_concepts)\n",
"\n",
"def calc_total_values(total_rules,lineitem_values,lineitem):\n",
" \"\"\"Calculates any missing (not directly reported) total values.\"\"\"\n",
" if lineitem not in lineitem_values or lineitem_values[lineitem] is None:\n",
" values = []\n",
" negate = False\n",
" for summand in total_rules[lineitem]:\n",
" if summand == '-':\n",
" negate = True\n",
" continue\n",
" if summand in total_rules:\n",
" calc_total_values(total_rules,lineitem_values,summand)\n",
" if summand in lineitem_values and lineitem_values[summand] is not None:\n",
" values.append(-lineitem_values[summand] if negate else lineitem_values[summand])\n",
" lineitem_values[lineitem] = sum(values) if len(values) > 0 else None\n",
"\n",
"def calc_report_values(filing,report,instance,linkrole,fact_values):\n",
" \"\"\"Returns a dict with the calculated values for each lineitem of the report.\"\"\"\n",
"\n",
" lineitem_values = Summations()\n",
" visited_concepts = set()\n",
"\n",
" network = calculation_network(instance.dts,linkrole)\n",
" if network:\n",
" for root in network.roots:\n",
" walk_calc_tree(filing,report,instance,network,root,1,fact_values,lineitem_values,set(report['lineitems']),None,visited_concepts)\n",
"\n",
" visited_concept_names = set(concept.name for concept in visited_concepts)\n",
" for concept_name, value in fact_values.items():\n",
" if concept_name not in visited_concept_names:\n",
"\n",
" lineitem = None\n",
"\n",
" current_mapping = report['mappings'].get(concept_name)\n",
" if current_mapping:\n",
" if 'add-to' in current_mapping:\n",
" lineitem = current_mapping['add-to'][0]\n",
" elif 'total' in current_mapping:\n",
" lineitem = current_mapping['total']\n",
"\n",
" if lineitem:\n",
" if lineitem not in lineitem_values:\n",
" # Insert mapping to DB\n",
" if args.store_fact_mappings:\n",
" with db_connect() as con:\n",
" con.execute('UPDATE facts SET lineitem = ? WHERE accessionNumber = ? AND report = ? AND pos = ?',(lineitem,filing['accessionNumber'],report['kind'],value['pos']))\n",
" con.commit()\n",
"\n",
" if lineitem == 'treasuryStockValue' and value['value'] > 0:\n",
" value['value'] *= -1\n",
" lineitem_values[lineitem] += value['value']\n",
" else:\n",
" # log error\n",
" filing_logger.warning('%s: Ignored value of concept %s outside of calculation tree to preserve totals',report['name'],value['concept'].qname)\n",
" else:\n",
" # log unknown concept\n",
" filing_logger.warning('%s: Ignored value of unknown concept %s outside of calculation tree',report['name'],value['concept'].qname)\n",
"\n",
" # Set missing/not reported values to None\n",
" for lineitem in report['lineitems']:\n",
" if lineitem not in lineitem_values:\n",
" lineitem_values[lineitem] = None\n",
"\n",
" for lineitem in report['totals']:\n",
" calc_total_values(report['totals'],lineitem_values,lineitem)\n",
"\n",
" return lineitem_values\n",
"\n",
"def calculation_network(dts,linkrole):\n",
" \"\"\"Returns an object representing the network of calculation relationships for the given linkrole.\"\"\"\n",
" baseset = dts.calculation_base_set(linkrole)\n",
" if baseset:\n",
" return baseset.network_of_relationships()\n",
" else:\n",
" filing_logger.warning('No calculation linkbase found for linkrole %s',linkrole)\n",
" return None\n",
"\n",
"def end_date(context):\n",
" \"\"\"Returns the end date specified as in the context as used in financial statements (e.g. ending Dec. 31, 2015 instead of Jan. 1).\"\"\"\n",
" period = context.period_aspect_value\n",
" if period.period_type == xbrl.PeriodType.INSTANT:\n",
" return period.instant.date() - datetime.timedelta(days=1)\n",
" elif period.period_type == xbrl.PeriodType.START_END:\n",
" return period.end.date() - datetime.timedelta(days=1)\n",
" else:\n",
" return datetime.date.max\n",
" \n",
"def calc_balance_sheet(filing,instance,context,linkroles):\n",
" \"\"\"Calculate balance sheet line items from XBRL instance and store in DB.\"\"\"\n",
" filing_logger.info('Calculate %s',reports['balance']['name'])\n",
"\n",
" if not context:\n",
" filing_logger.error('Skipped %s: No required context found',reports['balance']['name'])\n",
" return\n",
" if len(linkroles) == 0:\n",
" filing_logger.error('Skipped %s: No linkrole found',reports['balance']['name'])\n",
" return\n",
" elif len(linkroles) > 1:\n",
" filing_logger.warning('%s: Multiple linkroles found: %s',reports['balance']['name'],','.join(linkroles))\n",
" linkrole = linkroles[0]\n",
"\n",
" fact_values = find_presentation_linkbase_values(filing,reports['balance'],instance,linkrole,context,'USD')\n",
" values = calc_report_values(filing,reports['balance'],instance,linkrole,fact_values)\n",
" values.update({'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'endDate': end_date(context), 'currencyCode': 'USD'})\n",
"\n",
" # Insert balance sheet into DB\n",
" with db_connect() as con:\n",
" db_fields = ['accessionNumber','cikNumber','endDate','currencyCode'] + reports['balance']['lineitems']\n",
" con.execute('INSERT INTO balance_sheet VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])\n",
" con.commit()\n",
"\n",
"def calc_income_statement(filing,instance,context,linkroles):\n",
" \"\"\"Calculate income line items from XBRL instance and store in DB.\"\"\"\n",
" filing_logger.info('Calculate %s',reports['income']['name'])\n",
"\n",
" if not context:\n",
" filing_logger.error('Skipped %s: No required context found',reports['income']['name'])\n",
" return\n",
" if len(linkroles) == 0:\n",
" filing_logger.error('Skipped %s: No linkrole found',reports['income']['name'])\n",
" return\n",
" elif len(linkroles) > 1:\n",
" filing_logger.warning('%s: Multiple linkroles found: %s',reports['income']['name'],','.join(linkroles))\n",
" linkrole = linkroles[0]\n",
"\n",
" duration = 12 if filing['formType'] == '10-K' else 3\n",
" contexts = [context2 for context2 in instance.contexts if context2.period.is_start_end() and context2.period.aspect_value.end == context.period.aspect_value.end and round((context2.period.aspect_value.end-context2.period.aspect_value.start).days/30) == duration and not context2.entity.segment]\n",
" if not contexts:\n",
" filing_logger.error('%s: No required context found with %d month duration',reports['income']['name'],duration)\n",
" return\n",
" context = contexts[0]\n",
"\n",
" fact_values = find_presentation_linkbase_values(filing,reports['income'],instance,linkrole,context,'USD')\n",
" values = calc_report_values(filing,reports['income'],instance,linkrole,fact_values)\n",
" values.update({'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'duration': duration, 'endDate': end_date(context), 'currencyCode': 'USD'})\n",
"\n",
" for lineitem in ('costOfRevenue','researchAndDevelopment','sellingGeneralAndAdministrative','nonRecurring','operatingExpensesOther','operatingExpensesTotal','interestExpense','incomeTaxExpense','minorityInterest','preferredStockAndOtherAdjustments'):\n",
" if values[lineitem]:\n",
" values[lineitem] *= -1\n",
"\n",
" # Insert income statement into DB\n",
" with db_connect() as con:\n",
" db_fields = ['accessionNumber','cikNumber','endDate','duration','currencyCode'] + reports['income']['lineitems']\n",
" con.execute('INSERT INTO income_statement VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])\n",
" con.commit()\n",
"\n",
" # Calculate data for the last quarter from the annual report\n",
" if filing['formType'] == '10-K':\n",
" previous_year_date = datetime.date(filing['period'].year-1,filing['period'].month,calendar.monthrange(filing['period'].year-1,filing['period'].month)[1])\n",
" previous_quarters = con.execute('SELECT * FROM income_statement WHERE duration = 3 AND accessionNumber IN (SELECT accessionNumber FROM filings WHERE cikNumber = ? AND formType = \"10-Q\" AND period BETWEEN ? AND ?)',(filing['cikNumber'],previous_year_date,filing['period'])).fetchall()\n",
"\n",
" previous_quarters = {previous_quarter[2]: previous_quarter for previous_quarter in previous_quarters} # ignore duplicate filings\n",
" if len(previous_quarters) == 3:\n",
" field_offset = len(db_fields)-len(reports['income']['lineitems'])\n",
" for i, lineitem in enumerate(reports['income']['lineitems']):\n",
" if values[lineitem] is not None:\n",
" for previous_quarter in previous_quarters.values():\n",
" if previous_quarter[i+field_offset]:\n",
" values[lineitem] -= previous_quarter[i+field_offset]\n",
"\n",
" values['duration'] = 3\n",
" con.execute('INSERT INTO income_statement VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])\n",
" con.commit()\n",
"\n",
"def calc_cashflow_statement(filing,instance,context,linkroles):\n",
" \"\"\"Calculate cashflow line items from XBRL instance and store in DB.\"\"\"\n",
" filing_logger.info('Calculate %s',reports['cashflow']['name'])\n",
"\n",
" if not context:\n",
" filing_logger.error('Skipped %s: No required context found',reports['cashflow']['name'])\n",
" return\n",
" if len(linkroles) == 0:\n",
" filing_logger.error('Skipped %s: No linkrole found',reports['cashflow']['name'])\n",
" return\n",
" elif len(linkroles) > 1:\n",
" filing_logger.warning('%s: Multiple linkroles found: %s',reports['cashflow']['name'],','.join(linkroles))\n",
" linkrole = linkroles[0]\n",
"\n",
" duration = round((context.period_aspect_value.end.date()-context.period_aspect_value.start.date()).days/30)\n",
"\n",
" fact_values = find_presentation_linkbase_values(filing,reports['cashflow'],instance,linkrole,context,'USD')\n",
" values = calc_report_values(filing,reports['cashflow'],instance,linkrole,fact_values)\n",
" values.update({'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'duration': duration, 'endDate': end_date(context), 'currencyCode': 'USD'})\n",
"\n",
" # Insert cash flow statement into DB\n",
" with db_connect() as con:\n",
" db_fields = ['accessionNumber','cikNumber','endDate','duration','currencyCode'] + reports['cashflow']['lineitems']\n",
" con.execute('INSERT INTO cashflow_statement VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])\n",
" con.commit()\n",
"\n",
" previous_quarters = None\n",
" # Calculate data for the current quarter\n",
" if filing['formType'] == '10-Q' and duration > 3:\n",
" month, year = filing['period'].month, filing['period'].year\n",
" month -= duration\n",
" if month < 1:\n",
" month += 12\n",
" year -= 1\n",
" previous_year_date = datetime.date(year,month,calendar.monthrange(year,month)[1])\n",
" previous_quarters = con.execute('SELECT * FROM cashflow_statement WHERE duration = 3 AND accessionNumber IN (SELECT accessionNumber FROM filings WHERE cikNumber = ? AND formType = \"10-Q\" AND period BETWEEN ? and ?)',(filing['cikNumber'],previous_year_date,filing['period'])).fetchall()\n",
" if len(previous_quarters) != (duration/3 - 1):\n",
" filing_logger.error('%s: Missing previous quarterly reports to calculate quarterly data from compounded quarterly report',reports['cashflow']['name'])\n",
" previous_quarters = None\n",
"\n",
" # Calculate data for the last quarter of the financial year from the annual report\n",
" elif filing['formType'] == '10-K':\n",
" previous_year_date = datetime.date(filing['period'].year-1,filing['period'].month,calendar.monthrange(filing['period'].year-1,filing['period'].month)[1])\n",
" previous_quarters = con.execute('SELECT * FROM cashflow_statement WHERE duration = 3 AND accessionNumber IN (SELECT accessionNumber FROM filings WHERE cikNumber = ? AND formType = \"10-Q\" AND period BETWEEN ? and ?)',(filing['cikNumber'],previous_year_date,filing['period'])).fetchall()\n",
" if len(previous_quarters) != 3:\n",
" filing_logger.error('%s: Missing previous quarterly reports to calculate quarterly data from annual report',reports['cashflow']['name'])\n",
" previous_quarters = None\n",
"\n",
" if previous_quarters:\n",
" field_offset = len(db_fields)-len(reports['cashflow']['lineitems'])\n",
" for i, lineitem in enumerate(reports['cashflow']['lineitems']):\n",
" if values[lineitem] is not None:\n",
" for previous_quarter in previous_quarters:\n",
" if previous_quarter[i+field_offset]:\n",
" values[lineitem] -= previous_quarter[i+field_offset]\n",
"\n",
" values['duration'] = 3\n",
" con.execute('INSERT INTO cashflow_statement VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])\n",
" con.commit()\n",
"\n",
"def dbvalue(dbvalues,report,lineitem,avg_over_duration):\n",
" if lineitem[0] == '-':\n",
" weight = -1\n",
" lineitem = lineitem[1:]\n",
" else:\n",
" weight = 1\n",
" if report == 'balance':\n",
" if avg_over_duration:\n",
" return weight*(dbvalues['previous_balance'][lineitem] + dbvalues['balance'][lineitem])/2\n",
" else:\n",
" return weight*dbvalues['balance'][lineitem]\n",
" else:\n",
" return weight*dbvalues[report][lineitem]\n",
"\n",
"def calc_ratios_mrq(filing):\n",
" \"\"\"Computes the ratios for the most recent quarter (mrq), annualized.\"\"\"\n",
" dbvalues = {\n",
" 'previous_balance': Summations(),\n",
" 'balance': Summations(),\n",
" 'income': Summations(),\n",
" 'cashflow': Summations()\n",
" }\n",
" with db_connect() as con:\n",
" factor = 4 if filing['formType'] == '10-Q' else 1\n",
" # Fetch end balance sheet values from DB\n",
" for row in con.execute('SELECT * FROM balance_sheet WHERE accessionNumber = ?',(filing['accessionNumber'],)):\n",
" for i, lineitem in enumerate(reports['balance']['lineitems']):\n",
" dbvalues['balance'][lineitem] += row[i+4] if row[i+4] else 0\n",
" # Fetch start balance sheet values from DB\n",
" previous_filing = con.execute('SELECT accessionNumber FROM filings WHERE cikNumber = ? AND period < ? ORDER BY period DESC',(filing['cikNumber'],filing['period'])).fetchone()\n",
" if previous_filing:\n",
" for row in con.execute('SELECT * FROM balance_sheet WHERE accessionNumber = ?',(previous_filing[0],)):\n",
" for i, lineitem in enumerate(reports['balance']['lineitems']):\n",
" dbvalues['previous_balance'][lineitem] += row[i+4] if row[i+4] else 0\n",
" # Fetch income statement values from DB\n",
" for row in con.execute('SELECT * FROM income_statement WHERE accessionNumber = ?',(filing['accessionNumber'],)):\n",
" for i, lineitem in enumerate(reports['income']['lineitems']):\n",
" dbvalues['income'][lineitem] += factor*row[i+5] if row[i+5] else 0\n",
" # Fetch cashflow statement values from DB\n",
" for row in con.execute('SELECT * FROM cashflow_statement WHERE accessionNumber = ?',(filing['accessionNumber'],)):\n",
" for i, lineitem in enumerate(reports['cashflow']['lineitems']):\n",
" dbvalues['cashflow'][lineitem] += factor*row[i+5] if row[i+5] else 0\n",
"\n",
" values = {'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'endDate': filing['period'], 'kind': 'mrq'}\n",
" for lineitem, ratio in reports['ratios']['formulas'].items():\n",
" # Check if the average in assets/liabilities over the whole period should be used\n",
" referenced_reports = set(op['report'] for op in itertools.chain(ratio['numerator'],ratio['denominator']))\n",
" avg_over_duration = len(referenced_reports) > 1 and 'balance' in referenced_reports\n",
"\n",
" # Calculate the ratio\n",
" numerator = sum(dbvalue(dbvalues,op['report'],op['lineitem'],avg_over_duration) for op in ratio['numerator'])\n",
" denominator = sum(dbvalue(dbvalues,op['report'],op['lineitem'],avg_over_duration) for op in ratio['denominator'])\n",
" values[lineitem] = numerator / denominator if denominator else None\n",
"\n",
" # Insert ratios into DB\n",
" db_fields = ['accessionNumber','cikNumber','endDate','kind'] + reports['ratios']['lineitems']\n",
" con.execute('INSERT INTO ratios VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])\n",
" con.commit()\n",
"\n",
"def calc_ratios_ttm(filing):\n",
" \"\"\"Computes the ratios for the trailing twelve months (ttm).\"\"\"\n",
" dbvalues = {\n",
" 'balance': Summations(),\n",
" 'income': Summations(),\n",
" 'cashflow': Summations()\n",
" }\n",
" previous_year_date = datetime.date(filing['period'].year-1,filing['period'].month,calendar.monthrange(filing['period'].year-1,filing['period'].month)[1])\n",
"\n",
" with db_connect() as con:\n",
" # Fetch filings for the last year\n",
" previous_filings = con.execute('SELECT * FROM filings WHERE cikNumber = ? AND period > ? AND period <= ?',(filing['cikNumber'],previous_year_date,filing['period'])).fetchall()\n",
" for previous_filing in previous_filings:\n",
" # Fetch balance sheet values from DB\n",
" for row in con.execute('SELECT * FROM balance_sheet WHERE accessionNumber = ?',(previous_filing[0],)):\n",
" for i, lineitem in enumerate(reports['balance']['lineitems']):\n",
" dbvalues['balance'][lineitem] += row[i+4]/4 if row[i+4] else 0\n",
" # Fetch income statement values from DB\n",
" for row in con.execute('SELECT * FROM income_statement WHERE accessionNumber = ? AND duration = 3',(previous_filing[0],)):\n",
" for i, lineitem in enumerate(reports['income']['lineitems']):\n",
" dbvalues['income'][lineitem] += row[i+5] if row[i+5] else 0\n",
" # Fetch cashflow statement values from DB\n",
" for row in con.execute('SELECT * FROM cashflow_statement WHERE accessionNumber = ? AND duration = 3',(previous_filing[0],)):\n",
" for i, lineitem in enumerate(reports['cashflow']['lineitems']):\n",
" dbvalues['cashflow'][lineitem] += row[i+5] if row[i+5] else 0\n",
"\n",
" values = {'accessionNumber': filing['accessionNumber'], 'cikNumber': filing['cikNumber'], 'endDate': filing['period'], 'kind': 'ttm'}\n",
" for lineitem, ratio in reports['ratios']['formulas'].items():\n",
" # Calculate the ratio\n",
" numerator = sum(dbvalue(dbvalues,op['report'],op['lineitem'],False) for op in ratio['numerator'])\n",
" denominator = sum(dbvalue(dbvalues,op['report'],op['lineitem'],False) for op in ratio['denominator'])\n",
" values[lineitem] = numerator / denominator if denominator else None\n",
"\n",
" # Insert ratios into DB\n",
" db_fields = ['accessionNumber','cikNumber','endDate','kind'] + reports['ratios']['lineitems']\n",
" con.execute('INSERT INTO ratios VALUES(%s)' % ','.join(['?']*len(db_fields)),[values[key] for key in db_fields])\n",
" con.commit()\n",
"\n",
"def process_filing(filing):\n",
" \"\"\"Load XBRL instance and store extracted data to DB.\"\"\"\n",
"\n",
" # Store current filing in thread-local storage\n",
" tls.filing = filing\n",
" filing_logger.info('Start processing filing')\n",
"\n",
" with db_connect() as con:\n",
" # Check if the filing was already processed\n",
" if con.execute('SELECT accessionNumber FROM filings WHERE accessionNumber = ?',(filing['accessionNumber'],)).fetchone():\n",
" if not args.recompute:\n",
" filing_logger.info('Skipped already processed filing')\n",
" return\n",
" filing_logger.info('Deleting existing filing %s',filing['accessionNumber'])\n",
" con.execute('DELETE FROM filings WHERE accessionNumber = ?',(filing['accessionNumber'],))\n",
" con.execute('DELETE FROM facts WHERE accessionNumber = ?',(filing['accessionNumber'],))\n",
" con.execute('DELETE FROM balance_sheet WHERE accessionNumber = ?',(filing['accessionNumber'],))\n",
" con.execute('DELETE FROM income_statement WHERE accessionNumber = ?',(filing['accessionNumber'],))\n",
" con.execute('DELETE FROM cashflow_statement WHERE accessionNumber = ?',(filing['accessionNumber'],))\n",
" con.execute('DELETE FROM ratios WHERE accessionNumber = ?',(filing['accessionNumber'],))\n",
" con.commit()\n",
"\n",
" # Handle amendment filings\n",
" if filing['formType'].endswith('/A'):\n",
" filing['formType'] = filing['formType'][:-2]\n",
"\n",
" # Delete the previous amended filing\n",
" for row in con.execute('SELECT accessionNumber FROM filings WHERE cikNumber = ? and period = ?',(filing['cikNumber'],filing['period'])):\n",
" filing_logger.info('Deleting amended filing %s',row[0])\n",
" con.execute('DELETE FROM filings WHERE accessionNumber = ?',(row[0],))\n",
" con.execute('DELETE FROM facts WHERE accessionNumber = ?',(row[0],))\n",
" con.execute('DELETE FROM balance_sheet WHERE accessionNumber = ?',(row[0],))\n",
" con.execute('DELETE FROM income_statement WHERE accessionNumber = ?',(row[0],))\n",
" con.execute('DELETE FROM cashflow_statement WHERE accessionNumber = ?',(row[0],))\n",
" con.execute('DELETE FROM ratios WHERE accessionNumber = ?',(row[0],))\n",
" con.commit()\n",
"\n",
" # Load XBRL instance from zip archive\n",
" instance, log = feed_tools.load_instance(filing)\n",
" filing['errors'] = '\\n'.join(error.text for error in log.errors) if log.has_errors() else None\n",
" #filing['warnings'] = '\\n'.join(error.text for error in itertools.chain(log.warnings, log.inconsistencies)) if log.has_warnings() or log.has_inconsistencies() else None\n",
"\n",
" # Write filing metadata into DB\n",
" with db_connect() as con:\n",
" con.execute('INSERT INTO filings VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)',[filing[key] for key in ('accessionNumber','cikNumber','companyName','formType','filingDate','fileNumber','acceptanceDatetime','period','assistantDirector','assignedSic','otherCikNumbers','fiscalYearEnd','instanceUrl','errors')])\n",
" con.commit()\n",
"\n",
" if instance:\n",
" # Find the appropriate linkroles for the main financial statements\n",
" linkroles = classify_presentation_link_roles(instance.dts)\n",
"\n",
" # Find the required contexts for the main reporting period\n",
" usgaap_ns, dei_ns = find_std_namespaces(instance.dts)\n",
" required_context = find_required_context(instance,dei_ns)\n",
" if required_context and required_context.period.is_start_end():\n",
" # Check duration of required context\n",
" duration = round((required_context.period_aspect_value.end.date()-required_context.period_aspect_value.start.date()).days/30)\n",
" if filing['formType'] == '10-K' and duration != 12:\n",
" filing_logger.warning('10-K Required Context has duration of %d months',duration)\n",
" elif filing['formType'] == '10-Q' and (duration != 3 and duration != 6 and duration != 9):\n",
" filing_logger.warning('10-Q Required Context has duration of %d months',duration)\n",
"\n",
" # Find an instant context for the period end date\n",
" required_instant_context = find_required_instant_context(instance,required_context.period.end_date.value)\n",
"\n",
" # Calculate and store values for the main financial statements to DB\n",
" calc_balance_sheet(filing,instance,required_instant_context,linkroles['balance'])\n",
" calc_income_statement(filing,instance,required_context,linkroles['income'])\n",
" calc_cashflow_statement(filing,instance,required_context,linkroles['cashflow'])\n",
"\n",
" # Calculate and store ratios to DB\n",
" calc_ratios_mrq(filing)\n",
" calc_ratios_ttm(filing)\n",
" else:\n",
" filing_logger.error('Missing or non-duration required context encountered')\n",
" else:\n",
" filing_logger.error('Invalid XBRL instance:\\n%s',filing['errors'])\n",
"\n",
" filing_logger.info('Finished processing filing')\n",
"\n",
"def process_filings_for_cik(cik,filings):\n",
" #if filings[0]['companyName'] not in ('JOHNSON & JOHNSON','INTERNATIONAL BUSINESS MACHINES CORP','EXXON MOBIL CORP','CARNIVAL CORP','Google Inc.','AMAZON COM INC','APPLE INC','MICROSOFT CORP','ORACLE CORP','General Motors Co','GENERAL ELECTRIC CO','WAL MART STORES INC'):\n",
" # return\n",
"\n",
" for filing in filings:\n",
" try:\n",
" process_filing(filing)\n",
" except:\n",
" logger.exception('Failed processing filing %s',filing['accessionNumber'])\n",
"\n",
"def process_filings(filings):\n",
" \"\"\"Distribute processing of filings over multiple threads/cores.\"\"\"\n",
" logger.info('Start processing 10-K/10-Q filings (count=%d)',sum(len(x) for x in filings.values()))\n",
" with concurrent.futures.ThreadPoolExecutor(max_workers=args.max_threads) as executor:\n",
" futures = [executor.submit(process_filings_for_cik,cik,filings[cik]) for cik in filings]\n",
" for future in concurrent.futures.as_completed(futures):\n",
" try:\n",
" future.result()\n",
" except:\n",
" logger.exception('Exception occurred')\n",
" logger.info('Finished processing 10-K/10-Q filings')\n",
"\n",
"class FilingLogAdapter(logging.LoggerAdapter):\n",
"\n",
" def process(self, msg, kwargs):\n",
" filing = tls.filing\n",
" return '[%s %s %s] %s'%(filing['ticker'],filing['cikNumber'],filing['accessionNumber'],msg), kwargs\n",
"\n",
"def setup_logging(log_file):\n",
" \"\"\"Setup the Python logging infrastructure.\"\"\"\n",
" global tls,logger,filing_logger\n",
" tls = threading.local()\n",
"\n",
" if log_file:\n",
" logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n",
" else:\n",
" logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n",
" logger = logging.getLogger('default')\n",
" filing_logger = FilingLogAdapter(logger,{})\n",
"\n",
"def parse_args(daily_update=False):\n",
" \"\"\"Returns the arguments and options passed to the script.\"\"\"\n",
" parser = argparse.ArgumentParser(description='Process XBRL filings and extract financial data and ratios into a DB.')\n",
" if not daily_update:\n",
" parser.add_argument('rss_feeds', metavar='RSS', nargs='*', help='EDGAR RSS feed file')\n",
" parser.add_argument('--create-tables', default=False, action='store_true', help='specify very first time to create empty DB tables')\n",
" parser.add_argument('--db', metavar='DSN', default='sec.db3', dest='db_name', help='specify the target DB datasource name or file')\n",
" parser.add_argument('--db-driver', default='sqlite', choices=['sqlite','odbc'], help='specify the DB driver to use')\n",
" parser.add_argument('--log', metavar='LOGFILE', dest='log_file', help='specify output log file')\n",
" parser.add_argument('--threads', metavar='MAXTHREADS', type=int, default=8, dest='max_threads', help='specify max number of threads')\n",
" parser.add_argument('--cik', metavar='CIK', type=int, nargs='*', help='limit processing to only the specified CIK number')\n",
" parser.add_argument('--recompute', default=False, action='store_true', help='recompute and replace filings already present in DB')\n",
" parser.add_argument('--store-fact-mappings', default=False, action='store_true', help='stores original XBRL fact values and mappings to line items in DB')\n",
" if daily_update:\n",
" parser.add_argument('--retries', type=int, default=3, dest='max_retries', help='specify max number of retries to download a specific filing')\n",
" parser.add_argument('--update-tickers', default=False, action='store_true', help='updates Ticker/CIK in --db')\n",
" return parser.parse_args()\n",
"\n",
"def build_secdb(feeds):\n",
" # Setup python logging framework\n",
" setup_logging(args.log_file)\n",
"\n",
" tickers = load_ticker_symbols()\n",
"\n",
" # Setup up DB connection\n",
" global db_connect\n",
" db_connect = setup_db_connect(args.db_driver,args.db_name)\n",
"\n",
" # Create all required DB tables\n",
" if 'create_tables' in args:\n",
" if args.create_tables:\n",
" create_db_tables()\n",
" create_db_indices()\n",
" insert_ticker_symbols(tickers)\n",
"\n",
" # Process all filings in the given RSS feeds one month after another\n",
" for filepath in feeds:\n",
"\n",
" # Load EDGAR filing metadata from RSS feed (and filter out all non 10-K/10-Q filings or companies without an assigned ticker symbol)\n",
" filings = {}\n",
" for filing in feed_tools.read_feed(filepath):\n",
" # Google to Alphabet reorganization\n",
" if filing['cikNumber'] == 1288776:\n",
" filing['cikNumber'] = 1652044\n",
" if args.cik is None or filing['cikNumber'] in args.cik:\n",
" if filing['formType'] in ('10-K','10-K/A','10-Q','10-Q/A') and filing['cikNumber'] in tickers:\n",
" filing['ticker'] = tickers[filing['cikNumber']]\n",
" filings.setdefault(filing['cikNumber'],[]).append(filing)\n",
"\n",
" # Process the selected XBRL filings\n",
" process_filings(filings)\n",
"\n",
"def collect_feeds(args):\n",
" \"\"\"Returns an generator of the resolved, absolute RSS file paths.\"\"\"\n",
" for filepath in args.rss_feeds:\n",
" for resolved_filepath in glob.iglob(os.path.abspath(filepath)):\n",
" yield resolved_filepath\n",
"\n",
"def main():\n",
" # Parse script arguments\n",
" global args\n",
" args = parse_args()\n",
"\n",
" build_secdb(collect_feeds(args))\n",
"\n",
"if __name__ == '__main__':\n",
" sec = timeit.timeit(main,number=1)\n",
" logger.info('Finished in %fs',sec)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008849557522123894,
0,
0,
0.011111111111111112,
0,
0,
0.128,
0,
0,
0.041666666666666664,
0,
0,
0,
0.014285714285714285,
0,
0.0273972602739726,
0.01910828025477707,
0.01910828025477707,
0.01818181818181818,
0,
0,
0.02857142857142857,
0.005319148936170213,
0.03636363636363636,
0.038461538461538464,
0.017543859649122806,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.012195121951219513,
0.011363636363636364,
0,
0,
0.012195121951219513,
0,
0,
0.08333333333333333,
0,
0,
0,
0.037037037037037035,
0,
0.017857142857142856,
0,
0,
0.04285714285714286,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0.0273972602739726,
0,
0.02040816326530612,
0,
0,
0.03333333333333333,
0.009345794392523364,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0.02857142857142857,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0.008849557522123894,
0,
0,
0.003436426116838488,
0,
0,
0,
0,
0,
0.008849557522123894,
0,
0,
0,
0.0034965034965034965,
0,
0,
0,
0.00847457627118644,
0,
0,
0,
0.00819672131147541,
0,
0.007751937984496124,
0,
0,
0,
0,
0,
0,
0.05405405405405406,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0.010638297872340425,
0,
0.0379746835443038,
0,
0.018867924528301886,
0,
0,
0,
0,
0.04081632653061224,
0,
0,
0,
0.020833333333333332,
0.0078125,
0,
0.03,
0,
0,
0,
0.045454545454545456,
0,
0.006802721088435374,
0.012345679012345678,
0.02127659574468085,
0,
0,
0,
0,
0,
0.03773584905660377,
0.009708737864077669,
0,
0.008695652173913044,
0,
0,
0,
0.05172413793103448,
0.008064516129032258,
0,
0,
0.006172839506172839,
0,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0.044444444444444446,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0.024390243902439025,
0.005714285714285714,
0.0625,
0.0196078431372549,
0.014705882352941176,
0,
0,
0.0196078431372549,
0,
0,
0.030303030303030304,
0,
0.05825242718446602,
0,
0,
0,
0,
0,
0.06944444444444445,
0,
0,
0.05128205128205128,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0,
0.02127659574468085,
0.00684931506849315,
0,
0,
0,
0.011111111111111112,
0.013888888888888888,
0,
0.022598870056497175,
0.018867924528301886,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0,
0.011111111111111112,
0,
0,
0,
0.006578947368421052,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0.011235955056179775,
0.006622516556291391,
0.011235955056179775,
0.006711409395973154,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04361370716510903,
0,
0,
0,
0,
0.08275862068965517,
0.004672897196261682,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02197802197802198,
0,
0,
0.0078125,
0,
0,
0.02197802197802198,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.026737967914438502,
0,
0,
0,
0,
0,
0.023076923076923078,
0,
0,
0,
0.013513513513513514,
0,
0,
0,
0.02654867256637168,
0,
0,
0,
0,
0,
0,
0,
0.025906735751295335,
0,
0,
0,
0.031007751937984496,
0,
0,
0,
0.027777777777777776,
0.015151515151515152,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03723404255319149,
0,
0,
0.06285714285714286,
0,
0.03345724907063197,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015748031496062992,
0,
0,
0.06285714285714286,
0,
0.00975609756097561,
0,
0,
0,
0,
0.0658682634730539,
0,
0.04918032786885246,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.028169014084507043,
0.011904761904761904,
0.010309278350515464,
0,
0,
0.07246376811594203,
0.011764705882352941,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0.07801418439716312,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02617801047120419,
0,
0,
0,
0,
0,
0,
0,
0.018518518518518517,
0,
0,
0.02040816326530612,
0,
0,
0,
0,
0,
0,
0,
0.028985507246376812,
0,
0,
0,
0.05128205128205128,
0.009433962264150943,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0.043478260869565216,
0.007246376811594203,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.06779661016949153,
0.012345679012345678,
0.015151515151515152,
0,
0,
0.020833333333333332,
0,
0,
0.022727272727272728,
0,
0,
0.02654867256637168,
0,
0,
0.05405405405405406,
0.056179775280898875,
0.006535947712418301,
0,
0,
0,
0.036036036036036036,
0.015625,
0,
0,
0.06451612903225806,
0,
0.015384615384615385,
0,
0,
0.021052631578947368,
0,
0,
0.022988505747126436,
0,
0,
0.026785714285714284,
0,
0,
0,
0.0033444816053511705,
0,
0.025210084033613446,
0,
0,
0,
0.05454545454545454,
0.056818181818181816,
0.005714285714285714,
0,
0.0392156862745098,
0,
0,
0,
0,
0,
0.04132231404958678,
0.015267175572519083,
0,
0,
0,
0,
0.024096385542168676,
0.013513513513513514,
0,
0.006993006993006993,
0,
0.012195121951219513,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0.014388489208633094,
0,
0,
0.0625,
0,
0.014925373134328358,
0,
0,
0.020618556701030927,
0,
0,
0.02247191011235955,
0,
0,
0.02631578947368421,
0,
0,
0.008928571428571428,
0,
0.05357142857142857,
0.05555555555555555,
0.005714285714285714,
0,
0,
0,
0.04065040650406504,
0.015037593984962405,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0425531914893617,
0.013422818791946308,
0,
0.011976047904191617,
0,
0,
0.01098901098901099,
0,
0.024096385542168676,
0.013422818791946308,
0,
0.013071895424836602,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0.014598540145985401,
0,
0,
0.07017543859649122,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0.01639344262295082,
0,
0.017391304347826087,
0,
0.011111111111111112,
0,
0.01652892561983471,
0,
0.012048192771084338,
0,
0.016260162601626018,
0,
0.011764705882352941,
0,
0.007042253521126761,
0,
0.010638297872340425,
0.01694915254237288,
0.010416666666666666,
0,
0,
0.03278688524590164,
0.031746031746031744,
0,
0,
0,
0.0392156862745098,
0.01652892561983471,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0.02531645569620253,
0,
0,
0,
0.021739130434782608,
0,
0,
0.017391304347826087,
0,
0.012048192771084338,
0,
0.014814814814814815,
0,
0,
0,
0.014598540145985401,
0,
0.012195121951219513,
0,
0.007042253521126761,
0,
0,
0.03636363636363636,
0.03508771929824561,
0,
0,
0,
0.0392156862745098,
0.01652892561983471,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0.015503875968992248,
0,
0,
0,
0.022727272727272728,
0.0196078431372549,
0.02,
0.018518518518518517,
0.018018018018018018,
0.017699115044247787,
0.019801980198019802,
0,
0,
0,
0,
0,
0,
0,
0.020134228187919462,
0.013888888888888888,
0.022988505747126436,
0.023529411764705882,
0.021505376344086023,
0.020833333333333332,
0.02040816326530612,
0.023255813953488372,
0,
0,
0,
0,
0.010101010101010102,
0.011560693641618497,
0,
0,
0,
0.048701298701298704,
0,
0,
0,
0,
0,
0,
0,
0,
0.015151515151515152,
0,
0,
0.007246376811594203,
0,
0.02040816326530612,
0.009708737864077669,
0.02040816326530612,
0,
0,
0.01694915254237288,
0,
0.012345679012345678,
0.0425531914893617,
0.045454545454545456,
0.043478260869565216,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0.013333333333333334,
0,
0,
0,
0.047619047619047616,
0.007246376811594203,
0,
0,
0,
0,
0,
0.0625,
0.023255813953488372,
0,
0.03333333333333333,
0,
0.019417475728155338,
0.011111111111111112,
0.030927835051546393,
0,
0,
0,
0.05,
0,
0,
0,
0.02127659574468085,
0,
0,
0,
0.046296296296296294,
0,
0.034482758620689655,
0,
0.05555555555555555,
0,
0,
0,
0.031746031746031744,
0,
0.021052631578947368,
0,
0.020833333333333332,
0,
0.027777777777777776,
0,
0.00819672131147541,
0,
0.010526315789473684,
0.0070921985815602835,
0.007352941176470588,
0.01652892561983471,
0.009900990099009901,
0.007246376811594203,
0.007874015748031496,
0.007407407407407408,
0.006289308176100629,
0,
0.006666666666666667,
0.008403361344537815,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0070921985815602835,
0,
0,
0,
0,
0,
0,
0.036036036036036036,
0,
0.01282051282051282,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0.02564102564102564,
0.02564102564102564
] | 1,083 | 0.006792 | false |
# Runs the evaluation of the task 2 response
# Requires python 3 and numpy
# Usage:
#
# <Python3 executable> t2_eval.py -s <std_dir> -t <test_dir> [-o <output_dir>] [-m]
# -s [std_dir] - path to the standard files directory
# -t [test_dir] - path to the response files directory
# -o [output_dir] - path to the comparator reports folder
# -m - enables the simplified comparison mode (no penalty for extra values)
# -h - display this message
#
#########################################################################################
import sys
import os
import getopt
from dialent.task2.eval import Evaluator
#########################################################################################
def usage():
print('Usage:')
print('<Python3 executable> t2_eval.py -s <std_dir> -t <test_dir> [-o <output_dir>] [-m]')
print(' -s [std_dir] - path to the standard files directory')
print(' -t [test_dir] - path to the response files directory')
print(' -o [output_dir] - path to the comparator reports folder')
print(' -m - enables the simplified comparison mode (no penalty for extra values)')
print(' -h - display this message')
def main():
"""
Runs comparison
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:o:hm')
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
std_path = None
test_path = None
out_path = ''
mode = 'regular'
for o, a in opts:
if o == '-h':
usage()
sys.exit()
elif o == '-s':
std_path = a
elif o == '-t':
test_path = a
elif o == '-o':
out_path = a
elif o == '-m':
mode = 'simple'
else:
assert False, 'unhandled option'
assert std_path != None and test_path != None, 'Stnadard and test paths must be set'\
'(see python t2_eval.py -h)'
e = Evaluator(mode)
e.evaluate(std_path, test_path, out_path)
if __name__ == '__main__':
main()
| [
"# Runs the evaluation of the task 2 response\n",
"# Requires python 3 and numpy\n",
"\n",
"# Usage:\n",
"#\n",
"# <Python3 executable> t2_eval.py -s <std_dir> -t <test_dir> [-o <output_dir>] [-m]\n",
"# -s [std_dir] - path to the standard files directory\n",
"# -t [test_dir] - path to the response files directory\n",
"# -o [output_dir] - path to the comparator reports folder\n",
"# -m - enables the simplified comparison mode (no penalty for extra values)\n",
"# -h - display this message\n",
"#\n",
"\n",
"#########################################################################################\n",
"\n",
"import sys\n",
"import os\n",
"import getopt\n",
"\n",
"from dialent.task2.eval import Evaluator\n",
"\n",
"#########################################################################################\n",
"\n",
"def usage():\n",
" print('Usage:')\n",
" print('<Python3 executable> t2_eval.py -s <std_dir> -t <test_dir> [-o <output_dir>] [-m]')\n",
" print(' -s [std_dir] - path to the standard files directory')\n",
" print(' -t [test_dir] - path to the response files directory')\n",
" print(' -o [output_dir] - path to the comparator reports folder')\n",
" print(' -m - enables the simplified comparison mode (no penalty for extra values)')\n",
" print(' -h - display this message')\n",
"\n",
"def main():\n",
" \"\"\"\n",
" Runs comparison\n",
" \"\"\"\n",
" try:\n",
" opts, args = getopt.getopt(sys.argv[1:], 's:t:o:hm')\n",
" except getopt.GetoptError as err:\n",
" print(str(err))\n",
" usage()\n",
" sys.exit(2)\n",
"\n",
" std_path = None\n",
" test_path = None\n",
" out_path = ''\n",
" mode = 'regular'\n",
" for o, a in opts:\n",
" if o == '-h':\n",
" usage()\n",
" sys.exit()\n",
" elif o == '-s':\n",
" std_path = a\n",
" elif o == '-t':\n",
" test_path = a\n",
" elif o == '-o':\n",
" out_path = a\n",
" elif o == '-m':\n",
" mode = 'simple'\n",
" else:\n",
" assert False, 'unhandled option'\n",
"\n",
" assert std_path != None and test_path != None, 'Stnadard and test paths must be set'\\\n",
" '(see python t2_eval.py -h)'\n",
"\n",
" e = Evaluator(mode)\n",
" e.evaluate(std_path, test_path, out_path)\n",
"\n",
"if __name__ == '__main__':\n",
" main()\n",
"\n"
] | [
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0.07692307692307693,
0,
0.010526315789473684,
0,
0,
0,
0.009615384615384616,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
1
] | 71 | 0.018241 | false |
"""
This module contains the 'confirm_password' menu node.
"""
from hashlib import sha256
from textwrap import dedent
from django.conf import settings
from evennia import logger
from menu.character import _login
from menu.email_address import text_email_address
def confirm_password(caller, input):
"""Ask the user to confirm the account's password.
The account's password has been saved in the session for the
time being, as a hashed version. If the hashed version of
the retyped password matches, then the player is created.
If not, ask for another password.
"""
text = ""
options = (
{
"key": "b",
"desc": "Go back to the password selection.",
"goto": "create_password",
},
{
"key": "_default",
"desc": "Enter your password.",
"goto": "confirm_password",
},
)
caller.msg(echo=True)
password = input.strip()
playername = caller.db._playername
first_password = caller.db._password
second_password = sha256(password).hexdigest()
if first_password != second_password:
text = dedent("""
|rThe password you have specified doesn't match the first one.|n
Type |yb|n to choose a different password.
Or type the confirmation password again.
""".strip("\n"))
else:
# Creates the new player.
from evennia.commands.default import unloggedin
try:
permissions = settings.PERMISSION_PLAYER_DEFAULT
player = unloggedin._create_player(caller, playername,
password, permissions)
except Exception:
# We are in the middle between logged in and -not, so we have
# to handle tracebacks ourselves at this point. If we don't, we
# won't see any errors at all.
caller.msg(dedent("""
|rAn error occurred.|n Please e-mail an admin if
the problem persists.
Type |yb|n to go back to the login screen.
Or enter another password.
""".strip("\n")))
logger.log_trace()
else:
caller.db._player = player
del caller.db._password
_login(caller, player)
text = "Your new account was successfully created!"
text += "\n\n" + text_email_address(player)
options = (
{
"key": "_default",
"desc": "Enter a valid e-mail address.",
"goto": "email_address",
},
)
return text, options
| [
"\"\"\"\n",
"This module contains the 'confirm_password' menu node.\n",
"\n",
"\"\"\"\n",
"\n",
"from hashlib import sha256\n",
"from textwrap import dedent\n",
"\n",
"from django.conf import settings\n",
"\n",
"from evennia import logger\n",
"\n",
"from menu.character import _login\n",
"from menu.email_address import text_email_address\n",
"\n",
"def confirm_password(caller, input):\n",
" \"\"\"Ask the user to confirm the account's password.\n",
"\n",
" The account's password has been saved in the session for the\n",
" time being, as a hashed version. If the hashed version of\n",
" the retyped password matches, then the player is created.\n",
" If not, ask for another password.\n",
"\n",
" \"\"\"\n",
" text = \"\"\n",
" options = (\n",
" {\n",
" \"key\": \"b\",\n",
" \"desc\": \"Go back to the password selection.\",\n",
" \"goto\": \"create_password\",\n",
" },\n",
" {\n",
" \"key\": \"_default\",\n",
" \"desc\": \"Enter your password.\",\n",
" \"goto\": \"confirm_password\",\n",
" },\n",
" )\n",
"\n",
" caller.msg(echo=True)\n",
" password = input.strip()\n",
"\n",
" playername = caller.db._playername\n",
" first_password = caller.db._password\n",
" second_password = sha256(password).hexdigest()\n",
" if first_password != second_password:\n",
" text = dedent(\"\"\"\n",
" |rThe password you have specified doesn't match the first one.|n\n",
" Type |yb|n to choose a different password.\n",
" Or type the confirmation password again.\n",
" \"\"\".strip(\"\\n\"))\n",
" else:\n",
" # Creates the new player.\n",
" from evennia.commands.default import unloggedin\n",
" try:\n",
" permissions = settings.PERMISSION_PLAYER_DEFAULT\n",
" player = unloggedin._create_player(caller, playername,\n",
" password, permissions)\n",
" except Exception:\n",
" # We are in the middle between logged in and -not, so we have\n",
" # to handle tracebacks ourselves at this point. If we don't, we\n",
" # won't see any errors at all.\n",
" caller.msg(dedent(\"\"\"\n",
" |rAn error occurred.|n Please e-mail an admin if\n",
" the problem persists.\n",
" Type |yb|n to go back to the login screen.\n",
" Or enter another password.\n",
" \"\"\".strip(\"\\n\")))\n",
" logger.log_trace()\n",
" else:\n",
" caller.db._player = player\n",
" del caller.db._password\n",
" _login(caller, player)\n",
" text = \"Your new account was successfully created!\"\n",
" text += \"\\n\\n\" + text_email_address(player)\n",
" options = (\n",
" {\n",
" \"key\": \"_default\",\n",
" \"desc\": \"Enter a valid e-mail address.\",\n",
" \"goto\": \"email_address\",\n",
" },\n",
" )\n",
"\n",
" return text, options\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 83 | 0.000606 | false |
from suds.client import Client
from suds.wsse import *
import socket, logging
import suds
CURSOR=None
__ConfServiceClient=None
__DefServiceClient=None
__host = None
__port = None
__usr = None
__pwd = None
__ssl = None
class ConnectionService(object):
"""Connnection Initialization.
:param:
host [string] => host name or ip-address.
port [string] => Port connection.
usr [string] => Username.
pwd [string] => Password.
ssl [boolean] => Use https and ssl security."""
def __init__( self , host , port , usr , pwd, ssl):
socket.setdefaulttimeout(None)
logging.basicConfig(level=logging.INFO)
logging.getLogger("suds.client").setLevel(logging.INFO)
self.__host = host
self.__port = port
self.__usr = usr
self.__pwd = pwd
self.__ssl = ssl
self.__confservice()
self.__defservice()
#Create the object for the ConfigurationService connection
def __confservice(self):
if self.__ssl == True:
Url="https://"+self.__host+":"+self.__port
else:
Url="http://"+self.__host+":"+self.__port
SrvWsdl=Url+"/ws/v8/configurationservice?wsdl"
Security=suds.wsse.Security()
Security.tokens.append(suds.wsse.UsernameToken(self.__usr, self.__pwd))
self.__ConfServiceclient=suds.client.Client(SrvWsdl, timeout=3600)
self.__ConfServiceclient.set_options(wsse=Security)
#Create the object for DefectService connection.
def __defservice(self):
if self.__ssl == True:
Url="https://"+self.__host+":"+self.__port
else:
Url="http://"+self.__host+":"+self.__port
SrvWsdl=Url+"/ws/v8/defectservice?wsdl"
Security=suds.wsse.Security()
Security.tokens.append(suds.wsse.UsernameToken(self.__usr, self.__pwd))
self.__DefServiceclient=suds.client.Client(SrvWsdl, timeout=3600)
self.__DefServiceclient.set_options(wsse=Security)
# METHOD: ConfigurationService->getProjects.
def getProjects(self, descriptionPattern, includeChildren, includeStreams, namePattern):
"""Get a list of projects specifications (for all projects or for all filtered set of projects).
:param:
descriptionPattern [string] => Glob pattern matching the description of one or more projects.
includeChildren [boolean]=> Value of false if the results should not include roles and other properties associated with the project. Defaults to true.
includeStreams [boolean]=> Value of false if the results should not include streams associated with the project. Defaults to true.
namePattern [string] => Glob pattern matching the name of one or more projects."""
projectfilter=self.__ConfServiceclient.factory.create("projectFilterSpecDataObj")
projectfilter.descriptionPattern = descriptionPattern
projectfilter.includeChildren = includeChildren
projectfilter.includeStreams = includeStreams
projectfilter.namePattern= namePattern
try:
return self.__ConfServiceclient.service.getProjects(projectfilter)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->copyStream.
def copyStream(self, projectName, streamName):
"""Make a copy of a stream. Does noy copy stream role assignments.
:param:
name [string] => Required. Name of the project.
name [string] => Required. Name of the stream."""
projectIdDataObj=self.__ConfServiceclient.factory.create("projectIdDataObj")
projectIdDataObj.name=projectName
streamIdObj=self.__ConfServiceclient.factory.create("streamIdObj")
streamIdObj.name=streamName
try:
return self.__ConfServiceclient.service.copyStream(projectIdDataObj, streamIdObj)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->createAttribute.
def createAttribute(self, attributeName, attributeType, attributeValueChangeSpecData, defaultValue, description, showInTriage):
""""Create an attribute.
:param:
attributeName [string] => Name of the attribute. Required
attributeType [string] => The type of attribute. Required
attributeValueChangeSpec [attributeValueChangeSpecDataObj] => For a LIST_OF_VALUES attribute type only: The set of values available to the attribute.
defaultValue [string] => For a LIST_OF_VALUES attribute type only: The default attribute value.
description [string] => Descriptions of the attribute
showInTriage [boolean] => If true, makes the attribute available for use in the Triage pane of the UI."""
attributeDefinitionSpecDataObj= self.__ConfServiceclient.factory.create("attributeDefinitionSpecDataObj")
attributeDefinitionSpecDataObj.attributeName= attributeName
attributeDefinitionSpecDataObj.attributeType= attributeType
attributeDefinitionSpecDataObj.attributeValueChangeSpec= attributeValueChangeSpecData
attributeDefinitionSpecDataObj.defaultValue= defaultValue
attributeDefinitionSpecDataObj.description= description
attributeDefinitionSpecDataObj.showInTriage=showInTriage
try:
self.__ConfServiceclient.service.createAttribute(attributeDefinitionSpecDataObj)
return "Attribute created"
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getAllLdapConfigurations
def getAllLdapConfigurations(self):
"""Return all LDAP Configurations."""
return self.__ConfServiceclient.service.getAllLdapConfigurations()
# METHOD: ConfigurationService->getAttribute.
def getAttribute(self, name):
"""Retrieve the properties of a specified attribute.
:param:
name [string] => Required. Name of the attribute."""
attributeDefinitionId=self.__ConfServiceclient.factory.create("attributeDefinitionIdDataObj")
attributeDefinitionId.name = name
try:
return self.__ConfServiceclient.service.getAttribute(attributeDefinitionId)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getCheckerProperties.
def getCheckerProperties(self, checkerNameList, subcategoryList, domainList, categoryList, cweCategoryList, impactList, projectIdname):
"""Retrieve a list of available checker properties.
:param:
checkerNameList [string] => Name of the checker. Zero or more checker names allowed.
subcategoryList [string] => Subcategorization of the software issue found by the checker.
domainList [string] => Domain of the checker. Zero or more domains allowed.
categoryList [string] => Categorization of the software issue found by checker. Zero or more categories allowed.
cweCategoryList [string] => Common Weakness Enumeration identifier of the type of issue found by the checker. Zero or more identifiers allowed.
impactList [string] => Probable impact (High, Medium, or Low) of the issue found by the checker. Zero or more impact levels allowed.
projectIdname [string] => Required. Name of the project."""
filterSpec= self.__ConfServiceclient.factory.create("checkerPropertyFilterSpecDataObj")
projectId= self.__ConfServiceclient.factory.create("projectIdDataObj")
projectId.name= projectIdname
filterSpec.checkerNameList= checkerNameList
filterSpec.subcategoryList= subcategoryList
filterSpec.domainList= domainList
filterSpec.categoryList= categoryList
filterSpec.cweCategoryList= cweCategoryList
filterSpec.impactList= impactList
filterSpec.projectId= projectId
try:
return self.__ConfServiceclient.service.getCheckerProperties(filterSpec)
except suds.WebFault as detail:
return detail
# METHOD: ConfiguratioService->getComponent.
def getComponent(self, name):
"""Retrieve the properties of a component.
:param:
name [string] => Required. Name of a component in the form componentMapName.componentName (for example, myComponentMap.myComponent)."""
componentId= self.__ConfServiceclient.factory.create("componentIdDataObj")
componentId.name =name
try:
return self.__ConfServiceclient.service.getComponent(componentId)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getComponentMaps.
def getComponentMaps(self, namePattern):
"""Retrieve a list of component maps that matches a component name pattern.
:param:
namePattern [string] => Glob pattern matching the name of one or more component maps."""
filterSpec= self.__ConfServiceclient.factory.create("componentMapFilterSpecDataObj")
filterSpec.namePattern= namePattern
try:
return self.__ConfServiceclient.service.getComponentMaps(filterSpec)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getDefectStatuses.
def getDefectStatuses(self):
"""Retrieve a list of Status attribute values that can be associated with a software issue."""
return self.__ConfServiceclient.service.getDefectStatuses()
# METHOD: ConfigurationService->getGroup.
def getGroup(self, displayName, domain, name):
"""Retrieve the properties of a user group.
:param:
displayName [string] => The name of a user group. To retrieve an LDAP group, you use <groupname>@<LDAPserver>.
domain [string] => Name of the LDAP server domain.
name [string] => Required. Name of the local or LDAP group."""
groupId=self.__ConfServiceclient.factory.create("groupIdDataObj")
domainId= self.__ConfServiceclient.factory.create("serverDomainDataObj")
domainId.name= domain
groupId.displayName= displayName
groupId.domain= domainId
groupId.name= name
try:
return self.__ConfServiceclient.service.getGroup(groupId)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getGroups.
def getGroups(self, ldap, namePattern, name, userList):
"""Get a list of groups.
:param:
ldap [boolean] => Value of true for LDAP groups only; otherwise, false.
namePattern [string] => Glob pattern matching the name of one or more groups.
name [string] => Name of a project with which the group must have a role association.
userList [string] => User name of a user that must belong to the group."""
filterSpec= self.__ConfServiceclient.factory.create("groupFilterSpecDataObj")
projectIdDataObj= self.__ConfServiceclient.factory.create("projectIdDataObj")
projectIdDataObj.name= name
filterSpec.ldap= ldap
filterSpec.namePattern = namePattern
filterSpec.projectIdDataObj= projectIdDataObj
filterSpec.userList= userList
try:
return self.__ConfServiceclient.service.getGroups(filterSpec)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getSnapshotInformation.
def getSnapshotInformation(self, id):
"""Retrieve information about a snashot in a stream.
:param:
id [long] => Numeric identifier for the snapshot. Required."""
snapshotIdData=self.__ConfServiceclient.factory.create("snapshotIdDataObj")
snapshotIdData.id=id
try:
return self.__ConfServiceclient.service.getSnapshotInformation(snapshotIdData)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getSnapshotsForStream.
def getSnapshotsForStream(self, name):
"""Retrieve a set of snapshots that belong to a spcecified stream.
:param:
name [string] => Required. Name of the stream."""
streamId= self.__ConfServiceclient.factory.create("streamIdDataObj")
streamId.name= name
try:
return self.__ConfServiceclient.service.getSnapshotsForStream(streamId)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getStreams.
def getStreams(self, languageList, descriptionPattern, namePattern):
"""Retrieve a set of streams.
:param:
languageList [string] => Programming language matching that of one or more streams. Zero or more language filters allowed.
descriptionPattern [string] => Glob pattern matching the description of one or more streams.
namePattern [string] => Glob pattern matching the name of one or more streams."""
filterSpec= self.__ConfServiceclient.factory.create("streamFilterSpecDataObj")
filterSpec.languageList= languageList
filterSpec.descriptionPattern= descriptionPattern
filterSpec.namePattern= namePattern
try:
return self.__ConfServiceclient.service.getStreams(filterSpec)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getUser.
def getUser(self,username):
"""Retrieve a user by user name.
:param:
username [string] => User name."""
try:
return self.__ConfServiceclient.service.getUser(username)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->getUsers.
def getUsers(self, assignable, disabled, groupList, includeDetails, ldap, locked, namePattern, name, pageSpecData):
"""Get users (filtered or unfiltered).
:param:
assignable [boolean] => Set to true to retrieve only those users who can own software issues; false to retrieve only those who cannot. Otherwise, do not set.
disabled [boolean] => Set to true to retrieve disabled users only. Set to false to retrieve enabled users only. Otherwise, do not set.
groupList [string] => Name of user group to which the retrieved users must belong. Zero or more groups allowed.
includeDetails [boolean] => Set to false to prevent the inclusion of role assignments and other user details in the reqponse. Defaults to true.
ldap [boolean] => Set to true to retrieve only LDAP users; false to retrieve only local users. Otherwise, do not set.
locked [boolean] => Set to true to retrieve only those users who have been locked out; false to retrieve only unlocked users. Otherwise, do not set.
namePattern [string] => Glob pattern that matches the user name of the users to retrieve.
name [string] => Name of project to which the retrieved set of users must have a role association."""
filterSpec= self.__ConfServiceclient.factory.create("userFilterSpecDataObj")
projectIdDataObj= self.__ConfServiceclient.factory.create("projectIdDataObj")
projectIdDataObj.name= name
filterSpec.assignable= assignable
filterSpec.disabled= disabled
filterSpec.groupsList= groupList
filterSpec.includeDetails= includeDetails
filterSpec.ldap= ldap
filterSpec.locked= locked
filterSpec.namePattern= namePattern
filterSpec.projectIdDataObj= projectIdDataObj
try:
return self.__ConfServiceclient.service.getUsers(filterSpec,pageSpecData)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->notify.
def notify(self, usernames, subject, message):
"""Send an emmail notification to a specified user.
:param:
usernames [string] => One or more usernames.
subject [string] => Subject-line text for the email.
message [string] => Body text for the email."""
try:
return self.__ConfServiceclient.service.notify(usernames, subject, message)
except suds.WebFault as detail:
return detail
# METHOD: ConfigurationService->updateAttribute.
def updateAttribute(self, attributeName, attributeType, attributeValueChangeSpecData, defaultValue, description, showInTriage):
"""Update an attribute specification.
attributeName [string] => Required. Name for the attribute.
attributeType [string] => The type of attribute. Required when using createAttribute().
attributeValueChangeSpec [attributeValueChangeSpecDataObj] => For a LIST_OF_VALUES attribute type only: The set of values available to the attribute.
defaultValue [string] => For a LIST_OF_VALUES attribute type only: The default attribute value.
description [string] => Description of the attribute.
showInTriage [boolean]=> If true, makes the attribute available for use in the Triage pane of the UI."""
attributeDefinitionId= self.__ConfServiceclient.factory.create("attributeDefinitionDataObj")
attributeDefinitionId= attributeName
attributeDefinitionSpec= self.__ConfServiceclient.factory.create("attributeDefinitionSpecDataObj")
attributeDefinitionSpec.attributeName= None
attributeDefinitionSpec.attributeType= attributeType
attributeDefinitionSpec.attributeValueChangeSpec= attributeValueChangeSpecData
attributeDefinitionSpec.defaultValue= defaultValue
attributeDefinitionSpec.description= description
attributeDefinitionSpec.showInTriage= showInTriage
try:
self.__ConfServiceclient.service.updateAttribute(attributeDefinitionId, attributeDefinitionSpec)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getCheckerSubcategoriesForProject.
def getCheckerSubcategoriesForProject(self, name):
"""Retrieve a list of subcatergories of software issues in the project that were found by checkers used in the analysis.
:param:
name [string] => Required. Name of the project."""
projectId= self.__DefServiceclient.factory.create("projectIdDataObj")
projectId.name = name
try:
return self.__DefServiceclient.service.getCheckerSubcategoriesForProject(projectId)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getCheckerSubcategoriesForStreams.
def getCheckerSubcategoriesForStreams(self, name):
"""Retrieve a list of subcategories of software issues in the stream that were found by checkers used in the analysis.
:param:
name [string] => Required. Name of the stream."""
streamIds= self.__DefServiceclient.factory.create("streamIdDataObj")
streamIds.name= name
try:
return self.__DefServiceclient.service.getCheckerSubcategoriesForStreams(streamIds)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getComponentMetricsForProject .
def getComponentMetricsForProject (self, projectName, componentName):
"""Retrieve metrics on components associated with streams in a specific project.
:param:
projectName [string] => Required. Name of the project.
componentName [string] => Name of the component in the project in the form [ComponentMap].[component]."""
projectId= self.__DefServiceclient.factory.create("projectIdDataObj")
projectId.name= projectName
componentIds= self.__DefServiceclient.factory.create("componentIdDataObj")
componentIds.name= componentName
try:
return self.__DefServiceclient.service.getComponentMetricsForProject(projectId,componentIds)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getFileContents.
def getFileContents(self, name, contentsMD5, filePathname):
"""Retrieve the Base64-encoded contents of a file that contains an instance of a CID.
:param:
name [string] => Required. Name of the stream.
contentMD5 [string] => Required. MD5 checksum (a fingerprint or message digest) of the file contents.
You can get the contentsMD5 and filePathname for an instance of a CID by using getStreamDefects() with the includeDefectInstances filter set to true.
filePathname [string] => Required. Path to the file that contains the instance of the CID.
You can get the contentsMD5 and filePathname for an instance of a CID by using getStreamDefects() with the includeDefectInstances filter set to true."""
streamId= self.__DefServiceclient.factory.create("streamIdDataObj")
streamId.name =name
fileId= self.__DefServiceclient.factory.create("fileIdDataObj")
fileId.contentMD5= contentsMD5
fileId.filePathname= filePathname
try:
return self.__DefServiceclient.service.getFileContents(streamId, fileId)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getMergedDefectDetectionHistory.
def getMergedDefectDetectionHistory(self, cid, mergeKey, name):
"""Retrieves detection history for a software issue. The return data is similar to the Detection History information in the Coverity Connect UI.
:param:
cid [long] => CID.
mergeKey [string] => Numeric key for a CID.
name [string] => Required. Name of the stream."""
mergedDefectIdDataObj= self.__DefServiceclient.factory.create("mergedDefectIdDataObj")
streamIds= self.__DefServiceclient.factory.create("streamIdDataObj")
mergedDefectIdDataObj.cid= cid
mergedDefectIdDataObj.mergeKey= mergeKey
streamIds.name= name
try:
return self.__DefServiceclient.service.getMergedDefectDetectionHistory(mergedDefectIdDataObj, streamIds)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getMergedDefectHistory.
def getMergedDefectHistory(self, cid, mergeKey, name):
"""Retrieve a date and time stamped list of changes to attributes used to triage a specified CID.
:param:
cid [long] => CID.
mergeKey [string] => Numeric key for a CID.
name [string] => Required. Name of the stream."""
mergedDefectIdDataObj= self.__DefServiceclient.factory.create("mergedDefectIdDataObj")
streamIds= self.__DefServiceclient.factory.create("streamIdDataObj")
mergedDefectIdDataObj.cid= cid
mergedDefectIdDataObj.mergeKey= mergeKey
streamIds.name= name
try:
return self.__DefServiceclient.service.getMergedDefectHistory(mergedDefectIdDataObj,streamIds)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getMergedDefectsForProjectScope.
def getMergedDefectsForProjectScope(self, name, projectScopeDefectFilterSpecData, pageSpecData):
"""Retrieve CIDs (filtered or unfiltered) that are in a specified project.
:param:
name [string] => Required. Name of the project.
projectScopeDefectFilterSpecData [projectScopeDefectFilterSpecDataObj] => An optional filters on the results to return.
pageSpecData [pageSpecDataObj] => Page specifications for results."""
projectId= self.__DefServiceclient.factory.create("projectIdDataObj")
projectId.name= name
try:
return self.__DefServiceclient.service.getMergedDefectsForProjectScope(projectId,projectScopeDefectFilterSpecData,pageSpecData)
except suds.WebFault as detail:
return detail
    # METHOD: DefectService->getMergedDefectsForSnapshotScope.
    def getMergedDefectsForSnapshotScope(self, name, snapshotScopeDefectFilterSpecData, pageSpecData):
        """Retrieve CIDs (filtered or unfiltered) that are in the current or specified snapshots. Optionally, perform snapshot comparisons.
        :param:
            name [string] => Required. Name of the project.
            snapshotScopeDefectFilterSpecData [snapshotScopeDefectFilterSpecDataObj] => An optional filter on the results to return.
            pageSpecData [pageSpecDataObj] => Page specifications for results."""
        projectId= self.__DefServiceclient.factory.create("projectIdDataObj")
        projectId.name= name
        try:
            return self.__DefServiceclient.service.getMergedDefectsForSnapshotScope(projectId, snapshotScopeDefectFilterSpecData,pageSpecData)
        except suds.WebFault as detail:
            # Faults are returned (not raised) throughout this class.
            return detail
    # METHOD: DefectService->getMergedDefectsForStreams.
    def getMergedDefectsForStreams(self, name, mergedDefectFilterSpecData, pageSpecData, compareOutdatedStreams, compareSelector, showOutdatedStreams, showSelector):
        """Retrieve the current attributes and other properties of CIDs (filtered or unfiltered) in a specified stream.
        :param:
            name [string] => Required. Name of the stream.
            mergedDefectFilterSpecData [mergedDefectFilterSpecDataObj] => Filter properties used to match CIDs to return from the specified stream.
            pageSpecData [pageSpecDataObj] => Page specifications for results.
            compareOutdatedStreams [boolean] => If true, includes outdated streams found in snapshots specified by compareSelector. If false, the default, only non-outdated streams are included.
            compareSelector [string] => Snapshot ID or snapshot grammar value that sets the scope of snapshots to compare with the showSelector snapshot scope.
            showOutdatedStreams [boolean] => If true, includes outdated streams found in snapshots specified by showSelector. If false, the default, only non-outdated streams are included.
            showSelector [string] => Required. Snapshot ID or snapshot grammar value that sets the scope of snapshots.
                                     Default: last(), which includes the latest snapshot of each stream in the project."""
        streamIds= self.__DefServiceclient.factory.create("streamIdDataObj")
        snapshotScope= self.__DefServiceclient.factory.create("snapshotScopeSpecDataObj")
        streamIds.name= name
        snapshotScope.compareOutdatedStreams= compareOutdatedStreams
        snapshotScope.compareSelector= compareSelector
        snapshotScope.showOutdatedStreams= showOutdatedStreams
        snapshotScope.showSelector= showSelector
        try:
            return self.__DefServiceclient.service.getMergedDefectsForStreams(streamIds, mergedDefectFilterSpecData, pageSpecData, snapshotScope)
        except suds.WebFault as detail:
            # Faults are returned (not raised) throughout this class.
            return detail
# METHOD: DefectService->getStreamDefects.
def getStreamDefects(self, cid, mergeKey, defectStateEndDate, defectStateStartDate, includeDefectInstances, includeHistory, name):
"""Retrieve instances of software issues for one or more CIDs.
:param:
cid [long] => CID.
mergeKey [string] => Numeric key for a CID.
defectStateEndDate [dateTime]=> Ending date (and optionally, time) for the CIDs to return.
defectStateStartDate [dateTime]=> Starting date (and optionally, time) for the CIDs to return.
includeDefectInstances[boolean] => Set to true for data on each instance of software issue, including the ID. Defaults to false.
includeHistory [boolean] => Set to true for historical triage data on each instance of the software issue.
name [string] => Required. Name of the stream """
mergedDefectIdDataObjs= self.__DefServiceclient.factory.create("mergedDefectIdDataObj")
filterSpec= self.__DefServiceclient.factory.create("streamDefectFilterSpecDataObj")
streamIdDataObj= self.__DefServiceclient.factory.create("streamIdDataObj")
streamIdDataObj.name= name
mergedDefectIdDataObjs.cid= cid
mergedDefectIdDataObjs.mergeKey= mergeKey
filterSpec.defectStateEndDate= defectStateEndDate
filterSpec.defectStateStartDate= defectStateStartDate
filterSpec.includeDefectInstances= includeDefectInstances
filterSpec.includeHistory= includeHistory
filterSpec.streamIdList= streamIdDataObj
try:
return self.__DefServiceclient.service.getStreamDefects(mergedDefectIdDataObjs, filterSpec)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getTrendRecordsForProject.
def getTrendRecordsForProject(self, name, endDate, startDate):
"""Retrieve daily records on CIDs and source code in a project.
:param:
name [string] => Required. Name of the project.
endDate [dateTime] => End date (and optionally, time) for the set of records to retrieve.
startDate [dateTime] => Start date (and optionally, time) for the set of records to retrieve."""
projectId= self.__DefServiceclient.factory.create("projectIdDataObj")
filterSpec= self.__DefServiceclient.factory.create("projectTrendRecordFilterSpecDataObj")
projectId.name = name
filterSpec.endDate= endDate
filterSpec.startDate= startDate
try:
return self.__DefServiceclient.service.getTrendRecordsForProject(projectId, filterSpec)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->getTriageHistory.
def getTriageHistory(self, cid, mergeKey, name):
"""Retrieve the triage history for a software issue.
:param:
cid [long] => CID.
mergeKey [string] => Numeric key for a CID.
name [string] => Required. Name of the triage store."""
mergedDefectIdDataObj= self.__DefServiceclient.factory.create("mergedDefectIdDataObj")
triageStoreIds= self.__DefServiceclient.factory.create("triageStoreIdDataObj")
mergedDefectIdDataObj.cid= cid
mergedDefectIdDataObj.mergeKey= mergeKey
triageStoreIds.name= name
try:
return self.__DefServiceclient.service.getTriageHistory(mergedDefectIdDataObj, triageStoreIds)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->updateStreamDefects.
def updateStreamDefects(self, defectTriageId, defectTriageVerNum, id, verNum, defectStateAttributeValues):
"""Update the one or more attribute values for all instances of a CID found in a given stream. Note that this update will apply to all instances of the CID in all streams that share the same triage store.
:param:
defectTriageId [long] => Internal value for the last known triage ID. This ID changes when developers triage the issue that is associated with the id.
defectTriageVerNum [int] => Internal value for the last known triage version. This number changes when developers triage the issue that is associated with the id.
id [long] => Internal identifier for the software issue within the context of the stream.
verNum [int] => Version number associated with the id.
defectStateAttributeValues [defectStateAttributeValueDataObj] => Attribute name/value pair. One or more pairs required. """
streamDefectIds= self.__DefServiceclient.factory.create("streamDefectIdDataObj")
defectStateSpec= self.__DefServiceclient.factory.create("defectStateSpecDataObj")
streamDefectIds.defectTriageId=defectTriageId
streamDefectIds.defectTriageVerNum= defectTriageVerNum
streamDefectIds.id= id
streamDefectIds.verNum= verNum
defectStateSpec.defectStateAttributeValues= defectStateAttributeValues
try:
self.__DefServiceclient.service.updateStreamDefects(streamDefectIds, defectStateSpec)
except suds.WebFault as detail:
return detail
# METHOD: DefectService->updateTriageForCIDsInTriageStore.
def updateTriageForCIDsInTriageStore(self, name, cid, mergeKey, defectStateAttributeValues):
"""Update one or more attribute values for a CID in a specified triage store.
:param:
name [string] => Required. Name of triage store.
cid [long] => CID.
mergeKey [string] => Numeric key for a CID.
defectStateAttributeValues [defectStateAttributeValueDataObj] => Attribute name/value pair. One or more pairs required."""
triageStore= self.__DefServiceclient.factory.create("triageStoreIdDataObj")
mergedDefectIdDataObjs= self.__DefServiceclient.factory.create("mergedDefectIdDataObj")
defectState= self.__DefServiceclient.factory.create("defectStateSpecDataObj")
triageStore.name = name
mergedDefectIdDataObjs.cid= cid
mergedDefectIdDataObjs.mergeKey= mergeKey
defectState.defectStateAttributeValues= defectStateAttributeValues
try:
self.__DefServiceclient.service.updateTriageForCIDsInTriageStore(triageStore, mergedDefectIdDataObjs, defectState)
except suds.WebFault as detail:
return detail
# DATA_TYPE: Create attributeValueChangeSpecDataObj data type.
def attributeValueChangeSpec(self, attributeValueIds, attributeValues):
"""Create attributeValueChangeSpecDataObj data type.
:param:
attributeValueIds [attributeValueIdDataObj] => Automatically generated set of IDs for attribute values.
attributeValues [ attributeValueSpecDataObj] => Set of values available to an attribute."""
attributeValueChangeSpecDataObj= self.__ConfServiceclient.factory.create("attributeValueChangeSpecDataObj")
attributeValueChangeSpecDataObj.attributeValueIds= attributeValueIds
attributeValueChangeSpecDataObj.attributeValues= attributeValues
return attributeValueChangeSpecDataObj
# DATA_TYPE: Create attributeValueIdDataObj data type.
def attributeValueIdsDataObj(self, name):
"""Create attributeValueIdDataObj data type
:param:
name [string] => Name of the automatically generated ID for the attribute value. Do not specify when creating an attribute value."""
attributeValueIds= self.__ConfServiceclient.factory.create("attributeValueIdDataObj")
attributeValueIds.name = name
return attributeValueIds
# DATA_TYPE: Create attributeValueSpecDataObj data type.
def attributeValues(self, deprecated, name):
"""Create attributeValueSpecDataObj data type.
:param:
deprecated [boolean] => Value of true if the specified attribute value is deprecated. Otherwise, false.
name [string] => Name of the attribute value."""
attributeValueSpecDataObj=self.__ConfServiceclient.factory.create("attributeValueSpecDataObj")
attributeValueSpecDataObj.deprecated= deprecated
attributeValueSpecDataObj.name= name
return attributeValueSpecDataObj
# DATA_TYPE: Create projectScopeDefectFilterSpecDataObj data type.
def projectScopeDefectFilterSpecDataObj(self, actionNameList, checkerCategoryList, checkerList, checkerTypeList, cidList,
clssificationNameList, componentIdExclude, componentIdList, cweList, firstDetectedEndDate,
firstDetectedStartDate, fixTargetNameList, impactNameList, issueKindList, lastDetectedEndDate,
lastDetectedStartDate, legacyNameList, ownerNameList, ownerNamePattern, severityNameList):
"""Create projectScopeDefectFilterSpecDataObj data type.
:param:
actionNameList [string] => Name/value pairs for a list of attributes.
checkerCategoryList [string] => List of checker categories.
checkerList [string] => List of checkers.
checkerTypeList [string] => List of checker types.
cidList [long] => List of CIDs.
classificationNameList [string] => Classification of the CID.
componentIdExclude [boolean] => If one or more component name filters is specified, set to true to exclude matching results from the specified components.
Defaults to false, including the matches from the components in the results.
componentIdList [string] => Name of a component that contains the CID.
cweList [long] => Common Weakness Enumeration identifier of the type of issue found by the checker. Zero or more identifiers allowed.
firstDetectedEndDate [dateTime]=> Ending date (and optionally, time) for the date range matching the First Detected date of a CID.
Example1: 2013-03-18T12:42:19.384-07:00 Example2: 2013-03-18
firstDetectedStartDate [dateTime]=> Starting date (and optionally, time) for the date range matching the First Detected date of a CID.
fixTargetNameList [string] => Fix target for the CID; a triage value for the CID.
impactNameList [string] => Probable impact (High, Medium, or Low) of the issue found by the checker. Zero or more impact levels allowed.
issueKindList [string] => Issue kind.
lastDetectedEndDate [dateTime]=> Ending date (and optionally, time) for the date range matching the Last Detected date of a CID.
lastDetectedStartDate [dateTime]=> Starting date (and optionally, time) for the date range matching the Last Detected date of a CID.
legacyNameList [string] => Legacy designation for the CID (true or false), a triage value for the CID. Built-in attribute. Defaults to false.
ownerNameList [string] => Owner of the CID.
ownerNamePattern [string] => Glob pattern matching the first or last name of the owner of a CID.
severityNameList [string] => Severity of the CID; a triage value for the CID."""
filterSpec= self.__DefServiceclient.factory.create("projectScopeDefectFilterSpecDataObj")
componentIdDataObj= self.__DefServiceclient.factory.create("componentIdDataObj")
componentIdDataObj.name = componentIdList
filterSpec.actionNameList=actionNameList
filterSpec.checkerCategoryList= checkerCategoryList
filterSpec.checkerList= checkerList
filterSpec.checkerTypeList= checkerTypeList
filterSpec.cidList= cidList
filterSpec.classificationNameList= clssificationNameList
filterSpec.componentIdExclude= componentIdExclude
filterSpec.componentIdList=componentIdDataObj
filterSpec.cweList= cweList
filterSpec.firstDetectedEndDate= firstDetectedEndDate
filterSpec.firstDetectedStartDate= firstDetectedStartDate
filterSpec.fixTargetNameList= fixTargetNameList
filterSpec.impactNameList= impactNameList
filterSpec.issueKindList= issueKindList
filterSpec.lastDetectedEndDate= lastDetectedEndDate
filterSpec.lastDetectedStartDate= lastDetectedStartDate
filterSpec.legacyNameList= legacyNameList
filterSpec.ownerNameList= ownerNameList
filterSpec.ownerNamePattern= ownerNamePattern
filterSpec.severityNameList= severityNameList
return filterSpec
# DATA_TYPE: Create pageSpecDataObj data type.
def pageSpecDataObj(self, pageSize, sortAscending, sortField, startIndex):
"""Create pageSpecDataObj data type.
:param:
pageSize [int] => Required. Up to 1000 records per page.
sortAscending [boolean] => Set to false to return records in reverse alphabetical or numerical order. Defaults to true.
sortField [string] => Name of the field to use for sorting results. Not all fields are supported. However, you can typically sort by a field that returns numeric results, such as cid and the date fields.
startIndex [int] => Zero-based index of records to return. Defaults to 0."""
pageSpec= self.__DefServiceclient.factory.create("pageSpecDataObj")
pageSpec.pageSize= pageSize
pageSpec.sortAscending= sortAscending
pageSpec.sortField= sortField
pageSpec.startIndex= startIndex
return pageSpec
# DATA_TYPE: Create snapshotScopeDefectFilterSpecDataObj data type.
def snapshotScopeDefectFilterSpecDataObj(self, actionNameList, attributeDefinitionValueFilterMap, checkerCategoryList, checkerList, checkerTypeList,
cidList, classificationNameList, componentIdExclude, componentIdList, cweList, externalReference, fileName,
firstDetectedEndDate, firstDetectedStartDate, fixTargetNameList, functionMergeName, functionName, impactNameList,
issueComparison, issueKindList, lastDetectedEndDate, lastDetectedStartDate, legacyNameList, maxOccurrenceCount,
mergeExtra, mergeKey, minOccurrenceCount, ownerNameList, ownerNamePattern, severityNameList, statusNameList,
streamExcludeNameList, streamExcludeQualifier, streamIncludeNameList, streamIncludeQualifier):
"""Create snapshotScopeDefectFilterSpecDataObj.
:param:
actionNameList [string] => Name/value pairs for a list of attributes.
attributeDefinitionValueFilterMap [attributeDefinitionValueFilterMapDataObj] => Specification of an attribute value.
checkerCategoryList [string] => List of checker categories.
checkerList [string] => List of checkers.
checkerTypeList [string] => List of checker types.
cidList [long] => List of CIDs.
classificationNameList [string] => Classification of the CID.
componentIdExclude [boolean] => If one or more component name filters is specified, set to true to exclude matching results from the specified components.
Defaults to false, including the matches from the components in the results.
componentIdList [componentIdDataObj] => Name of a component that contains the CID.
cweList [long] => Common Weakness Enumeration identifier of the type of issue found by the checker. Zero or more identifiers allowed.
externalReference [string] => An external reference for a CID that is used by your company to identify the software issue. Corresponds to a field in the Coverity Connect triage pane.
fileName [string] => A file name. Example: /test.c
firstDetectedEndDate [dateTime] => Ending date (and optionally, time) for the date range matching the First Detected date of a CID.
Example1: 2013-03-18T12:42:19.384-07:00 Example2: 3/18/2013
firstDetectedStartDate [dateTime] => Starting date (and optionally, time) for the date range matching the First Detected date of a CID.
fixTargetNameList [string] => Fix target for the CID; a triage value for the CID.
functionMergeName [string] => Internal function name used as one of the criteria for merging separate occurrences of the same software issue,
with the result that they are identified by the same CID.
functionName [string] => Name of the function or method.
impactNameList [string] => Probable impact (High, Medium, or Low) of the issue found by the checker. Zero or more impact levels allowed.
issueComparison [string] => If set to PRESENT, returns overlapping CIDs in a snapshot comparison, that is, CIDs found in snapshot(s) to which both the showSelector and compareSelector values of the snaphotScope parameter (snapshotScopeSpecDataObj) apply.
If set to ABSENT, returns CIDs that are present in the snapshot(s) to which the showSelector value applies but absent from those to which the compareSelector value applies.
If not set, values are PRESENT and ABSENT.
issueKindList [string] => Issue kind.
lastDetectedEndDate [dateTime] => Ending date (and optionally, time) for the date range matching the Last Detected date of a CID.
lastDetectedStartDate [dateTime] => Starting date (and optionally, time) for the date range matching the Last Detected date of a CID.
legacyNameList [string] => Legacy designation for the CID (true or false), a triage value for the CID. Built-in attribute. Defaults to false.
maxOccurrenceCount [int] => Maximum number of instances of software issues associated with a given CID.
mergeExtra [string] => Internal property used as one of the criteria for merging occurrences of an issue.
mergeKey [string] => Internal signature used to merge separate occurrences of the same software issue and identify them all by the same CID.
minOccurrenceCount [int] => Minimum number of instances of software issues associated with a given CID.
ownerNameList [string] => Owner of the CID.
ownerNamePattern [string] => Glob pattern matching the first or last name of the owner of a CID.
severityNameList [string] => Severity of the CID; a triage value for the CID.
statusNameList [string] => Status of the CID.
streamExcludeNameList [streamIdDataObj] => Identifier for a stream to exclude.
streamExcludeQualifier [string] => If set to ANY, the filter will exclude from the results CIDs found in each of the streams listed in the streamExcludeNameList field.
If set to ALL, the filter will only exclude a CID if it is found in all listed streams. Valid values are ANY or ALL. Defaults to ANY.
streamIncludeNameList [streamIdDataObj] => Identifier for a stream to include.
streamIncludeQualifier [string] => If set to ANY, the filter will return CIDs found in each of the streams listed in the streamIncludeNameList field.
If set to ALL, the filter will only return a CID if it is found in all listed streams. Valid values are ANY or ALL. Defaults to ANY."""
filterSpec= self.__DefServiceclient.factory.create("snapshotScopeDefectFilterSpecDataObj");
componentIdListData= self.__DefServiceclient.factory.create("componentIdDataObj")
streamIdDataObj= self.__DefServiceclient.factory.create("streamIdDataObj")
streamIdDataObj2= self.__DefServiceclient.factory.create("streamIdDataObj")
componentIdListData.name= componentIdList
streamIdDataObj.name= streamExcludeNameList
streamIdDataObj2.name= streamIncludeNameList
filterSpec.actionNameList= actionNameList
filterSpec.attributeDefinitionValueFilterMap= attributeDefinitionValueFilterMap
filterSpec.checkerCategoryList= checkerCategoryList
filterSpec.checkerList= checkerList
filterSpec.checkerTypeList= checkerTypeList
filterSpec.cidList= cidList
filterSpec.classificationNameList= classificationNameList
filterSpec.componentIdExclude= componentIdExclude
filterSpec.componentIdList= componentIdListData
filterSpec.cweList= cweList
filterSpec.externalReference= externalReference
filterSpec.fileName= fileName
filterSpec.firstDetectedEndDate= firstDetectedEndDate
filterSpec.firstDetectedStartDate= firstDetectedStartDate
filterSpec.fixTargetNameList= fixTargetNameList
filterSpec.functionMergeName= functionMergeName
filterSpec.functionName= functionName
filterSpec.impactNameList= impactNameList
filterSpec.issueComparison= issueComparison
filterSpec.issueKindList= issueKindList
filterSpec.lastDetectedEndDate= lastDetectedEndDate
filterSpec.lastDetectedStartDate= lastDetectedStartDate
filterSpec.legacyNameList= legacyNameList
filterSpec.maxOccurrenceCount= maxOccurrenceCount
filterSpec.mergeExtra= mergeExtra
filterSpec.mergeKey= mergeKey
filterSpec.minOccurrenceCount= minOccurrenceCount
filterSpec.ownerNameList= ownerNameList
filterSpec.ownerNamePattern= ownerNamePattern
filterSpec.severityNameList= severityNameList
filterSpec.statusNameList= statusNameList
filterSpec.streamExcludeNameList= streamIdDataObj
filterSpec.streamExcludeQualifier= streamExcludeQualifier
filterSpec.streamIncludeNameList= streamIdDataObj2
filterSpec.streamIncludeQualifier= streamIncludeQualifier
return filterSpec
# DATA_TYPE: Create defectStateAttributeValueDataObj data type.
def defectStateAttributeValueDataObj(self, attributeName, valueOfAttribute):
"""Create defectStateAttributeValueDataObj data type.
:param:
attributeName [string] => Identifier for an attribute.
valueOfAttribute [string] => Value of the attribute."""
defectStateAttributeValues= self.__DefServiceclient.factory.create("defectStateAttributeValueDataObj ")
attributeDefinitionId= self.__DefServiceclient.factory.create("attributeDefinitionIdDataObj")
attributeValueId = self.__DefServiceclient.factory.create("attributeValueIdDataObj")
attributeDefinitionId.name= attributeName
attributeValueId.name= valueOfAttribute
defectStateAttributeValues.attributeDefinitionId= attributeDefinitionId
defectStateAttributeValues.attributeValueId= attributeValueId
return defectStateAttributeValues
# DATA_TYPE: Create mergedDefectFilterSpecDataObj data type.
def mergedDefectFilterSpecDataObj(self, cidList, checkerSubcategoryFilterSpecList, filenamePatternList, componentIdList, statusNameList, classificationNameList,
actionNameList, fixTargetNameList, severityNameList, legacyNameList, ownerNameList, issueKindList, attributeDefinitionValueFilterMap,
componentIdExclude, defectPropertyKey, defectPropertyPattern, externalReferencePattern, firstDetectedEndDate, firstDetectedStartDate,
functionNamePattern, lastDetectedEndDate, lastDetectedStartDate, lastFixedEndDate, lastFixedStartDate, lastTriagedEndDate, lastTriagedStartDate,
maxCid, maxOccurrenceCount, mergedDefectIdDataObjs, minCid, minOccurrenceCount, ownerNamePattern, snapshotComparisonField,streamExcludeNameList,
streamExcludeQualifier, streamIncludeNameList, streamIncludeQualifier):
"""Create mergedDefectFilterSpecDataObj data type.
:param:
cidList [long] => A CID. Multiple CIDs allowed.
checkerSubcategoryFilterSpecList [checkerSubcategoryFilterSpecDataObj ] => Checker subcategory specification. Multiple allowed.
filenamePatternList [string] => Filename pattern for source code files that containing software issues associated with the CIDs. Up to 20 patterns allowed.
componentIdList [componentIdDataObj] => Name of a component that contains the CID. Multiple components allowed.
statusNameList [string] => Status of the CID. Multiple statuses allowed.
classificationNameList [string] => Classification of the CID; a triage value for the CID. Multiple classifications allowed.
actionNameList [string] => Name/value pairs for a list of attributes.
fixTargetNameList [string] => Fix target for the CID; a triage value for the CID. Multiple fix targets allowed.
severityNameList [string] => Severity of the CID; a triage value for the CID. Multiple severities allowed.
legacyNameList [string] => Legacy designation for the CID (true or false); a triage value for the CID. Built-in attribute. Defaults to false.
ownerNameList [string] => Owner of the CID.
issueKindList [string] => Kind of issue identified by the CID.
attributeDefinitionValueFilterMap [attributeDefinitionValueFilterMapDataObj ] => Specification of an attribute value.
componentIdExclude [boolean] => If one or more component name filters is specified, set to true to exclude matching results from the specified components. Defaults to false, including the matches from the components in the results.
defectPropertyKey [string] => Do not use this field. The API does not process these values.
defectPropertyPattern [string] => Do not use this field. The API does not process these values.
externalReferencePattern [string] => Glob pattern matching the value of an Ext. Reference attribute value.
firstDetectedEndDate [dateTime] => Ending date (and optionally, time) for the date range matching the First Detected date of a CID.
Example1: 2013-03-18T12:42:19.384-07:00 Example2: 3/18/2013
firstDetectedStartDate [dateTime] => Starting date (and optionally, time) for the date range matching the First Detected date of a CID.
functionNamePattern [string] => Glob pattern matching the name of the function (or method) associated with a CID.
lastDetectedEndDate [dateTime] => Ending date (and optionally, time) for the date range matching the Last Detected date of a CID.
lastDetectedStartDate [dateTime] => Starting date (and optionally, time) for the date range matching the Last Detected date of a CID.
lastFixedEndDate [dateTime] => Ending date (and optionally, time) for the date range matching the Last Fixed date of a CID.
lastFixedStartDate [dateTime] => Starting date (and optionally, time) for the date range matching the Last Fixed date of a CID.
lastTriagedEndDate [dateTime] => Ending date (and optionally, time) for the date range matching the Last Triaged date of a CID.
lastTriagedStartDate [dateTime] => Starting date (and optionally, time) for the date range matching the Last Triaged date of a CID.
maxCid [long] => Upper numeric bound of CIDs to retrieve. For example, no greater than CID 25000.
maxOccurrenceCount [int] => Maximum number of instances of software issues associated with a given CID.
mergedDefectIdDataObjs [mergedDefectIdDataObj] => Identifier for a software issue.
Multiple specifications are allowed.
minCid [long] => Lower numeric bound of CIDs to retrieve. For example, no smaller than CID 24500.
minOccurrenceCount [int] => Minimum number of instances of software issues associated with a given CID.
ownerNamePattern [string] => Glob pattern matching the first or last name of the owner of a CID.
snapshotComparisonField [string] =>
streamExcludeNameList [streamIdDataObj] => Identifier for a stream to exclude. Multiple streams are allowed.
streamExcludeQualifier [string] =>
streamIncludeNameList [streamIdDataObj] => Identifier for a stream to include. Multiple streams are allowed.
streamIncludeQualifier [string] => """
filterSpec= self.__DefServiceclient.factory.create("mergedDefectFilterSpecDataObj")
filterSpec.cidList= cidList
filterSpec.checkerSubcategoryFilterSpecList=checkerSubcategoryFilterSpecList
filterSpec.filenamePatternList= filenamePatternList
filterSpec.componentIdList= componentIdList
filterSpec.statusNameList= statusNameList
filterSpec.classificationNameList= classificationNameList
filterSpec.actionNameList= actionNameList
filterSpec.fixTargetNameList= fixTargetNameList
filterSpec.severityNameList= severityNameList
filterSpec.legacyNameList= legacyNameList
filterSpec.ownerNameList= ownerNameList
filterSpec.issueKindList= issueKindList
filterSpec.attributeDefinitionValueFilterMap= attributeDefinitionValueFilterMap
filterSpec.componentIdExclude= componentIdExclude
filterSpec.defectPropertyKey= defectPropertyKey
filterSpec.defectPropertyPattern= defectPropertyPattern
filterSpec.externalReferencePattern= externalReferencePattern
filterSpec.firstDetectedEndDate= firstDetectedEndDate
filterSpec.firstDetectedStartDate= firstDetectedStartDate
filterSpec.functionNamePattern= functionNamePattern
filterSpec.lastDetectedEndDate= lastDetectedEndDate
filterSpec.lastDetectedStartDate= lastDetectedStartDate
filterSpec.lastFixedEndDate= lastFixedEndDate
filterSpec.lastFixedStartDate= lastDetectedStartDate
filterSpec.lastTriagedEndDate= lastTriagedEndDate
filterSpec.lastTriagedStartDate= lastTriagedStartDate
filterSpec.maxCid= maxCid
filterSpec.maxOccurrenceCount= maxOccurrenceCount
filterSpec.mergedDefectIdDataObjs= mergedDefectIdDataObjs
filterSpec.minCid= minCid
filterSpec.minOccurrenceCount= minOccurrenceCount
filterSpec.ownerNamePattern= ownerNamePattern
filterSpec.snapshotComparisonField= snapshotComparisonField
filterSpec.streamExcludeNameList= streamExcludeNameList
filterSpec.streamExcludeQualifier= streamExcludeQualifier
filterSpec.streamIncludeNameList= streamIncludeNameList
filterSpec.streamIncludeQualifier= streamIncludeQualifier
return filterSpec
# DATA_TYPE: Create checkerSubcategoryFilterSpecDataObj data type.
def checkerSubcategoryFilterSpecDataObj(self, checkerName, domain, subcategory):
""""Create checkerSubcategoryFilterSpecDataObj data type.
:param:
checkerName [string] => Checker associated with the subcategory.
domain [string] => Domain associated with the subcategory.
subcategory [string] => Subcategory on which to filter. """
checkerSubcategoryFilterSpec= self.__DefServiceclient.factory.create("checkerSubcategoryFilterSpecDataObj")
checkerSubcategoryFilterSpec.checkerName= checkerName
checkerSubcategoryFilterSpec.domain= domain
checkerSubcategoryFilterSpec.subcategory= subcategory
return checkerSubcategoryFilterSpec
# DATA_TYPE: Create componentIdDataObj data type.
def componentIdDataObj (self, name):
"""Create componentIdDataObj data type.
:param:
name [string] => Name of a component in the project in the form [componentMap].[component]."""
componentIdList= self.__DefServiceclient.factory.create("componentIdDataObj")
componentIdList.name= name
return componentIdList
# DATA_TYPE: Create attributeDefinitionValueFilterMapDataObj data type.
def attributeDefinitionValueFilterMapDataObj(self, attributeDefinitionId, attributeValueIdsData):
""""Create attributeDefinitionValueFilterMapDataObj data type.
:param:
attributeDefinitionId [attributeDefinitionIdDataObj] => Identifier for the attribute to filter.
attributeValueIds [attributeValueIdDataObj] => Value of the attribute to filter. Multiple values allowed."""
attributeDefinitionValueFilterMap= self.__DefServiceclient.factory.create("attributeDefinitionValueFilterMapDataObj")
attributeDefinitionValueFilterMap.attributeDefinitionId= attributeDefinitionId
attributeDefinitionValueFilterMap.attributeValueIds= attributeValueIdsData
return attributeDefinitionValueFilterMap
# DATA_TYPE: Create attributeDefinitionIdDataObj data type.
def attributeDefinitionIdDataObj(self, name):
"""Create attributeDefinitionIdDataObj data type.
:param:
name [string] => Name of the attribute."""
attributeDefinitionId= self.__DefServiceclient.factory.create("attributeDefinitionIdDataObj")
attributeDefinitionId.name= name
return attributeDefinitionId
# DATA_TYPE: Create mergedDefectIdDataObj data type.
def mergedDefectIdDataObj(self, cid, mergeKey):
    """Create mergedDefectIdDataObj data type.

    :param:
        cid [long] => CID.
        mergeKey [string] => Numeric key for a CID.
    """
    # BUG FIX: the original called factory.client("mergedDefectIdDataObj");
    # a suds factory exposes create(), not client() — every sibling builder
    # in this file uses factory.create(), so the original raised
    # AttributeError at runtime.
    mergedDefectIdDataObjs = self.__DefServiceclient.factory.create("mergedDefectIdDataObj")
    mergedDefectIdDataObjs.cid = cid
    mergedDefectIdDataObjs.mergeKey = mergeKey
    return mergedDefectIdDataObjs
# DATA_TYPE: Create streamIdDataObj data type.
def streamIdDataObj(self, name):
    """Build a streamIdDataObj SOAP object.

    :param:
        name [string] => Required. Name of the stream.
    """
    stream_id = self.__DefServiceclient.factory.create("streamIdDataObj")
    stream_id.name = name
    return stream_id
| [
"from suds.client import Client\n",
"from suds.wsse import *\n",
"import socket, logging\n",
"import suds\n",
"CURSOR=None\n",
"__ConfServiceClient=None\n",
"__DefServiceClient=None\n",
"__host = None\n",
"__port = None\n",
"__usr = None\n",
"__pwd = None\n",
"__ssl = None\n",
"\n",
"class ConnectionService(object):\n",
" \"\"\"Connnection Initialization.\n",
" :param: \n",
" host [string] => host name or ip-address.\n",
" port [string] => Port connection.\n",
" usr [string] => Username.\n",
" pwd [string] => Password.\n",
" ssl [boolean] => Use https and ssl security.\"\"\"\n",
" def __init__( self , host , port , usr , pwd, ssl):\n",
" socket.setdefaulttimeout(None)\n",
" logging.basicConfig(level=logging.INFO)\n",
" logging.getLogger(\"suds.client\").setLevel(logging.INFO)\n",
" \n",
" self.__host = host \n",
" self.__port = port \n",
" self.__usr = usr \n",
" self.__pwd = pwd\n",
" self.__ssl = ssl\n",
" self.__confservice()\n",
" self.__defservice()\n",
"#Create the object for the ConfigurationService connection \n",
" def __confservice(self):\n",
" if self.__ssl == True:\n",
" Url=\"https://\"+self.__host+\":\"+self.__port\n",
" else:\n",
" Url=\"http://\"+self.__host+\":\"+self.__port\n",
" SrvWsdl=Url+\"/ws/v8/configurationservice?wsdl\"\n",
" Security=suds.wsse.Security()\n",
" Security.tokens.append(suds.wsse.UsernameToken(self.__usr, self.__pwd))\n",
" self.__ConfServiceclient=suds.client.Client(SrvWsdl, timeout=3600)\n",
" self.__ConfServiceclient.set_options(wsse=Security)\n",
" \n",
"#Create the object for DefectService connection.\n",
" def __defservice(self):\n",
" if self.__ssl == True:\n",
" Url=\"https://\"+self.__host+\":\"+self.__port\n",
" else:\n",
" Url=\"http://\"+self.__host+\":\"+self.__port\n",
" SrvWsdl=Url+\"/ws/v8/defectservice?wsdl\"\n",
" Security=suds.wsse.Security()\n",
" Security.tokens.append(suds.wsse.UsernameToken(self.__usr, self.__pwd))\n",
" self.__DefServiceclient=suds.client.Client(SrvWsdl, timeout=3600)\n",
" self.__DefServiceclient.set_options(wsse=Security)\n",
" \n",
"# METHOD: ConfigurationService->getProjects.\n",
" def getProjects(self, descriptionPattern, includeChildren, includeStreams, namePattern):\n",
" \"\"\"Get a list of projects specifications (for all projects or for all filtered set of projects).\n",
" :param:\n",
" descriptionPattern [string] => Glob pattern matching the description of one or more projects.\n",
" includeChildren [boolean]=> Value of false if the results should not include roles and other properties associated with the project. Defaults to true.\n",
" includeStreams [boolean]=> Value of false if the results should not include streams associated with the project. Defaults to true.\n",
" namePattern [string] => Glob pattern matching the name of one or more projects.\"\"\"\n",
" projectfilter=self.__ConfServiceclient.factory.create(\"projectFilterSpecDataObj\")\n",
" projectfilter.descriptionPattern = descriptionPattern\n",
" projectfilter.includeChildren = includeChildren\n",
" projectfilter.includeStreams = includeStreams\n",
" projectfilter.namePattern= namePattern\n",
" try:\n",
" return self.__ConfServiceclient.service.getProjects(projectfilter)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
" \n",
"# METHOD: ConfigurationService->copyStream.\n",
" def copyStream(self, projectName, streamName):\n",
" \"\"\"Make a copy of a stream. Does noy copy stream role assignments.\n",
" :param:\n",
" name [string] => Required. Name of the project.\n",
" name [string] => Required. Name of the stream.\"\"\"\n",
" projectIdDataObj=self.__ConfServiceclient.factory.create(\"projectIdDataObj\")\n",
" projectIdDataObj.name=projectName\n",
" streamIdObj=self.__ConfServiceclient.factory.create(\"streamIdObj\")\n",
" streamIdObj.name=streamName\n",
" try:\n",
" return self.__ConfServiceclient.service.copyStream(projectIdDataObj, streamIdObj)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
" \n",
"# METHOD: ConfigurationService->createAttribute.\n",
" def createAttribute(self, attributeName, attributeType, attributeValueChangeSpecData, defaultValue, description, showInTriage):\n",
" \"\"\"\"Create an attribute.\n",
" :param:\n",
" attributeName [string] => Name of the attribute. Required\n",
" attributeType [string] => The type of attribute. Required\n",
" attributeValueChangeSpec [attributeValueChangeSpecDataObj] => For a LIST_OF_VALUES attribute type only: The set of values available to the attribute.\n",
" defaultValue [string] => For a LIST_OF_VALUES attribute type only: The default attribute value.\n",
" description [string] => Descriptions of the attribute\n",
" showInTriage [boolean] => If true, makes the attribute available for use in the Triage pane of the UI.\"\"\"\n",
" attributeDefinitionSpecDataObj= self.__ConfServiceclient.factory.create(\"attributeDefinitionSpecDataObj\")\n",
" attributeDefinitionSpecDataObj.attributeName= attributeName\n",
" attributeDefinitionSpecDataObj.attributeType= attributeType\n",
" attributeDefinitionSpecDataObj.attributeValueChangeSpec= attributeValueChangeSpecData\n",
" attributeDefinitionSpecDataObj.defaultValue= defaultValue\n",
" attributeDefinitionSpecDataObj.description= description\n",
" attributeDefinitionSpecDataObj.showInTriage=showInTriage\n",
" try:\n",
" self.__ConfServiceclient.service.createAttribute(attributeDefinitionSpecDataObj)\n",
" return \"Attribute created\"\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->getAllLdapConfigurations\n",
" def getAllLdapConfigurations(self):\n",
" \"\"\"Return all LDAP Configurations.\"\"\"\n",
" return self.__ConfServiceclient.service.getAllLdapConfigurations()\n",
"\n",
"# METHOD: ConfigurationService->getAttribute.\n",
" def getAttribute(self, name):\n",
" \"\"\"Retrieve the properties of a specified attribute.\n",
" :param:\n",
" name [string] => Required. Name of the attribute.\"\"\"\n",
" attributeDefinitionId=self.__ConfServiceclient.factory.create(\"attributeDefinitionIdDataObj\")\n",
" attributeDefinitionId.name = name\n",
" try:\n",
" return self.__ConfServiceclient.service.getAttribute(attributeDefinitionId)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->getCheckerProperties.\n",
" def getCheckerProperties(self, checkerNameList, subcategoryList, domainList, categoryList, cweCategoryList, impactList, projectIdname):\n",
" \"\"\"Retrieve a list of available checker properties.\n",
" :param:\n",
" checkerNameList [string] => Name of the checker. Zero or more checker names allowed.\n",
" subcategoryList [string] => Subcategorization of the software issue found by the checker.\n",
" domainList [string] => Domain of the checker. Zero or more domains allowed.\n",
" categoryList [string] => Categorization of the software issue found by checker. Zero or more categories allowed.\n",
" cweCategoryList [string] => Common Weakness Enumeration identifier of the type of issue found by the checker. Zero or more identifiers allowed.\n",
" impactList [string] => Probable impact (High, Medium, or Low) of the issue found by the checker. Zero or more impact levels allowed.\n",
" projectIdname [string] => Required. Name of the project.\"\"\"\n",
" filterSpec= self.__ConfServiceclient.factory.create(\"checkerPropertyFilterSpecDataObj\")\n",
" projectId= self.__ConfServiceclient.factory.create(\"projectIdDataObj\")\n",
" projectId.name= projectIdname\n",
" filterSpec.checkerNameList= checkerNameList\n",
" filterSpec.subcategoryList= subcategoryList\n",
" filterSpec.domainList= domainList\n",
" filterSpec.categoryList= categoryList\n",
" filterSpec.cweCategoryList= cweCategoryList\n",
" filterSpec.impactList= impactList\n",
" filterSpec.projectId= projectId\n",
" try:\n",
" return self.__ConfServiceclient.service.getCheckerProperties(filterSpec)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfiguratioService->getComponent.\n",
" def getComponent(self, name):\n",
" \"\"\"Retrieve the properties of a component.\n",
" :param:\n",
" name [string] => Required. Name of a component in the form componentMapName.componentName (for example, myComponentMap.myComponent).\"\"\"\n",
" componentId= self.__ConfServiceclient.factory.create(\"componentIdDataObj\")\n",
" componentId.name =name\n",
" try:\n",
" return self.__ConfServiceclient.service.getComponent(componentId)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
" \n",
"# METHOD: ConfigurationService->getComponentMaps.\n",
" def getComponentMaps(self, namePattern):\n",
" \"\"\"Retrieve a list of component maps that matches a component name pattern.\n",
" :param:\n",
" namePattern [string] => Glob pattern matching the name of one or more component maps.\"\"\"\n",
" filterSpec= self.__ConfServiceclient.factory.create(\"componentMapFilterSpecDataObj\")\n",
" filterSpec.namePattern= namePattern\n",
" try:\n",
" return self.__ConfServiceclient.service.getComponentMaps(filterSpec)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->getDefectStatuses.\n",
" def getDefectStatuses(self):\n",
" \"\"\"Retrieve a list of Status attribute values that can be associated with a software issue.\"\"\"\n",
" return self.__ConfServiceclient.service.getDefectStatuses()\n",
"\n",
"# METHOD: ConfigurationService->getGroup.\n",
" def getGroup(self, displayName, domain, name):\n",
" \"\"\"Retrieve the properties of a user group.\n",
" :param:\n",
" displayName [string] => The name of a user group. To retrieve an LDAP group, you use <groupname>@<LDAPserver>.\n",
" domain [string] => Name of the LDAP server domain.\n",
" name [string] => Required. Name of the local or LDAP group.\"\"\"\n",
" groupId=self.__ConfServiceclient.factory.create(\"groupIdDataObj\")\n",
" domainId= self.__ConfServiceclient.factory.create(\"serverDomainDataObj\")\n",
" domainId.name= domain\n",
" groupId.displayName= displayName\n",
" groupId.domain= domainId\n",
" groupId.name= name\n",
" try:\n",
" return self.__ConfServiceclient.service.getGroup(groupId)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->getGroups.\n",
" def getGroups(self, ldap, namePattern, name, userList):\n",
" \"\"\"Get a list of groups.\n",
" :param:\n",
" ldap [boolean] => Value of true for LDAP groups only; otherwise, false.\n",
" namePattern [string] => Glob pattern matching the name of one or more groups.\n",
" name [string] => Name of a project with which the group must have a role association.\n",
" userList [string] => User name of a user that must belong to the group.\"\"\"\n",
" filterSpec= self.__ConfServiceclient.factory.create(\"groupFilterSpecDataObj\")\n",
" projectIdDataObj= self.__ConfServiceclient.factory.create(\"projectIdDataObj\")\n",
" projectIdDataObj.name= name\n",
" filterSpec.ldap= ldap\n",
" filterSpec.namePattern = namePattern\n",
" filterSpec.projectIdDataObj= projectIdDataObj\n",
" filterSpec.userList= userList\n",
" try:\n",
" return self.__ConfServiceclient.service.getGroups(filterSpec)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
" \n",
"# METHOD: ConfigurationService->getSnapshotInformation.\n",
" def getSnapshotInformation(self, id):\n",
" \"\"\"Retrieve information about a snashot in a stream.\n",
" :param:\n",
" id [long] => Numeric identifier for the snapshot. Required.\"\"\"\n",
" snapshotIdData=self.__ConfServiceclient.factory.create(\"snapshotIdDataObj\")\n",
" snapshotIdData.id=id\n",
" try:\n",
" return self.__ConfServiceclient.service.getSnapshotInformation(snapshotIdData)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->getSnapshotsForStream.\n",
" def getSnapshotsForStream(self, name):\n",
" \"\"\"Retrieve a set of snapshots that belong to a spcecified stream.\n",
" :param:\n",
" name [string] => Required. Name of the stream.\"\"\"\n",
" streamId= self.__ConfServiceclient.factory.create(\"streamIdDataObj\")\n",
" streamId.name= name\n",
" try:\n",
" return self.__ConfServiceclient.service.getSnapshotsForStream(streamId)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->getStreams.\n",
" def getStreams(self, languageList, descriptionPattern, namePattern):\n",
" \"\"\"Retrieve a set of streams.\n",
" :param:\n",
" languageList [string] => Programming language matching that of one or more streams. Zero or more language filters allowed.\n",
" descriptionPattern [string] => Glob pattern matching the description of one or more streams.\n",
" namePattern [string] => Glob pattern matching the name of one or more streams.\"\"\"\n",
" filterSpec= self.__ConfServiceclient.factory.create(\"streamFilterSpecDataObj\")\n",
" filterSpec.languageList= languageList\n",
" filterSpec.descriptionPattern= descriptionPattern\n",
" filterSpec.namePattern= namePattern\n",
" try:\n",
" return self.__ConfServiceclient.service.getStreams(filterSpec)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->getUser.\n",
" def getUser(self,username):\n",
" \"\"\"Retrieve a user by user name.\n",
" :param:\n",
" username [string] => User name.\"\"\"\n",
" try:\n",
" return self.__ConfServiceclient.service.getUser(username)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->getUsers.\n",
" def getUsers(self, assignable, disabled, groupList, includeDetails, ldap, locked, namePattern, name, pageSpecData):\n",
" \"\"\"Get users (filtered or unfiltered).\n",
" :param:\n",
" assignable [boolean] => Set to true to retrieve only those users who can own software issues; false to retrieve only those who cannot. Otherwise, do not set.\n",
" disabled [boolean] => Set to true to retrieve disabled users only. Set to false to retrieve enabled users only. Otherwise, do not set.\n",
" groupList [string] => Name of user group to which the retrieved users must belong. Zero or more groups allowed.\n",
" includeDetails [boolean] => Set to false to prevent the inclusion of role assignments and other user details in the reqponse. Defaults to true.\n",
" ldap [boolean] => Set to true to retrieve only LDAP users; false to retrieve only local users. Otherwise, do not set.\n",
" locked [boolean] => Set to true to retrieve only those users who have been locked out; false to retrieve only unlocked users. Otherwise, do not set.\n",
" namePattern [string] => Glob pattern that matches the user name of the users to retrieve.\n",
" name [string] => Name of project to which the retrieved set of users must have a role association.\"\"\"\n",
" filterSpec= self.__ConfServiceclient.factory.create(\"userFilterSpecDataObj\")\n",
" projectIdDataObj= self.__ConfServiceclient.factory.create(\"projectIdDataObj\")\n",
" projectIdDataObj.name= name\n",
" filterSpec.assignable= assignable\n",
" filterSpec.disabled= disabled\n",
" filterSpec.groupsList= groupList\n",
" filterSpec.includeDetails= includeDetails\n",
" filterSpec.ldap= ldap\n",
" filterSpec.locked= locked\n",
" filterSpec.namePattern= namePattern\n",
" filterSpec.projectIdDataObj= projectIdDataObj\n",
" try:\n",
" return self.__ConfServiceclient.service.getUsers(filterSpec,pageSpecData)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->notify.\n",
" def notify(self, usernames, subject, message):\n",
" \"\"\"Send an emmail notification to a specified user.\n",
" :param:\n",
" usernames [string] => One or more usernames.\n",
" subject [string] => Subject-line text for the email.\n",
" message [string] => Body text for the email.\"\"\"\n",
" try:\n",
" return self.__ConfServiceclient.service.notify(usernames, subject, message)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: ConfigurationService->updateAttribute.\n",
" def updateAttribute(self, attributeName, attributeType, attributeValueChangeSpecData, defaultValue, description, showInTriage):\n",
" \"\"\"Update an attribute specification.\n",
" attributeName [string] => Required. Name for the attribute.\n",
" attributeType [string] => The type of attribute. Required when using createAttribute().\n",
" attributeValueChangeSpec [attributeValueChangeSpecDataObj] => For a LIST_OF_VALUES attribute type only: The set of values available to the attribute.\n",
" defaultValue [string] => For a LIST_OF_VALUES attribute type only: The default attribute value.\n",
" description [string] => Description of the attribute.\n",
" showInTriage [boolean]=> If true, makes the attribute available for use in the Triage pane of the UI.\"\"\"\n",
" attributeDefinitionId= self.__ConfServiceclient.factory.create(\"attributeDefinitionDataObj\")\n",
" attributeDefinitionId= attributeName\n",
" attributeDefinitionSpec= self.__ConfServiceclient.factory.create(\"attributeDefinitionSpecDataObj\")\n",
" attributeDefinitionSpec.attributeName= None\n",
" attributeDefinitionSpec.attributeType= attributeType\n",
" attributeDefinitionSpec.attributeValueChangeSpec= attributeValueChangeSpecData\n",
" attributeDefinitionSpec.defaultValue= defaultValue\n",
" attributeDefinitionSpec.description= description\n",
" attributeDefinitionSpec.showInTriage= showInTriage\n",
" try:\n",
" self.__ConfServiceclient.service.updateAttribute(attributeDefinitionId, attributeDefinitionSpec)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getCheckerSubcategoriesForProject.\n",
" def getCheckerSubcategoriesForProject(self, name):\n",
" \"\"\"Retrieve a list of subcatergories of software issues in the project that were found by checkers used in the analysis.\n",
" :param:\n",
" name [string] => Required. Name of the project.\"\"\"\n",
" projectId= self.__DefServiceclient.factory.create(\"projectIdDataObj\")\n",
" projectId.name = name\n",
" try:\n",
" return self.__DefServiceclient.service.getCheckerSubcategoriesForProject(projectId)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getCheckerSubcategoriesForStreams.\n",
" def getCheckerSubcategoriesForStreams(self, name):\n",
" \"\"\"Retrieve a list of subcategories of software issues in the stream that were found by checkers used in the analysis.\n",
" :param:\n",
" name [string] => Required. Name of the stream.\"\"\"\n",
" streamIds= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
" streamIds.name= name\n",
" try:\n",
" return self.__DefServiceclient.service.getCheckerSubcategoriesForStreams(streamIds)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getComponentMetricsForProject .\n",
" def getComponentMetricsForProject (self, projectName, componentName):\n",
" \"\"\"Retrieve metrics on components associated with streams in a specific project.\n",
" :param:\n",
" projectName [string] => Required. Name of the project.\n",
" componentName [string] => Name of the component in the project in the form [ComponentMap].[component].\"\"\"\n",
" projectId= self.__DefServiceclient.factory.create(\"projectIdDataObj\")\n",
" projectId.name= projectName\n",
" componentIds= self.__DefServiceclient.factory.create(\"componentIdDataObj\")\n",
" componentIds.name= componentName\n",
" try:\n",
" return self.__DefServiceclient.service.getComponentMetricsForProject(projectId,componentIds)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getFileContents.\n",
" def getFileContents(self, name, contentsMD5, filePathname):\n",
" \"\"\"Retrieve the Base64-encoded contents of a file that contains an instance of a CID.\n",
" :param:\n",
" name [string] => Required. Name of the stream.\n",
" contentMD5 [string] => Required. MD5 checksum (a fingerprint or message digest) of the file contents. \n",
" You can get the contentsMD5 and filePathname for an instance of a CID by using getStreamDefects() with the includeDefectInstances filter set to true.\n",
" filePathname [string] => Required. Path to the file that contains the instance of the CID. \n",
" You can get the contentsMD5 and filePathname for an instance of a CID by using getStreamDefects() with the includeDefectInstances filter set to true.\"\"\"\n",
" streamId= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
" streamId.name =name\n",
" fileId= self.__DefServiceclient.factory.create(\"fileIdDataObj\")\n",
" fileId.contentMD5= contentsMD5\n",
" fileId.filePathname= filePathname\n",
" try:\n",
" return self.__DefServiceclient.service.getFileContents(streamId, fileId)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getMergedDefectDetectionHistory.\n",
" def getMergedDefectDetectionHistory(self, cid, mergeKey, name):\n",
" \"\"\"Retrieves detection history for a software issue. The return data is similar to the Detection History information in the Coverity Connect UI.\n",
" :param:\n",
" cid [long] => CID.\n",
" mergeKey [string] => Numeric key for a CID.\n",
" name [string] => Required. Name of the stream.\"\"\"\n",
" mergedDefectIdDataObj= self.__DefServiceclient.factory.create(\"mergedDefectIdDataObj\")\n",
" streamIds= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
" mergedDefectIdDataObj.cid= cid\n",
" mergedDefectIdDataObj.mergeKey= mergeKey\n",
" streamIds.name= name\n",
" try:\n",
" return self.__DefServiceclient.service.getMergedDefectDetectionHistory(mergedDefectIdDataObj, streamIds)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getMergedDefectHistory.\n",
" def getMergedDefectHistory(self, cid, mergeKey, name):\n",
" \"\"\"Retrieve a date and time stamped list of changes to attributes used to triage a specified CID.\n",
" :param:\n",
" cid [long] => CID.\n",
" mergeKey [string] => Numeric key for a CID.\n",
" name [string] => Required. Name of the stream.\"\"\"\n",
" mergedDefectIdDataObj= self.__DefServiceclient.factory.create(\"mergedDefectIdDataObj\")\n",
" streamIds= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
" mergedDefectIdDataObj.cid= cid\n",
" mergedDefectIdDataObj.mergeKey= mergeKey\n",
" streamIds.name= name\n",
" try:\n",
" return self.__DefServiceclient.service.getMergedDefectHistory(mergedDefectIdDataObj,streamIds)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getMergedDefectsForProjectScope.\n",
" def getMergedDefectsForProjectScope(self, name, projectScopeDefectFilterSpecData, pageSpecData):\n",
" \"\"\"Retrieve CIDs (filtered or unfiltered) that are in a specified project. \n",
" :param:\n",
" name [string] => Required. Name of the project.\n",
" projectScopeDefectFilterSpecData [projectScopeDefectFilterSpecDataObj] => An optional filters on the results to return.\n",
" pageSpecData [pageSpecDataObj] => Page specifications for results.\"\"\"\n",
" projectId= self.__DefServiceclient.factory.create(\"projectIdDataObj\")\n",
" projectId.name= name\n",
" try:\n",
" return self.__DefServiceclient.service.getMergedDefectsForProjectScope(projectId,projectScopeDefectFilterSpecData,pageSpecData)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getMergedDefectsForSnapshotScope.\n",
" def getMergedDefectsForSnapshotScope(self, name, snapshotScopeDefectFilterSpecData, pageSpecData):\n",
" \"\"\"Retrieve CIDs (filtered or unfiltered) that are in the current or specified snapshots. Optionally, perform snapshot comparisons.\n",
" :param:\n",
" name [string] => Required. Name of the project.\n",
" projectScopeDefectFilterSpecData [projectScopeDefectFilterSpecDataObj] => An optional filters on the results to return.\n",
" pageSpecData [pageSpecDataObj] => Page specifications for results.\"\"\" \n",
" projectId= self.__DefServiceclient.factory.create(\"projectIdDataObj\")\n",
" projectId.name= name\n",
" try:\n",
" return self.__DefServiceclient.service.getMergedDefectsForSnapshotScope(projectId, snapshotScopeDefectFilterSpecData,pageSpecData)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getMergedDefectsForStreams.\n",
" def getMergedDefectsForStreams(self, name, mergedDefectFilterSpecData, pageSpecData, compareOutdatedStreams, compareSelector, showOutdatedStreams, showSelector):\n",
" \"\"\"Retrieve the current attributes and other properties of CIDs (filtered or unfiltered) in a specified stream.\n",
" :param:\n",
" name [string] => Required. Name of the stream.\n",
" mergedDefectFilterSpecData [mergedDefectFilterSpecDataObj] => Filter properties used to match CIDs to return from the specified stream.\n",
" pageSpecData [pageSpecDataObj] => Page specifications for results.\n",
" compareOutdatedStreams [boolean] => If set to true, includes outdated streams found in snapshots specified by compareSelector. If false, the default, only non-outdated streams are included.\n",
" compareSelector [string] => Snapshot ID or snapshot grammar value that is used to set the scope of snapshots to compare with the showSelector snapshot scope.\n",
" showOutdatedStreams [boolean] => If set to true, includes outdated streams found in snapshots specified by showSelector. If false, the default, only non-outdated streams are included.\n",
" showSelector [string] => Required: Snapshot ID or snapshot grammar value that is used to set the scope of snapshots\n",
" Default: last() which iincludes the latest snapshot of each stream in the project.\"\"\"\n",
" streamIds= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
" snapshotScope= self.__DefServiceclient.factory.create(\"snapshotScopeSpecDataObj\")\n",
" streamIds.name= name\n",
" snapshotScope.compareOutdatedStreams= compareOutdatedStreams\n",
" snapshotScope.compareSelector= compareSelector\n",
" snapshotScope.showOutdatedStreams= showOutdatedStreams\n",
" snapshotScope.showSelector= showSelector\n",
" try:\n",
" return self.__DefServiceclient.service.getMergedDefectsForStreams(streamIds, mergedDefectFilterSpecData, pageSpecData, snapshotScope)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
" \n",
"# METHOD: DefectService->getStreamDefects.\n",
" def getStreamDefects(self, cid, mergeKey, defectStateEndDate, defectStateStartDate, includeDefectInstances, includeHistory, name):\n",
" \"\"\"Retrieve instances of software issues for one or more CIDs.\n",
" :param:\n",
" cid [long] => CID.\n",
" mergeKey [string] => Numeric key for a CID.\n",
" defectStateEndDate [dateTime]=> Ending date (and optionally, time) for the CIDs to return. \n",
" defectStateStartDate [dateTime]=> Starting date (and optionally, time) for the CIDs to return.\n",
" includeDefectInstances[boolean] => Set to true for data on each instance of software issue, including the ID. Defaults to false. \n",
" includeHistory [boolean] => Set to true for historical triage data on each instance of the software issue. \n",
" name [string] => Required. Name of the stream \"\"\"\n",
" mergedDefectIdDataObjs= self.__DefServiceclient.factory.create(\"mergedDefectIdDataObj\")\n",
" filterSpec= self.__DefServiceclient.factory.create(\"streamDefectFilterSpecDataObj\")\n",
" streamIdDataObj= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
" streamIdDataObj.name= name\n",
" mergedDefectIdDataObjs.cid= cid\n",
" mergedDefectIdDataObjs.mergeKey= mergeKey\n",
" filterSpec.defectStateEndDate= defectStateEndDate\n",
" filterSpec.defectStateStartDate= defectStateStartDate\n",
" filterSpec.includeDefectInstances= includeDefectInstances\n",
" filterSpec.includeHistory= includeHistory\n",
" filterSpec.streamIdList= streamIdDataObj\n",
" try:\n",
" return self.__DefServiceclient.service.getStreamDefects(mergedDefectIdDataObjs, filterSpec)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
" \n",
"# METHOD: DefectService->getTrendRecordsForProject.\n",
" def getTrendRecordsForProject(self, name, endDate, startDate):\n",
" \"\"\"Retrieve daily records on CIDs and source code in a project.\n",
" :param:\n",
" name [string] => Required. Name of the project.\n",
" endDate [dateTime] => End date (and optionally, time) for the set of records to retrieve.\n",
" startDate [dateTime] => Start date (and optionally, time) for the set of records to retrieve.\"\"\"\n",
" projectId= self.__DefServiceclient.factory.create(\"projectIdDataObj\")\n",
" filterSpec= self.__DefServiceclient.factory.create(\"projectTrendRecordFilterSpecDataObj\")\n",
" projectId.name = name\n",
" filterSpec.endDate= endDate\n",
" filterSpec.startDate= startDate\n",
" try:\n",
" return self.__DefServiceclient.service.getTrendRecordsForProject(projectId, filterSpec)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->getTriageHistory.\n",
" def getTriageHistory(self, cid, mergeKey, name):\n",
" \"\"\"Retrieve the triage history for a software issue.\n",
" :param:\n",
" cid [long] => CID.\n",
" mergeKey [string] => Numeric key for a CID.\n",
" name [string] => Required. Name of the triage store.\"\"\"\n",
" mergedDefectIdDataObj= self.__DefServiceclient.factory.create(\"mergedDefectIdDataObj\")\n",
" triageStoreIds= self.__DefServiceclient.factory.create(\"triageStoreIdDataObj\")\n",
" mergedDefectIdDataObj.cid= cid\n",
" mergedDefectIdDataObj.mergeKey= mergeKey\n",
" triageStoreIds.name= name\n",
" try:\n",
" return self.__DefServiceclient.service.getTriageHistory(mergedDefectIdDataObj, triageStoreIds)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# METHOD: DefectService->updateStreamDefects.\n",
" def updateStreamDefects(self, defectTriageId, defectTriageVerNum, id, verNum, defectStateAttributeValues):\n",
" \"\"\"Update the one or more attribute values for all instances of a CID found in a given stream. Note that this update will apply to all instances of the CID in all streams that share the same triage store. \n",
" :param:\n",
" defectTriageId [long] => Internal value for the last known triage ID. This ID changes when developers triage the issue that is associated with the id.\n",
" defectTriageVerNum [int] => Internal value for the last known triage version. This number changes when developers triage the issue that is associated with the id.\n",
" id [long] => Internal identifier for the software issue within the context of the stream.\n",
" verNum [int] => Version number associated with the id. \n",
" defectStateAttributeValues [defectStateAttributeValueDataObj] => Attribute name/value pair. One or more pairs required. \"\"\"\n",
" streamDefectIds= self.__DefServiceclient.factory.create(\"streamDefectIdDataObj\")\n",
" defectStateSpec= self.__DefServiceclient.factory.create(\"defectStateSpecDataObj\")\n",
" streamDefectIds.defectTriageId=defectTriageId\n",
" streamDefectIds.defectTriageVerNum= defectTriageVerNum\n",
" streamDefectIds.id= id\n",
" streamDefectIds.verNum= verNum\n",
" defectStateSpec.defectStateAttributeValues= defectStateAttributeValues\n",
" try:\n",
" self.__DefServiceclient.service.updateStreamDefects(streamDefectIds, defectStateSpec)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
" \n",
"# METHOD: DefectService->updateTriageForCIDsInTriageStore.\n",
" def updateTriageForCIDsInTriageStore(self, name, cid, mergeKey, defectStateAttributeValues):\n",
" \"\"\"Update one or more attribute values for a CID in a specified triage store.\n",
" :param:\n",
" name [string] => Required. Name of triage store.\n",
" cid [long] => CID.\n",
" mergeKey [string] => Numeric key for a CID.\n",
" defectStateAttributeValues [defectStateAttributeValueDataObj] => Attribute name/value pair. One or more pairs required.\"\"\"\n",
" triageStore= self.__DefServiceclient.factory.create(\"triageStoreIdDataObj\")\n",
" mergedDefectIdDataObjs= self.__DefServiceclient.factory.create(\"mergedDefectIdDataObj\")\n",
" defectState= self.__DefServiceclient.factory.create(\"defectStateSpecDataObj\")\n",
" triageStore.name = name\n",
" mergedDefectIdDataObjs.cid= cid\n",
" mergedDefectIdDataObjs.mergeKey= mergeKey\n",
" defectState.defectStateAttributeValues= defectStateAttributeValues\n",
" try:\n",
" self.__DefServiceclient.service.updateTriageForCIDsInTriageStore(triageStore, mergedDefectIdDataObjs, defectState)\n",
" except suds.WebFault as detail:\n",
" return detail\n",
"\n",
"# DATA_TYPE: Create attributeValueChangeSpecDataObj data type.\n",
" def attributeValueChangeSpec(self, attributeValueIds, attributeValues):\n",
" \"\"\"Create attributeValueChangeSpecDataObj data type.\n",
" :param:\n",
" attributeValueIds [attributeValueIdDataObj] => Automatically generated set of IDs for attribute values.\n",
" attributeValues [ attributeValueSpecDataObj] => Set of values available to an attribute.\"\"\"\n",
" attributeValueChangeSpecDataObj= self.__ConfServiceclient.factory.create(\"attributeValueChangeSpecDataObj\")\n",
" attributeValueChangeSpecDataObj.attributeValueIds= attributeValueIds\n",
" attributeValueChangeSpecDataObj.attributeValues= attributeValues\n",
" return attributeValueChangeSpecDataObj \n",
"\n",
"# DATA_TYPE: Create attributeValueIdDataObj data type.\n",
" def attributeValueIdsDataObj(self, name):\n",
" \"\"\"Create attributeValueIdDataObj data type\n",
" :param:\n",
" name [string] => Name of the automatically generated ID for the attribute value. Do not specify when creating an attribute value.\"\"\"\n",
" attributeValueIds= self.__ConfServiceclient.factory.create(\"attributeValueIdDataObj\")\n",
" attributeValueIds.name = name\n",
" return attributeValueIds\n",
"\n",
"# DATA_TYPE: Create attributeValueSpecDataObj data type.\n",
" def attributeValues(self, deprecated, name):\n",
" \"\"\"Create attributeValueSpecDataObj data type.\n",
" :param:\n",
" deprecated [boolean] => Value of true if the specified attribute value is deprecated. Otherwise, false.\n",
" name [string] => Name of the attribute value.\"\"\"\n",
" attributeValueSpecDataObj=self.__ConfServiceclient.factory.create(\"attributeValueSpecDataObj\")\n",
" attributeValueSpecDataObj.deprecated= deprecated\n",
" attributeValueSpecDataObj.name= name\n",
" return attributeValueSpecDataObj\n",
"\n",
"# DATA_TYPE: Create projectScopeDefectFilterSpecDataObj data type.\n",
" def projectScopeDefectFilterSpecDataObj(self, actionNameList, checkerCategoryList, checkerList, checkerTypeList, cidList, \n",
" clssificationNameList, componentIdExclude, componentIdList, cweList, firstDetectedEndDate, \n",
" firstDetectedStartDate, fixTargetNameList, impactNameList, issueKindList, lastDetectedEndDate,\n",
" lastDetectedStartDate, legacyNameList, ownerNameList, ownerNamePattern, severityNameList):\n",
" \"\"\"Create projectScopeDefectFilterSpecDataObj data type.\n",
" :param:\n",
" actionNameList [string] => Name/value pairs for a list of attributes.\n",
" checkerCategoryList [string] => List of checker categories.\n",
" checkerList [string] => List of checkers.\n",
" checkerTypeList [string] => List of checker types.\n",
" cidList [long] => List of CIDs.\n",
" classificationNameList [string] => Classification of the CID.\n",
" componentIdExclude [boolean] => If one or more component name filters is specified, set to true to exclude matching results from the specified components. \n",
" Defaults to false, including the matches from the components in the results.\n",
" componentIdList [string] => Name of a component that contains the CID. \n",
" cweList [long] => Common Weakness Enumeration identifier of the type of issue found by the checker. Zero or more identifiers allowed.\n",
" firstDetectedEndDate [dateTime]=> Ending date (and optionally, time) for the date range matching the First Detected date of a CID.\n",
" Example1: 2013-03-18T12:42:19.384-07:00 Example2: 2013-03-18\n",
" firstDetectedStartDate [dateTime]=> Starting date (and optionally, time) for the date range matching the First Detected date of a CID. \n",
" fixTargetNameList [string] => Fix target for the CID; a triage value for the CID.\n",
" impactNameList [string] => Probable impact (High, Medium, or Low) of the issue found by the checker. Zero or more impact levels allowed.\n",
" issueKindList [string] => Issue kind.\n",
" lastDetectedEndDate [dateTime]=> Ending date (and optionally, time) for the date range matching the Last Detected date of a CID.\n",
" lastDetectedStartDate [dateTime]=> Starting date (and optionally, time) for the date range matching the Last Detected date of a CID.\n",
" legacyNameList [string] => Legacy designation for the CID (true or false), a triage value for the CID. Built-in attribute. Defaults to false.\n",
" ownerNameList [string] => Owner of the CID.\n",
" ownerNamePattern [string] => Glob pattern matching the first or last name of the owner of a CID.\n",
" severityNameList [string] => Severity of the CID; a triage value for the CID.\"\"\"\n",
" filterSpec= self.__DefServiceclient.factory.create(\"projectScopeDefectFilterSpecDataObj\")\n",
" componentIdDataObj= self.__DefServiceclient.factory.create(\"componentIdDataObj\")\n",
" componentIdDataObj.name = componentIdList\n",
" filterSpec.actionNameList=actionNameList\n",
" filterSpec.checkerCategoryList= checkerCategoryList\n",
" filterSpec.checkerList= checkerList\n",
" filterSpec.checkerTypeList= checkerTypeList\n",
" filterSpec.cidList= cidList\n",
" filterSpec.classificationNameList= clssificationNameList\n",
" filterSpec.componentIdExclude= componentIdExclude\n",
" filterSpec.componentIdList=componentIdDataObj\n",
" filterSpec.cweList= cweList\n",
" filterSpec.firstDetectedEndDate= firstDetectedEndDate\n",
" filterSpec.firstDetectedStartDate= firstDetectedStartDate\n",
" filterSpec.fixTargetNameList= fixTargetNameList\n",
" filterSpec.impactNameList= impactNameList\n",
" filterSpec.issueKindList= issueKindList\n",
" filterSpec.lastDetectedEndDate= lastDetectedEndDate\n",
" filterSpec.lastDetectedStartDate= lastDetectedStartDate\n",
" filterSpec.legacyNameList= legacyNameList\n",
" filterSpec.ownerNameList= ownerNameList\n",
" filterSpec.ownerNamePattern= ownerNamePattern\n",
" filterSpec.severityNameList= severityNameList\n",
" return filterSpec\n",
"\n",
"# DATA_TYPE: Create pageSpecDataObj data type.\n",
" def pageSpecDataObj(self, pageSize, sortAscending, sortField, startIndex):\n",
" \"\"\"Create pageSpecDataObj data type.\n",
" :param:\n",
" pageSize [int] => Required. Up to 1000 records per page.\n",
" sortAscending [boolean] => Set to false to return records in reverse alphabetical or numerical order. Defaults to true.\n",
" sortField [string] => Name of the field to use for sorting results. Not all fields are supported. However, you can typically sort by a field that returns numeric results, such as cid and the date fields.\n",
" startIndex [int] => Zero-based index of records to return. Defaults to 0.\"\"\"\n",
" pageSpec= self.__DefServiceclient.factory.create(\"pageSpecDataObj\")\n",
" pageSpec.pageSize= pageSize\n",
" pageSpec.sortAscending= sortAscending\n",
" pageSpec.sortField= sortField\n",
" pageSpec.startIndex= startIndex\n",
" return pageSpec\n",
"\n",
"# DATA_TYPE: Create snapshotScopeDefectFilterSpecDataObj data type.\n",
" def snapshotScopeDefectFilterSpecDataObj(self, actionNameList, attributeDefinitionValueFilterMap, checkerCategoryList, checkerList, checkerTypeList,\n",
" cidList, classificationNameList, componentIdExclude, componentIdList, cweList, externalReference, fileName,\n",
" firstDetectedEndDate, firstDetectedStartDate, fixTargetNameList, functionMergeName, functionName, impactNameList,\n",
" issueComparison, issueKindList, lastDetectedEndDate, lastDetectedStartDate, legacyNameList, maxOccurrenceCount,\n",
" mergeExtra, mergeKey, minOccurrenceCount, ownerNameList, ownerNamePattern, severityNameList, statusNameList,\n",
" streamExcludeNameList, streamExcludeQualifier, streamIncludeNameList, streamIncludeQualifier):\n",
" \"\"\"Create snapshotScopeDefectFilterSpecDataObj.\n",
" :param:\n",
" actionNameList \t [string] => Name/value pairs for a list of attributes.\n",
" attributeDefinitionValueFilterMap [attributeDefinitionValueFilterMapDataObj] => Specification of an attribute value.\n",
" checkerCategoryList \t [string] => List of checker categories.\n",
" checkerList \t [string] => List of checkers.\n",
" checkerTypeList \t [string] => List of checker types.\n",
" cidList \t [long] => List of CIDs.\n",
" classificationNameList [string] => Classification of the CID. \n",
" componentIdExclude \t [boolean] => If one or more component name filters is specified, set to true to exclude matching results from the specified components. \n",
" Defaults to false, including the matches from the components in the results.\n",
" componentIdList [componentIdDataObj] => Name of a component that contains the CID. \n",
" cweList \t [long] => Common Weakness Enumeration identifier of the type of issue found by the checker. Zero or more identifiers allowed.\n",
" externalReference [string] => An external reference for a CID that is used by your company to identify the software issue. Corresponds to a field in the Coverity Connect triage pane.\n",
" fileName \t [string] => A file name. Example: /test.c\n",
" firstDetectedEndDate [dateTime] => Ending date (and optionally, time) for the date range matching the First Detected date of a CID.\n",
"\t\t\t\t Example1: 2013-03-18T12:42:19.384-07:00 Example2: 3/18/2013\n",
" firstDetectedStartDate [dateTime] => Starting date (and optionally, time) for the date range matching the First Detected date of a CID.\n",
"\t\t fixTargetNameList \t [string] => Fix target for the CID; a triage value for the CID.\n",
" functionMergeName \t [string] => Internal function name used as one of the criteria for merging separate occurrences of the same software issue, \n",
" with the result that they are identified by the same CID.\n",
" functionName \t [string] =>\tName of the function or method.\n",
" impactNameList \t [string] =>\tProbable impact (High, Medium, or Low) of the issue found by the checker. Zero or more impact levels allowed.\n",
" issueComparison \t [string] =>\tIf set to PRESENT, returns overlapping CIDs in a snapshot comparison, that is, CIDs found in snapshot(s) to which both the showSelector and compareSelector values of the snaphotScope parameter (snapshotScopeSpecDataObj) apply.\n",
" If set to ABSENT, returns CIDs that are present in the snapshot(s) to which the showSelector value applies but absent from those to which the compareSelector value applies. \n",
" If not set, values are PRESENT and ABSENT.\n",
" issueKindList \t [string] =>\tIssue kind. \n",
" lastDetectedEndDate \t [dateTime] =>\tEnding date (and optionally, time) for the date range matching the Last Detected date of a CID.\n",
"\t\t lastDetectedStartDate \t [dateTime] =>\tStarting date (and optionally, time) for the date range matching the Last Detected date of a CID.\n",
"\t\t legacyNameList \t [string] =>\tLegacy designation for the CID (true or false), a triage value for the CID. Built-in attribute. Defaults to false.\n",
" maxOccurrenceCount \t [int] =>\tMaximum number of instances of software issues associated with a given CID.\n",
"\t\t mergeExtra \t [string] =>\tInternal property used as one of the criteria for merging occurrences of an issue.\n",
" mergeKey \t [string] =>\tInternal signature used to merge separate occurrences of the same software issue and identify them all by the same CID.\n",
" minOccurrenceCount \t [int] =>\tMinimum number of instances of software issues associated with a given CID.\n",
"\t\t ownerNameList \t [string] =>\tOwner of the CID.\n",
" ownerNamePattern \t [string] =>\tGlob pattern matching the first or last name of the owner of a CID.\n",
" severityNameList \t [string] =>\tSeverity of the CID; a triage value for the CID.\n",
" statusNameList \t [string] =>\tStatus of the CID. \n",
" streamExcludeNameList \t [streamIdDataObj] =>\tIdentifier for a stream to exclude. \n",
" streamExcludeQualifier \t [string] =>\tIf set to ANY, the filter will exclude from the results CIDs found in each of the streams listed in the streamExcludeNameList field. \n",
" If set to ALL, the filter will only exclude a CID if it is found in all listed streams. Valid values are ANY or ALL. Defaults to ANY.\n",
" streamIncludeNameList \t [streamIdDataObj] =>\tIdentifier for a stream to include.\n",
" streamIncludeQualifier \t [string] =>\tIf set to ANY, the filter will return CIDs found in each of the streams listed in the streamIncludeNameList field.\n",
" If set to ALL, the filter will only return a CID if it is found in all listed streams. Valid values are ANY or ALL. Defaults to ANY.\"\"\"\n",
" filterSpec= self.__DefServiceclient.factory.create(\"snapshotScopeDefectFilterSpecDataObj\");\n",
" componentIdListData= self.__DefServiceclient.factory.create(\"componentIdDataObj\")\n",
" streamIdDataObj= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
" streamIdDataObj2= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
"\n",
" componentIdListData.name= componentIdList\n",
" streamIdDataObj.name= streamExcludeNameList\n",
" streamIdDataObj2.name= streamIncludeNameList\n",
" filterSpec.actionNameList= actionNameList\n",
" filterSpec.attributeDefinitionValueFilterMap= attributeDefinitionValueFilterMap\n",
" filterSpec.checkerCategoryList= checkerCategoryList\n",
" filterSpec.checkerList= checkerList\n",
" filterSpec.checkerTypeList= checkerTypeList\n",
" filterSpec.cidList= cidList\n",
" filterSpec.classificationNameList= classificationNameList\n",
" filterSpec.componentIdExclude= componentIdExclude\n",
" filterSpec.componentIdList= componentIdListData\n",
" filterSpec.cweList= cweList\n",
" filterSpec.externalReference= externalReference\n",
" filterSpec.fileName= fileName\n",
" filterSpec.firstDetectedEndDate= firstDetectedEndDate\n",
" filterSpec.firstDetectedStartDate= firstDetectedStartDate\n",
" filterSpec.fixTargetNameList= fixTargetNameList\n",
" filterSpec.functionMergeName= functionMergeName\n",
" filterSpec.functionName= functionName\n",
" filterSpec.impactNameList= impactNameList\n",
" filterSpec.issueComparison= issueComparison\n",
" filterSpec.issueKindList= issueKindList\n",
" filterSpec.lastDetectedEndDate= lastDetectedEndDate\n",
" filterSpec.lastDetectedStartDate= lastDetectedStartDate\n",
" filterSpec.legacyNameList= legacyNameList\n",
" filterSpec.maxOccurrenceCount= maxOccurrenceCount\n",
" filterSpec.mergeExtra= mergeExtra\n",
" filterSpec.mergeKey= mergeKey\n",
" filterSpec.minOccurrenceCount= minOccurrenceCount\n",
" filterSpec.ownerNameList= ownerNameList\n",
" filterSpec.ownerNamePattern= ownerNamePattern\n",
" filterSpec.severityNameList= severityNameList\n",
" filterSpec.statusNameList= statusNameList\n",
" filterSpec.streamExcludeNameList= streamIdDataObj\n",
" filterSpec.streamExcludeQualifier= streamExcludeQualifier\n",
" filterSpec.streamIncludeNameList= streamIdDataObj2\n",
" filterSpec.streamIncludeQualifier= streamIncludeQualifier\n",
" return filterSpec\n",
"\n",
"# DATA_TYPE: Create defectStateAttributeValueDataObj data type.\n",
" def defectStateAttributeValueDataObj(self, attributeName, valueOfAttribute):\n",
" \"\"\"Create defectStateAttributeValueDataObj data type.\n",
" :param:\n",
" attributeName [string] => Identifier for an attribute.\n",
" valueOfAttribute [string] => Value of the attribute.\"\"\"\n",
" defectStateAttributeValues= self.__DefServiceclient.factory.create(\"defectStateAttributeValueDataObj \")\n",
" attributeDefinitionId= self.__DefServiceclient.factory.create(\"attributeDefinitionIdDataObj\")\n",
" attributeValueId = self.__DefServiceclient.factory.create(\"attributeValueIdDataObj\")\n",
" attributeDefinitionId.name= attributeName\n",
" attributeValueId.name= valueOfAttribute\n",
" defectStateAttributeValues.attributeDefinitionId= attributeDefinitionId\n",
" defectStateAttributeValues.attributeValueId= attributeValueId\n",
" return defectStateAttributeValues\n",
"\n",
"# DATA_TYPE: Create mergedDefectFilterSpecDataObj data type.\n",
" def mergedDefectFilterSpecDataObj(self, cidList, checkerSubcategoryFilterSpecList, filenamePatternList, componentIdList, statusNameList, classificationNameList, \n",
" actionNameList, fixTargetNameList, severityNameList, legacyNameList, ownerNameList, issueKindList, attributeDefinitionValueFilterMap,\n",
" componentIdExclude, defectPropertyKey, defectPropertyPattern, externalReferencePattern, firstDetectedEndDate, firstDetectedStartDate,\n",
" functionNamePattern, lastDetectedEndDate, lastDetectedStartDate, lastFixedEndDate, lastFixedStartDate, lastTriagedEndDate, lastTriagedStartDate,\n",
" maxCid, maxOccurrenceCount, mergedDefectIdDataObjs, minCid, minOccurrenceCount, ownerNamePattern, snapshotComparisonField,streamExcludeNameList,\n",
" streamExcludeQualifier, streamIncludeNameList, streamIncludeQualifier):\n",
" \"\"\"Create mergedDefectFilterSpecDataObj data type.\n",
" :param:\n",
" cidList \t[long] =>\tA CID. Multiple CIDs allowed.\n",
" checkerSubcategoryFilterSpecList \t[checkerSubcategoryFilterSpecDataObj ] =>\tChecker subcategory specification. Multiple allowed.\n",
" filenamePatternList \t[string] =>\tFilename pattern for source code files that containing software issues associated with the CIDs. Up to 20 patterns allowed.\n",
" componentIdList \t[componentIdDataObj] =>\tName of a component that contains the CID. Multiple components allowed.\n",
" statusNameList \t[string] =>\tStatus of the CID. Multiple statuses allowed.\n",
" classificationNameList \t[string] =>\tClassification of the CID; a triage value for the CID. Multiple classifications allowed.\n",
" actionNameList \t[string] =>\tName/value pairs for a list of attributes.\n",
" fixTargetNameList \t[string] =>\tFix target for the CID; a triage value for the CID. Multiple fix targets allowed.\n",
" severityNameList \t[string] =>\tSeverity of the CID; a triage value for the CID. Multiple severities allowed.\n",
" legacyNameList \t[string] =>\tLegacy designation for the CID (true or false); a triage value for the CID. Built-in attribute. Defaults to false.\n",
" ownerNameList \t[string] =>\tOwner of the CID.\n",
" issueKindList \t[string] =>\tKind of issue identified by the CID.\n",
" attributeDefinitionValueFilterMap \t[attributeDefinitionValueFilterMapDataObj ] =>\tSpecification of an attribute value.\n",
" componentIdExclude \t[boolean] =>\tIf one or more component name filters is specified, set to true to exclude matching results from the specified components. Defaults to false, including the matches from the components in the results.\n",
" defectPropertyKey \t[string] =>\tDo not use this field. The API does not process these values.\n",
" defectPropertyPattern \t[string] =>\tDo not use this field. The API does not process these values.\n",
" externalReferencePattern \t[string] =>\tGlob pattern matching the value of an Ext. Reference attribute value.\n",
" firstDetectedEndDate \t[dateTime] =>\tEnding date (and optionally, time) for the date range matching the First Detected date of a CID.\n",
"\t\t Example1: 2013-03-18T12:42:19.384-07:00 \tExample2: 3/18/2013\n",
" firstDetectedStartDate \t[dateTime] =>\tStarting date (and optionally, time) for the date range matching the First Detected date of a CID. \n",
"\t functionNamePattern \t[string] =>\tGlob pattern matching the name of the function (or method) associated with a CID.\n",
" lastDetectedEndDate \t[dateTime] =>\tEnding date (and optionally, time) for the date range matching the Last Detected date of a CID.\n",
"\t\t lastDetectedStartDate \t[dateTime] =>\tStarting date (and optionally, time) for the date range matching the Last Detected date of a CID.\n",
"\t\t lastFixedEndDate \t[dateTime] =>\tEnding date (and optionally, time) for the date range matching the Last Fixed date of a CID.\n",
"\t\t lastFixedStartDate \t[dateTime] =>\tStarting date (and optionally, time) for the date range matching the Last Fixed date of a CID.\n",
"\t\t lastTriagedEndDate \t[dateTime] =>\tEnding date (and optionally, time) for the date range matching the Last Triaged date of a CID.\n",
"\t\t lastTriagedStartDate \t[dateTime] =>\tStarting date (and optionally, time) for the date range matching the Last Triaged date of a CID.\n",
"\t\t maxCid \t[long] =>\tUpper numeric bound of CIDs to retrieve. For example, no greater than CID 25000.\n",
"\t maxOccurrenceCount \t[int] =>\tMaximum number of instances of software issues associated with a given CID.\n",
"\t\t mergedDefectIdDataObjs \t[mergedDefectIdDataObj] =>\tIdentifier for a software issue.\n",
"\t\t Multiple specifications are allowed.\n",
" minCid \t[long] =>\tLower numeric bound of CIDs to retrieve. For example, no smaller than CID 24500.\n",
"\t\t minOccurrenceCount \t[int] =>\tMinimum number of instances of software issues associated with a given CID.\n",
"\t\t ownerNamePattern \t[string] =>\tGlob pattern matching the first or last name of the owner of a CID.\n",
" snapshotComparisonField \t[string] =>\t\n",
" streamExcludeNameList \t[streamIdDataObj] =>\tIdentifier for a stream to exclude. Multiple streams are allowed.\n",
" streamExcludeQualifier \t[string] =>\t\n",
" streamIncludeNameList \t[streamIdDataObj] =>\tIdentifier for a stream to include. Multiple streams are allowed.\n",
" streamIncludeQualifier \t[string] =>\t\"\"\"\n",
" filterSpec= self.__DefServiceclient.factory.create(\"mergedDefectFilterSpecDataObj\")\n",
" filterSpec.cidList= cidList\n",
" filterSpec.checkerSubcategoryFilterSpecList=checkerSubcategoryFilterSpecList\n",
" filterSpec.filenamePatternList= filenamePatternList\n",
" filterSpec.componentIdList= componentIdList\n",
" filterSpec.statusNameList= statusNameList\n",
" filterSpec.classificationNameList= classificationNameList\n",
" filterSpec.actionNameList= actionNameList\n",
" filterSpec.fixTargetNameList= fixTargetNameList\n",
" filterSpec.severityNameList= severityNameList\n",
" filterSpec.legacyNameList= legacyNameList\n",
" filterSpec.ownerNameList= ownerNameList\n",
" filterSpec.issueKindList= issueKindList\n",
" filterSpec.attributeDefinitionValueFilterMap= attributeDefinitionValueFilterMap\n",
" filterSpec.componentIdExclude= componentIdExclude\n",
" filterSpec.defectPropertyKey= defectPropertyKey\n",
" filterSpec.defectPropertyPattern= defectPropertyPattern\n",
" filterSpec.externalReferencePattern= externalReferencePattern\n",
" filterSpec.firstDetectedEndDate= firstDetectedEndDate\n",
" filterSpec.firstDetectedStartDate= firstDetectedStartDate\n",
" filterSpec.functionNamePattern= functionNamePattern\n",
" filterSpec.lastDetectedEndDate= lastDetectedEndDate\n",
" filterSpec.lastDetectedStartDate= lastDetectedStartDate\n",
" filterSpec.lastFixedEndDate= lastFixedEndDate\n",
" filterSpec.lastFixedStartDate= lastDetectedStartDate\n",
" filterSpec.lastTriagedEndDate= lastTriagedEndDate\n",
" filterSpec.lastTriagedStartDate= lastTriagedStartDate\n",
" filterSpec.maxCid= maxCid\n",
" filterSpec.maxOccurrenceCount= maxOccurrenceCount\n",
" filterSpec.mergedDefectIdDataObjs= mergedDefectIdDataObjs\n",
" filterSpec.minCid= minCid\n",
" filterSpec.minOccurrenceCount= minOccurrenceCount\n",
" filterSpec.ownerNamePattern= ownerNamePattern\n",
" filterSpec.snapshotComparisonField= snapshotComparisonField\n",
" filterSpec.streamExcludeNameList= streamExcludeNameList\n",
" filterSpec.streamExcludeQualifier= streamExcludeQualifier\n",
" filterSpec.streamIncludeNameList= streamIncludeNameList\n",
" filterSpec.streamIncludeQualifier= streamIncludeQualifier\n",
" return filterSpec\n",
"\n",
"# DATA_TYPE: Create checkerSubcategoryFilterSpecDataObj data type.\n",
" def checkerSubcategoryFilterSpecDataObj(self, checkerName, domain, subcategory):\n",
" \"\"\"\"Create checkerSubcategoryFilterSpecDataObj data type.\n",
" :param:\n",
" checkerName [string] => Checker associated with the subcategory.\n",
" domain [string] => Domain associated with the subcategory.\n",
" subcategory [string] => Subcategory on which to filter. \"\"\"\n",
" checkerSubcategoryFilterSpec= self.__DefServiceclient.factory.create(\"checkerSubcategoryFilterSpecDataObj\")\n",
" checkerSubcategoryFilterSpec.checkerName= checkerName\n",
" checkerSubcategoryFilterSpec.domain= domain\n",
" checkerSubcategoryFilterSpec.subcategory= subcategory\n",
" return checkerSubcategoryFilterSpec\n",
"\n",
"# DATA_TYPE: Create componentIdDataObj data type.\n",
" def componentIdDataObj (self, name):\n",
" \"\"\"Create componentIdDataObj data type.\n",
" :param:\n",
" name [string] => Name of a component in the project in the form [componentMap].[component].\"\"\"\n",
" componentIdList= self.__DefServiceclient.factory.create(\"componentIdDataObj\")\n",
" componentIdList.name= name\n",
" return componentIdList\n",
" \n",
"# DATA_TYPE: Create attributeDefinitionValueFilterMapDataObj data type.\n",
" def attributeDefinitionValueFilterMapDataObj(self, attributeDefinitionId, attributeValueIdsData):\n",
" \"\"\"\"Create attributeDefinitionValueFilterMapDataObj data type.\n",
" :param:\n",
" attributeDefinitionId [attributeDefinitionIdDataObj] => Identifier for the attribute to filter. \n",
" attributeValueIds [attributeValueIdDataObj] => Value of the attribute to filter. Multiple values allowed.\"\"\"\n",
" attributeDefinitionValueFilterMap= self.__DefServiceclient.factory.create(\"attributeDefinitionValueFilterMapDataObj\")\n",
" attributeDefinitionValueFilterMap.attributeDefinitionId= attributeDefinitionId\n",
" attributeDefinitionValueFilterMap.attributeValueIds= attributeValueIdsData\n",
" return attributeDefinitionValueFilterMap\n",
"\n",
"# DATA_TYPE: Create attributeDefinitionIdDataObj data type.\n",
" def attributeDefinitionIdDataObj(self, name):\n",
" \"\"\"Create attributeDefinitionIdDataObj data type.\n",
" :param:\n",
" name [string] => Name of the attribute.\"\"\"\n",
" attributeDefinitionId= self.__DefServiceclient.factory.create(\"attributeDefinitionIdDataObj\")\n",
" attributeDefinitionId.name= name\n",
" return attributeDefinitionId\n",
"\n",
"# DATA_TYPE: Create mergedDefectIdDataObj data type.\n",
" def mergedDefectIdDataObj(self, cid, mergeKey):\n",
" \"\"\"Create mergedDefectIdDataObj data type.\n",
" :param:\n",
" cid [long] => CID.\n",
" mergeKey [string] => Numeric key for a CID.\"\"\"\n",
" mergedDefectIdDataObjs= self.__DefServiceclient.factory.client(\"mergedDefectIdDataObj\")\n",
" mergedDefectIdDataObjs.cid= cid\n",
" mergedDefectIdDataObjs.mergeKey= mergeKey\n",
" return mergedDefectIdDataObjs\n",
"\n",
"# DATA_TYPE: Create streamIdDataObj data type.\n",
" def streamIdDataObj(self, name):\n",
" \"\"\"Create streamIdDataObj data type.\n",
" :param:\n",
" name [string] => Required. Name of the stream.\"\"\"\n",
" streamIdData= self.__DefServiceclient.factory.create(\"streamIdDataObj\")\n",
" streamIdData.name= name\n",
" return streamIdData\n"
] | [
0,
0,
0.043478260869565216,
0,
0.08333333333333333,
0.04,
0.041666666666666664,
0,
0,
0.07142857142857142,
0.07142857142857142,
0.07142857142857142,
0,
0.030303030303030304,
0,
0.07142857142857142,
0,
0,
0,
0,
0,
0.08928571428571429,
0,
0,
0,
0.07142857142857142,
0.03571428571428571,
0.03571428571428571,
0.07407407407407407,
0,
0,
0,
0,
0.03225806451612903,
0.034482758620689655,
0.03225806451612903,
0.01818181818181818,
0,
0.018518518518518517,
0.01818181818181818,
0.02631578947368421,
0,
0.013333333333333334,
0,
0.5,
0.02040816326530612,
0,
0.03225806451612903,
0.01818181818181818,
0,
0.018518518518518517,
0.020833333333333332,
0.02631578947368421,
0,
0.013513513513513514,
0,
0.06666666666666667,
0,
0.010752688172043012,
0.009523809523809525,
0,
0.00909090909090909,
0.0058823529411764705,
0.006622516556291391,
0.009433962264150943,
0.022222222222222223,
0,
0,
0,
0.02127659574468085,
0,
0.0125,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0.023529411764705882,
0.023809523809523808,
0.013333333333333334,
0.027777777777777776,
0,
0.010638297872340425,
0,
0,
0.07142857142857142,
0,
0.007575757575757576,
0,
0,
0,
0,
0.006024096385542169,
0.008695652173913044,
0,
0.00819672131147541,
0.017543859649122806,
0.014705882352941176,
0.014705882352941176,
0.02127659574468085,
0.015151515151515152,
0.015625,
0.015384615384615385,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0.007142857142857143,
0,
0,
0.009900990099009901,
0.009433962264150943,
0.010309278350515464,
0.007575757575757576,
0.00625,
0.006493506493506494,
0,
0.020833333333333332,
0.012658227848101266,
0.02631578947368421,
0.019230769230769232,
0.019230769230769232,
0.023809523809523808,
0.021739130434782608,
0.019230769230769232,
0.023809523809523808,
0.025,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0.006578947368421052,
0.024096385542168676,
0.03225806451612903,
0,
0,
0,
0,
0.125,
0,
0,
0.011904761904761904,
0,
0.009523809523809525,
0.021505376344086023,
0.022727272727272728,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0.007874015748031496,
0,
0.011627906976744186,
0.013513513513513514,
0.024691358024691357,
0.03333333333333333,
0.024390243902439025,
0.030303030303030304,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0.010526315789473684,
0.00909090909090909,
0.010526315789473684,
0.023255813953488372,
0.023255813953488372,
0.027777777777777776,
0.03333333333333333,
0,
0.018518518518518517,
0.02631578947368421,
0,
0,
0.024390243902439025,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.023809523809523808,
0.034482758620689655,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0.03571428571428571,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0.006896551724137931,
0.009174311926605505,
0.009523809523809525,
0.022988505747126436,
0.021739130434782608,
0.017241379310344827,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008333333333333333,
0,
0,
0.0056179775280898875,
0.006369426751592357,
0.007462686567164179,
0.00625,
0.006944444444444444,
0.005780346820809248,
0.00909090909090909,
0.007751937984496124,
0.023529411764705882,
0.023255813953488372,
0.027777777777777776,
0.023809523809523808,
0.02631578947368421,
0.024390243902439025,
0.02,
0.03333333333333333,
0.029411764705882353,
0.022727272727272728,
0.018518518518518517,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0.007575757575757576,
0,
0,
0.01020408163265306,
0.006289308176100629,
0.009433962264150943,
0,
0.008695652173913044,
0.019801980198019802,
0.022222222222222223,
0.018691588785046728,
0.019230769230769232,
0.01639344262295082,
0.022988505747126436,
0.01694915254237288,
0.017543859649122806,
0.01694915254237288,
0,
0.009174311926605505,
0,
0,
0,
0,
0,
0.007751937984496124,
0,
0,
0.01282051282051282,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0.007874015748031496,
0,
0,
0.012987012987012988,
0.034482758620689655,
0,
0.010416666666666666,
0,
0,
0,
0,
0.013513513513513514,
0.011235955056179775,
0,
0,
0.00819672131147541,
0.01282051282051282,
0.027777777777777776,
0.024096385542168676,
0.024390243902439025,
0,
0.01904761904761905,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0.01652892561983471,
0.0053475935828877,
0.018518518518518517,
0.005263157894736842,
0.013157894736842105,
0.03571428571428571,
0.013888888888888888,
0.02564102564102564,
0.023809523809523808,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0.006535947712418301,
0,
0,
0,
0,
0.021052631578947368,
0.012987012987012988,
0.02564102564102564,
0.02040816326530612,
0.034482758620689655,
0,
0.008547008547008548,
0,
0,
0,
0,
0,
0.009433962264150943,
0,
0,
0,
0,
0.021052631578947368,
0.012987012987012988,
0.02564102564102564,
0.02040816326530612,
0.034482758620689655,
0,
0.018691588785046728,
0,
0,
0,
0,
0.009900990099009901,
0.023809523809523808,
0,
0,
0.007352941176470588,
0.011627906976744186,
0.01282051282051282,
0.034482758620689655,
0,
0.02142857142857143,
0,
0,
0,
0,
0.009708737864077669,
0.007142857142857143,
0,
0,
0.007352941176470588,
0.022988505747126436,
0.01282051282051282,
0.034482758620689655,
0,
0.013986013986013986,
0,
0,
0,
0,
0.006024096385542169,
0.008333333333333333,
0,
0.012345679012345678,
0.006578947368421052,
0.010752688172043012,
0.0048543689320388345,
0.0055248618784530384,
0.0049261083743842365,
0.007042253521126761,
0.0072992700729927005,
0.012987012987012988,
0.022222222222222223,
0.034482758620689655,
0.014492753623188406,
0.01818181818181818,
0.015873015873015872,
0.02040816326530612,
0,
0.00684931506849315,
0,
0,
0.1111111111111111,
0,
0.007407407407407408,
0,
0,
0,
0,
0.017857142857142856,
0.009009009009009009,
0.013793103448275862,
0.015384615384615385,
0.012048192771084338,
0.020833333333333332,
0.021739130434782608,
0.024096385542168676,
0.02857142857142857,
0.025,
0.02,
0.017241379310344827,
0.016129032258064516,
0.015151515151515152,
0.02,
0.02040816326530612,
0,
0.009615384615384616,
0,
0,
0.5,
0,
0,
0,
0,
0,
0.009174311926605505,
0.008849557522123894,
0.01282051282051282,
0.02040816326530612,
0,
0.027777777777777776,
0.025,
0,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021052631578947368,
0.022988505747126436,
0.02564102564102564,
0.02040816326530612,
0.029411764705882353,
0,
0.009345794392523364,
0,
0,
0,
0,
0.009009009009009009,
0.009345794392523364,
0,
0.005813953488372093,
0.0055248618784530384,
0.008130081300813009,
0.023255813953488372,
0.0070921985815602835,
0.02247191011235955,
0.022222222222222223,
0.018518518518518517,
0.015873015873015872,
0.03225806451612903,
0.02564102564102564,
0.012658227848101266,
0,
0.01020408163265306,
0,
0,
0.1111111111111111,
0,
0.010309278350515464,
0.011627906976744186,
0,
0,
0,
0,
0.007194244604316547,
0.023809523809523808,
0.020833333333333332,
0.023255813953488372,
0,
0.025,
0.02,
0.013333333333333334,
0,
0.007874015748031496,
0,
0,
0,
0,
0,
0,
0,
0.008130081300813009,
0.00909090909090909,
0.017241379310344827,
0.012987012987012988,
0.0136986301369863,
0.020833333333333332,
0,
0,
0,
0,
0,
0.006711409395973154,
0.02127659574468085,
0,
0,
0,
0,
0,
0,
0,
0.008333333333333333,
0,
0.019417475728155338,
0.017543859649122806,
0.022222222222222223,
0,
0,
0,
0.015748031496062992,
0.014705882352941176,
0.007194244604316547,
0.007407407407407408,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0.011363636363636364,
0.007751937984496124,
0.020833333333333332,
0.005952380952380952,
0.006711409395973154,
0.008771929824561403,
0.013157894736842105,
0.009615384615384616,
0.006172839506172839,
0,
0.006756756756756757,
0.006666666666666667,
0.005988023952095809,
0,
0.008333333333333333,
0.009615384615384616,
0.02040816326530612,
0.02247191011235955,
0,
0.02040816326530612,
0.016666666666666666,
0.022727272727272728,
0.019230769230769232,
0.027777777777777776,
0.015384615384615385,
0.017241379310344827,
0.018518518518518517,
0.027777777777777776,
0.016129032258064516,
0.015151515151515152,
0.017857142857142856,
0.02,
0.020833333333333332,
0.016666666666666666,
0.015625,
0.02,
0.020833333333333332,
0.018518518518518517,
0.018518518518518517,
0,
0,
0,
0.0125,
0,
0,
0.012048192771084338,
0.0072992700729927005,
0.004424778761061947,
0.009900990099009901,
0.013157894736842105,
0.027777777777777776,
0.021739130434782608,
0.02631578947368421,
0.025,
0,
0,
0,
0.006535947712418301,
0.006535947712418301,
0.006289308176100629,
0.006369426751592357,
0.006493506493506494,
0.007142857142857143,
0,
0,
0.007194244604316547,
0.007518796992481203,
0.008264462809917356,
0.009009009009009009,
0.008620689655172414,
0.009345794392523364,
0.016129032258064516,
0.00909090909090909,
0.005780346820809248,
0.014285714285714285,
0.004784688995215311,
0.004016064257028112,
0.008064516129032258,
0.0051813471502590676,
0.020833333333333332,
0.010256410256410256,
0.02127659574468085,
0.014423076923076924,
0.006493506493506494,
0.007936507936507936,
0.0048543689320388345,
0.003125,
0.007407407407407408,
0.007194244604316547,
0.018518518518518517,
0.005291005291005291,
0.016042780748663103,
0.014634146341463415,
0.011627906976744186,
0.017341040462427744,
0.009345794392523364,
0.005813953488372093,
0.028037383177570093,
0.012345679012345678,
0.006993006993006993,
0.017241379310344827,
0.015151515151515152,
0.008695652173913044,
0.004347826086956522,
0.007633587786259542,
0.004739336492890996,
0.004291845493562232,
0.03,
0.022222222222222223,
0.024096385542168676,
0.023809523809523808,
0,
0.02,
0.019230769230769232,
0.018867924528301886,
0.02,
0.022727272727272728,
0.016666666666666666,
0.022727272727272728,
0.019230769230769232,
0.027777777777777776,
0.015151515151515152,
0.017241379310344827,
0.017857142857142856,
0.027777777777777776,
0.017857142857142856,
0.02631578947368421,
0.016129032258064516,
0.015151515151515152,
0.017857142857142856,
0.017857142857142856,
0.021739130434782608,
0.02,
0.019230769230769232,
0.020833333333333332,
0.016666666666666666,
0.015625,
0.02,
0.017241379310344827,
0.023809523809523808,
0.02631578947368421,
0.017241379310344827,
0.020833333333333332,
0.018518518518518517,
0.018518518518518517,
0.02,
0.017241379310344827,
0.015151515151515152,
0.01694915254237288,
0.015151515151515152,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0.017857142857142856,
0.0196078431372549,
0.010752688172043012,
0.02,
0.020833333333333332,
0.0125,
0.014285714285714285,
0,
0,
0,
0.017964071856287425,
0.005780346820809248,
0.005780346820809248,
0.005434782608695652,
0.010869565217391304,
0.009009009009009009,
0,
0,
0,
0.006896551724137931,
0.005780346820809248,
0.007751937984496124,
0.011111111111111112,
0.0070921985815602835,
0.011494252873563218,
0.007751937984496124,
0.008064516129032258,
0.006289308176100629,
0,
0,
0.007407407407407408,
0.004016064257028112,
0.009174311926605505,
0.008849557522123894,
0.008064516129032258,
0.006711409395973154,
0.02702702702702703,
0.012987012987012988,
0.0234375,
0.013605442176870748,
0.020689655172413793,
0.022222222222222223,
0.02158273381294964,
0.02158273381294964,
0.02097902097902098,
0.027522935779816515,
0.025423728813559324,
0.031914893617021274,
0.030303030303030304,
0.017391304347826087,
0.02608695652173913,
0.027777777777777776,
0.037037037037037035,
0.007936507936507936,
0.018867924528301886,
0.007936507936507936,
0,
0.021739130434782608,
0.027777777777777776,
0.023529411764705882,
0.016666666666666666,
0.019230769230769232,
0.02,
0.015151515151515152,
0.02,
0.017857142857142856,
0.018518518518518517,
0.02,
0.020833333333333332,
0.020833333333333332,
0.022727272727272728,
0.017241379310344827,
0.017857142857142856,
0.015625,
0.014285714285714285,
0.016129032258064516,
0.015151515151515152,
0.016666666666666666,
0.016666666666666666,
0.015625,
0.018518518518518517,
0.01639344262295082,
0.017241379310344827,
0.016129032258064516,
0.029411764705882353,
0.017241379310344827,
0.015151515151515152,
0.029411764705882353,
0.017241379310344827,
0.018518518518518517,
0.014705882352941176,
0.015625,
0.015151515151515152,
0.015625,
0.015151515151515152,
0,
0,
0,
0.011764705882352941,
0,
0,
0.012345679012345678,
0,
0,
0.017241379310344827,
0.016129032258064516,
0.019230769230769232,
0.016129032258064516,
0,
0,
0,
0.024390243902439025,
0,
0,
0.009009009009009009,
0.023255813953488372,
0.02857142857142857,
0,
0.5,
0,
0.00980392156862745,
0,
0,
0.017543859649122806,
0.007407407407407408,
0.015873015873015872,
0.022988505747126436,
0.024096385542168676,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0.0196078431372549,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0.025,
0.02,
0,
0,
0,
0,
0,
0,
0,
0.0125,
0.03125,
0
] | 942 | 0.012405 | false |
# Copyright (C) 2014 Robby Zeitfuchs (@robbyFux)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class ZeusMutexes(Signature):
    """Flags samples that create mutexes characteristic of the Zeus banking trojan."""
    name = "banker_zeus_mutex"
    description = "Creates Zeus (Banking Trojan) mutexes"
    severity = 3
    categories = ["banker"]
    families = ["zeus"]
    authors = ["Robby Zeitfuchs", "KillerInstinct"]
    minimum = "1.2"
    references = ["https://malwr.com/analysis/NmNhODg5ZWRkYjc0NDY0M2I3YTJhNDRlM2FlOTZiMjA/#summary_mutexes",
                  "https://malwr.com/analysis/MmMwNDJlMTI0MTNkNGFjNmE0OGY3Y2I5MjhiMGI1NzI/#summary_mutexes",
                  "https://malwr.com/analysis/MzY5ZTM2NzZhMzI3NDY2YjgzMjJiODFkODZkYzIwYmQ/#summary_mutexes",
                  "https://www.virustotal.com/de/file/301fcadf53e6a6167e559c84d6426960af8626d12b2e25aa41de6dce511d0568/analysis/#behavioural-info",
                  "https://www.virustotal.com/de/file/d3cf49a7ac726ee27eae9d29dee648e34cb3e8fd9d494e1b347209677d62cdf9/analysis/#behavioural-info",
                  "https://www.virustotal.com/de/file/d3cf49a7ac726ee27eae9d29dee648e34cb3e8fd9d494e1b347209677d62cdf9/analysis/#behavioural-info",
                  "https://www.virustotal.com/de/file/301fcadf53e6a6167e559c84d6426960af8626d12b2e25aa41de6dce511d0568/analysis/#behavioural-info"]

    def run(self):
        """Return True when a known Zeus mutex name (recorded in self.data) or a
        suspiciously large batch of GUID-style mutexes is observed."""
        # Well-known Zeus mutex name patterns; first hit wins.
        known_patterns = [
            "_AVIRA_.*",
            "__SYSTEM__.*",
            "_LILO_.*",
            "_SOSI_.*",
            ".*MSIdent Logon",
            ".*MPSWabDataAccessMutex",
            ".*MPSWABOlkStoreNotifyMutex",
        ]
        for pattern in known_patterns:
            hit = self.check_mutex(pattern=pattern, regex=True)
            if not hit:
                continue
            self.data.append({"mutex": hit})
            return True

        # Fallback heuristic: more than 10 GUID-named mutexes is Zeus-like behavior.
        guid_pattern = r"(Local|Global)\\\{[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}\}"
        guid_hits = self.check_mutex(pattern=guid_pattern, regex=True, all=True)
        return bool(guid_hits and len(guid_hits) > 10)
| [
"# Copyright (C) 2014 Robby Zeitfuchs (@robbyFux)\n",
"#\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"from lib.cuckoo.common.abstracts import Signature\n",
"\n",
"class ZeusMutexes(Signature):\n",
" name = \"banker_zeus_mutex\"\n",
" description = \"Creates Zeus (Banking Trojan) mutexes\"\n",
" severity = 3\n",
" categories = [\"banker\"]\n",
" families = [\"zeus\"]\n",
" authors = [\"Robby Zeitfuchs\", \"KillerInstinct\"]\n",
" minimum = \"1.2\"\n",
" references = [\"https://malwr.com/analysis/NmNhODg5ZWRkYjc0NDY0M2I3YTJhNDRlM2FlOTZiMjA/#summary_mutexes\",\n",
" \"https://malwr.com/analysis/MmMwNDJlMTI0MTNkNGFjNmE0OGY3Y2I5MjhiMGI1NzI/#summary_mutexes\",\n",
" \"https://malwr.com/analysis/MzY5ZTM2NzZhMzI3NDY2YjgzMjJiODFkODZkYzIwYmQ/#summary_mutexes\",\n",
" \"https://www.virustotal.com/de/file/301fcadf53e6a6167e559c84d6426960af8626d12b2e25aa41de6dce511d0568/analysis/#behavioural-info\",\n",
" \"https://www.virustotal.com/de/file/d3cf49a7ac726ee27eae9d29dee648e34cb3e8fd9d494e1b347209677d62cdf9/analysis/#behavioural-info\",\n",
" \"https://www.virustotal.com/de/file/d3cf49a7ac726ee27eae9d29dee648e34cb3e8fd9d494e1b347209677d62cdf9/analysis/#behavioural-info\",\n",
" \"https://www.virustotal.com/de/file/301fcadf53e6a6167e559c84d6426960af8626d12b2e25aa41de6dce511d0568/analysis/#behavioural-info\"]\n",
"\n",
" def run(self):\n",
" indicators = [\n",
" \"_AVIRA_.*\",\n",
" \"__SYSTEM__.*\",\n",
" \"_LILO_.*\",\n",
" \"_SOSI_.*\",\n",
" \".*MSIdent Logon\",\n",
" \".*MPSWabDataAccessMutex\",\n",
" \".*MPSWABOlkStoreNotifyMutex\"\n",
" ]\n",
"\n",
" for indicator in indicators:\n",
" match = self.check_mutex(pattern=indicator, regex=True)\n",
" if match:\n",
" self.data.append({\"mutex\": match})\n",
" return True\n",
"\n",
" indicator = r\"(Local|Global)\\\\\\{[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}\\}\"\n",
" matches = self.check_mutex(pattern=indicator, regex=True, all=True)\n",
" if matches and len(matches) > 10:\n",
" return True\n",
"\n",
" return False\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0.009174311926605505,
0.009174311926605505,
0.006756756756756757,
0.006756756756756757,
0.006756756756756757,
0.006756756756756757,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0,
0,
0,
0,
0
] | 56 | 0.001741 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System.Core")
AddReference("System.Collections")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
class StatelessCoarseUniverseSelectionBenchmark(QCAlgorithm):
    """Benchmark: daily coarse universe of the 250 most liquid symbols with
    fundamental data; opens a tiny position in every new arrival."""

    def Initialize(self):
        """Configure dates, cash, universe resolution and the coarse filter."""
        self.UniverseSettings.Resolution = Resolution.Daily

        self.SetStartDate(2017, 11, 1)
        self.SetEndDate(2018, 1, 1)
        self.SetCash(50000)

        self.AddUniverse(self.CoarseSelectionFunction)
        self.numberOfSymbols = 250

    def CoarseSelectionFunction(self, coarse):
        """Return the symbols of the top-dollar-volume assets that have fundamental data."""
        with_fundamentals = [asset for asset in coarse if asset.HasFundamentalData]
        # Rank by daily dollar volume, most liquid first.
        ranked = sorted(with_fundamentals, key=lambda asset: asset.DollarVolume, reverse=True)
        return [asset.Symbol for asset in ranked[:self.numberOfSymbols]]

    def OnSecuritiesChanged(self, changes):
        """Liquidate securities leaving the universe; buy a sliver of the ones entering."""
        if changes is None:
            return

        for removed in changes.RemovedSecurities:
            if removed.Invested:
                self.Liquidate(removed.Symbol)

        for added in changes.AddedSecurities:
            self.SetHoldings(added.Symbol, 0.001)
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System.Core\")\n",
"AddReference(\"System.Collections\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Algorithm import QCAlgorithm\n",
"from QuantConnect.Data.UniverseSelection import *\n",
"\n",
"\n",
"class StatelessCoarseUniverseSelectionBenchmark(QCAlgorithm):\n",
"\n",
" def Initialize(self):\n",
" self.UniverseSettings.Resolution = Resolution.Daily\n",
"\n",
" self.SetStartDate(2017, 11, 1)\n",
" self.SetEndDate(2018, 1, 1)\n",
" self.SetCash(50000)\n",
"\n",
" self.AddUniverse(self.CoarseSelectionFunction)\n",
" self.numberOfSymbols = 250\n",
"\n",
" # sort the data by daily dollar volume and take the top 'NumberOfSymbols'\n",
" def CoarseSelectionFunction(self, coarse):\n",
"\n",
" selected = [x for x in coarse if (x.HasFundamentalData)]\n",
" # sort descending by daily dollar volume\n",
" sortedByDollarVolume = sorted(selected, key=lambda x: x.DollarVolume, reverse=True)\n",
"\n",
" # return the symbol objects of the top entries from our sorted collection\n",
" return [ x.Symbol for x in sortedByDollarVolume[:self.numberOfSymbols] ]\n",
"\n",
" def OnSecuritiesChanged(self, changes):\n",
" # if we have no changes, do nothing\n",
" if changes is None: return\n",
"\n",
" # liquidate removed securities\n",
" for security in changes.RemovedSecurities:\n",
" if security.Invested:\n",
" self.Liquidate(security.Symbol)\n",
"\n",
" for security in changes.AddedSecurities:\n",
" self.SetHoldings(security.Symbol, 0.001)"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.02127659574468085,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0.012195121951219513,
0.037037037037037035,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232
] | 58 | 0.004447 | false |
''' Lib for Plugin
Authors:
Andrey Kvichansky (kvichans on github.com)
Version:
'1.0.3 2018-02-01'
Content
log Logger with timing
get_translation i18n
dlg_wrapper Wrapper for dlg_custom: pack/unpack values, h-align controls
ToDo: (see end of file)
'''
import sys, os, gettext, logging, inspect, re, subprocess
from time import perf_counter
import cudatext as app
import cudax_lib as apx
pass; # Logging
pass; from pprint import pformat
# Default gap (px) between dialog controls
GAP = 5
# Character shortcuts: CR, LF, TAB
c13,c10,c9 = chr(13),chr(10),chr(9)
# Short aliases -> full dlg_custom control-type names (used by dlg_wrapper)
REDUCTS = {'lb' :'label'
          , 'ln-lb' :'linklabel'
          , 'ed' :'edit'
          , 'sp-ed' :'spinedit'
          , 'me' :'memo'
          , 'bt' :'button'
          , 'rd' :'radio'
          , 'ch' :'check'
          , 'ch-bt' :'checkbutton'
          , 'ch-gp' :'checkgroup'
          , 'rd-gp' :'radiogroup'
          , 'cb' :'combo'
          , 'cb-ro' :'combo_ro'
          , 'lbx' :'listbox'
          , 'ch-lbx' :'checklistbox'
          , 'lvw' :'listview'
          , 'ch-lvw' :'checklistview'
          , 'tabs' :'tabs'
          }
def f(s, *args, **kwargs):
    """Shorthand for str.format: f('{} {n}', a, n=b) == '{} {n}'.format(a, n=b)."""
    return s.format(*args, **kwargs)
def log(msg='', *args, **kwargs):
    """Format msg (only when format args are given) and emit it through the
    shared Tr tracer, lazily creating the singleton on first use."""
    text = msg.format(*args, **kwargs) if (args or kwargs) else msg
    if Tr.tr is None:
        Tr.tr = Tr()
    return Tr.tr.log(text)
class Tr :
    tr=None
    """ Tracer.
        Main (only) public method: log(text) - writes the given text to the log.
        Controlled via commands embedded in the strings being logged.
        Commands:
            >>      Increase the indent of future lines (while the returned object is alive)
            (:)     Start timing a new nested period, finish when the returned object dies
            (==     Start timing a new nested period
            ==>     Report the duration of the last period
            ==)     Report the duration of the last period and finish timing it
            =}}     Cancel all timings
        A log call containing the >> command (increase indent) returns an object
        which decreases the indent when it is destroyed.
    """
    sec_digs = 2 # Seconds display precision: number of fractional digits
    se_fmt = ''
    mise_fmt = ''
    homise_fmt = ''
    def __init__(self, log_to_file=None) :
        # Object fields
        self.gap = '' # Current indent (tab characters)
        self.tm = perf_counter() # Timestamp of tracer start
        self.stms = [] # Start timestamps of nested special periods
        if log_to_file:
            logging.basicConfig( filename=log_to_file
                                ,filemode='w'
                                ,level=logging.DEBUG
                                ,format='%(message)s'
                                ,datefmt='%H:%M:%S'
                                ,style='%')
        else: # to stdout
            logging.basicConfig( stream=sys.stdout
                                ,level=logging.DEBUG
                                ,format='%(message)s'
                                ,datefmt='%H:%M:%S'
                                ,style='%')
    # Tr()
    def __del__(self):
        logging.shutdown()
    class TrLiver :
        cnt = 0
        """ Automatically shrinks the indent (gap) when destroyed.
            Reports its own lifetime."""
        def __init__(self, tr, ops) :
            # Object fields
            self.tr = tr
            self.ops= ops
            self.tm = 0
            self.nm = Tr.TrLiver.cnt
            if '(:)' in self.ops :
                # Start timing a new interval
                self.tm = perf_counter()
        def log(self, msg='') :
            if '(:)' in self.ops :
                msg = '{}(:)=[{}]{}'.format( self.nm, Tr.format_tm( perf_counter() - self.tm ), msg )
            logging.debug( self.tr.format_msg(msg, ops='') )
        def __del__(self) :
            #pass; logging.debug('in del')
            if '(:)' in self.ops :
                # Report the lifetime of this timing object
                msg = '{}(:)=[{}]'.format( self.nm, Tr.format_tm( perf_counter() - self.tm ) )
                logging.debug( self.tr.format_msg(msg, ops='') )
            if '>>' in self.ops :
                self.tr.gap = self.tr.gap[:-1]
    def log(self, msg='') :
        if '(:)' in msg :
            Tr.TrLiver.cnt += 1
            msg = msg.replace( '(:)', '{}(:)'.format(Tr.TrLiver.cnt) )
        logging.debug( self.format_msg(msg) )
        if '>>' in msg :
            self.gap = self.gap + c9
        # Create an object which will shrink the gap when it is destroyed
        if '>>' in msg or '(:)' in msg:
            return Tr.TrLiver(self,('>>' if '>>' in msg else '')+('(:)' if '(:)' in msg else ''))
        # return Tr.TrLiver(self,iif('>>' in msg,'>>','')+iif('(:)' in msg,'(:)',''))
        else :
            return self
    # Tr.log
    # def format_msg(self, msg, dpth=2, ops='+fun:ln +wait==') :
    def format_msg(self, msg, dpth=3, ops='+fun:ln +wait==') :
        if '(==' in msg :
            # Start timing a new interval
            self.stms = self.stms + [perf_counter()]
            msg = msg.replace( '(==', '(==[' + Tr.format_tm(0) + ']' )
        if '+fun:ln' in ops :
            frCaller= inspect.stack()[dpth] # 0-format_msg, 1-Tr.log|Tr.TrLiver, 2-log, 3-need func
            try:
                cls = frCaller[0].f_locals['self'].__class__.__name__ + '.'
            except:
                cls = ''
            fun = (cls + frCaller[3]).replace('.__init__','()')
            ln = frCaller[2]
            msg = '[{}]{}{}:{} '.format( Tr.format_tm( perf_counter() - self.tm ), self.gap, fun, ln ) + msg
        else :
            msg = '[{}]{}'.format( Tr.format_tm( perf_counter() - self.tm ), self.gap ) + msg
        if '+wait==' in ops :
            if ( '==)' in msg or '==>' in msg ) and len(self.stms)>0 :
                # Finish/continue timing the last interval and report its duration
                sign = '==)' if '==)' in msg else '==>'
                # sign = icase( '==)' in msg, '==)', '==>' )
                stm = '[{}]'.format( Tr.format_tm( perf_counter() - self.stms[-1] ) )
                msg = msg.replace( sign, sign+stm )
                if '==)' in msg :
                    del self.stms[-1]
        if '=}}' in msg :
            # Cancel all timings
            self.stms = []
        return msg.replace('¬',c9).replace('¶',c10)
    # Tr.format
    @staticmethod
    def format_tm(secs) :
        """ Convert a number of seconds into the form 12h34'56.78" """
        if 0==len(Tr.se_fmt) :
            # Lazily build the format strings from the configured precision
            Tr.se_fmt = '{:'+str(3+Tr.sec_digs)+'.'+str(Tr.sec_digs)+'f}"'
            Tr.mise_fmt = "{:2d}'"+Tr.se_fmt
            Tr.homise_fmt = "{:2d}h"+Tr.mise_fmt
        h = int( secs / 3600 )
        secs = secs % 3600
        m = int( secs / 60 )
        s = secs % 60
        return Tr.se_fmt.format(s) \
                if 0==h+m else \
               Tr.mise_fmt.format(m,s) \
                if 0==h else \
               Tr.homise_fmt.format(h,m,s)
        # return icase( 0==h+m, Tr.se_fmt.format(s)
        #             , 0==h,   Tr.mise_fmt.format(m,s)
        #             ,         Tr.homise_fmt.format(h,m,s) )
    # Tr.format_tm
# Tr
def get_translation(plug_file):
    r""" Return the i18n translate function _('') for a plugin.
    Full i18n cycle:
        1. Every GUI string in the code is wrapped as
                _('')
        2. Strings are extracted from the code into
                lang/messages.pot
           by running
                python.exe <python-root>\Tools\i18n\pygettext.py -p lang <plugin>.py
        3. Poedit (or a similar tool) creates
                <module>\lang\ru_RU\LC_MESSAGES\<module>.po
           from lang/messages.pot (cmd "Update from POT"),
           lets you translate all the "strings",
           and saves (cmd "Save")
                <module>\lang\ru_RU\LC_MESSAGES\<module>.mo
        4. The .mo file may also be placed in
                CudaText\data\langpy\ru_RU\LC_MESSAGES\<module>.mo
           That dir is searched first.
        5. get_translation uses the .mo file to implement
                _('')
    """
    lng = app.app_proc(app.PROC_GET_LANG, '')
    plug_dir = os.path.dirname(plug_file)
    plug_mod = os.path.basename(plug_dir)
    # Search order: global CudaText langpy dir first, then the plugin's own lang dir
    search_dirs = [
        app.app_path(app.APP_DIR_DATA)+os.sep+'langpy',
        plug_dir+os.sep+'lang',
    ]
    translate = lambda s: s  # identity fallback when no .mo is found
    for lng_dir in search_dirs:
        mo_file = lng_dir+'/{}/LC_MESSAGES/{}.mo'.format(lng, plug_mod)
        if not os.path.isfile(mo_file):
            continue
        trans = gettext.translation(plug_mod, lng_dir, languages=[lng])
        translate = trans.gettext
        trans.install()
        break
    return translate
#def get_translation
def get_desktop_environment():
    """Best-effort detection of the current desktop environment.
    Returns "win", "mac", a known session name (kde, gnome, xfce4, lxde, ...),
    or "unknown".
    From http://stackoverflow.com/questions/2035657/what-is-my-current-desktop-environment
     and http://ubuntuforums.org/showthread.php?t=652320
     and http://ubuntuforums.org/showthread.php?t=1139057
    """
    if sys.platform in ["win32", "cygwin"]:
        return "win"
    if sys.platform == "darwin":
        return "mac"
    # Most likely either a POSIX system or something not much common
    session = os.environ.get("DESKTOP_SESSION")
    if session is not None:
        # easier to match if we don't have to deal with character cases
        session = session.lower()
        if session in ["gnome","unity", "cinnamon", "mate", "xfce4", "lxde", "fluxbox",
                       "blackbox", "openbox", "icewm", "jwm", "afterstep","trinity", "kde"]:
            return session
        ## Special cases ##
        # Canonical sets $DESKTOP_SESSION to Lubuntu rather than LXDE if using LXDE.
        # There is no guarantee that they will not do the same with the other desktop environments.
        if "xfce" in session or session.startswith("xubuntu"):
            return "xfce4"
        prefix_map = (("ubuntu", "unity"),
                      ("lubuntu", "lxde"),
                      ("kubuntu", "kde"),
                      ("razor", "razor-qt"),      # e.g. razorkwin
                      ("wmaker", "windowmaker"))  # e.g. wmaker-common
        for prefix, env_name in prefix_map:
            if session.startswith(prefix):
                return env_name
    if os.environ.get('KDE_FULL_SESSION') == 'true':
        return "kde"
    elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
        if not "deprecated" in os.environ.get('GNOME_DESKTOP_SESSION_ID'):
            return "gnome2"
    #From http://ubuntuforums.org/showthread.php?t=652320
    elif is_running("xfce-mcs-manage"):
        return "xfce4"
    elif is_running("ksmserver"):
        return "kde"
    return "unknown"
def is_running(process):
    """Return True if a line of the system process list matches the regex *process*.

    Lists processes via `ps axw` (POSIX) or, when "ps" is unavailable,
    `tasklist /v` (Windows), and applies re.search(process, ...) to every
    output line.

    From http://www.bloggerpolis.com/2011/05/how-to-check-if-a-process-is-running-using-python/
     and http://richarddingwall.name/2009/06/18/windows-equivalents-of-ps-and-kill-commands/
    """
    try: #Linux/Unix
        lister = subprocess.Popen(["ps", "axw"], stdout=subprocess.PIPE)
    except OSError: #Windows ("ps" executable not found)
        lister = subprocess.Popen(["tasklist", "/v"], stdout=subprocess.PIPE)
    # communicate() drains the pipe, waits for the child and closes the fd;
    # the original iterated lister.stdout and returned mid-stream, leaking the
    # pipe fd and leaving a zombie process behind.
    out, _ = lister.communicate()
    # NOTE: match against str(raw_bytes) (i.e. "b'...'") to preserve the
    # original matching semantics for byte lines.
    return any(re.search(process, str(line)) for line in out.splitlines())
# Per-desktop-environment vertical "fit" offsets (px): how much to shift a
# control of the given type so its inner text aligns with a label's text.
# Keys are values returned by get_desktop_environment(); consumed by
# fit_top_by_env().
ENV2FITS= {'win':
            {'check' :-2
            ,'edit' :-3
            ,'button' :-4
            ,'combo_ro' :-4
            ,'combo' :-3
            ,'checkbutton':-4
            ,'linklabel' : 0
            ,'spinedit' :-3
            }
          ,'unity':
            {'check' :-3
            ,'edit' :-5
            ,'button' :-4
            ,'combo_ro' :-5
            ,'combo' :-6
            ,'checkbutton':-3
            ,'linklabel' : 0
            ,'spinedit' :-5
            }
          ,'mac':
            {'check' :-1
            ,'edit' :-3
            ,'button' :-3
            ,'combo_ro' :-2
            ,'combo' :-3
            ,'checkbutton':-2
            ,'linklabel' : 0
            ,'spinedit' : 0 ##??
            }
          }
# Memoization cache for fit_top_by_env(): {(what_tp, base_tp): fit_px}
fit_top_by_env__cash = {}
def fit_top_by_env__clear():
    """Reset the fit_top_by_env() memoization cache (e.g. after options change)."""
    global fit_top_by_env__cash
    fit_top_by_env__cash = {}
def fit_top_by_env(what_tp, base_tp='label'):
    """ Get the "fitting" (px) to add to the top of the first control so its
        inner text vertically aligns with the text of the second control
        (a label by default).
        The fittings rely on the platform: win, unix(kde,gnome,...), mac.
        Values come from ENV2FITS (overridable via user options
        'dlg_wrapper_fit_va_for_<type>') and are memoized in
        fit_top_by_env__cash.
    """
    if what_tp == base_tp:
        return 0
    key = (what_tp, base_tp)
    if key in fit_top_by_env__cash:
        return fit_top_by_env__cash[key]
    env = get_desktop_environment()
    env_fits = ENV2FITS.get(env, ENV2FITS.get('win'))
    if base_tp == 'label':
        # Direct fit against a label, with a user-option override
        fit = apx.get_opt('dlg_wrapper_fit_va_for_'+what_tp, env_fits.get(what_tp, 0))
    else:
        # Fit between two non-label controls = difference of their label fits
        fit = fit_top_by_env(what_tp) - fit_top_by_env(base_tp)
    return fit_top_by_env__cash.setdefault(key, fit)
#def fit_top_by_env
def dlg_wrapper(title, w, h, cnts, in_vals={}, focus_cid=None):
    """ Wrapper for dlg_custom.
        Params
            title, w, h     Title, Width, Height
            cnts            List of static control properties
                            [{cid:'*', tp:'*', t:1,l:1,w:1,r:1,b;1,h:1, cap:'*', hint:'*', en:'0', props:'*', items:[*], valign_to:'cid'}]
                            cid     (opt)(str) C(ontrol)id. Need only for buttons and controls with value (and for tid)
                            tp      (str) Control types from wiki or short names
                            t       (opt)(int) Top
                            tid     (opt)(str) Ref to other control cid for horz-align text in both controls
                            l       (int) Left
                            r,b,w,h (opt)(int) Position. w>>>r=l+w, h>>>b=t+h, b can be omitted
                            cap     (str) Caption for labels and buttons
                            hint    (opt)(str) Tooltip
                            en      (opt)('0'|'1'|True|False) Enabled-state
                            props   (opt)(str) See wiki
                            act     (opt)('0'|'1'|True|False) Will close dlg when changed
                            items   (str|list) String as in wiki. List structure by types:
                                        [v1,v2,]     For combo, combo_ro, listbox, checkgroup, radiogroup, checklistbox
                                        (head, body) For listview, checklistview
                                            head    [(cap,width),(cap,width),]
                                            body    [[r0c0,r0c1,],[r1c0,r1c1,],[r2c0,r2c1,],]
            in_vals         Dict of start values for some controls
                                {'cid':val}
            focus_cid       (opt) Control cid for start focus
        Return
            btn_cid         Clicked/changed control cid
            {'cid':val}     Dict of new values for the same (as in_vals) controls
                                Format of values is same too.
            [cid]           List of controls with changed values
        Short names for types
            lb      label
            ln-lb   linklabel
            ed      edit
            sp-ed   spinedit
            me      memo
            bt      button
            rd      radio
            ch      check
            ch-bt   checkbutton
            ch-gp   checkgroup
            rd-gp   radiogroup
            cb      combo
            cb-ro   combo_ro
            lbx     listbox
            ch-lbx  checklistbox
            lvw     listview
            ch-lvw  checklistview
        Example.
            def ask_number(ask, def_val):
                cnts=[dict(        tp='lb',tid='v',l=3 ,w=70,cap=ask)
                     ,dict(cid='v',tp='ed',t=3    ,l=73,w=70)
                     ,dict(cid='!',tp='bt',t=45   ,l=3 ,w=70,cap=_('OK'),props='1')
                     ,dict(cid='-',tp='bt',t=45   ,l=73,w=70,cap=_('Cancel'))]
                vals={'v':def_val}
                while True:
                    btn,vals=dlg_wrapper('Example',146,75,cnts,vals,'v')
                    if btn is None or btn=='-': return def_val
                    if not re.match(r'\d+$', vals['v']): continue
                    return vals['v']
    """
    pass; #log('in_vals={}',pformat(in_vals, width=120))
    # Map cid -> index in cnts (only controls that declared a cid)
    cid2i = {cnt['cid']:i for i,cnt in enumerate(cnts) if 'cid' in cnt}
    if True:
        # Checks: every tid and every in_vals key must refer to an existing cid
        no_tids = {cnt['tid'] for cnt in cnts if 'tid' in cnt and cnt['tid'] not in cid2i}
        if no_tids:
            raise Exception(f('No cid(s) for tid(s): {}', no_tids))
        no_vids = {cid for cid in in_vals if cid not in cid2i}
        if no_vids:
            raise Exception(f('No cid(s) for vals: {}', no_vids))
    # Serialize each control into one chr(1)-joined property string for dlg_custom
    ctrls_l = []
    for cnt in cnts:
        tp = cnt['tp']
        tp = REDUCTS.get(tp, tp)
        if tp=='--':
            # Horz-line: emulated with a long disabled label of dashes
            t = cnt.get('t')
            l = cnt.get('l', 0) # def: from DlgLeft
            r = cnt.get('r', l+cnt.get('w', w)) # def: to DlgRight
            lst = ['type=label']
            lst+= ['cap='+'—'*1000]
            lst+= ['en=0']
            lst+= ['pos={l},{t},{r},0'.format(l=l,t=t,r=r)]
            ctrls_l+= [chr(1).join(lst)]
            continue#for cnt
        lst = ['type='+tp]
        # Simple props
        for k in ['cap', 'hint', 'props']:
            if k in cnt:
                lst += [k+'='+str(cnt[k])]
        # Props with preparation
        # Position:
        #   t[op] or tid, l[eft] required
        #   w[idth] >>> r[ight ]=l+w
        #   h[eight] >>> b[ottom]=t+h
        #   b dont need for buttons, edit, labels
        l = cnt['l']
        t = cnt.get('t', 0)
        if 'tid' in cnt:
            # cid for horz-align text
            bs_cnt = cnts[cid2i[cnt['tid']]]
            bs_tp = bs_cnt['tp']
            t = bs_cnt['t'] + fit_top_by_env(tp, REDUCTS.get(bs_tp, bs_tp))
            # t = bs_cnt['t'] + top_plus_for_os(tp, REDUCTS.get(bs_tp, bs_tp))
        r = cnt.get('r', l+cnt.get('w', 0))
        b = cnt.get('b', t+cnt.get('h', 0))
        lst += ['pos={l},{t},{r},{b}'.format(l=l,t=t,r=r,b=b)]
        if 'en' in cnt:
            val = cnt['en']
            lst += ['en='+('1' if val in [True, '1'] else '0')]
        if 'items' in cnt:
            items = cnt['items']
            if isinstance(items, str):
                pass
            elif tp in ['listview', 'checklistview']:
                # For listview, checklistview: "\t"-separated items.
                #   first item is column headers: title1+"="+size1 + "\r" + title2+"="+size2 + "\r" +...
                #   other items are data: cell1+"\r"+cell2+"\r"+...
                # ([(hd,wd)], [[cells],[cells],])
                items = '\t'.join(['\r'.join(['='.join((hd,sz)) for hd,sz in items[0]])]
                                 +['\r'.join(row) for row in items[1]]
                                 )
            else:
                # For combo, combo_ro, listbox, checkgroup, radiogroup, checklistbox: "\t"-separated lines
                items = '\t'.join(items)
            lst+= ['items='+items]
        # Prepare val: serialize the start value according to the control type
        if cnt.get('cid') in in_vals:
            in_val = in_vals[cnt['cid']]
            if False:pass
            elif tp in ['check', 'radio', 'checkbutton'] and isinstance(in_val, bool):
                # For check, radio, checkbutton: value "0"/"1"
                in_val = '1' if in_val else '0'
            elif tp=='memo':
                # For memo: "\t"-separated lines (in lines "\t" must be replaced to chr(2))
                if isinstance(in_val, list):
                    in_val = '\t'.join([v.replace('\t', chr(2)) for v in in_val])
                else:
                    in_val = in_val.replace('\t', chr(2)).replace('\r\n','\n').replace('\r','\n').replace('\n','\t')
            elif tp=='checkgroup' and isinstance(in_val, list):
                # For checkgroup: ","-separated checks (values "0"/"1")
                in_val = ','.join(in_val)
            elif tp in ['checklistbox', 'checklistview'] and isinstance(in_val, tuple):
                # For checklistbox, checklistview: index+";"+checks
                in_val = ';'.join( (str(in_val[0]), ','.join( in_val[1]) ) )
            lst+= ['val='+str(in_val)]
        if 'act' in cnt: # must be last in lst
            val = cnt['act']
            lst += ['act='+('1' if val in [True, '1'] else '0')]
        pass; #log('lst={}',lst)
        ctrls_l+= [chr(1).join(lst)]
    #for cnt
    pass; #log('ok ctrls_l={}',pformat(ctrls_l, width=120))
    ans = app.dlg_custom(title, w, h, '\n'.join(ctrls_l), cid2i.get(focus_cid, -1))
    if ans is None: return None, None, None # btn_cid, {cid:v}, [cid]
    btn_i, \
    vals_ls = ans[0], ans[1].splitlines()
    aid = cnts[btn_i]['cid']
    # Parse output values: deserialize each back into the type of its start value
    an_vals = {cid:vals_ls[cid2i[cid]] for cid in in_vals}
    for cid in an_vals:
        cnt = cnts[cid2i[cid]]
        tp = cnt['tp']
        tp = REDUCTS.get(tp, tp)
        in_val = in_vals[cid]
        an_val = an_vals[cid]
        if False:pass
        elif tp=='memo':
            # For memo: "\t"-separated lines (in lines "\t" must be replaced to chr(2))
            if isinstance(in_val, list):
                an_val = [v.replace(chr(2), '\t') for v in an_val.split('\t')]
                #in_val = '\t'.join([v.replace('\t', chr(2)) for v in in_val])
            else:
                an_val = an_val.replace('\t','\n').replace(chr(2), '\t')
                #in_val = in_val.replace('\t', chr(2)).replace('\r\n','\n').replace('\r','\n').replace('\n','\t')
        elif tp=='checkgroup' and isinstance(in_val, list):
            # For checkgroup: ","-separated checks (values "0"/"1")
            an_val = an_val.split(',')
            #in_val = ','.join(in_val)
        elif tp in ['checklistbox', 'checklistview'] and isinstance(in_val, tuple):
            an_val = an_val.split(';')
            an_val = (an_val[0], an_val[1].split(','))
            #in_val = ';'.join(in_val[0], ','.join(in_val[1]))
        elif isinstance(in_val, bool):
            an_val = an_val=='1'
        elif tp=='listview':
            # For listview: empty answer means no selected row -> -1
            an_val = -1 if an_val=='' else int(an_val)
        else:
            # Coerce back to the type of the start value (int, str, ...)
            an_val = type(in_val)(an_val)
        an_vals[cid] = an_val
    #for cid
    return aid, an_vals, [cid for cid in in_vals if in_vals[cid]!=an_vals[cid]]
#def dlg_wrapper
def get_hotkeys_desc(cmd_id, ext_id=None, keys_js=None, def_ans=''):
""" Read one or two hotkeys for command
cmd_id [+ext_id]
from
settings\keys.json
Return
def_ans If no hotkeys for the command
'Ctrl+Q'
'Ctrl+Q * Ctrl+W' If one hotkey for the command
'Ctrl+Q/Ctrl+T'
'Ctrl+Q * Ctrl+W/Ctrl+T' If two hotkeys for the command
"""
if keys_js is None:
keys_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+'keys.json'
keys_js = apx._json_loads(open(keys_json).read()) if os.path.exists(keys_json) else {}
cmd_id = f('{},{}', cmd_id, ext_id) if ext_id else cmd_id
if cmd_id not in keys_js:
return def_ans
cmd_keys= keys_js[cmd_id]
desc = '/'.join([' * '.join(cmd_keys.get('s1', []))
,' * '.join(cmd_keys.get('s2', []))
]).strip('/')
return desc
#def get_hotkeys_desc
if __name__ == '__main__' : # Tests
def test_ask_number(ask, def_val):
cnts=[dict( tp='lb',tid='v',l=3 ,w=70,cap=ask)
,dict(cid='v',tp='ed',t=3 ,l=73,w=70)
,dict(cid='!',tp='bt',t=45 ,l=3 ,w=70,cap=_('OK'),props='1')
,dict(cid='-',tp='bt',t=45 ,l=73,w=70,cap=_('Cancel'))]
vals={'v':def_val}
while True:
btn,vals=dlg_wrapper('Example',146,75,cnts,vals,'v')
if btn is None or btn=='-': return def_val
if not re.match(r'\d+$', vals['v']): continue
return vals['v']
ask_number('ask_____________', '____smth')
| [
"''' Lib for Plugin\n",
"Authors:\n",
" Andrey Kvichansky (kvichans on github.com)\n",
"Version:\n",
" '1.0.3 2018-02-01'\n",
"Content\n",
" log Logger with timing\n",
" get_translation i18n\n",
" dlg_wrapper Wrapper for dlg_custom: pack/unpack values, h-align controls\n",
"ToDo: (see end of file)\n",
"'''\n",
"\n",
"import sys, os, gettext, logging, inspect, re, subprocess\n",
"from time import perf_counter\n",
"import cudatext as app\n",
"import cudax_lib as apx\n",
"\n",
"pass; # Logging\n",
"pass; from pprint import pformat\n",
"\n",
"GAP = 5\n",
"c13,c10,c9 = chr(13),chr(10),chr(9)\n",
"REDUCTS = {'lb' :'label'\n",
" , 'ln-lb' :'linklabel'\n",
" , 'ed' :'edit'\n",
" , 'sp-ed' :'spinedit'\n",
" , 'me' :'memo'\n",
" , 'bt' :'button'\n",
" , 'rd' :'radio'\n",
" , 'ch' :'check'\n",
" , 'ch-bt' :'checkbutton'\n",
" , 'ch-gp' :'checkgroup'\n",
" , 'rd-gp' :'radiogroup'\n",
" , 'cb' :'combo'\n",
" , 'cb-ro' :'combo_ro'\n",
" , 'lbx' :'listbox'\n",
" , 'ch-lbx' :'checklistbox'\n",
" , 'lvw' :'listview'\n",
" , 'ch-lvw' :'checklistview'\n",
" , 'tabs' :'tabs'\n",
" }\n",
"\n",
"def f(s, *args, **kwargs):return s.format(*args, **kwargs)\n",
"\n",
"def log(msg='', *args, **kwargs):\n",
" if args or kwargs:\n",
" msg = msg.format(*args, **kwargs)\n",
" if Tr.tr is None:\n",
" Tr.tr=Tr()\n",
" return Tr.tr.log(msg)\n",
" \n",
"class Tr :\n",
" tr=None\n",
" \"\"\" Трассировщик.\n",
" Основной (единственный) метод: log(строка) - выводит указанную строку в лог.\n",
" Управляется через команды в строках для вывода.\n",
" Команды:\n",
" >> Увеличить сдвиг при выводе будущих строк (пока жив возвращенный объект) \n",
" (:) Начать замер нового вложенного периода, закончить когда умрет возвращенный объект \n",
" (== Начать замер нового вложенного периода \n",
" ==> Вывести длительность последнего периода \n",
" ==) Вывести длительность последнего периода и закончить его замер\n",
" =}} Отменить все замеры\n",
" Вызов log с командой >> (увеличить сдвиг) возвращает объект, \n",
" который при уничтожении уменьшит сдвиг \n",
" \"\"\"\n",
" sec_digs = 2 # Точность отображения секунд, кол-во дробных знаков\n",
" se_fmt = ''\n",
" mise_fmt = ''\n",
" homise_fmt = ''\n",
" def __init__(self, log_to_file=None) :\n",
" # Поля объекта\n",
" self.gap = '' # Отступ\n",
" self.tm = perf_counter() # Отметка времени о запуске\n",
" self.stms = [] # Отметки времени о начале замера спец.периода\n",
"\n",
" if log_to_file:\n",
" logging.basicConfig( filename=log_to_file\n",
" ,filemode='w'\n",
" ,level=logging.DEBUG\n",
" ,format='%(message)s'\n",
" ,datefmt='%H:%M:%S'\n",
" ,style='%')\n",
" else: # to stdout\n",
" logging.basicConfig( stream=sys.stdout\n",
" ,level=logging.DEBUG\n",
" ,format='%(message)s'\n",
" ,datefmt='%H:%M:%S'\n",
" ,style='%')\n",
" # Tr()\n",
" def __del__(self):\n",
" logging.shutdown()\n",
"\n",
" class TrLiver :\n",
" cnt = 0\n",
" \"\"\" Автоматически сокращает gap при уничножении \n",
" Показывает время своей жизни\"\"\"\n",
" def __init__(self, tr, ops) :\n",
" # Поля объекта\n",
" self.tr = tr\n",
" self.ops= ops\n",
" self.tm = 0\n",
" self.nm = Tr.TrLiver.cnt\n",
" if '(:)' in self.ops :\n",
" # Начать замер нового интервала\n",
" self.tm = perf_counter()\n",
" def log(self, msg='') :\n",
" if '(:)' in self.ops :\n",
" msg = '{}(:)=[{}]{}'.format( self.nm, Tr.format_tm( perf_counter() - self.tm ), msg ) \n",
" logging.debug( self.tr.format_msg(msg, ops='') )\n",
" def __del__(self) :\n",
" #pass; logging.debug('in del')\n",
" if '(:)' in self.ops :\n",
" msg = '{}(:)=[{}]'.format( self.nm, Tr.format_tm( perf_counter() - self.tm ) ) \n",
" logging.debug( self.tr.format_msg(msg, ops='') )\n",
" if '>>' in self.ops :\n",
" self.tr.gap = self.tr.gap[:-1]\n",
" \n",
" def log(self, msg='') :\n",
" if '(:)' in msg :\n",
" Tr.TrLiver.cnt += 1\n",
" msg = msg.replace( '(:)', '{}(:)'.format(Tr.TrLiver.cnt) ) \n",
" logging.debug( self.format_msg(msg) )\n",
" if '>>' in msg :\n",
" self.gap = self.gap + c9\n",
" # Создаем объект, который при разрушении сократит gap\n",
" if '>>' in msg or '(:)' in msg:\n",
" return Tr.TrLiver(self,('>>' if '>>' in msg else '')+('(:)' if '(:)' in msg else ''))\n",
" # return Tr.TrLiver(self,iif('>>' in msg,'>>','')+iif('(:)' in msg,'(:)',''))\n",
" else :\n",
" return self \n",
" # Tr.log\n",
" \n",
"# def format_msg(self, msg, dpth=2, ops='+fun:ln +wait==') :\n",
" def format_msg(self, msg, dpth=3, ops='+fun:ln +wait==') :\n",
" if '(==' in msg :\n",
" # Начать замер нового интервала\n",
" self.stms = self.stms + [perf_counter()]\n",
" msg = msg.replace( '(==', '(==[' + Tr.format_tm(0) + ']' )\n",
"\n",
" if '+fun:ln' in ops :\n",
" frCaller= inspect.stack()[dpth] # 0-format_msg, 1-Tr.log|Tr.TrLiver, 2-log, 3-need func\n",
" try:\n",
" cls = frCaller[0].f_locals['self'].__class__.__name__ + '.'\n",
" except:\n",
" cls = ''\n",
" fun = (cls + frCaller[3]).replace('.__init__','()')\n",
" ln = frCaller[2]\n",
" msg = '[{}]{}{}:{} '.format( Tr.format_tm( perf_counter() - self.tm ), self.gap, fun, ln ) + msg\n",
" else : \n",
" msg = '[{}]{}'.format( Tr.format_tm( perf_counter() - self.tm ), self.gap ) + msg\n",
"\n",
" if '+wait==' in ops :\n",
" if ( '==)' in msg or '==>' in msg ) and len(self.stms)>0 :\n",
" # Закончить/продолжить замер последнего интервала и вывести его длительность\n",
" sign = '==)' if '==)' in msg else '==>'\n",
" # sign = icase( '==)' in msg, '==)', '==>' )\n",
" stm = '[{}]'.format( Tr.format_tm( perf_counter() - self.stms[-1] ) )\n",
" msg = msg.replace( sign, sign+stm )\n",
" if '==)' in msg :\n",
" del self.stms[-1] \n",
"\n",
" if '=}}' in msg :\n",
" # Отменить все замеры\n",
" self.stms = []\n",
" \n",
" return msg.replace('¬',c9).replace('¶',c10)\n",
" # Tr.format\n",
"\n",
" @staticmethod\n",
" def format_tm(secs) :\n",
" \"\"\" Конвертация количества секунд в 12h34'56.78\" \"\"\"\n",
" if 0==len(Tr.se_fmt) :\n",
" Tr.se_fmt = '{:'+str(3+Tr.sec_digs)+'.'+str(Tr.sec_digs)+'f}\"'\n",
" Tr.mise_fmt = \"{:2d}'\"+Tr.se_fmt\n",
" Tr.homise_fmt = \"{:2d}h\"+Tr.mise_fmt\n",
" h = int( secs / 3600 )\n",
" secs = secs % 3600\n",
" m = int( secs / 60 )\n",
" s = secs % 60\n",
" return Tr.se_fmt.format(s) \\\n",
" if 0==h+m else \\\n",
" Tr.mise_fmt.format(m,s) \\\n",
" if 0==h else \\\n",
" Tr.homise_fmt.format(h,m,s)\n",
" # return icase( 0==h+m, Tr.se_fmt.format(s)\n",
" # , 0==h, Tr.mise_fmt.format(m,s)\n",
" # , Tr.homise_fmt.format(h,m,s) )\n",
" # Tr.format_tm\n",
" # Tr\n",
"\n",
"def get_translation(plug_file):\n",
" ''' Part of i18n.\n",
" Full i18n-cycle:\n",
" 1. All GUI-string in code are used in form\n",
" _('')\n",
" 2. These string are extracted from code to\n",
" lang/messages.pot\n",
" with run\n",
" python.exe <python-root>\\Tools\\i18n\\pygettext.py -p lang <plugin>.py\n",
" 3. Poedit (or same program) create\n",
" <module>\\lang\\ru_RU\\LC_MESSAGES\\<module>.po\n",
" from (cmd \"Update from POT\")\n",
" lang/messages.pot\n",
" It allows to translate all \"strings\"\n",
" It creates (cmd \"Save\")\n",
" <module>\\lang\\ru_RU\\LC_MESSAGES\\<module>.mo\n",
" 4. <module>.mo can be placed also in dir\n",
" CudaText\\data\\langpy\\ru_RU\\LC_MESSAGES\\<module>.mo\n",
" The dir is used first.\n",
" 5. get_translation uses the file to realize\n",
" _('')\n",
" '''\n",
" lng = app.app_proc(app.PROC_GET_LANG, '')\n",
" plug_dir= os.path.dirname(plug_file)\n",
" plug_mod= os.path.basename(plug_dir)\n",
" lng_dirs= [\n",
" app.app_path(app.APP_DIR_DATA) +os.sep+'langpy',\n",
" plug_dir +os.sep+'lang',\n",
" ]\n",
" _ = lambda x: x\n",
" pass; #return _\n",
" for lng_dir in lng_dirs:\n",
" lng_mo = lng_dir+'/{}/LC_MESSAGES/{}.mo'.format(lng, plug_mod)\n",
" if os.path.isfile(lng_mo):\n",
" t = gettext.translation(plug_mod, lng_dir, languages=[lng])\n",
" _ = t.gettext\n",
" t.install()\n",
" break\n",
" return _\n",
" #def get_translation\n",
"\n",
"def get_desktop_environment():\n",
" #From http://stackoverflow.com/questions/2035657/what-is-my-current-desktop-environment\n",
" # and http://ubuntuforums.org/showthread.php?t=652320\n",
" # and http://ubuntuforums.org/showthread.php?t=652320\n",
" # and http://ubuntuforums.org/showthread.php?t=1139057\n",
" if sys.platform in [\"win32\", \"cygwin\"]:\n",
" return \"win\"\n",
" elif sys.platform == \"darwin\":\n",
" return \"mac\"\n",
" else: #Most likely either a POSIX system or something not much common\n",
" desktop_session = os.environ.get(\"DESKTOP_SESSION\")\n",
" if desktop_session is not None: #easier to match if we doesn't have to deal with character cases\n",
" desktop_session = desktop_session.lower()\n",
" if desktop_session in [\"gnome\",\"unity\", \"cinnamon\", \"mate\", \"xfce4\", \"lxde\", \"fluxbox\", \n",
" \"blackbox\", \"openbox\", \"icewm\", \"jwm\", \"afterstep\",\"trinity\", \"kde\"]:\n",
" return desktop_session\n",
" ## Special cases ##\n",
" # Canonical sets $DESKTOP_SESSION to Lubuntu rather than LXDE if using LXDE.\n",
" # There is no guarantee that they will not do the same with the other desktop environments.\n",
" elif \"xfce\" in desktop_session or desktop_session.startswith(\"xubuntu\"):\n",
" return \"xfce4\"\n",
" elif desktop_session.startswith(\"ubuntu\"):\n",
" return \"unity\" \n",
" elif desktop_session.startswith(\"lubuntu\"):\n",
" return \"lxde\" \n",
" elif desktop_session.startswith(\"kubuntu\"): \n",
" return \"kde\" \n",
" elif desktop_session.startswith(\"razor\"): # e.g. razorkwin\n",
" return \"razor-qt\"\n",
" elif desktop_session.startswith(\"wmaker\"): # e.g. wmaker-common\n",
" return \"windowmaker\"\n",
" if os.environ.get('KDE_FULL_SESSION') == 'true':\n",
" return \"kde\"\n",
" elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):\n",
" if not \"deprecated\" in os.environ.get('GNOME_DESKTOP_SESSION_ID'):\n",
" return \"gnome2\"\n",
" #From http://ubuntuforums.org/showthread.php?t=652320\n",
" elif is_running(\"xfce-mcs-manage\"):\n",
" return \"xfce4\"\n",
" elif is_running(\"ksmserver\"):\n",
" return \"kde\"\n",
" return \"unknown\"\n",
"def is_running(process):\n",
" #From http://www.bloggerpolis.com/2011/05/how-to-check-if-a-process-is-running-using-python/\n",
" # and http://richarddingwall.name/2009/06/18/windows-equivalents-of-ps-and-kill-commands/\n",
" try: #Linux/Unix\n",
" s = subprocess.Popen([\"ps\", \"axw\"],stdout=subprocess.PIPE)\n",
" except: #Windows\n",
" s = subprocess.Popen([\"tasklist\", \"/v\"],stdout=subprocess.PIPE)\n",
" for x in s.stdout:\n",
" if re.search(process, str(x)):\n",
" return True\n",
" return False\n",
"\n",
"ENV2FITS= {'win':\n",
" {'check' :-2\n",
" ,'edit' :-3\n",
" ,'button' :-4\n",
" ,'combo_ro' :-4\n",
" ,'combo' :-3\n",
" ,'checkbutton':-4\n",
" ,'linklabel' : 0\n",
" ,'spinedit' :-3\n",
" }\n",
" ,'unity':\n",
" {'check' :-3\n",
" ,'edit' :-5\n",
" ,'button' :-4\n",
" ,'combo_ro' :-5\n",
" ,'combo' :-6\n",
" ,'checkbutton':-3\n",
" ,'linklabel' : 0\n",
" ,'spinedit' :-5\n",
" }\n",
" ,'mac':\n",
" {'check' :-1\n",
" ,'edit' :-3\n",
" ,'button' :-3\n",
" ,'combo_ro' :-2\n",
" ,'combo' :-3\n",
" ,'checkbutton':-2\n",
" ,'linklabel' : 0\n",
" ,'spinedit' : 0 ##??\n",
" }\n",
" }\n",
"fit_top_by_env__cash = {}\n",
"def fit_top_by_env__clear():\n",
" global fit_top_by_env__cash\n",
" fit_top_by_env__cash = {}\n",
"def fit_top_by_env(what_tp, base_tp='label'):\n",
" \"\"\" Get \"fitting\" to add to top of first control to vertical align inside text with text into second control.\n",
" The fittings rely to platform: win, unix(kde,gnome,...), mac\n",
" \"\"\"\n",
" if what_tp==base_tp:\n",
" return 0\n",
" if (what_tp, base_tp) in fit_top_by_env__cash:\n",
" pass; #log('cashed what_tp, base_tp={}',(what_tp, base_tp))\n",
" return fit_top_by_env__cash[(what_tp, base_tp)]\n",
" env = get_desktop_environment()\n",
" pass; #env = 'mac'\n",
" fit4lb = ENV2FITS.get(env, ENV2FITS.get('win'))\n",
" fit = 0\n",
" if base_tp=='label':\n",
" fit = apx.get_opt('dlg_wrapper_fit_va_for_'+what_tp, fit4lb.get(what_tp, 0))\n",
" else:\n",
" fit = fit_top_by_env(what_tp) - fit_top_by_env(base_tp)\n",
" pass; #log('what_tp, base_tp, fit={}',(what_tp, base_tp, fit))\n",
" return fit_top_by_env__cash.setdefault((what_tp, base_tp), fit)\n",
" #def fit_top_by_env\n",
"\n",
"def dlg_wrapper(title, w, h, cnts, in_vals={}, focus_cid=None):\n",
" \"\"\" Wrapper for dlg_custom. \n",
" Params\n",
" title, w, h Title, Width, Height \n",
" cnts List of static control properties\n",
" [{cid:'*', tp:'*', t:1,l:1,w:1,r:1,b;1,h:1, cap:'*', hint:'*', en:'0', props:'*', items:[*], valign_to:'cid'}]\n",
" cid (opt)(str) C(ontrol)id. Need only for buttons and conrols with value (and for tid)\n",
" tp (str) Control types from wiki or short names\n",
" t (opt)(int) Top\n",
" tid (opt)(str) Ref to other control cid for horz-align text in both controls\n",
" l (int) Left\n",
" r,b,w,h (opt)(int) Position. w>>>r=l+w, h>>>b=t+h, b can be omitted\n",
" cap (str) Caption for labels and buttons\n",
" hint (opt)(str) Tooltip\n",
" en (opt)('0'|'1'|True|False) Enabled-state\n",
" props (opt)(str) See wiki\n",
" act (opt)('0'|'1'|True|False) Will close dlg when changed\n",
" items (str|list) String as in wiki. List structure by types:\n",
" [v1,v2,] For combo, combo_ro, listbox, checkgroup, radiogroup, checklistbox\n",
" (head, body) For listview, checklistview \n",
" head [(cap,width),(cap,width),]\n",
" body [[r0c0,r0c1,],[r1c0,r1c1,],[r2c0,r2c1,],]\n",
" in_vals Dict of start values for some controls \n",
" {'cid':val}\n",
" focus (opt) Control cid for start focus\n",
" Return\n",
" btn_cid Clicked/changed control cid\n",
" {'cid':val} Dict of new values for the same (as in_vals) controls\n",
" Format of values is same too.\n",
" [cid] List of controls with changed values\n",
" Short names for types\n",
" lb label\n",
" ln-lb linklabel\n",
" ed edit\n",
" sp-ed spinedit\n",
" me memo\n",
" bt button\n",
" rd radio\n",
" ch check\n",
" ch-bt checkbutton\n",
" ch-gp checkgroup\n",
" rd-gp radiogroup\n",
" cb combo\n",
" cb-ro combo_ro\n",
" lbx listbox\n",
" ch-lbx checklistbox\n",
" lvw listview\n",
" ch-lvw checklistview\n",
" Example.\n",
" def ask_number(ask, def_val):\n",
" cnts=[dict( tp='lb',tid='v',l=3 ,w=70,cap=ask)\n",
" ,dict(cid='v',tp='ed',t=3 ,l=73,w=70)\n",
" ,dict(cid='!',tp='bt',t=45 ,l=3 ,w=70,cap=_('OK'),props='1')\n",
" ,dict(cid='-',tp='bt',t=45 ,l=73,w=70,cap=_('Cancel'))]\n",
" vals={'v':def_val}\n",
" while True:\n",
" btn,vals=dlg_wrapper('Example',146,75,cnts,vals,'v')\n",
" if btn is None or btn=='-': return def_val\n",
" if not re.match(r'\\d+$', vals['v']): continue\n",
" return vals['v']\n",
" \"\"\"\n",
" pass; #log('in_vals={}',pformat(in_vals, width=120))\n",
" cid2i = {cnt['cid']:i for i,cnt in enumerate(cnts) if 'cid' in cnt}\n",
" if True:\n",
" # Checks\n",
" no_tids = {cnt['tid'] for cnt in cnts if 'tid' in cnt and cnt['tid'] not in cid2i}\n",
" if no_tids:\n",
" raise Exception(f('No cid(s) for tid(s): {}', no_tids))\n",
" no_vids = {cid for cid in in_vals if cid not in cid2i}\n",
" if no_vids:\n",
" raise Exception(f('No cid(s) for vals: {}', no_vids))\n",
" ctrls_l = []\n",
" for cnt in cnts:\n",
" tp = cnt['tp']\n",
" tp = REDUCTS.get(tp, tp)\n",
" if tp=='--':\n",
" # Horz-line\n",
" t = cnt.get('t')\n",
" l = cnt.get('l', 0) # def: from DlgLeft\n",
" r = cnt.get('r', l+cnt.get('w', w)) # def: to DlgRight\n",
" lst = ['type=label']\n",
" lst+= ['cap='+'—'*1000]\n",
" lst+= ['en=0']\n",
" lst+= ['pos={l},{t},{r},0'.format(l=l,t=t,r=r)]\n",
" ctrls_l+= [chr(1).join(lst)]\n",
" continue#for cnt\n",
" \n",
" lst = ['type='+tp]\n",
" # Simple props\n",
" for k in ['cap', 'hint', 'props']:\n",
" if k in cnt:\n",
" lst += [k+'='+str(cnt[k])]\n",
" # Props with preparation\n",
" # Position:\n",
" # t[op] or tid, l[eft] required\n",
" # w[idth] >>> r[ight ]=l+w\n",
" # h[eight] >>> b[ottom]=t+h\n",
" # b dont need for buttons, edit, labels\n",
" l = cnt['l']\n",
" t = cnt.get('t', 0)\n",
" if 'tid' in cnt:\n",
" # cid for horz-align text\n",
" bs_cnt = cnts[cid2i[cnt['tid']]]\n",
" bs_tp = bs_cnt['tp']\n",
" t = bs_cnt['t'] + fit_top_by_env(tp, REDUCTS.get(bs_tp, bs_tp))\n",
"# t = bs_cnt['t'] + top_plus_for_os(tp, REDUCTS.get(bs_tp, bs_tp))\n",
" r = cnt.get('r', l+cnt.get('w', 0)) \n",
" b = cnt.get('b', t+cnt.get('h', 0)) \n",
" lst += ['pos={l},{t},{r},{b}'.format(l=l,t=t,r=r,b=b)]\n",
" if 'en' in cnt:\n",
" val = cnt['en']\n",
" lst += ['en='+('1' if val in [True, '1'] else '0')]\n",
"\n",
" if 'items' in cnt:\n",
" items = cnt['items']\n",
" if isinstance(items, str):\n",
" pass\n",
" elif tp in ['listview', 'checklistview']:\n",
" # For listview, checklistview: \"\\t\"-separated items.\n",
" # first item is column headers: title1+\"=\"+size1 + \"\\r\" + title2+\"=\"+size2 + \"\\r\" +...\n",
" # other items are data: cell1+\"\\r\"+cell2+\"\\r\"+...\n",
" # ([(hd,wd)], [[cells],[cells],])\n",
" items = '\\t'.join(['\\r'.join(['='.join((hd,sz)) for hd,sz in items[0]])]\n",
" +['\\r'.join(row) for row in items[1]]\n",
" )\n",
" else:\n",
" # For combo, combo_ro, listbox, checkgroup, radiogroup, checklistbox: \"\\t\"-separated lines\n",
" items = '\\t'.join(items)\n",
" lst+= ['items='+items]\n",
" \n",
" # Prepare val\n",
" if cnt.get('cid') in in_vals:\n",
" in_val = in_vals[cnt['cid']]\n",
" if False:pass\n",
" elif tp in ['check', 'radio', 'checkbutton'] and isinstance(in_val, bool):\n",
" # For check, radio, checkbutton: value \"0\"/\"1\" \n",
" in_val = '1' if in_val else '0'\n",
" elif tp=='memo':\n",
" # For memo: \"\\t\"-separated lines (in lines \"\\t\" must be replaced to chr(2)) \n",
" if isinstance(in_val, list):\n",
" in_val = '\\t'.join([v.replace('\\t', chr(2)) for v in in_val])\n",
" else:\n",
" in_val = in_val.replace('\\t', chr(2)).replace('\\r\\n','\\n').replace('\\r','\\n').replace('\\n','\\t')\n",
" elif tp=='checkgroup' and isinstance(in_val, list):\n",
" # For checkgroup: \",\"-separated checks (values \"0\"/\"1\") \n",
" in_val = ','.join(in_val)\n",
" elif tp in ['checklistbox', 'checklistview'] and isinstance(in_val, tuple):\n",
" # For checklistbox, checklistview: index+\";\"+checks \n",
" in_val = ';'.join( (str(in_val[0]), ','.join( in_val[1]) ) )\n",
" lst+= ['val='+str(in_val)]\n",
"\n",
" if 'act' in cnt: # must be last in lst\n",
" val = cnt['act']\n",
" lst += ['act='+('1' if val in [True, '1'] else '0')]\n",
" pass; #log('lst={}',lst)\n",
" ctrls_l+= [chr(1).join(lst)]\n",
" #for cnt\n",
" pass; #log('ok ctrls_l={}',pformat(ctrls_l, width=120))\n",
" \n",
" ans = app.dlg_custom(title, w, h, '\\n'.join(ctrls_l), cid2i.get(focus_cid, -1))\n",
" if ans is None: return None, None, None # btn_cid, {cid:v}, [cid]\n",
"\n",
" btn_i, \\\n",
" vals_ls = ans[0], ans[1].splitlines()\n",
" aid = cnts[btn_i]['cid']\n",
" # Parse output values\n",
" an_vals = {cid:vals_ls[cid2i[cid]] for cid in in_vals}\n",
" for cid in an_vals:\n",
" cnt = cnts[cid2i[cid]]\n",
" tp = cnt['tp']\n",
" tp = REDUCTS.get(tp, tp)\n",
" in_val = in_vals[cid]\n",
" an_val = an_vals[cid]\n",
" if False:pass\n",
" elif tp=='memo':\n",
" # For memo: \"\\t\"-separated lines (in lines \"\\t\" must be replaced to chr(2)) \n",
" if isinstance(in_val, list):\n",
" an_val = [v.replace(chr(2), '\\t') for v in an_val.split('\\t')]\n",
" #in_val = '\\t'.join([v.replace('\\t', chr(2)) for v in in_val])\n",
" else:\n",
" an_val = an_val.replace('\\t','\\n').replace(chr(2), '\\t')\n",
" #in_val = in_val.replace('\\t', chr(2)).replace('\\r\\n','\\n').replace('\\r','\\n').replace('\\n','\\t')\n",
" elif tp=='checkgroup' and isinstance(in_val, list):\n",
" # For checkgroup: \",\"-separated checks (values \"0\"/\"1\") \n",
" an_val = an_val.split(',')\n",
" #in_val = ','.join(in_val)\n",
" elif tp in ['checklistbox', 'checklistview'] and isinstance(in_val, tuple):\n",
" an_val = an_val.split(';')\n",
" an_val = (an_val[0], an_val[1].split(','))\n",
" #in_val = ';'.join(in_val[0], ','.join(in_val[1]))\n",
" elif isinstance(in_val, bool): \n",
" an_val = an_val=='1'\n",
" elif tp=='listview':\n",
" an_val = -1 if an_val=='' else int(an_val)\n",
" else: \n",
" an_val = type(in_val)(an_val)\n",
" an_vals[cid] = an_val\n",
" #for cid\n",
" return aid, an_vals, [cid for cid in in_vals if in_vals[cid]!=an_vals[cid]]\n",
" #def dlg_wrapper\n",
"\n",
"def get_hotkeys_desc(cmd_id, ext_id=None, keys_js=None, def_ans=''):\n",
" \"\"\" Read one or two hotkeys for command \n",
" cmd_id [+ext_id]\n",
" from \n",
" settings\\keys.json\n",
" Return \n",
" def_ans If no hotkeys for the command\n",
" 'Ctrl+Q' \n",
" 'Ctrl+Q * Ctrl+W' If one hotkey for the command\n",
" 'Ctrl+Q/Ctrl+T' \n",
" 'Ctrl+Q * Ctrl+W/Ctrl+T' If two hotkeys for the command\n",
" \"\"\"\n",
" if keys_js is None:\n",
" keys_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+'keys.json'\n",
" keys_js = apx._json_loads(open(keys_json).read()) if os.path.exists(keys_json) else {}\n",
"\n",
" cmd_id = f('{},{}', cmd_id, ext_id) if ext_id else cmd_id\n",
" if cmd_id not in keys_js:\n",
" return def_ans\n",
" cmd_keys= keys_js[cmd_id]\n",
" desc = '/'.join([' * '.join(cmd_keys.get('s1', []))\n",
" ,' * '.join(cmd_keys.get('s2', []))\n",
" ]).strip('/')\n",
" return desc\n",
" #def get_hotkeys_desc\n",
"\n",
"if __name__ == '__main__' : # Tests\n",
" def test_ask_number(ask, def_val):\n",
" cnts=[dict( tp='lb',tid='v',l=3 ,w=70,cap=ask)\n",
" ,dict(cid='v',tp='ed',t=3 ,l=73,w=70)\n",
" ,dict(cid='!',tp='bt',t=45 ,l=3 ,w=70,cap=_('OK'),props='1')\n",
" ,dict(cid='-',tp='bt',t=45 ,l=73,w=70,cap=_('Cancel'))]\n",
" vals={'v':def_val}\n",
" while True:\n",
" btn,vals=dlg_wrapper('Example',146,75,cnts,vals,'v')\n",
" if btn is None or btn=='-': return def_val\n",
" if not re.match(r'\\d+$', vals['v']): continue\n",
" return vals['v']\n",
" ask_number('ask_____________', '____smth')\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0.03389830508474576,
0.05,
0.06451612903225806,
0.06451612903225806,
0,
0.023809523809523808,
0.03389830508474576,
0,
0.0625,
0.13513513513513514,
0.10344827586206896,
0.12121212121212122,
0.14285714285714285,
0.125,
0.14285714285714285,
0.13333333333333333,
0.13793103448275862,
0.13793103448275862,
0.11428571428571428,
0.11764705882352941,
0.11764705882352941,
0.13793103448275862,
0.125,
0.12903225806451613,
0.1111111111111111,
0.125,
0.10810810810810811,
0.10714285714285714,
0.1,
0,
0.03389830508474576,
0,
0.029411764705882353,
0,
0,
0,
0.05263157894736842,
0,
0.2,
0.18181818181818182,
0.08333333333333333,
0,
0.011764705882352941,
0,
0,
0.02247191011235955,
0.020202020202020204,
0.017857142857142856,
0.017543859649122806,
0,
0,
0.014285714285714285,
0.019230769230769232,
0,
0.020618556701030927,
0.04,
0.04,
0.04,
0.046511627906976744,
0,
0.02040816326530612,
0.014705882352941176,
0.022988505747126436,
0,
0,
0.037037037037037035,
0.06521739130434782,
0.05660377358490566,
0.05555555555555555,
0.057692307692307696,
0.045454545454545456,
0.038461538461538464,
0.0392156862745098,
0.05660377358490566,
0.05555555555555555,
0.057692307692307696,
0.045454545454545456,
0,
0.043478260869565216,
0,
0,
0.05,
0,
0.017543859649122806,
0,
0.02631578947368421,
0,
0,
0.038461538461538464,
0,
0,
0.02857142857142857,
0,
0,
0.0625,
0.02857142857142857,
0.05825242718446602,
0.03076923076923077,
0.07142857142857142,
0.016666666666666666,
0.02857142857142857,
0.0625,
0.03076923076923077,
0.029411764705882353,
0,
0.058823529411764705,
0.03571428571428571,
0.038461538461538464,
0,
0.05194805194805195,
0.043478260869565216,
0.04,
0,
0,
0,
0.02040816326530612,
0.011111111111111112,
0.06666666666666667,
0.04,
0,
0.07692307692307693,
0,
0.015873015873015872,
0.038461538461538464,
0,
0.01818181818181818,
0.028169014084507043,
0,
0.03333333333333333,
0.03,
0,
0,
0.05,
0,
0.029411764705882353,
0.029411764705882353,
0.05309734513274336,
0.125,
0.061224489795918366,
0,
0.03333333333333333,
0.056338028169014086,
0.010752688172043012,
0.01694915254237288,
0,
0.05813953488372093,
0.038461538461538464,
0.029411764705882353,
0.02564102564102564,
0,
0.03333333333333333,
0,
0.030303030303030304,
0.058823529411764705,
0.038461538461538464,
0,
0,
0,
0.038461538461538464,
0,
0.06451612903225806,
0.024691358024691357,
0.02040816326530612,
0.0196078431372549,
0.06451612903225806,
0,
0.06896551724137931,
0,
0,
0.06060606060606061,
0.04878048780487805,
0.06451612903225806,
0.06976744186046512,
0,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0.04938271604938271,
0,
0.05357142857142857,
0,
0,
0,
0,
0.05357142857142857,
0,
0.06349206349206349,
0,
0,
0,
0,
0.02,
0.024390243902439025,
0.024390243902439025,
0.0625,
0.030303030303030304,
0.03125,
0,
0.14814814814814814,
0.04878048780487805,
0,
0.013888888888888888,
0,
0.013513513513513514,
0.03571428571428571,
0,
0,
0,
0.08333333333333333,
0,
0.03225806451612903,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0.02830188679245283,
0,
0.0297029702970297,
0.01904761904761905,
0,
0.03125,
0.011235955056179775,
0.009615384615384616,
0.011764705882352941,
0,
0,
0.02631578947368421,
0,
0.03225806451612903,
0.017543859649122806,
0.03333333333333333,
0.014084507042253521,
0,
0.013157894736842105,
0,
0,
0,
0,
0.012658227848101266,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0.04,
0.020618556701030927,
0.010638297872340425,
0.09523809523809523,
0.014925373134328358,
0.14285714285714285,
0.013888888888888888,
0,
0,
0,
0,
0,
0.1111111111111111,
0.13333333333333333,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.13333333333333333,
0.13333333333333333,
0.13333333333333333,
0.14285714285714285,
0.1,
0.1,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.13333333333333333,
0.13333333333333333,
0.13333333333333333,
0.14285714285714285,
0.1111111111111111,
0.1,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.16666666666666666,
0.13333333333333333,
0.13333333333333333,
0.10810810810810811,
0.07142857142857142,
0.08333333333333333,
0.034482758620689655,
0.034482758620689655,
0,
0.030303030303030304,
0.021739130434782608,
0.008771929824561403,
0,
0,
0.04,
0,
0,
0.03529411764705882,
0,
0.025,
0.045454545454545456,
0.018867924528301886,
0.0625,
0.04,
0.011764705882352941,
0,
0,
0.03409090909090909,
0,
0.08695652173913043,
0,
0.015625,
0.030303030303030304,
0,
0.02,
0,
0.006993006993006993,
0.007874015748031496,
0.010638297872340425,
0,
0.008547008547008548,
0,
0.009615384615384616,
0.011627906976744186,
0,
0.011904761904761904,
0,
0.01020408163265306,
0.009615384615384616,
0.007142857142857143,
0.0196078431372549,
0.010101010101010102,
0.008771929824561403,
0.014705882352941176,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0.015151515151515152,
0,
0,
0.02564102564102564,
0.038461538461538464,
0,
0,
0.058823529411764705,
0,
0,
0.049019607843137254,
0,
0,
0,
0,
0.03571428571428571,
0.02631578947368421,
0.047619047619047616,
0,
0.03225806451612903,
0.027777777777777776,
0.0136986301369863,
0,
0.027777777777777776,
0.037037037037037035,
0.05,
0.024390243902439025,
0.06896551724137931,
0.07692307692307693,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07407407407407407,
0.029411764705882353,
0,
0,
0.021739130434782608,
0.02857142857142857,
0.024390243902439025,
0.012048192771084338,
0.0392156862745098,
0.0392156862745098,
0.06060606060606061,
0,
0.03125,
0.014925373134328358,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0.04395604395604396,
0.0273972602739726,
0.02702702702702703,
0,
0.009345794392523364,
0.023255813953488372,
0.02857142857142857,
0.1111111111111111,
0,
0,
0,
0.07692307692307693,
0.011494252873563218,
0.015625,
0.02040816326530612,
0.034482758620689655,
0.021505376344086023,
0,
0.012195121951219513,
0,
0.03418803418803419,
0.015625,
0.0136986301369863,
0,
0.011363636363636364,
0.014492753623188406,
0.05194805194805195,
0.02564102564102564,
0,
0,
0.030303030303030304,
0.014705882352941176,
0.04,
0.02702702702702703,
0.125,
0.037037037037037035,
0.2,
0.022727272727272728,
0.013888888888888888,
0,
0,
0.023809523809523808,
0.030303030303030304,
0,
0.01694915254237288,
0,
0.02857142857142857,
0.03571428571428571,
0.02631578947368421,
0.03225806451612903,
0.03225806451612903,
0.09090909090909091,
0.04,
0.02247191011235955,
0,
0,
0.02564102564102564,
0,
0.0136986301369863,
0.02654867256637168,
0.016666666666666666,
0.014492753623188406,
0,
0.05263157894736842,
0.011904761904761904,
0,
0,
0.03225806451612903,
0.025,
0.030303030303030304,
0.034482758620689655,
0.01818181818181818,
0.06666666666666667,
0,
0.030303030303030304,
0.125,
0.037037037037037035,
0.1,
0,
0.014492753623188406,
0.022222222222222223,
0,
0.07142857142857142,
0.03225806451612903,
0.0625,
0,
0.030303030303030304,
0,
0.025,
0,
0,
0,
0.013157894736842105,
0.020202020202020204,
0,
0.015873015873015872,
0,
0,
0.03333333333333333,
0.03389830508474576,
0.01694915254237288,
0.02702702702702703,
0,
0.08,
0,
0.05,
0,
0.12903225806451613,
0.14814814814814814,
0.14473684210526316,
0.11267605633802817,
0.07407407407407407,
0,
0.1076923076923077,
0.03636363636363636,
0.017241379310344827,
0,
0
] | 582 | 0.031141 | false |
# Future Imports for py2/3 backwards compat.
# Smoke-test script for the Nexpose API wrapper: lists tags, then removes and
# re-adds the last tag on site 1, printing each raw API response.
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from builtins import range
from nexpose import OpenNexposeSession, as_string
from future import standard_library
import http.client
standard_library.install_aliases()
def output(response):
    # Debug helper: dump an API response object as a string.
    print(as_string(response))
# Force HTTP/1.0 so responses are not chunked (simplifies raw debugging).
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
# NOTE(review): hard-coded credentials and port 0 — placeholder values for a
# local test instance; confirm before running anywhere real.
host = "localhost"
port = 0
username = "nxadmin"
password = "nxpassword"
session = OpenNexposeSession(host, port, username, password)
# Only exercise the most recently listed tag.
tags = session.RequestTagListing()[-1:]
for tag in tags:
    l_id = tag.id
    print(tag.id, tag.name.encode('ascii', 'xmlcharrefreplace'))
    for attr in tag.attributes:
        print(" ", attr.id, attr.name, attr.value)
    # Sanity check that non-ASCII tag names survive the round trip.
    assert tag.name == u"ÇçĞğİıÖöŞşÜü"
sites = session.RequestSiteListing()
# output(sites)
for site_id in range(1, 1 + 1): # sites.xpath("/SiteListingResponse/SiteSummary/@id"):
    # output(session.RequestSiteDeviceListing(site_id))
    # output(session.RequestSiteScanHistory(site_id))
    # json_as_dict = json.loads(session.RequestTags())
    # tag = NexposeTag()
    # tag.id = 0
    # tag.type = "CUSTOM"
    # tag.name += "?"
    # tag.id = None
    # print tag.as_json()
    print(session.RemoveTagFromSite(l_id, site_id))
    print(session.AddTagToSite(l_id, site_id))
# output(session.RequestSystemInformation())
for tag in session.RequestAssetTagListing(2):
    print(tag.id, tag.name.encode('ascii', 'xmlcharrefreplace'))
| [
"# Future Imports for py2/3 backwards compat.\n",
"from __future__ import (absolute_import, division, print_function,\n",
" unicode_literals)\n",
"from builtins import range\n",
"from nexpose import OpenNexposeSession, as_string\n",
"from future import standard_library\n",
"import http.client\n",
"standard_library.install_aliases()\n",
"\n",
"\n",
"def output(response):\n",
" print(as_string(response))\n",
"\n",
"\n",
"http.client.HTTPConnection._http_vsn = 10\n",
"http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'\n",
"\n",
"host = \"localhost\"\n",
"port = 0\n",
"username = \"nxadmin\"\n",
"password = \"nxpassword\"\n",
"\n",
"session = OpenNexposeSession(host, port, username, password)\n",
"\n",
"tags = session.RequestTagListing()[-1:]\n",
"for tag in tags:\n",
" l_id = tag.id\n",
" print(tag.id, tag.name.encode('ascii', 'xmlcharrefreplace'))\n",
" for attr in tag.attributes:\n",
" print(\" \", attr.id, attr.name, attr.value)\n",
" assert tag.name == u\"ÇçĞğİıÖöŞşÜü\"\n",
"\n",
"sites = session.RequestSiteListing()\n",
"# output(sites)\n",
"for site_id in range(1, 1 + 1): # sites.xpath(\"/SiteListingResponse/SiteSummary/@id\"):\n",
" # output(session.RequestSiteDeviceListing(site_id))\n",
" # output(session.RequestSiteScanHistory(site_id))\n",
" # json_as_dict = json.loads(session.RequestTags())\n",
" # tag = NexposeTag()\n",
" # tag.id = 0\n",
" # tag.type = \"CUSTOM\"\n",
" # tag.name += \"?\"\n",
" # tag.id = None\n",
" # print tag.as_json()\n",
" print(session.RemoveTagFromSite(l_id, site_id))\n",
" print(session.AddTagToSite(l_id, site_id))\n",
"# output(session.RequestSystemInformation())\n",
"\n",
"for tag in session.RequestAssetTagListing(2):\n",
" print(tag.id, tag.name.encode('ascii', 'xmlcharrefreplace'))\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 50 | 0.000227 | false |
# Copied (and slightly altered) from script.pseudotv.live
# with permission of Lunatixz:
# http://forum.xbmc.org/showthread.php?tid=177296
# On 21st January 2014
# https://github.com/Lunatixz/script.pseudotv.live/tree/master/resources/lib/parsers
import xbmc
import os, shutil
import codecs
import xbmcvfs
import xbmcaddon
VFS_AVAILABLE = True
__addon__ = xbmcaddon.Addon(id='script.tvtunes')
__addonid__ = __addon__.getAddonInfo('id')
def ascii(string):
    # Best-effort conversion of a text value to pure-ASCII bytes; non-string
    # inputs are returned untouched. Python 2 only (basestring/unicode).
    if isinstance(string, basestring):
        if isinstance(string, unicode):
            string = string.encode('ascii', 'ignore')
    return string
class FileAccess:
    # Static helper facade over Kodi's xbmcvfs, with ASCII and SMB/Windows
    # fallbacks for paths that trip UnicodeDecodeError.
    @staticmethod
    def log(txt):
        # Log through xbmc only when the addon's "logEnabled" setting is on.
        if __addon__.getSetting( "logEnabled" ) == "true":
            if isinstance (txt,str):
                txt = txt.decode("utf-8")
            message = u'%s: %s' % (__addonid__, txt)
            xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
    @staticmethod
    def open(filename, mode, encoding = "utf-8"):
        # Open via VFSFile; on a unicode failure, retry with an ASCII-stripped
        # name. The trailing `return fle` is unreachable (pre-existing).
        fle = 0
        FileAccess.log("trying to open " + filename)
        try:
            return VFSFile(filename, mode)
        except UnicodeDecodeError:
            return FileAccess.open(ascii(filename), mode, encoding)
        return fle
    @staticmethod
    def copy(orgfilename, newfilename):
        # Copy through xbmcvfs; always reports success (return code ignored).
        FileAccess.log('copying ' + orgfilename + ' to ' + newfilename)
        xbmcvfs.copy(orgfilename, newfilename)
        return True
    @staticmethod
    def exists(filename):
        # Existence check with the same ASCII-name fallback as open().
        try:
            return xbmcvfs.exists(filename)
        except UnicodeDecodeError:
            return FileAccess.exists(ascii(filename))
        return False
    @staticmethod
    def openSMB(filename, mode, encoding = "utf-8"):
        # On Windows only: rewrite smb://server/share to \\server\share and
        # open with codecs; returns 0 on failure or on non-Windows platforms.
        fle = 0
        if os.name.lower() == 'nt':
            newname = '\\\\' + filename[6:]
            try:
                fle = codecs.open(newname, mode, encoding)
            except:
                fle = 0
        return fle
    @staticmethod
    def existsSMB(filename):
        # Windows-only SMB existence check (UNC-path rewrite as in openSMB).
        if os.name.lower() == 'nt':
            filename = '\\\\' + filename[6:]
            return FileAccess.exists(filename)
        return False
    @staticmethod
    def rename(path, newpath):
        # Rename with a cascade of fallbacks: xbmcvfs.rename -> os.rename ->
        # shutil.move; raises OSError when everything fails. On Windows,
        # smb:// prefixes are first rewritten to UNC paths.
        FileAccess.log("rename " + path + " to " + newpath)
        try:
            if xbmcvfs.rename(path, newpath):
                return True
        except:
            pass
        if path[0:6].lower() == 'smb://' or newpath[0:6].lower() == 'smb://':
            if os.name.lower() == 'nt':
                FileAccess.log("Modifying name")
                if path[0:6].lower() == 'smb://':
                    path = '\\\\' + path[6:]
                if newpath[0:6].lower() == 'smb://':
                    newpath = '\\\\' + newpath[6:]
        try:
            os.rename(path, newpath)
            FileAccess.log("os.rename")
            return True
        except:
            pass
        try:
            shutil.move(path, newpath)
            FileAccess.log("shutil.move")
            return True
        except:
            pass
        FileAccess.log("OSError")
        raise OSError()
    @staticmethod
    def makedirs(directory):
        # Try the native os.makedirs first; fall back to the recursive
        # xbmcvfs-based variant when it fails (e.g. VFS-only paths).
        try:
            os.makedirs(directory)
        except:
            FileAccess._makedirs(directory)
    @staticmethod
    def _makedirs(path):
        # Recursive mkdir via xbmcvfs: create parents first, then the leaf.
        if len(path) == 0:
            return False
        if(xbmcvfs.exists(path)):
            return True
        success = xbmcvfs.mkdir(path)
        if success == False:
            # Stop when dirname() no longer shrinks the path (filesystem root).
            if path == os.path.dirname(path):
                return False
            if FileAccess._makedirs(os.path.dirname(path)):
                return xbmcvfs.mkdir(path)
        return xbmcvfs.exists(path)
class VFSFile:
    # Thin file-like wrapper around xbmcvfs.File so callers can use the
    # familiar read/write/seek/close interface.
    def __init__(self, filename, mode):
        # log("VFSFile: trying to open " + filename)
        # Only 'w' opens for writing; every other mode opens read-only.
        if mode == 'w':
            self.currentFile = xbmcvfs.File(filename, 'wb')
        else:
            self.currentFile = xbmcvfs.File(filename)
        # log("VFSFile: Opening " + filename, xbmc.LOGDEBUG)
        if self.currentFile == None:
            log("VFSFile: Couldnt open " + filename, xbmc.LOGERROR)
    def read(self, bytes):
        # `bytes` is a byte count (parameter name shadows the builtin —
        # pre-existing).
        return self.currentFile.read(bytes)
    def write(self, data):
        # Unicode text is encoded to UTF-8 bytes before writing.
        if isinstance(data, unicode):
            data = bytearray(data, "utf-8")
            data = bytes(data)
        return self.currentFile.write(data)
    def close(self):
        return self.currentFile.close()
    def seek(self, bytes, offset):
        return self.currentFile.seek(bytes, offset)
    def size(self):
        loc = self.currentFile.size()
        return loc
    def readlines(self):
        # Reads the whole file and splits on newlines (no trailing '\n' kept).
        return self.currentFile.read().split('\n')
    def writelines(self):
        # NOTE(review): calls write() with no arguments — looks broken; verify
        # whether any caller actually uses this.
        return self.currentFile.write().split('\n')
    def tell(self):
        # Current position obtained via a zero-length relative seek.
        loc = self.currentFile.seek(0, 1)
        return loc
| [
"# Copied (and slightly altered) from script.pseudotv.live\n",
"# with permission of Lunatixz:\n",
"# http://forum.xbmc.org/showthread.php?tid=177296\n",
"# On 21st January 2014\n",
"# https://github.com/Lunatixz/script.pseudotv.live/tree/master/resources/lib/parsers\n",
"\n",
"import xbmc\n",
"import os, shutil\n",
"import codecs\n",
"import xbmcvfs\n",
"import xbmcaddon\n",
"VFS_AVAILABLE = True\n",
"\n",
"\n",
"__addon__ = xbmcaddon.Addon(id='script.tvtunes')\n",
"__addonid__ = __addon__.getAddonInfo('id')\n",
"\n",
"\n",
"def ascii(string):\n",
" if isinstance(string, basestring):\n",
" if isinstance(string, unicode):\n",
" string = string.encode('ascii', 'ignore')\n",
"\n",
" return string\n",
"\n",
"class FileAccess:\n",
" @staticmethod\n",
" def log(txt):\n",
" if __addon__.getSetting( \"logEnabled\" ) == \"true\":\n",
" if isinstance (txt,str):\n",
" txt = txt.decode(\"utf-8\")\n",
" message = u'%s: %s' % (__addonid__, txt)\n",
" xbmc.log(msg=message.encode(\"utf-8\"), level=xbmc.LOGDEBUG)\n",
"\n",
"\n",
" @staticmethod\n",
" def open(filename, mode, encoding = \"utf-8\"):\n",
" fle = 0\n",
" FileAccess.log(\"trying to open \" + filename)\n",
" \n",
" try:\n",
" return VFSFile(filename, mode)\n",
" except UnicodeDecodeError:\n",
" return FileAccess.open(ascii(filename), mode, encoding)\n",
"\n",
" return fle\n",
"\n",
"\n",
" @staticmethod\n",
" def copy(orgfilename, newfilename):\n",
" FileAccess.log('copying ' + orgfilename + ' to ' + newfilename)\n",
" xbmcvfs.copy(orgfilename, newfilename)\n",
" return True\n",
"\n",
"\n",
" @staticmethod\n",
" def exists(filename):\n",
" try:\n",
" return xbmcvfs.exists(filename)\n",
" except UnicodeDecodeError:\n",
" return FileAccess.exists(ascii(filename))\n",
"\n",
" return False\n",
"\n",
"\n",
" @staticmethod\n",
" def openSMB(filename, mode, encoding = \"utf-8\"):\n",
" fle = 0\n",
"\n",
" if os.name.lower() == 'nt':\n",
" newname = '\\\\\\\\' + filename[6:]\n",
"\n",
" try:\n",
" fle = codecs.open(newname, mode, encoding)\n",
" except:\n",
" fle = 0\n",
"\n",
" return fle\n",
"\n",
"\n",
" @staticmethod\n",
" def existsSMB(filename):\n",
" if os.name.lower() == 'nt':\n",
" filename = '\\\\\\\\' + filename[6:]\n",
" return FileAccess.exists(filename)\n",
"\n",
" return False\n",
"\n",
"\n",
" @staticmethod\n",
" def rename(path, newpath):\n",
" FileAccess.log(\"rename \" + path + \" to \" + newpath)\n",
"\n",
" try:\n",
" if xbmcvfs.rename(path, newpath):\n",
" return True\n",
" except:\n",
" pass\n",
"\n",
" if path[0:6].lower() == 'smb://' or newpath[0:6].lower() == 'smb://':\n",
" if os.name.lower() == 'nt':\n",
" FileAccess.log(\"Modifying name\")\n",
" if path[0:6].lower() == 'smb://':\n",
" path = '\\\\\\\\' + path[6:]\n",
"\n",
" if newpath[0:6].lower() == 'smb://':\n",
" newpath = '\\\\\\\\' + newpath[6:]\n",
"\n",
" try:\n",
" os.rename(path, newpath)\n",
" FileAccess.log(\"os.rename\")\n",
" return True\n",
" except:\n",
" pass\n",
"\n",
" try:\n",
" shutil.move(path, newpath)\n",
" FileAccess.log(\"shutil.move\")\n",
" return True\n",
" except:\n",
" pass\n",
"\n",
" FileAccess.log(\"OSError\")\n",
" raise OSError()\n",
"\n",
"\n",
" @staticmethod\n",
" def makedirs(directory):\n",
" try:\n",
" os.makedirs(directory)\n",
" except:\n",
" FileAccess._makedirs(directory)\n",
"\n",
"\n",
" @staticmethod\n",
" def _makedirs(path):\n",
" if len(path) == 0:\n",
" return False\n",
"\n",
" if(xbmcvfs.exists(path)):\n",
" return True\n",
"\n",
" success = xbmcvfs.mkdir(path)\n",
"\n",
" if success == False:\n",
" if path == os.path.dirname(path):\n",
" return False\n",
"\n",
" if FileAccess._makedirs(os.path.dirname(path)):\n",
" return xbmcvfs.mkdir(path)\n",
"\n",
" return xbmcvfs.exists(path)\n",
"\n",
"\n",
"\n",
"class VFSFile:\n",
" def __init__(self, filename, mode):\n",
" # log(\"VFSFile: trying to open \" + filename)\n",
"\n",
" if mode == 'w':\n",
" self.currentFile = xbmcvfs.File(filename, 'wb')\n",
" else: \n",
" self.currentFile = xbmcvfs.File(filename)\n",
"\n",
" # log(\"VFSFile: Opening \" + filename, xbmc.LOGDEBUG)\n",
" \n",
" if self.currentFile == None:\n",
" log(\"VFSFile: Couldnt open \" + filename, xbmc.LOGERROR)\n",
"\n",
"\n",
" def read(self, bytes):\n",
" return self.currentFile.read(bytes)\n",
" \n",
" \n",
" def write(self, data):\n",
" if isinstance(data, unicode):\n",
" data = bytearray(data, \"utf-8\")\n",
" data = bytes(data)\n",
" \n",
" return self.currentFile.write(data)\n",
" \n",
" \n",
" def close(self):\n",
" return self.currentFile.close()\n",
" \n",
" \n",
" def seek(self, bytes, offset):\n",
" return self.currentFile.seek(bytes, offset)\n",
" \n",
" \n",
" def size(self):\n",
" loc = self.currentFile.size()\n",
" return loc\n",
" \n",
" \n",
" def readlines(self):\n",
" return self.currentFile.read().split('\\n') \n",
" \n",
" def writelines(self):\n",
" return self.currentFile.write().split('\\n')\n",
" \n",
" \n",
" def tell(self):\n",
" loc = self.currentFile.seek(0, 1)\n",
" return loc\n",
" \n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0.022222222222222223,
0,
0,
0,
0,
0,
0.018867924528301886,
0,
0,
0,
0.05555555555555555,
0,
0,
0.03389830508474576,
0.05405405405405406,
0,
0,
0,
0,
0,
0.05555555555555555,
0.04,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0.03773584905660377,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0.0625,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0.1111111111111111,
0.02702702702702703,
0,
0,
0,
0.037037037037037035,
0,
0.1111111111111111,
0.1111111111111111,
0.037037037037037035,
0,
0,
0,
0.2,
0,
0.1111111111111111,
0.1111111111111111,
0.047619047619047616,
0,
0.1111111111111111,
0.1111111111111111,
0.02857142857142857,
0,
0.1111111111111111,
0.1111111111111111,
0.05,
0,
0,
0.1111111111111111,
0.1111111111111111,
0.04,
0.01818181818181818,
0.1111111111111111,
0,
0,
0.1111111111111111,
0.1111111111111111,
0.05,
0,
0,
0.1111111111111111,
1
] | 207 | 0.021937 | false |
# -*- coding: utf-8 -*-
import xbmc, xbmcgui, xbmcplugin, xbmcaddon, urllib2, urllib, re, string, sys, os, gzip, StringIO
from uuid import uuid4
from random import random,randint
from math import floor
import hashlib
import time
import simplejson
# Plugin constants
__addonname__ = "奇艺视频(QIYI)"
__addonid__ = "plugin.video.qiyi"
__addon__ = xbmcaddon.Addon(id=__addonid__)
CHANNEL_LIST = [['电影','1'], ['电视剧','2'], ['纪录片','3'], ['动漫','4'], ['音乐','5'], ['综艺','6'], ['娱乐','7'], ['旅游','9'], ['片花','10'], ['教育','12'], ['时尚','13']]
ORDER_LIST = [['4','更新时间'], ['11','热门']]
PAYTYPE_LIST = [['','全部影片'], ['0','免费影片'], ['1','会员免费'], ['2','付费点播']]
'''
class QYPlayer(xbmc.Player):
def __init__(self):
xbmc.Player.__init__(self)
def play(self, name, thumb, baseurl, vlinks, gen_uid):
self.is_active = True
self.name = name
self.thumb = thumb
self.baseurl = baseurl
self.vlinks = vlinks
self.gen_uid = gen_uid
self.curpos = 0
self.geturl()
self.playrun()
def geturl(self):
if self.curpos < len(self.vlinks):
vlink = self.vlinks[self.curpos]["l"]
if not vlink.startswith("/"):
#vlink is encode
vlink=getVrsEncodeCode(vlink)
key=getDispathKey(vlink.split("/")[-1].split(".")[0])
baseurl=self.baseurl.split("/")
baseurl.insert(-1,key)
url="/".join(baseurl)+vlink+'?su='+self.gen_uid+'&qyid='+uuid4().hex+'&client=&z=&bt=&ct=&tn='+str(randint(10000,20000))
self.videourl=simplejson.loads(GetHttpData(url))["l"]
self.curpos = self.curpos + 1
else:
self.videourl = None
def playrun(self):
title = self.name + " - 第"+str(self.curpos)+"/"+str(len(self.vlinks)) + "节"
listitem = xbmcgui.ListItem(title,thumbnailImage=self.thumb)
listitem.setInfo(type="Video",infoLabels={"Title":title})
xbmc.Player.play(self, self.videourl, listitem)
self.geturl()
def onPlayBackStarted(self):
xbmc.Player.onPlayBackStarted(self)
def onPlayBackSeek(self, time, seekOffset):
xbmc.Player.onPlayBackSeek(self, time, seekOffset)
def onPlayBackSeekChapter(self, chapter):
xbmc.Player.onPlayBackSeek(self, chapter)
def onPlayBackEnded(self):
if self.videourl:
self.playrun()
else:
self.is_active = False
xbmc.Player.onPlayBackEnded(self)
def onPlayBackStopped(self):
self.is_active = False
qyplayer = QYPlayer()
'''
def GetHttpData(url):
    # Fetch a URL and return the body as UTF-8 encoded bytes ('' on error).
    # Handles gzip transfer encoding and re-encodes from the charset declared
    # in either the response header or an HTML <meta> tag. Python 2 only.
    print "getHttpData: " + url
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)')
    try:
        response = urllib2.urlopen(req)
        httpdata = response.read()
        if response.headers.get('content-encoding', None) == 'gzip':
            httpdata = gzip.GzipFile(fileobj=StringIO.StringIO(httpdata)).read()
        charset = response.headers.getparam('charset')
        response.close()
    except:
        # Log the failing frame/line through xbmc and give up.
        xbmc.log( "%s: %s (%d) [%s]" % (
                  __addonname__,
                  sys.exc_info()[ 2 ].tb_frame.f_code.co_name,
                  sys.exc_info()[ 2 ].tb_lineno,
                  sys.exc_info()[ 1 ]
                  ), level=xbmc.LOGERROR)
        return ''
    # A <meta> charset declaration in the body overrides the HTTP header.
    match = re.compile('<meta http-equiv=["]?[Cc]ontent-[Tt]ype["]? content="text/html;[\s]?charset=(.+?)"').findall(httpdata)
    if len(match)>0:
        charset = match[0]
    if charset:
        charset = charset.lower()
        if (charset != 'utf-8') and (charset != 'utf8'):
            httpdata = httpdata.decode(charset, 'ignore').encode('utf8', 'ignore')
    return httpdata
def urlExists(url):
    """Return True when *url* can be opened (and closed) successfully."""
    try:
        handle = urllib2.urlopen(url)
        handle.close()
        return True
    except:
        return False
def searchDict(dlist, idx):
    """Look up *idx* in a list of (key, label) pairs.

    Returns the label paired with the first matching key, or '' when the
    key is absent. Replaces the original index-based range(len()) loop
    with direct iteration (same behavior, idiomatic form).
    """
    for entry in dlist:
        if entry[0] == idx:
            return entry[1]
    return ''
def getcatList(listpage, id, cat):
    # Scrape the category filter links for channel *id* from a listing page.
    # Returns [(category_code, label), ...]; the currently selected entry
    # (rendered as href="#") is appended with the caller's *cat* code.
    # Heading differs per channel: 类型 (movies/documentary/animation/
    # entertainment/travel), 分类 (TV/variety/clips), 流派 (music),
    # 一级分类 (education), 行业 (fashion).
    match = re.compile('<h3>(类型|分类|流派|一级分类|行业):</h3>(.*?)</ul>', re.DOTALL).findall(listpage)
    # The category code occupies a different slash-separated slot in the URL
    # depending on the channel.
    if id in ('3','9'): # documentary & travel
        catlist = re.compile('/www/' + id + '/(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
    elif id in ('5','10'): # music & clips
        catlist = re.compile('/www/' + id + '/\d*-\d*-\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
    elif id == '12': # education
        catlist = re.compile('/www/' + id + '/\d*-\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
    elif id == '13': # fashion
        catlist = re.compile('/www/' + id + '/\d*-\d*-\d*-\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
    else:
        catlist = re.compile('/www/' + id + '/\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
    match1 = re.compile('<a href="#">(.*?)</a>').search(match[0][1])
    if match1:
        catlist.append((cat, match1.group(1)))
    return catlist
def getareaList(listpage, id, area):
    # Scrape the region (地区) filter links for channel *id* from a listing
    # page. Returns [(area_code, label), ...]; the selected entry (href="#")
    # is appended with the caller's *area* code. The code's slot in the URL
    # varies by channel, mirroring getcatList.
    match = re.compile('<h3>地区:</h3>(.*?)</ul>', re.DOTALL).findall(listpage)
    if id == '7': # entertainment
        arealist = re.compile('/www/' + id + '/\d*-\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0])
    elif id in ('9','10'): # travel & clips
        arealist = re.compile('/www/' + id + '/\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0])
    else:
        arealist = re.compile('/www/' + id + '/(\d*)-[^>]+>(.*?)</a>').findall(match[0])
    match1 = re.compile('<a href="#">(.*?)</a>').search(match[0])
    if match1:
        arealist.append((area, match1.group(1)))
    return arealist
def getyearList(listpage, id, year):
    """Scrape the year (年代) filter links for channel *id*.

    Returns a list of (year_code, label) pairs taken from the year section
    of the listing page; the currently selected entry (href="#") is
    appended using the caller-supplied *year* code.
    """
    section = re.compile('<h3>年代:</h3>(.*?)</ul>', re.DOTALL).findall(listpage)[0]
    entries = re.compile('/www/' + id + '/\d*-\d*---------\d*-([\d_]*)-[^>]+>(.*?)</a>').findall(section)
    selected = re.compile('<a href="#">(.*?)</a>').search(section)
    if selected is not None:
        entries.append((year, selected.group(1)))
    return entries
def rootList():
    # Build the top-level directory: one entry per channel in CHANNEL_LIST,
    # each pointing back at this plugin with mode=1 (listing), empty filters,
    # order '11' (popular) and page 1.
    for name, id in CHANNEL_LIST:
        li = xbmcgui.ListItem(name)
        u = sys.argv[0]+"?mode=1&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&cat="+urllib.quote_plus("")+"&area="+urllib.quote_plus("")+"&year="+urllib.quote_plus("")+"&order="+urllib.quote_plus("11")+"&page="+urllib.quote_plus("1")+"&paytype="+urllib.quote_plus("0")
        xbmcplugin.addDirectoryItem(int(sys.argv[1]),u,li,True)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
# id c1 c2 c3 c4 c5 c11 c12 c14
# 电影 1 area cat paytype year order
# 电视剧 2 area cat paytype year order
# 纪录片 3 cat paytype order
# 动漫 4 area cat ver age paytype order
# 音乐 5 area lang cat grp paytype order
# 综艺 6 area cat paytype order
# 娱乐 7 cat area paytype order
# 旅游 9 cat area paytype order
# 片花 10 area cat paytype order
# 教育 12 cat paytype order
# 时尚 13 cat paytype order
def progList(name,id,page,cat,area,year,order,paytype):
    # Render one page of a channel's program listing: builds the list.iqiyi.com
    # URL from the filter codes, scrapes the result, and emits one directory
    # item per program plus a filter-selection header and prev/next links.
    # Map the generic cat/area codes into the channel-specific URL slots
    # (c1..c4); slot meaning differs per channel, mirroring getcatList.
    c1 = ''
    c2 = ''
    c3 = ''
    c4 = ''
    if id == '7': # entertainment
        c3 = area
    elif id in ('9','10'): # travel & clips
        c2 = area
    elif id != '3': # everything except documentary
        c1 = area
    if id in ('3','9'): # documentary & travel
        c1 = cat
    elif id in ('5','10'): # music & clips
        c4 = cat
    elif id == '12': # education
        c3 = cat
    elif id == '13': # fashion
        # NOTE(review): c5 is never used when building the URL below, so the
        # fashion channel's category filter appears to be silently dropped.
        c5 = cat
    else:
        c2 = cat
    url = 'http://list.iqiyi.com/www/' + id + '/' + c1 + '-' + c2 + '-' + c3 + '-' + c4 + '-------' +\
          paytype + '-' + year + '--' + order + '-' + page + '-1-iqiyi--.html'
    currpage = int(page)
    link = GetHttpData(url)
    # Total page count comes from the last data-key pager attribute.
    match1 = re.compile('data-key="([0-9]+)"').findall(link)
    if len(match1) == 0:
        totalpages = 1
    else:
        totalpages = int(match1[len(match1) - 1])
    # The filter section is kept so the selection dialog (mode=4) can reuse it.
    match = re.compile('<!-- 分类 -->(.+?)<!-- 分类 end-->', re.DOTALL).findall(link)
    if match:
        listpage = match[0]
    else:
        listpage = ''
    match = re.compile('<div class="wrapper-piclist"(.+?)<!-- 页码 开始 -->', re.DOTALL).findall(link)
    if match:
        match = re.compile('<li[^>]*>(.+?)</li>', re.DOTALL).findall(match[0])
    totalItems = len(match) + 1
    if currpage > 1: totalItems = totalItems + 1
    if currpage < totalpages: totalItems = totalItems + 1
    # Header item: colour-coded summary of the active filters; opens the
    # filter-selection dialog (mode=4).
    if cat == '':
        catstr = '全部类型'
    else:
        catlist = getcatList(listpage, id, cat)
        catstr = searchDict(catlist, cat)
    selstr = '[COLOR FFFF0000]' + catstr + '[/COLOR]'
    if not (id in ('3','12','13')):
        if area == '':
            areastr = '全部地区'
        else:
            arealist = getareaList(listpage, id, area)
            areastr = searchDict(arealist, area)
        selstr += '/[COLOR FF00FF00]' + areastr + '[/COLOR]'
        if id in ('1', '2'):
            if year == '':
                yearstr = '全部年份'
            else:
                yearlist = getyearList(listpage, id, year)
                yearstr = searchDict(yearlist, year)
            selstr += '/[COLOR FFFFFF00]' + yearstr + '[/COLOR]'
    selstr += '/[COLOR FF00FFFF]' + searchDict(ORDER_LIST, order) + '[/COLOR]'
    selstr += '/[COLOR FFFF00FF]' + searchDict(PAYTYPE_LIST, paytype) + '[/COLOR]'
    li = xbmcgui.ListItem(name+'(第'+str(currpage)+'/'+str(totalpages)+'页)【'+selstr+'】(按此选择)')
    u = sys.argv[0]+"?mode=4&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&cat="+urllib.quote_plus(cat)+"&area="+urllib.quote_plus(area)+"&year="+urllib.quote_plus(year)+"&order="+order+"&paytype="+urllib.quote_plus(paytype)+"&page="+urllib.quote_plus(listpage)
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
    # One directory entry per <li> scraped from the picture list.
    for i in range(0,len(match)):
        p_name = re.compile('alt="(.+?)"').findall(match[i])[0]
        p_thumb = re.compile('src\s*=\s*"(.+?)"').findall(match[i])[0]
        #p_id = re.compile('data-qidanadd-albumid="(\d+)"').search(match[i]).group(1)
        p_id = re.compile('href="([^"]*)"').search(match[i]).group(1)
        # Multi-episode detection: explicit attribute, or an "updated to"/
        # "total of" status badge.
        try:
            p_episode = re.compile('data-qidanadd-episode="(\d)"').search(match[i]).group(1) == '1'
        except:
            p_episode = False
        match1 = re.compile('<span class="icon-vInfo">([^<]+)</span>').search(match[i])
        if match1:
            msg = match1.group(1).strip()
            p_name1 = p_name + '(' + msg + ')'
            if (msg.find('更新至') == 0) or (msg.find('共') == 0):
                p_episode = True
        else:
            p_name1 = p_name
        # mode 2 = episode list (directory), mode 3 = play directly.
        if p_episode:
            mode = 2
            isdir = True
            p_id = re.compile('data-qidanadd-albumid="(\d+)"').search(match[i]).group(1)
        else:
            mode = 3
            isdir = False
        # Optional metadata: rating, director, cast, genre, plot.
        match1 = re.compile('<p class="dafen2">\s*<strong class="fRed"><span>(\d*)</span>([\.\d]*)</strong><span>分</span>\s*</p>').search(match[i])
        if match1:
            p_rating = float(match1.group(1)+match1.group(2))
        else:
            p_rating = 0
        match1 = re.compile('<span>导演:</span>(.+?)</p>', re.DOTALL).search(match[i])
        if match1:
            p_director = ' / '.join(re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1)))
        else:
            p_director = ''
        match1 = re.compile('<em>主演:</em>(.+?)</div>', re.DOTALL).search(match[i])
        if match1:
            p_cast = re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1))
        else:
            p_cast = []
        match1 = re.compile('<span>类型:</span>(.+?)</p>', re.DOTALL).search(match[i])
        if match1:
            p_genre = ' / '.join(re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1)))
        else:
            p_genre = ''
        match1 = re.compile('<p class="s1">\s*<span>([^<]*)</span>\s*</p>').search(match[i])
        if match1:
            p_plot = match1.group(1)
        else:
            p_plot = ''
        li = xbmcgui.ListItem(str(i + 1) + '.' + p_name1, iconImage = '', thumbnailImage = p_thumb)
        li.setArt({ 'poster': p_thumb })
        u = sys.argv[0]+"?mode="+str(mode)+"&name="+urllib.quote_plus(p_name)+"&id="+urllib.quote_plus(p_id)+"&thumb="+urllib.quote_plus(p_thumb)
        li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Genre":p_genre, "Plot":p_plot, "Cast":p_cast, "Rating":p_rating})
        xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, isdir, totalItems)
    # Prev/next page navigation entries.
    if currpage > 1:
        li = xbmcgui.ListItem('上一页')
        u = sys.argv[0]+"?mode=1&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&cat="+urllib.quote_plus(cat)+"&area="+urllib.quote_plus(area)+"&year="+urllib.quote_plus(year)+"&order="+order+"&page="+urllib.quote_plus(str(currpage-1))+"&paytype="+paytype
        xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
    if currpage < totalpages:
        li = xbmcgui.ListItem('下一页')
        u = sys.argv[0]+"?mode=1&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&cat="+urllib.quote_plus(cat)+"&area="+urllib.quote_plus(area)+"&year="+urllib.quote_plus(year)+"&order="+order+"&page="+urllib.quote_plus(str(currpage+1))+"&paytype="+paytype
        xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def seriesList(name,id,thumb,page):
    # List the episodes of album *id*. The album metadata endpoint returns
    # "var x = {json}", hence the split on '='. Two listing endpoints exist:
    # a source-based one (sdvlst) for certain album types, and a paged
    # avlist one for everything else. Python 2 only (uses the '<>' operator).
    url = 'http://cache.video.qiyi.com/a/%s' % (id)
    link = GetHttpData(url)
    data = link[link.find('=')+1:]
    json_response = simplejson.loads(data)
    if json_response['data']['tvYear']:
        p_year = int(json_response['data']['tvYear'])
    else:
        p_year = 0
    p_director = ' / '.join(json_response['data']['directors']).encode('utf-8')
    p_cast = [x.encode('utf-8') for x in json_response['data']['mainActors']]
    p_plot = json_response['data']['tvDesc'].encode('utf-8')
    albumType = json_response['data']['albumType']
    sourceId = json_response['data']['sourceId']
    if albumType in (1, 6, 9, 12, 13) and sourceId<>0:
        # Source-based listing: a single flat page of episodes.
        url = 'http://cache.video.qiyi.com/jp/sdvlst/%d/%d/?categoryId=%d&sourceId=%d' % (albumType, sourceId, albumType, sourceId)
        link = GetHttpData(url)
        data = link[link.find('=')+1:]
        json_response = simplejson.loads(data)
        totalItems = len(json_response['data'])
        for item in json_response['data']:
            tvId = str(item['tvId'])
            videoId = item['vid'].encode('utf-8')
            # Playback id is the "tvId,videoId" pair consumed by PlayVideo.
            p_id = '%s,%s' % (tvId, videoId)
            p_thumb = item['aPicUrl'].encode('utf-8')
            p_name = item['videoName'].encode('utf-8')
            p_name = '%s %s' % (p_name, item['tvYear'].encode('utf-8'))
            li = xbmcgui.ListItem(p_name, iconImage = '', thumbnailImage = p_thumb)
            li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Cast":p_cast, "Plot":p_plot, "Year":p_year})
            u = sys.argv[0] + "?mode=3&name=" + urllib.quote_plus(p_name) + "&id=" + urllib.quote_plus(p_id)+ "&thumb=" + urllib.quote_plus(p_thumb)
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
    else:
        # Paged avlist listing, with a non-clickable page header (mode=99)
        # and prev/next navigation entries.
        url = 'http://cache.video.qiyi.com/avlist/%s/%s/' % (id, page)
        link = GetHttpData(url)
        data = link[link.find('=')+1:]
        json_response = simplejson.loads(data)
        totalItems = len(json_response['data']['vlist']) + 1
        totalpages = json_response['data']['pgt']
        currpage = int(page)
        if currpage > 1: totalItems = totalItems + 1
        if currpage < totalpages: totalItems = totalItems + 1
        li = xbmcgui.ListItem(name+'(第'+str(currpage)+'/'+str(totalpages)+'页)')
        u = sys.argv[0]+"?mode=99"
        xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
        for item in json_response['data']['vlist']:
            tvId = str(item['id'])
            videoId = item['vid'].encode('utf-8')
            p_id = '%s,%s' % (tvId, videoId)
            p_thumb = item['vpic'].encode('utf-8')
            p_name = item['vn'].encode('utf-8')
            if item['vt']:
                p_name = '%s %s' % (p_name, item['vt'].encode('utf-8'))
            li = xbmcgui.ListItem(p_name, iconImage = '', thumbnailImage = p_thumb)
            li.setArt({ 'poster': thumb })
            li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Cast":p_cast, "Plot":p_plot, "Year":p_year})
            u = sys.argv[0] + "?mode=3&name=" + urllib.quote_plus(p_name) + "&id=" + urllib.quote_plus(p_id)+ "&thumb=" + urllib.quote_plus(p_thumb)
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
        if currpage > 1:
            li = xbmcgui.ListItem('上一页')
            u = sys.argv[0]+"?mode=2&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&thumb="+urllib.quote_plus(thumb)+"&page="+urllib.quote_plus(str(currpage-1))
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
        if currpage < totalpages:
            li = xbmcgui.ListItem('下一页')
            u = sys.argv[0]+"?mode=2&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&thumb="+urllib.quote_plus(thumb)+"&page="+urllib.quote_plus(str(currpage+1))
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
    xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def selResolution(items):
    # Choose a stream from the available quality codes in *items*.
    # Returns the index into *items* of the chosen quality, or -1 when the
    # user cancels the selection dialog.
    ratelist = []
    for i in range(0,len(items)):
        # Each entry: [setting rank (1=best), label, index into items].
        if items[i] == 96: ratelist.append([7, '极速', i])
        if items[i] == 1: ratelist.append([6, '流畅', i])
        if items[i] == 2: ratelist.append([5, '标清', i])
        if items[i] == 3: ratelist.append([4, '超清', i])
        if items[i] == 4 or items[i] == 17: ratelist.append([3, '720P', i])
        if items[i] == 5 or items[i] == 18: ratelist.append([2, '1080P', i])
        if items[i] == 10 or items[i] == 19: ratelist.append([1, '4K', i])
    # Sort best-first (lowest rank value = highest quality).
    ratelist.sort()
    if len(ratelist) > 1:
        resolution = int(__addon__.getSetting('resolution'))
        if resolution == 0: # setting 0 = ask the user every time
            dialog = xbmcgui.Dialog()
            list = [x[1] for x in ratelist]
            sel = dialog.select('清晰度(低网速请选择低清晰度)', list)
            if sel == -1:
                return -1
        else:
            # Otherwise walk down until the configured preference is met.
            sel = 0
            while sel < len(ratelist)-1 and resolution > ratelist[sel][0]: sel = sel + 1
    else:
        sel = 0
    return ratelist[sel][2]
'''
def getVRSXORCode(arg1,arg2):
loc3=arg2 %3
if loc3 == 1:
return arg1^121
if loc3 == 2:
return arg1^72
return arg1^103
def getVrsEncodeCode(vlink):
loc6=0
loc2=''
loc3=vlink.split("-")
loc4=len(loc3)
# loc5=loc4-1
for i in range(loc4-1,-1,-1):
loc6=getVRSXORCode(int(loc3[loc4-i-1],16),i)
loc2+=chr(loc6)
return loc2[::-1]
def mix(tvid):
salt = '4a1caba4b4465345366f28da7c117d20'
tm = str(randint(2000,4000))
src = 'eknas'
sc = hashlib.md5(salt + tm + tvid).hexdigest()
return tm,sc,src
def getVMS(tvid,vid,uid):
#tm ->the flash run time for md5 usage
#um -> vip 1 normal 0
#authkey -> for password protected video ,replace '' with your password
#puid user.passportid may empty?
#TODO: support password protected video
tm,sc,src = mix(tvid)
vmsreq='http://cache.video.qiyi.com/vms?key=fvip&src=1702633101b340d8917a69cf8a4b8c7' +\
"&tvId="+tvid+"&vid="+vid+"&vinfo=1&tm="+tm+\
"&enc="+sc+\
"&qyid="+uid+"&tn="+str(random()) +"&um=1" +\
"&authkey="+hashlib.md5(hashlib.md5(b'').hexdigest()+str(tm)+tvid).hexdigest()
return simplejson.loads(GetHttpData(vmsreq))
def getDispathKey(rid):
tp=")(*&^flash@#$%a" #magic from swf
time=simplejson.loads(GetHttpData("http://data.video.qiyi.com/t?tn="+str(random())))["t"]
t=str(int(floor(int(time)/(10*60.0))))
return hashlib.md5(t+tp+rid).hexdigest()
'''
def getVMS(tvid, vid):
    # Query iQiyi's tmts endpoint for the stream manifest of (tvid, vid).
    # The request is signed with md5(timestamp + key + vid); src/key are
    # fixed client credentials. Returns the parsed JSON response.
    t = int(time.time() * 1000)
    src = '76f90cbd92f94a2e925d83e8ccd22cb7'
    key = 'd5fb4bd9d50c4be6948c97edd7254b0e'
    sc = hashlib.md5(str(t) + key + vid).hexdigest()
    vmsreq = 'http://cache.m.iqiyi.com/tmts/{0}/{1}/?t={2}&sc={3}&src={4}'.format(tvid,vid,t,sc,src)
    print vmsreq
    return simplejson.loads(GetHttpData(vmsreq))
def PlayVideo(name,id,thumb):
    # Resolve *id* to a playable stream and start playback.
    # *id* is either "tvId,videoId" or a single token (a page URL or an
    # album id) from which the pair must first be scraped.
    id = id.split(',')
    if len(id) == 1:
        try:
            if ("http:" in id[0]):
                # A program page URL: scrape the player ids from the HTML.
                link = GetHttpData(id[0])
                tvId = re.compile('data-player-tvid="(.+?)"', re.DOTALL).findall(link)[0]
                videoId = re.compile('data-player-videoid="(.+?)"', re.DOTALL).findall(link)[0]
            else:
                # An album id: take the first entry of its episode list.
                url = 'http://cache.video.qiyi.com/avlist/%s/' % (id[0])
                link = GetHttpData(url)
                data = link[link.find('=')+1:]
                json_response = simplejson.loads(data)
                tvId = str(json_response['data']['vlist'][0]['id'])
                videoId = json_response['data']['vlist'][0]['vid'].encode('utf-8')
        except:
            dialog = xbmcgui.Dialog()
            ok = dialog.ok(__addonname__, '未能获取视频地址')
            return
    else:
        tvId = id[0]
        videoId = id[1]
    info = getVMS(tvId, videoId)
    # "A00000" is the API's success code.
    if info["code"] != "A00000":
        dialog = xbmcgui.Dialog()
        ok = dialog.ok(__addonname__, '无法播放此视频')
        return
    vs = info["data"]["vidl"]
    sel = selResolution([x['vd'] for x in vs])
    if sel == -1:
        return
    video_links = vs[sel]["m3u"]
    listitem = xbmcgui.ListItem(name,thumbnailImage=thumb)
    listitem.setInfo(type="Video",infoLabels={"Title":name})
    xbmc.Player().play(video_links, listitem)
def performChanges(name,id,listpage,cat,area,year,order,paytype):
change = False
catlist= getcatList(listpage, id, cat)
dialog = xbmcgui.Dialog()
if len(catlist)>0:
list = [x[1] for x in catlist]
sel = dialog.select('类型', list)
if sel != -1:
cat = catlist[sel][0]
change = True
if not (id in ('3','12','13')):
arealist = getareaList(listpage, id, area)
if len(arealist)>0:
list = [x[1] for x in arealist]
sel = dialog.select('地区', list)
if sel != -1:
area = arealist[sel][0]
change = True
if id in ('1','2'):
yearlist = getyearList(listpage, id, year)
if len(yearlist)>0:
list = [x[1] for x in yearlist]
sel = dialog.select('年份', list)
if sel != -1:
year = yearlist[sel][0]
change = True
list = [x[1] for x in ORDER_LIST]
sel = dialog.select('排序方式', list)
if sel != -1:
order = ORDER_LIST[sel][0]
change = True
if change:
progList(name,id,'1',cat,area,year,order,paytype)
def get_params():
param = []
paramstring = sys.argv[2]
if len(paramstring) >= 2:
params = sys.argv[2]
cleanedparams = params.replace('?', '')
if (params[len(params) - 1] == '/'):
params = params[0:len(params) - 2]
pairsofparams = cleanedparams.split('&')
param = {}
for i in range(len(pairsofparams)):
splitparams = {}
splitparams = pairsofparams[i].split('=')
if (len(splitparams)) == 2:
param[splitparams[0]] = splitparams[1]
return param
params = get_params()
mode = None
name = None
id = None
cat = ''
area = ''
year = ''
order = '3'
paytype = '0'
num = '1'
page = '1'
url = None
thumb = None
try:
thumb = urllib.unquote_plus(params["thumb"])
except:
pass
try:
url = urllib.unquote_plus(params["url"])
except:
pass
try:
page = urllib.unquote_plus(params["page"])
except:
pass
try:
num = urllib.unquote_plus(params["num"])
except:
pass
try:
res = urllib.unquote_plus(params["paytype"])
except:
pass
try:
order = urllib.unquote_plus(params["order"])
except:
pass
try:
year = urllib.unquote_plus(params["year"])
except:
pass
try:
area = urllib.unquote_plus(params["area"])
except:
pass
try:
cat = urllib.unquote_plus(params["cat"])
except:
pass
try:
id = urllib.unquote_plus(params["id"])
except:
pass
try:
name = urllib.unquote_plus(params["name"])
except:
pass
try:
mode = int(params["mode"])
except:
pass
if mode == None:
rootList()
elif mode == 1:
progList(name,id,page,cat,area,year,order,paytype)
elif mode == 2:
seriesList(name,id,thumb,page)
elif mode == 3:
PlayVideo(name,id,thumb)
elif mode == 4:
performChanges(name,id,page,cat,area,year,order,paytype)
| [
"# -*- coding: utf-8 -*-\n",
"import xbmc, xbmcgui, xbmcplugin, xbmcaddon, urllib2, urllib, re, string, sys, os, gzip, StringIO\n",
"from uuid import uuid4\n",
"from random import random,randint\n",
"from math import floor\n",
"import hashlib\n",
"import time\n",
"import simplejson\n",
"\n",
"# Plugin constants \n",
"__addonname__ = \"奇艺视频(QIYI)\"\n",
"__addonid__ = \"plugin.video.qiyi\"\n",
"__addon__ = xbmcaddon.Addon(id=__addonid__)\n",
"\n",
"CHANNEL_LIST = [['电影','1'], ['电视剧','2'], ['纪录片','3'], ['动漫','4'], ['音乐','5'], ['综艺','6'], ['娱乐','7'], ['旅游','9'], ['片花','10'], ['教育','12'], ['时尚','13']]\n",
"ORDER_LIST = [['4','更新时间'], ['11','热门']]\n",
"PAYTYPE_LIST = [['','全部影片'], ['0','免费影片'], ['1','会员免费'], ['2','付费点播']]\n",
"\n",
"'''\n",
"class QYPlayer(xbmc.Player):\n",
" def __init__(self):\n",
" xbmc.Player.__init__(self)\n",
"\n",
" def play(self, name, thumb, baseurl, vlinks, gen_uid):\n",
" self.is_active = True\n",
" self.name = name\n",
" self.thumb = thumb\n",
" self.baseurl = baseurl\n",
" self.vlinks = vlinks\n",
" self.gen_uid = gen_uid\n",
" self.curpos = 0\n",
" self.geturl()\n",
" self.playrun()\n",
"\n",
" def geturl(self):\n",
" if self.curpos < len(self.vlinks):\n",
" vlink = self.vlinks[self.curpos][\"l\"]\n",
" if not vlink.startswith(\"/\"):\n",
" #vlink is encode\n",
" vlink=getVrsEncodeCode(vlink)\n",
" key=getDispathKey(vlink.split(\"/\")[-1].split(\".\")[0])\n",
" baseurl=self.baseurl.split(\"/\")\n",
" baseurl.insert(-1,key)\n",
" url=\"/\".join(baseurl)+vlink+'?su='+self.gen_uid+'&qyid='+uuid4().hex+'&client=&z=&bt=&ct=&tn='+str(randint(10000,20000))\n",
" self.videourl=simplejson.loads(GetHttpData(url))[\"l\"]\n",
" self.curpos = self.curpos + 1\n",
" else:\n",
" self.videourl = None\n",
"\n",
" def playrun(self):\n",
" title = self.name + \" - 第\"+str(self.curpos)+\"/\"+str(len(self.vlinks)) + \"节\"\n",
" listitem = xbmcgui.ListItem(title,thumbnailImage=self.thumb)\n",
" listitem.setInfo(type=\"Video\",infoLabels={\"Title\":title})\n",
" xbmc.Player.play(self, self.videourl, listitem)\n",
" self.geturl()\n",
"\n",
" def onPlayBackStarted(self):\n",
" xbmc.Player.onPlayBackStarted(self)\n",
"\n",
" def onPlayBackSeek(self, time, seekOffset):\n",
" xbmc.Player.onPlayBackSeek(self, time, seekOffset)\n",
"\n",
" def onPlayBackSeekChapter(self, chapter):\n",
" xbmc.Player.onPlayBackSeek(self, chapter)\n",
"\n",
" def onPlayBackEnded(self):\n",
" if self.videourl:\n",
" self.playrun()\n",
" else:\n",
" self.is_active = False\n",
" xbmc.Player.onPlayBackEnded(self)\n",
"\n",
" def onPlayBackStopped(self):\n",
" self.is_active = False\n",
"\n",
"qyplayer = QYPlayer()\n",
"'''\n",
"\n",
"def GetHttpData(url):\n",
" print \"getHttpData: \" + url\n",
" req = urllib2.Request(url)\n",
" req.add_header('User-Agent', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)')\n",
" try:\n",
" response = urllib2.urlopen(req)\n",
" httpdata = response.read()\n",
" if response.headers.get('content-encoding', None) == 'gzip':\n",
" httpdata = gzip.GzipFile(fileobj=StringIO.StringIO(httpdata)).read()\n",
" charset = response.headers.getparam('charset')\n",
" response.close()\n",
" except:\n",
" xbmc.log( \"%s: %s (%d) [%s]\" % (\n",
" __addonname__,\n",
" sys.exc_info()[ 2 ].tb_frame.f_code.co_name,\n",
" sys.exc_info()[ 2 ].tb_lineno,\n",
" sys.exc_info()[ 1 ]\n",
" ), level=xbmc.LOGERROR)\n",
" return ''\n",
" match = re.compile('<meta http-equiv=[\"]?[Cc]ontent-[Tt]ype[\"]? content=\"text/html;[\\s]?charset=(.+?)\"').findall(httpdata)\n",
" if len(match)>0:\n",
" charset = match[0]\n",
" if charset:\n",
" charset = charset.lower()\n",
" if (charset != 'utf-8') and (charset != 'utf8'):\n",
" httpdata = httpdata.decode(charset, 'ignore').encode('utf8', 'ignore')\n",
" return httpdata\n",
" \n",
"def urlExists(url):\n",
" try:\n",
" resp = urllib2.urlopen(url)\n",
" result = True\n",
" resp.close()\n",
" except:\n",
" result = False\n",
" return result\n",
"\n",
"def searchDict(dlist,idx):\n",
" for i in range(0,len(dlist)):\n",
" if dlist[i][0] == idx:\n",
" return dlist[i][1]\n",
" return ''\n",
"\n",
"def getcatList(listpage, id, cat):\n",
" # 类型(电影,纪录片,动漫,娱乐,旅游), 分类(电视剧,综艺,片花), 流派(音乐), 一级分类(教育), 行业(时尚)\n",
" match = re.compile('<h3>(类型|分类|流派|一级分类|行业):</h3>(.*?)</ul>', re.DOTALL).findall(listpage)\n",
" if id in ('3','9'): # 纪录片&旅游\n",
" catlist = re.compile('/www/' + id + '/(\\d*)-[^>]+>(.*?)</a>').findall(match[0][1])\n",
" elif id in ('5','10'): # 音乐&片花\n",
" catlist = re.compile('/www/' + id + '/\\d*-\\d*-\\d*-(\\d*)-[^>]+>(.*?)</a>').findall(match[0][1])\n",
" elif id == '12': # 教育\n",
" catlist = re.compile('/www/' + id + '/\\d*-\\d*-(\\d*)-[^>]+>(.*?)</a>').findall(match[0][1])\n",
" elif id == '13': # 时尚\n",
" catlist = re.compile('/www/' + id + '/\\d*-\\d*-\\d*-\\d*-(\\d*)-[^>]+>(.*?)</a>').findall(match[0][1])\n",
" else:\n",
" catlist = re.compile('/www/' + id + '/\\d*-(\\d*)-[^>]+>(.*?)</a>').findall(match[0][1])\n",
" match1 = re.compile('<a href=\"#\">(.*?)</a>').search(match[0][1])\n",
" if match1:\n",
" catlist.append((cat, match1.group(1)))\n",
" return catlist\n",
"\n",
"def getareaList(listpage, id, area):\n",
" match = re.compile('<h3>地区:</h3>(.*?)</ul>', re.DOTALL).findall(listpage)\n",
" if id == '7': # 娱乐\n",
" arealist = re.compile('/www/' + id + '/\\d*-\\d*-(\\d*)-[^>]+>(.*?)</a>').findall(match[0])\n",
" elif id in ('9','10'): # 旅游&片花\n",
" arealist = re.compile('/www/' + id + '/\\d*-(\\d*)-[^>]+>(.*?)</a>').findall(match[0])\n",
" else:\n",
" arealist = re.compile('/www/' + id + '/(\\d*)-[^>]+>(.*?)</a>').findall(match[0])\n",
" match1 = re.compile('<a href=\"#\">(.*?)</a>').search(match[0])\n",
" if match1:\n",
" arealist.append((area, match1.group(1)))\n",
" return arealist\n",
"\n",
"def getyearList(listpage, id, year):\n",
" match = re.compile('<h3>年代:</h3>(.*?)</ul>', re.DOTALL).findall(listpage)\n",
" yearlist = re.compile('/www/' + id + '/\\d*-\\d*---------\\d*-([\\d_]*)-[^>]+>(.*?)</a>').findall(match[0])\n",
" match1 = re.compile('<a href=\"#\">(.*?)</a>').search(match[0])\n",
" if match1:\n",
" yearlist.append((year, match1.group(1)))\n",
" return yearlist\n",
"\n",
"def rootList():\n",
" for name, id in CHANNEL_LIST:\n",
" li = xbmcgui.ListItem(name)\n",
" u = sys.argv[0]+\"?mode=1&name=\"+urllib.quote_plus(name)+\"&id=\"+urllib.quote_plus(id)+\"&cat=\"+urllib.quote_plus(\"\")+\"&area=\"+urllib.quote_plus(\"\")+\"&year=\"+urllib.quote_plus(\"\")+\"&order=\"+urllib.quote_plus(\"11\")+\"&page=\"+urllib.quote_plus(\"1\")+\"&paytype=\"+urllib.quote_plus(\"0\")\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]),u,li,True)\n",
" xbmcplugin.endOfDirectory(int(sys.argv[1]))\n",
"\n",
"# id c1 c2 c3 c4 c5 c11 c12 c14\n",
"# 电影 1 area cat paytype year order\n",
"# 电视剧 2 area cat paytype year order\n",
"# 纪录片 3 cat paytype order\n",
"# 动漫 4 area cat ver age paytype order\n",
"# 音乐 5 area lang cat grp paytype order\n",
"# 综艺 6 area cat paytype order\n",
"# 娱乐 7 cat area paytype order\n",
"# 旅游 9 cat area paytype order\n",
"# 片花 10 area cat paytype order\n",
"# 教育 12 cat paytype order\n",
"# 时尚 13 cat paytype order\n",
"def progList(name,id,page,cat,area,year,order,paytype):\n",
" c1 = ''\n",
" c2 = ''\n",
" c3 = ''\n",
" c4 = ''\n",
" if id == '7': # 娱乐\n",
" c3 = area\n",
" elif id in ('9','10'): # 旅游&片花\n",
" c2 = area\n",
" elif id != '3': # 非纪录片\n",
" c1 = area\n",
" if id in ('3','9'): # 纪录片&旅游\n",
" c1 = cat\n",
" elif id in ('5','10'): # 音乐&片花\n",
" c4 = cat\n",
" elif id == '12': # 教育\n",
" c3 = cat\n",
" elif id == '13': # 时尚\n",
" c5 = cat\n",
" else:\n",
" c2 = cat\n",
" url = 'http://list.iqiyi.com/www/' + id + '/' + c1 + '-' + c2 + '-' + c3 + '-' + c4 + '-------' +\\\n",
" paytype + '-' + year + '--' + order + '-' + page + '-1-iqiyi--.html'\n",
" currpage = int(page)\n",
" link = GetHttpData(url)\n",
" match1 = re.compile('data-key=\"([0-9]+)\"').findall(link)\n",
" if len(match1) == 0:\n",
" totalpages = 1\n",
" else:\n",
" totalpages = int(match1[len(match1) - 1])\n",
" match = re.compile('<!-- 分类 -->(.+?)<!-- 分类 end-->', re.DOTALL).findall(link)\n",
" if match:\n",
" listpage = match[0]\n",
" else:\n",
" listpage = ''\n",
" match = re.compile('<div class=\"wrapper-piclist\"(.+?)<!-- 页码 开始 -->', re.DOTALL).findall(link)\n",
" if match:\n",
" match = re.compile('<li[^>]*>(.+?)</li>', re.DOTALL).findall(match[0])\n",
" totalItems = len(match) + 1\n",
" if currpage > 1: totalItems = totalItems + 1\n",
" if currpage < totalpages: totalItems = totalItems + 1\n",
"\n",
" \n",
" if cat == '':\n",
" catstr = '全部类型'\n",
" else:\n",
" catlist = getcatList(listpage, id, cat)\n",
" catstr = searchDict(catlist, cat)\n",
" selstr = '[COLOR FFFF0000]' + catstr + '[/COLOR]'\n",
" if not (id in ('3','12','13')):\n",
" if area == '':\n",
" areastr = '全部地区'\n",
" else:\n",
" arealist = getareaList(listpage, id, area)\n",
" areastr = searchDict(arealist, area)\n",
" selstr += '/[COLOR FF00FF00]' + areastr + '[/COLOR]'\n",
" if id in ('1', '2'):\n",
" if year == '':\n",
" yearstr = '全部年份'\n",
" else:\n",
" yearlist = getyearList(listpage, id, year)\n",
" yearstr = searchDict(yearlist, year)\n",
" selstr += '/[COLOR FFFFFF00]' + yearstr + '[/COLOR]'\n",
" selstr += '/[COLOR FF00FFFF]' + searchDict(ORDER_LIST, order) + '[/COLOR]'\n",
" selstr += '/[COLOR FFFF00FF]' + searchDict(PAYTYPE_LIST, paytype) + '[/COLOR]'\n",
" li = xbmcgui.ListItem(name+'(第'+str(currpage)+'/'+str(totalpages)+'页)【'+selstr+'】(按此选择)')\n",
" u = sys.argv[0]+\"?mode=4&name=\"+urllib.quote_plus(name)+\"&id=\"+urllib.quote_plus(id)+\"&cat=\"+urllib.quote_plus(cat)+\"&area=\"+urllib.quote_plus(area)+\"&year=\"+urllib.quote_plus(year)+\"&order=\"+order+\"&paytype=\"+urllib.quote_plus(paytype)+\"&page=\"+urllib.quote_plus(listpage)\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)\n",
" for i in range(0,len(match)):\n",
" p_name = re.compile('alt=\"(.+?)\"').findall(match[i])[0]\n",
" p_thumb = re.compile('src\\s*=\\s*\"(.+?)\"').findall(match[i])[0]\n",
" #p_id = re.compile('data-qidanadd-albumid=\"(\\d+)\"').search(match[i]).group(1)\n",
" p_id = re.compile('href=\"([^\"]*)\"').search(match[i]).group(1)\n",
"\n",
" try:\n",
" p_episode = re.compile('data-qidanadd-episode=\"(\\d)\"').search(match[i]).group(1) == '1'\n",
" except:\n",
" p_episode = False\n",
" match1 = re.compile('<span class=\"icon-vInfo\">([^<]+)</span>').search(match[i])\n",
" if match1:\n",
" msg = match1.group(1).strip()\n",
" p_name1 = p_name + '(' + msg + ')'\n",
" if (msg.find('更新至') == 0) or (msg.find('共') == 0):\n",
" p_episode = True\n",
" else:\n",
" p_name1 = p_name\n",
"\n",
" if p_episode:\n",
" mode = 2\n",
" isdir = True\n",
" p_id = re.compile('data-qidanadd-albumid=\"(\\d+)\"').search(match[i]).group(1)\n",
" else:\n",
" mode = 3\n",
" isdir = False\n",
" match1 = re.compile('<p class=\"dafen2\">\\s*<strong class=\"fRed\"><span>(\\d*)</span>([\\.\\d]*)</strong><span>分</span>\\s*</p>').search(match[i])\n",
" if match1:\n",
" p_rating = float(match1.group(1)+match1.group(2))\n",
" else:\n",
" p_rating = 0\n",
" match1 = re.compile('<span>导演:</span>(.+?)</p>', re.DOTALL).search(match[i])\n",
" if match1:\n",
" p_director = ' / '.join(re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1)))\n",
" else:\n",
" p_director = ''\n",
" match1 = re.compile('<em>主演:</em>(.+?)</div>', re.DOTALL).search(match[i])\n",
" if match1:\n",
" p_cast = re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1))\n",
" else:\n",
" p_cast = []\n",
" match1 = re.compile('<span>类型:</span>(.+?)</p>', re.DOTALL).search(match[i])\n",
" if match1:\n",
" p_genre = ' / '.join(re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1)))\n",
" else:\n",
" p_genre = ''\n",
" match1 = re.compile('<p class=\"s1\">\\s*<span>([^<]*)</span>\\s*</p>').search(match[i])\n",
" if match1:\n",
" p_plot = match1.group(1)\n",
" else:\n",
" p_plot = ''\n",
" li = xbmcgui.ListItem(str(i + 1) + '.' + p_name1, iconImage = '', thumbnailImage = p_thumb)\n",
" li.setArt({ 'poster': p_thumb })\n",
" u = sys.argv[0]+\"?mode=\"+str(mode)+\"&name=\"+urllib.quote_plus(p_name)+\"&id=\"+urllib.quote_plus(p_id)+\"&thumb=\"+urllib.quote_plus(p_thumb)\n",
" li.setInfo(type = \"Video\", infoLabels = {\"Title\":p_name, \"Director\":p_director, \"Genre\":p_genre, \"Plot\":p_plot, \"Cast\":p_cast, \"Rating\":p_rating})\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, isdir, totalItems)\n",
"\n",
" if currpage > 1:\n",
" li = xbmcgui.ListItem('上一页')\n",
" u = sys.argv[0]+\"?mode=1&name=\"+urllib.quote_plus(name)+\"&id=\"+urllib.quote_plus(id)+\"&cat=\"+urllib.quote_plus(cat)+\"&area=\"+urllib.quote_plus(area)+\"&year=\"+urllib.quote_plus(year)+\"&order=\"+order+\"&page=\"+urllib.quote_plus(str(currpage-1))+\"&paytype=\"+paytype\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)\n",
" if currpage < totalpages:\n",
" li = xbmcgui.ListItem('下一页')\n",
" u = sys.argv[0]+\"?mode=1&name=\"+urllib.quote_plus(name)+\"&id=\"+urllib.quote_plus(id)+\"&cat=\"+urllib.quote_plus(cat)+\"&area=\"+urllib.quote_plus(area)+\"&year=\"+urllib.quote_plus(year)+\"&order=\"+order+\"&page=\"+urllib.quote_plus(str(currpage+1))+\"&paytype=\"+paytype\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)\n",
" xbmcplugin.setContent(int(sys.argv[1]), 'movies')\n",
" xbmcplugin.endOfDirectory(int(sys.argv[1]))\n",
"\n",
"def seriesList(name,id,thumb,page):\n",
" url = 'http://cache.video.qiyi.com/a/%s' % (id)\n",
" link = GetHttpData(url)\n",
" data = link[link.find('=')+1:]\n",
" json_response = simplejson.loads(data)\n",
" if json_response['data']['tvYear']:\n",
" p_year = int(json_response['data']['tvYear'])\n",
" else:\n",
" p_year = 0\n",
" p_director = ' / '.join(json_response['data']['directors']).encode('utf-8')\n",
" p_cast = [x.encode('utf-8') for x in json_response['data']['mainActors']]\n",
" p_plot = json_response['data']['tvDesc'].encode('utf-8')\n",
"\n",
" albumType = json_response['data']['albumType']\n",
" sourceId = json_response['data']['sourceId']\n",
" if albumType in (1, 6, 9, 12, 13) and sourceId<>0:\n",
" url = 'http://cache.video.qiyi.com/jp/sdvlst/%d/%d/?categoryId=%d&sourceId=%d' % (albumType, sourceId, albumType, sourceId)\n",
" link = GetHttpData(url)\n",
" data = link[link.find('=')+1:]\n",
" json_response = simplejson.loads(data)\n",
" totalItems = len(json_response['data'])\n",
" for item in json_response['data']:\n",
" tvId = str(item['tvId'])\n",
" videoId = item['vid'].encode('utf-8')\n",
" p_id = '%s,%s' % (tvId, videoId)\n",
" p_thumb = item['aPicUrl'].encode('utf-8')\n",
" p_name = item['videoName'].encode('utf-8')\n",
" p_name = '%s %s' % (p_name, item['tvYear'].encode('utf-8'))\n",
" li = xbmcgui.ListItem(p_name, iconImage = '', thumbnailImage = p_thumb)\n",
" li.setInfo(type = \"Video\", infoLabels = {\"Title\":p_name, \"Director\":p_director, \"Cast\":p_cast, \"Plot\":p_plot, \"Year\":p_year})\n",
" u = sys.argv[0] + \"?mode=3&name=\" + urllib.quote_plus(p_name) + \"&id=\" + urllib.quote_plus(p_id)+ \"&thumb=\" + urllib.quote_plus(p_thumb)\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)\n",
" else:\n",
" url = 'http://cache.video.qiyi.com/avlist/%s/%s/' % (id, page)\n",
" link = GetHttpData(url)\n",
" data = link[link.find('=')+1:]\n",
" json_response = simplejson.loads(data)\n",
" totalItems = len(json_response['data']['vlist']) + 1\n",
" totalpages = json_response['data']['pgt']\n",
" currpage = int(page)\n",
" if currpage > 1: totalItems = totalItems + 1\n",
" if currpage < totalpages: totalItems = totalItems + 1\n",
" li = xbmcgui.ListItem(name+'(第'+str(currpage)+'/'+str(totalpages)+'页)')\n",
" u = sys.argv[0]+\"?mode=99\"\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)\n",
"\n",
" for item in json_response['data']['vlist']:\n",
" tvId = str(item['id'])\n",
" videoId = item['vid'].encode('utf-8')\n",
" p_id = '%s,%s' % (tvId, videoId)\n",
" p_thumb = item['vpic'].encode('utf-8')\n",
" p_name = item['vn'].encode('utf-8')\n",
" if item['vt']:\n",
" p_name = '%s %s' % (p_name, item['vt'].encode('utf-8'))\n",
" li = xbmcgui.ListItem(p_name, iconImage = '', thumbnailImage = p_thumb)\n",
" li.setArt({ 'poster': thumb })\n",
" li.setInfo(type = \"Video\", infoLabels = {\"Title\":p_name, \"Director\":p_director, \"Cast\":p_cast, \"Plot\":p_plot, \"Year\":p_year})\n",
" u = sys.argv[0] + \"?mode=3&name=\" + urllib.quote_plus(p_name) + \"&id=\" + urllib.quote_plus(p_id)+ \"&thumb=\" + urllib.quote_plus(p_thumb)\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)\n",
"\n",
" if currpage > 1:\n",
" li = xbmcgui.ListItem('上一页')\n",
" u = sys.argv[0]+\"?mode=2&name=\"+urllib.quote_plus(name)+\"&id=\"+urllib.quote_plus(id)+\"&thumb=\"+urllib.quote_plus(thumb)+\"&page=\"+urllib.quote_plus(str(currpage-1))\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)\n",
" if currpage < totalpages:\n",
" li = xbmcgui.ListItem('下一页')\n",
" u = sys.argv[0]+\"?mode=2&name=\"+urllib.quote_plus(name)+\"&id=\"+urllib.quote_plus(id)+\"&thumb=\"+urllib.quote_plus(thumb)+\"&page=\"+urllib.quote_plus(str(currpage+1))\n",
" xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)\n",
" xbmcplugin.setContent(int(sys.argv[1]), 'episodes')\n",
" xbmcplugin.endOfDirectory(int(sys.argv[1]))\n",
"\n",
"def selResolution(items):\n",
" ratelist = []\n",
" for i in range(0,len(items)):\n",
" if items[i] == 96: ratelist.append([7, '极速', i]) # 清晰度设置值, 清晰度, match索引\n",
" if items[i] == 1: ratelist.append([6, '流畅', i])\n",
" if items[i] == 2: ratelist.append([5, '标清', i])\n",
" if items[i] == 3: ratelist.append([4, '超清', i])\n",
" if items[i] == 4 or items[i] == 17: ratelist.append([3, '720P', i])\n",
" if items[i] == 5 or items[i] == 18: ratelist.append([2, '1080P', i])\n",
" if items[i] == 10 or items[i] == 19: ratelist.append([1, '4K', i])\n",
" ratelist.sort()\n",
" if len(ratelist) > 1:\n",
" resolution = int(__addon__.getSetting('resolution'))\n",
" if resolution == 0: # 每次询问点播视频清晰度\n",
" dialog = xbmcgui.Dialog()\n",
" list = [x[1] for x in ratelist]\n",
" sel = dialog.select('清晰度(低网速请选择低清晰度)', list)\n",
" if sel == -1:\n",
" return -1\n",
" else:\n",
" sel = 0\n",
" while sel < len(ratelist)-1 and resolution > ratelist[sel][0]: sel = sel + 1\n",
" else:\n",
" sel = 0\n",
" return ratelist[sel][2]\n",
"\n",
"'''\n",
"def getVRSXORCode(arg1,arg2):\n",
" loc3=arg2 %3\n",
" if loc3 == 1:\n",
" return arg1^121\n",
" if loc3 == 2:\n",
" return arg1^72\n",
" return arg1^103\n",
"\n",
"def getVrsEncodeCode(vlink):\n",
" loc6=0\n",
" loc2=''\n",
" loc3=vlink.split(\"-\")\n",
" loc4=len(loc3)\n",
" # loc5=loc4-1\n",
" for i in range(loc4-1,-1,-1):\n",
" loc6=getVRSXORCode(int(loc3[loc4-i-1],16),i)\n",
" loc2+=chr(loc6)\n",
" return loc2[::-1]\n",
"\n",
"def mix(tvid):\n",
" salt = '4a1caba4b4465345366f28da7c117d20'\n",
" tm = str(randint(2000,4000))\n",
" src = 'eknas'\n",
" sc = hashlib.md5(salt + tm + tvid).hexdigest()\n",
" return tm,sc,src\n",
"\n",
"def getVMS(tvid,vid,uid):\n",
" #tm ->the flash run time for md5 usage\n",
" #um -> vip 1 normal 0\n",
" #authkey -> for password protected video ,replace '' with your password\n",
" #puid user.passportid may empty?\n",
" #TODO: support password protected video\n",
" tm,sc,src = mix(tvid)\n",
" vmsreq='http://cache.video.qiyi.com/vms?key=fvip&src=1702633101b340d8917a69cf8a4b8c7' +\\\n",
" \"&tvId=\"+tvid+\"&vid=\"+vid+\"&vinfo=1&tm=\"+tm+\\\n",
" \"&enc=\"+sc+\\\n",
" \"&qyid=\"+uid+\"&tn=\"+str(random()) +\"&um=1\" +\\\n",
" \"&authkey=\"+hashlib.md5(hashlib.md5(b'').hexdigest()+str(tm)+tvid).hexdigest()\n",
" return simplejson.loads(GetHttpData(vmsreq))\n",
"\n",
"def getDispathKey(rid):\n",
" tp=\")(*&^flash@#$%a\" #magic from swf\n",
" time=simplejson.loads(GetHttpData(\"http://data.video.qiyi.com/t?tn=\"+str(random())))[\"t\"]\n",
" t=str(int(floor(int(time)/(10*60.0))))\n",
" return hashlib.md5(t+tp+rid).hexdigest()\n",
"'''\n",
"\n",
"def getVMS(tvid, vid):\n",
" t = int(time.time() * 1000)\n",
" src = '76f90cbd92f94a2e925d83e8ccd22cb7'\n",
" key = 'd5fb4bd9d50c4be6948c97edd7254b0e'\n",
" sc = hashlib.md5(str(t) + key + vid).hexdigest()\n",
" vmsreq = 'http://cache.m.iqiyi.com/tmts/{0}/{1}/?t={2}&sc={3}&src={4}'.format(tvid,vid,t,sc,src)\n",
" print vmsreq\n",
" return simplejson.loads(GetHttpData(vmsreq))\n",
"\n",
"def PlayVideo(name,id,thumb):\n",
" id = id.split(',')\n",
" if len(id) == 1:\n",
" try:\n",
" if (\"http:\" in id[0]):\n",
" link = GetHttpData(id[0])\n",
" tvId = re.compile('data-player-tvid=\"(.+?)\"', re.DOTALL).findall(link)[0]\n",
" videoId = re.compile('data-player-videoid=\"(.+?)\"', re.DOTALL).findall(link)[0]\n",
" else:\n",
" url = 'http://cache.video.qiyi.com/avlist/%s/' % (id[0])\n",
" link = GetHttpData(url)\n",
" data = link[link.find('=')+1:]\n",
" json_response = simplejson.loads(data)\n",
" tvId = str(json_response['data']['vlist'][0]['id'])\n",
" videoId = json_response['data']['vlist'][0]['vid'].encode('utf-8')\n",
" except:\n",
" dialog = xbmcgui.Dialog()\n",
" ok = dialog.ok(__addonname__, '未能获取视频地址')\n",
" return\n",
" else:\n",
" tvId = id[0]\n",
" videoId = id[1]\n",
" \n",
" info = getVMS(tvId, videoId)\n",
" if info[\"code\"] != \"A00000\":\n",
" dialog = xbmcgui.Dialog()\n",
" ok = dialog.ok(__addonname__, '无法播放此视频')\n",
" return\n",
" \n",
" vs = info[\"data\"][\"vidl\"]\n",
" sel = selResolution([x['vd'] for x in vs])\n",
" if sel == -1:\n",
" return\n",
"\n",
" video_links = vs[sel][\"m3u\"]\n",
"\n",
" listitem = xbmcgui.ListItem(name,thumbnailImage=thumb)\n",
" listitem.setInfo(type=\"Video\",infoLabels={\"Title\":name})\n",
" xbmc.Player().play(video_links, listitem)\n",
"\n",
"def performChanges(name,id,listpage,cat,area,year,order,paytype):\n",
" change = False\n",
" catlist= getcatList(listpage, id, cat)\n",
" dialog = xbmcgui.Dialog()\n",
" if len(catlist)>0:\n",
" list = [x[1] for x in catlist]\n",
" sel = dialog.select('类型', list)\n",
" if sel != -1:\n",
" cat = catlist[sel][0]\n",
" change = True\n",
" if not (id in ('3','12','13')):\n",
" arealist = getareaList(listpage, id, area)\n",
" if len(arealist)>0:\n",
" list = [x[1] for x in arealist]\n",
" sel = dialog.select('地区', list)\n",
" if sel != -1:\n",
" area = arealist[sel][0]\n",
" change = True \n",
" if id in ('1','2'):\n",
" yearlist = getyearList(listpage, id, year)\n",
" if len(yearlist)>0:\n",
" list = [x[1] for x in yearlist]\n",
" sel = dialog.select('年份', list)\n",
" if sel != -1:\n",
" year = yearlist[sel][0]\n",
" change = True\n",
" list = [x[1] for x in ORDER_LIST]\n",
" sel = dialog.select('排序方式', list)\n",
" if sel != -1:\n",
" order = ORDER_LIST[sel][0]\n",
" change = True\n",
" if change:\n",
" progList(name,id,'1',cat,area,year,order,paytype)\n",
"\n",
"def get_params():\n",
" param = []\n",
" paramstring = sys.argv[2]\n",
" if len(paramstring) >= 2:\n",
" params = sys.argv[2]\n",
" cleanedparams = params.replace('?', '')\n",
" if (params[len(params) - 1] == '/'):\n",
" params = params[0:len(params) - 2]\n",
" pairsofparams = cleanedparams.split('&')\n",
" param = {}\n",
" for i in range(len(pairsofparams)):\n",
" splitparams = {}\n",
" splitparams = pairsofparams[i].split('=')\n",
" if (len(splitparams)) == 2:\n",
" param[splitparams[0]] = splitparams[1]\n",
" return param\n",
"\n",
"params = get_params()\n",
"mode = None\n",
"name = None\n",
"id = None\n",
"cat = ''\n",
"area = ''\n",
"year = ''\n",
"order = '3'\n",
"paytype = '0'\n",
"num = '1'\n",
"page = '1'\n",
"url = None\n",
"thumb = None\n",
"\n",
"try:\n",
" thumb = urllib.unquote_plus(params[\"thumb\"])\n",
"except:\n",
" pass\n",
"try:\n",
" url = urllib.unquote_plus(params[\"url\"])\n",
"except:\n",
" pass\n",
"try:\n",
" page = urllib.unquote_plus(params[\"page\"])\n",
"except:\n",
" pass\n",
"try:\n",
" num = urllib.unquote_plus(params[\"num\"])\n",
"except:\n",
" pass\n",
"try:\n",
" res = urllib.unquote_plus(params[\"paytype\"])\n",
"except:\n",
" pass\n",
"try:\n",
" order = urllib.unquote_plus(params[\"order\"])\n",
"except:\n",
" pass\n",
"try:\n",
" year = urllib.unquote_plus(params[\"year\"])\n",
"except:\n",
" pass\n",
"try:\n",
" area = urllib.unquote_plus(params[\"area\"])\n",
"except:\n",
" pass\n",
"try:\n",
" cat = urllib.unquote_plus(params[\"cat\"])\n",
"except:\n",
" pass\n",
"try:\n",
" id = urllib.unquote_plus(params[\"id\"])\n",
"except:\n",
" pass\n",
"try:\n",
" name = urllib.unquote_plus(params[\"name\"])\n",
"except:\n",
" pass\n",
"try:\n",
" mode = int(params[\"mode\"])\n",
"except:\n",
" pass\n",
"\n",
"if mode == None:\n",
" rootList()\n",
"elif mode == 1:\n",
" progList(name,id,page,cat,area,year,order,paytype)\n",
"elif mode == 2:\n",
" seriesList(name,id,thumb,page)\n",
"elif mode == 3:\n",
" PlayVideo(name,id,thumb)\n",
"elif mode == 4:\n",
" performChanges(name,id,page,cat,area,year,order,paytype)\n"
] | [
0,
0.02040816326530612,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0.05,
0,
0.027777777777777776,
0.020833333333333332,
0,
0.0784313725490196,
0.04878048780487805,
0.056338028169014086,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0.01,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0.08333333333333333,
0.024390243902439025,
0,
0.03508771929824561,
0.046511627906976744,
0.0625,
0,
0,
0.015748031496062992,
0.047619047619047616,
0,
0,
0,
0,
0.012048192771084338,
0,
0.25,
0.05,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.07407407407407407,
0.029411764705882353,
0,
0,
0,
0,
0.02857142857142857,
0,
0.010638297872340425,
0.02857142857142857,
0.02197802197802198,
0.02702702702702703,
0.04854368932038835,
0,
0.04040404040404041,
0,
0.056074766355140186,
0,
0.031578947368421054,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0.041237113402061855,
0.02702702702702703,
0.03225806451612903,
0,
0.02247191011235955,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0.046296296296296294,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0.0034965034965034965,
0.046875,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.14285714285714285,
0,
0,
0,
0,
0,
0,
0.05714285714285714,
0,
0,
0,
0.027777777777777776,
0,
0.05714285714285714,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0.020833333333333332,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0.02040816326530612,
0.017241379310344827,
0,
0.2,
0.05555555555555555,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.010638297872340425,
0.0035971223021582736,
0,
0.029411764705882353,
0,
0.028169014084507043,
0.022988505747126436,
0.014084507042253521,
0,
0,
0.02,
0.0625,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0.04054054054054054,
0,
0,
0,
0,
0.011764705882352941,
0,
0.010309278350515464,
0,
0,
0.012048192771084338,
0,
0.012345679012345678,
0,
0,
0.011764705882352941,
0,
0.010638297872340425,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0.05,
0.04878048780487805,
0.00684931506849315,
0.07096774193548387,
0,
0,
0,
0,
0.003703703703703704,
0,
0,
0,
0.003703703703703704,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05454545454545454,
0.007575757575757576,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05952380952380952,
0.07246376811594203,
0.013422818791946308,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05952380952380952,
0.046511627906976744,
0.07246376811594203,
0.013422818791946308,
0.011904761904761904,
0,
0,
0,
0.005681818181818182,
0.012048192771084338,
0,
0,
0.005681818181818182,
0.012048192771084338,
0,
0,
0,
0.038461538461538464,
0,
0.029411764705882353,
0.024096385542168676,
0.017857142857142856,
0.017857142857142856,
0.017857142857142856,
0.013157894736842105,
0.012987012987012988,
0.013333333333333334,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0.25,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0.018518518518518517,
0.04950495049504951,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0.011111111111111112,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.0625,
0,
0,
0,
0,
0.09090909090909091,
0.04,
0.5,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0.03278688524590164,
0,
0,
0.12121212121212122,
0,
0.023255813953488372,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0.03571428571428571,
0,
0,
0,
0,
0.02702702702702703,
0.041666666666666664,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1206896551724138,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0,
0.125,
0,
0,
0.058823529411764705,
0,
0,
0.12727272727272726,
0,
0.08571428571428572,
0,
0.06896551724137931,
0,
0.11475409836065574
] | 633 | 0.01261 | false |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Taifxx
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########## INFO:
### Import modules ...
from base import *
### Convert string True result ...
_sbool = lambda sval : True if sval == 'true' else False
### Addon class ...
class CAddon:
### Translate tag to languare string ...
tlraw = lambda self, tag : e(self.localize(tag))
### Addon info ...
@property
def addon (self) : return xbmcaddon.Addon(id=TAG_PAR_SCRIPT_ID)
@property
def profile (self) : return de(xbmc.translatePath(self.addon.getAddonInfo('profile')))
@property
def localize (self) : return self.addon.getLocalizedString
@property
def name (self) : return self.addon.getAddonInfo('name')
@property
def id (self) : return self.addon.getAddonInfo('id')
@property
def author (self) : return self.addon.getAddonInfo('author')
@property
def version (self) : return self.addon.getAddonInfo('version')
@property
def path (self) : return de(self.addon.getAddonInfo('path'))
@property
def icon (self) : return self.addon.getAddonInfo('icon')
### Settings ...
@property
def COLORIZE (self) : return _sbool(self.addon.getSetting('colorize'))
@property
def UPDAFTER (self) : return _sbool(self.addon.getSetting('updafter'))
@property
def ADDUPD (self) : return _sbool(self.addon.getSetting('addupd'))
@property
def BGUPD (self) : return _sbool(self.addon.getSetting('bgupd'))
@property
def LNKTIMEOUT (self) : return int(self.addon.getSetting('lnktimeout'))
@property
def MNUITMNUM (self) : return int(self.addon.getSetting('mnuitmnum'))
@property
def SETPAGE (self) : return int(self.addon.getSetting('setpage'))
@property
def CALLURL (self) : return _sbool(self.addon.getSetting('callurl'))
@property
def PLAYBCONT (self) : return _sbool(self.addon.getSetting('playbcont'))
@property
def POSUPD (self) : return int(self.addon.getSetting('posupd'))
@property
def POSSLEEP (self) : return int(self.addon.getSetting('possleep'))
@property
def WCHF (self) : return _sbool(self.addon.getSetting('wchf'))
@property
def WPERC (self) : return int(self.addon.getSetting('wperc'))
@property
def AUTORES (self) : return _sbool(self.addon.getSetting('autores'))
@property
def RESDLG (self) : return _sbool(self.addon.getSetting('resdlg'))
@property
def DETVIDEXT (self) : return _sbool(self.addon.getSetting('detvidext'))
@property
def WAITBSEEK (self) : return int(self.addon.getSetting('waitbseek'))
@property
def EODGENM (self) : return self.addon.getSetting('eodgenm')
@property
def movFolder (self) : return self.addon.getSetting('fldrmov')
@property
def tvsFolder (self) : return self.addon.getSetting('fldrtvs')
@property
def SILENTUPD (self) : return _sbool(self.addon.getSetting('silentupd'))
@property
def AUTOUPDSRC (self) : return _sbool(self.addon.getSetting('autoupdsrc'))
@property
def AUTOUPDALL (self) : return _sbool(self.addon.getSetting('autoupdall'))
@property
def NOREPAUTO (self) : return _sbool(self.addon.getSetting('norepauto'))
@property
def NOREPRAWAUTO (self) : return _sbool(self.addon.getSetting('noreprawauto'))
@property
def HIDEAUPD (self) : return _sbool(self.addon.getSetting('hideaupd'))
@property
def ALLOWSHADOW (self) : return _sbool(self.addon.getSetting('allowshadow'))
@property
def AUTIME (self) : return int(self.addon.getSetting('autime'))
@property
def STARTUPSHAD (self) : return _sbool(self.addon.getSetting('startupshad'))
@property
def COLOR (self) : return self.addon.getSetting('mnucolor')
@property
def libpath (self) : _libpath = self.addon.getSetting('libpath'); return _libpath if _libpath != TAG_PAR_SETDEF else self.profile
@property
def BKUPPATH (self) : _bkuppath = self.addon.getSetting('bkuppath'); return _bkuppath if _bkuppath != TAG_PAR_SETDEF else self.profile
@property
def BKUPREMOLD (self) : return _sbool(self.addon.getSetting('bkupremold'))
@property
def BKUPNUM (self) : return int(self.addon.getSetting('bkupnum'))
@property
def BKUPSTARTUP (self) : return _sbool(self.addon.getSetting('bkupstartup'))
@property
def BKUPAUTO (self) : return _sbool(self.addon.getSetting('bkupauto'))
@property
def BKUPTIME (self) : return int(self.addon.getSetting('bkuptime'))
@property
def HIDEBCKPRGS (self) : return _sbool(self.addon.getSetting('hidebckprgs'))
@property
def USESKINS (self) : return _sbool(self.addon.getSetting('useskins'))
@property
def DIMBCKG (self) : return _sbool(self.addon.getSetting('dimbckg'))
@property
def SKIN (self) : return self.addon.getSetting('skin')
#@property
#def NEWPLAYS (self) : return _sbool(self.addon.getSetting('newplays'))
@property
def PCORE (self) : return self.addon.getSetting('pcore')
@property
def PCOREVAL (self) : return int(self.addon.getSetting('pcoreval'))
@property
def DEDLPTIME (self) : return int(self.addon.getSetting('dedlptime'))
@property
def PBMETHOD (self) : return self.addon.getSetting('pbmethod')
@property
def USENOWPLAY (self) : return _sbool(self.addon.getSetting('usenowplay'))
@property
def NOWPLAYTIME (self) : return int(self.addon.getSetting('nowplaytime'))
@property
def FASTBCKP (self) : return _sbool(self.addon.getSetting('fastbckp'))
@property
def SAVEONREST (self) : return _sbool(self.addon.getSetting('saveonrest'))
@property
def ACSSTKN (self) : return _sbool(self.addon.getSetting('acsstkn'))
@property
def USESYNC (self) : return _sbool(self.addon.getSetting('usesync'))
@property
def STRTUPSYNC (self) : return _sbool(self.addon.getSetting('strtupsync'))
@property
def AUTOSYNC (self) : return _sbool(self.addon.getSetting('autosync'))
@property
def AUTOSYNCTIME (self) : return int(self.addon.getSetting('autosynctime'))
@property
def SYLIMITTIME (self) : return int(self.addon.getSetting('sylimittime'))
@property
def SYLIMITCOUNT (self) : return int(self.addon.getSetting('sylimitcount'))
@property
def USEWS (self) : return _sbool(self.addon.getSetting('usews'))
@property
def STRTUPWS (self) : return _sbool(self.addon.getSetting('strtupws'))
@property
def AUTOWS (self) : return _sbool(self.addon.getSetting('autows'))
@property
def AUTOWSTIME (self) : return int(self.addon.getSetting('autowstime'))
@property
def JSIVAL (self) : return int(self.addon.getSetting('jsival'))/100.0
@property
def USELFM (self) : return _sbool(self.addon.getSetting('uselfm'))
@property
def USESRV (self) : return _sbool(self.addon.getSetting('usesrv'))
@property
def SRVSTATUSV (self) : return _sbool(self.addon.getSetting('srvstatusv'))
@property
def SRVSTATUS (self) : return self.addon.getSetting('srvstatus')
def getlibpath(self): _libpath = self.addon.getSetting('libpath'); return _libpath if _libpath != TAG_PAR_SETDEF else self.profile
def getbckpath(self): _bkuppath = self.addon.getSetting('bkuppath'); return _bkuppath if _bkuppath != TAG_PAR_SETDEF else self.profile
def getlib(self) : return self.addon.getSetting('fldrmov'), self.addon.getSetting('fldrtvs')
def getshad(self) : return _sbool(self.addon.getSetting('allowshadow'))
def getsilent(self) : return _sbool(self.addon.getSetting('silentupd'))
def getautime(self) : return int(self.addon.getSetting('autime'))
def getcolor(self) : return self.addon.getSetting('mnucolor')
def getabck(self) : return _sbool(self.addon.getSetting('bkupauto'))
def getbcktime(self) : return int(self.addon.getSetting('bkuptime'))
def getsynctime(self): return int(self.addon.getSetting('autosynctime'))
def getautosync(self): return _sbool(self.addon.getSetting('autosync'))
def getsyncatkn(self): return _sbool(self.addon.getSetting('acsstkn'))
def getwstime(self) : return int(self.addon.getSetting('autowstime'))
def getautows(self) : return _sbool(self.addon.getSetting('autows'))
| [
"# -*- coding: utf-8 -*-\n",
"#\n",
"# Copyright (C) 2016 Taifxx\n",
"#\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"#\n",
"########## INFO:\n",
"\n",
"### Import modules ...\n",
"from base import *\n",
"\n",
"\n",
"### Convert string True result ...\n",
"_sbool = lambda sval : True if sval == 'true' else False\n",
"\n",
"\n",
"### Addon class ...\n",
"class CAddon:\n",
"\n",
" ### Translate tag to languare string ...\n",
" tlraw = lambda self, tag : e(self.localize(tag)) \n",
"\n",
" ### Addon info ...\n",
" @property\n",
" def addon (self) : return xbmcaddon.Addon(id=TAG_PAR_SCRIPT_ID)\n",
" @property\n",
" def profile (self) : return de(xbmc.translatePath(self.addon.getAddonInfo('profile')))\n",
" @property\n",
" def localize (self) : return self.addon.getLocalizedString\n",
" @property\n",
" def name (self) : return self.addon.getAddonInfo('name')\n",
" @property\n",
" def id (self) : return self.addon.getAddonInfo('id')\n",
" @property\n",
" def author (self) : return self.addon.getAddonInfo('author')\n",
" @property\n",
" def version (self) : return self.addon.getAddonInfo('version')\n",
" @property\n",
" def path (self) : return de(self.addon.getAddonInfo('path'))\n",
" @property\n",
" def icon (self) : return self.addon.getAddonInfo('icon')\n",
" \n",
" \n",
" ### Settings ...\n",
" @property\n",
" def COLORIZE (self) : return _sbool(self.addon.getSetting('colorize'))\n",
" @property\n",
" def UPDAFTER (self) : return _sbool(self.addon.getSetting('updafter'))\n",
" @property\n",
" def ADDUPD (self) : return _sbool(self.addon.getSetting('addupd'))\n",
" @property\n",
" def BGUPD (self) : return _sbool(self.addon.getSetting('bgupd'))\n",
" @property\n",
" def LNKTIMEOUT (self) : return int(self.addon.getSetting('lnktimeout'))\n",
" @property\n",
" def MNUITMNUM (self) : return int(self.addon.getSetting('mnuitmnum'))\n",
" @property\n",
" def SETPAGE (self) : return int(self.addon.getSetting('setpage'))\n",
" @property\n",
" def CALLURL (self) : return _sbool(self.addon.getSetting('callurl'))\n",
" @property\n",
" def PLAYBCONT (self) : return _sbool(self.addon.getSetting('playbcont')) \n",
" @property\n",
" def POSUPD (self) : return int(self.addon.getSetting('posupd')) \n",
" @property\n",
" def POSSLEEP (self) : return int(self.addon.getSetting('possleep')) \n",
" @property\n",
" def WCHF (self) : return _sbool(self.addon.getSetting('wchf')) \n",
" @property\n",
" def WPERC (self) : return int(self.addon.getSetting('wperc')) \n",
" @property\n",
" def AUTORES (self) : return _sbool(self.addon.getSetting('autores')) \n",
" @property\n",
" def RESDLG (self) : return _sbool(self.addon.getSetting('resdlg')) \n",
" @property\n",
" def DETVIDEXT (self) : return _sbool(self.addon.getSetting('detvidext'))\n",
" @property\n",
" def WAITBSEEK (self) : return int(self.addon.getSetting('waitbseek')) \n",
" @property\n",
" def EODGENM (self) : return self.addon.getSetting('eodgenm') \n",
" @property\n",
" def movFolder (self) : return self.addon.getSetting('fldrmov')\n",
" @property\n",
" def tvsFolder (self) : return self.addon.getSetting('fldrtvs')\n",
" @property\n",
" def SILENTUPD (self) : return _sbool(self.addon.getSetting('silentupd')) \n",
" @property\n",
" def AUTOUPDSRC (self) : return _sbool(self.addon.getSetting('autoupdsrc'))\n",
" @property\n",
" def AUTOUPDALL (self) : return _sbool(self.addon.getSetting('autoupdall'))\n",
" @property\n",
" def NOREPAUTO (self) : return _sbool(self.addon.getSetting('norepauto'))\n",
" @property\n",
" def NOREPRAWAUTO (self) : return _sbool(self.addon.getSetting('noreprawauto'))\n",
" @property\n",
" def HIDEAUPD (self) : return _sbool(self.addon.getSetting('hideaupd'))\n",
" @property\n",
" def ALLOWSHADOW (self) : return _sbool(self.addon.getSetting('allowshadow'))\n",
" @property\n",
" def AUTIME (self) : return int(self.addon.getSetting('autime'))\n",
" @property\n",
" def STARTUPSHAD (self) : return _sbool(self.addon.getSetting('startupshad'))\n",
" @property\n",
" def COLOR (self) : return self.addon.getSetting('mnucolor')\n",
" @property\n",
" def libpath (self) : _libpath = self.addon.getSetting('libpath'); return _libpath if _libpath != TAG_PAR_SETDEF else self.profile\n",
" @property\n",
" def BKUPPATH (self) : _bkuppath = self.addon.getSetting('bkuppath'); return _bkuppath if _bkuppath != TAG_PAR_SETDEF else self.profile\n",
" @property\n",
" def BKUPREMOLD (self) : return _sbool(self.addon.getSetting('bkupremold')) \n",
" @property\n",
" def BKUPNUM (self) : return int(self.addon.getSetting('bkupnum')) \n",
" @property\n",
" def BKUPSTARTUP (self) : return _sbool(self.addon.getSetting('bkupstartup'))\n",
" @property\n",
" def BKUPAUTO (self) : return _sbool(self.addon.getSetting('bkupauto')) \n",
" @property\n",
" def BKUPTIME (self) : return int(self.addon.getSetting('bkuptime')) \n",
" @property\n",
" def HIDEBCKPRGS (self) : return _sbool(self.addon.getSetting('hidebckprgs'))\n",
" @property\n",
" def USESKINS (self) : return _sbool(self.addon.getSetting('useskins'))\n",
" @property\n",
" def DIMBCKG (self) : return _sbool(self.addon.getSetting('dimbckg'))\n",
" @property\n",
" def SKIN (self) : return self.addon.getSetting('skin')\n",
" #@property\n",
" #def NEWPLAYS (self) : return _sbool(self.addon.getSetting('newplays'))\n",
" @property\n",
" def PCORE (self) : return self.addon.getSetting('pcore')\n",
" @property\n",
" def PCOREVAL (self) : return int(self.addon.getSetting('pcoreval'))\n",
" @property\n",
" def DEDLPTIME (self) : return int(self.addon.getSetting('dedlptime'))\n",
" @property\n",
" def PBMETHOD (self) : return self.addon.getSetting('pbmethod')\n",
" @property\n",
" def USENOWPLAY (self) : return _sbool(self.addon.getSetting('usenowplay'))\n",
" @property\n",
" def NOWPLAYTIME (self) : return int(self.addon.getSetting('nowplaytime'))\n",
" @property\n",
" def FASTBCKP (self) : return _sbool(self.addon.getSetting('fastbckp'))\n",
" @property\n",
" def SAVEONREST (self) : return _sbool(self.addon.getSetting('saveonrest'))\n",
" @property\n",
" def ACSSTKN (self) : return _sbool(self.addon.getSetting('acsstkn'))\n",
" @property\n",
" def USESYNC (self) : return _sbool(self.addon.getSetting('usesync'))\n",
" @property\n",
" def STRTUPSYNC (self) : return _sbool(self.addon.getSetting('strtupsync'))\n",
" @property\n",
" def AUTOSYNC (self) : return _sbool(self.addon.getSetting('autosync'))\n",
" @property\n",
" def AUTOSYNCTIME (self) : return int(self.addon.getSetting('autosynctime'))\n",
" @property\n",
" def SYLIMITTIME (self) : return int(self.addon.getSetting('sylimittime'))\n",
" @property\n",
" def SYLIMITCOUNT (self) : return int(self.addon.getSetting('sylimitcount'))\n",
" @property\n",
" def USEWS (self) : return _sbool(self.addon.getSetting('usews'))\n",
" @property \n",
" def STRTUPWS (self) : return _sbool(self.addon.getSetting('strtupws'))\n",
" @property \n",
" def AUTOWS (self) : return _sbool(self.addon.getSetting('autows'))\n",
" @property \n",
" def AUTOWSTIME (self) : return int(self.addon.getSetting('autowstime'))\n",
" @property \n",
" def JSIVAL (self) : return int(self.addon.getSetting('jsival'))/100.0\n",
" @property \n",
" def USELFM (self) : return _sbool(self.addon.getSetting('uselfm'))\n",
" @property \n",
" def USESRV (self) : return _sbool(self.addon.getSetting('usesrv'))\n",
" @property \n",
" def SRVSTATUSV (self) : return _sbool(self.addon.getSetting('srvstatusv'))\n",
" @property \n",
" def SRVSTATUS (self) : return self.addon.getSetting('srvstatus')\n",
" \n",
" \n",
" def getlibpath(self): _libpath = self.addon.getSetting('libpath'); return _libpath if _libpath != TAG_PAR_SETDEF else self.profile\n",
" def getbckpath(self): _bkuppath = self.addon.getSetting('bkuppath'); return _bkuppath if _bkuppath != TAG_PAR_SETDEF else self.profile\n",
" \n",
" def getlib(self) : return self.addon.getSetting('fldrmov'), self.addon.getSetting('fldrtvs')\n",
" def getshad(self) : return _sbool(self.addon.getSetting('allowshadow'))\n",
" def getsilent(self) : return _sbool(self.addon.getSetting('silentupd')) \n",
" def getautime(self) : return int(self.addon.getSetting('autime'))\n",
" def getcolor(self) : return self.addon.getSetting('mnucolor')\n",
" def getabck(self) : return _sbool(self.addon.getSetting('bkupauto'))\n",
" def getbcktime(self) : return int(self.addon.getSetting('bkuptime'))\n",
" def getsynctime(self): return int(self.addon.getSetting('autosynctime'))\n",
" def getautosync(self): return _sbool(self.addon.getSetting('autosync'))\n",
" def getsyncatkn(self): return _sbool(self.addon.getSetting('acsstkn'))\n",
" def getwstime(self) : return int(self.addon.getSetting('autowstime'))\n",
" def getautows(self) : return _sbool(self.addon.getSetting('autows')) \n",
" \n",
" "
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0.043478260869565216,
0,
0,
0,
0.02857142857142857,
0.03508771929824561,
0,
0,
0.05,
0,
0,
0.022222222222222223,
0.05357142857142857,
0,
0.043478260869565216,
0,
0.025974025974025976,
0,
0.030612244897959183,
0,
0.028985507246376812,
0,
0.028169014084507043,
0,
0.028985507246376812,
0,
0.0273972602739726,
0,
0.02702702702702703,
0,
0.02666666666666667,
0,
0.028169014084507043,
0.2,
0.2,
0.09523809523809523,
0.07142857142857142,
0.037037037037037035,
0,
0.037037037037037035,
0,
0.02531645569620253,
0,
0.02564102564102564,
0,
0.025,
0,
0.02531645569620253,
0,
0.025974025974025976,
0,
0.025,
0,
0.04395604395604396,
0,
0.0379746835443038,
0,
0.0379746835443038,
0,
0.0375,
0,
0.0379746835443038,
0,
0.036585365853658534,
0,
0.0375,
0,
0.036585365853658534,
0,
0.0375,
0,
0.04054054054054054,
0,
0.027777777777777776,
0,
0.027777777777777776,
0,
0.04819277108433735,
0,
0.03614457831325301,
0,
0.03614457831325301,
0,
0.036585365853658534,
0,
0.03529411764705882,
0,
0.037037037037037035,
0,
0.03571428571428571,
0,
0.02631578947368421,
0,
0.03571428571428571,
0,
0.0273972602739726,
0,
0.028368794326241134,
0,
0.027586206896551724,
0,
0.047619047619047616,
0,
0.037037037037037035,
0,
0.03571428571428571,
0,
0.047619047619047616,
0,
0.0375,
0,
0.03571428571428571,
0,
0.037037037037037035,
0,
0.025,
0,
0.028985507246376812,
0.06666666666666667,
0.024390243902439025,
0,
0.02857142857142857,
0,
0.02564102564102564,
0,
0.02531645569620253,
0,
0.0273972602739726,
0,
0.03614457831325301,
0,
0.037037037037037035,
0,
0.037037037037037035,
0,
0.03614457831325301,
0,
0.025,
0,
0.025,
0,
0.03614457831325301,
0,
0.037037037037037035,
0,
0.036585365853658534,
0,
0.037037037037037035,
0,
0.036585365853658534,
0,
0.02564102564102564,
0.045454545454545456,
0.037037037037037035,
0.045454545454545456,
0.02531645569620253,
0.045454545454545456,
0.025,
0.045454545454545456,
0.036585365853658534,
0.045454545454545456,
0.02531645569620253,
0.045454545454545456,
0.02531645569620253,
0.045454545454545456,
0.03614457831325301,
0.045454545454545456,
0.02702702702702703,
0.2,
0.2,
0.050359712230215826,
0.014388489208633094,
0.2,
0.019801980198019802,
0.012658227848101266,
0.02564102564102564,
0.014084507042253521,
0.014705882352941176,
0.013157894736842105,
0.0136986301369863,
0,
0,
0,
0.013333333333333334,
0.02631578947368421,
0.2,
0.5
] | 206 | 0.025658 | false |