column       type        values
org_text     string      lengths 830 to 329k
texts        sequence
scores       sequence
num_lines    int64       1 to 8.05k
avg_score    float64     0 to 0.27
check        bool        1 class
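Each row pairs a raw source file (org_text) with its line-split form (texts), a per-line score (scores), and three per-row summaries (num_lines, avg_score, check). As a minimal sketch, assuming the rows are available as a JSON-lines export carrying these field names (the rows.jsonl path is a placeholder), the summary columns in the sample rows below appear to relate to texts and scores as follows; the rule behind check is not recoverable from this dump, so it is only passed through:

```python
import json

def summarize(row):
    """Recompute the summary columns for one dumped row.

    num_lines and avg_score match the relations seen in the sample
    rows (length of the line split, mean of the per-line scores);
    check is copied as-is because its rule is not derivable here.
    """
    scores = row["scores"]
    return {
        "num_lines": len(row["texts"]),
        "avg_score": sum(scores) / len(scores) if scores else 0.0,
        "check": row["check"],
    }

if __name__ == "__main__":
    with open("rows.jsonl") as fh:  # placeholder path for the export
        for line in fh:
            print(summarize(json.loads(line)))
```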
from django.http import HttpResponse from django.core.servers.basehttp import FileWrapper import csv, codecs, cStringIO from feedback2013.models import Subject, Student, Score, Feedback class UnicodeWriter: """ A CSV writer which will write rows to CSV file "f", which is encoded in the given encoding. """ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): # Redirect output to a queue self.queue = cStringIO.StringIO() self.writer = csv.writer(self.queue, dialect=dialect, **kwds) self.stream = f self.encoder = codecs.getincrementalencoder(encoding)() def writerow(self, row): self.writer.writerow([s.encode("utf-8") for s in row]) # Fetch UTF-8 output from the queue ... data = self.queue.getvalue() data = data.decode("utf-8") # ... and reencode it into the target encoding data = self.encoder.encode(data) # write to the target stream self.stream.write(data) # empty queue self.queue.truncate(0) def writerows(self, rows): for row in rows: self.writerow(row) def write(self, data): self.stream.write(data) def simple_export_fb2013(request): response = HttpResponse(mimetype='text/csv') response['Content-Disposition'] = 'attachment; filename=feedback2013.csv' csvwriter = UnicodeWriter(response) csvwriter.write(codecs.BOM_UTF8) csvwriter.writerow([u'中文姓名', u'英文姓名', u'就读高中', u'Email', u'来澳前(中国)所读学校', u'来澳前(中国)所学最高年级', u'来澳年份', u'最终ATAR成绩', u'录取大学(校区)与专业', u'意见建议', u'反馈发布时间', u'Unit3/4科目', u'原始分', u'加减分后', u'是否为2013年所学,或为2012提前已考?', u'备注',]) for item in Feedback.objects.all(): csvwriter.writerow([unicode(item.student.chinese_name), unicode(item.student.english_name), unicode(item.student.high_school), unicode(item.student.email), unicode(item.student.school_in_china), unicode(item.student.education_in_china), unicode(item.student.year_study_in_au), unicode(item.student.final_atar_score), unicode(item.student.uni_and_major), unicode(item.comment), unicode(item.created_date)]) for item2 in Score.objects.filter(student=item.student): csvwriter.writerow([unicode(), unicode(), unicode(), unicode(), unicode(), unicode(), unicode(), unicode(), unicode(), unicode(), unicode(), unicode(item2.subject.name), unicode(item2.study_score), unicode(item2.scaled_score), unicode('Yes' if item2.for_2012_2011 else 'No'), unicode(item2.remark)]) return response
[ "from django.http import HttpResponse\n", "from django.core.servers.basehttp import FileWrapper\n", "import csv, codecs, cStringIO\n", "\n", "from feedback2013.models import Subject, Student, Score, Feedback\n", "\n", "\n", "class UnicodeWriter:\n", "\t\"\"\"\n", "\tA CSV writer which will write rows to CSV file \"f\",\n", "\twhich is encoded in the given encoding.\n", "\t\"\"\"\n", "\n", "\tdef __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n", "\t\t# Redirect output to a queue\n", "\t\tself.queue = cStringIO.StringIO()\n", "\t\tself.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n", "\t\tself.stream = f\n", "\t\tself.encoder = codecs.getincrementalencoder(encoding)()\n", "\n", "\tdef writerow(self, row):\n", "\t\tself.writer.writerow([s.encode(\"utf-8\") for s in row])\n", "\t\t# Fetch UTF-8 output from the queue ...\n", "\t\tdata = self.queue.getvalue()\n", "\t\tdata = data.decode(\"utf-8\")\n", "\t\t# ... and reencode it into the target encoding\n", "\t\tdata = self.encoder.encode(data)\n", "\t\t# write to the target stream\n", "\t\tself.stream.write(data)\n", "\t\t# empty queue\n", "\t\tself.queue.truncate(0)\n", "\n", "\tdef writerows(self, rows):\n", "\t\tfor row in rows:\n", "\t\t\tself.writerow(row)\n", "\t\n", "\tdef write(self, data):\n", "\t\tself.stream.write(data)\n", "\n", "def simple_export_fb2013(request):\n", "\tresponse = HttpResponse(mimetype='text/csv')\n", "\tresponse['Content-Disposition'] = 'attachment; filename=feedback2013.csv'\n", "\tcsvwriter = UnicodeWriter(response)\n", "\tcsvwriter.write(codecs.BOM_UTF8)\n", "\tcsvwriter.writerow([u'中文姓名', \n", "\t\t\t\t\t\t\t\tu'英文姓名', \n", "\t\t\t\t\t\t\t\tu'就读高中', \n", "\t\t\t\t\t\t\t\tu'Email', \n", "\t\t\t\t\t\t\t\tu'来澳前(中国)所读学校', \n", "\t\t\t\t\t\t\t\tu'来澳前(中国)所学最高年级', \n", "\t\t\t\t\t\t\t\tu'来澳年份', \n", "\t\t\t\t\t\t\t\tu'最终ATAR成绩', \n", "\t\t\t\t\t\t\t\tu'录取大学(校区)与专业', \n", "\t\t\t\t\t\t\t\tu'意见建议',\n", "\t\t\t\t\t\t\t\tu'反馈发布时间',\n", "\t\t\t\t\t\t\t\tu'Unit3/4科目', \n", "\t\t\t\t\t\t\t\tu'原始分', \n", "\t\t\t\t\t\t\t\tu'加减分后', \n", "\t\t\t\t\t\t\t\tu'是否为2013年所学,或为2012提前已考?', \n", "\t\t\t\t\t\t\t\tu'备注',])\n", "\tfor item in Feedback.objects.all():\n", "\t\tcsvwriter.writerow([unicode(item.student.chinese_name), \n", "\t\t\t\t\t\t\t\t\tunicode(item.student.english_name),\n", "\t\t\t\t\t\t\t\t\tunicode(item.student.high_school),\n", "\t\t\t\t\t\t\t\t\tunicode(item.student.email),\n", "\t\t\t\t\t\t\t\t\tunicode(item.student.school_in_china),\n", "\t\t\t\t\t\t\t\t\tunicode(item.student.education_in_china),\n", "\t\t\t\t\t\t\t\t\tunicode(item.student.year_study_in_au),\n", "\t\t\t\t\t\t\t\t\tunicode(item.student.final_atar_score),\n", "\t\t\t\t\t\t\t\t\tunicode(item.student.uni_and_major),\n", "\t\t\t\t\t\t\t\t\tunicode(item.comment),\n", "\t\t\t\t\t\t\t\t\tunicode(item.created_date)])\n", "\t\tfor item2 in Score.objects.filter(student=item.student):\n", "\t\t\tcsvwriter.writerow([unicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(),\n", "\t\t\t\t\t\t\t\t\t\tunicode(item2.subject.name),\n", "\t\t\t\t\t\t\t\t\t\tunicode(item2.study_score),\n", "\t\t\t\t\t\t\t\t\t\tunicode(item2.scaled_score),\n", "\t\t\t\t\t\t\t\t\t\tunicode('Yes' if item2.for_2012_2011 else 'No'),\n", 
"\t\t\t\t\t\t\t\t\t\tunicode(item2.remark)])\n", "\treturn response\t\t" ]
[ 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0.2, 0.018867924528301886, 0.024390243902439025, 0.2, 0, 0.014492753623188406, 0.03225806451612903, 0.027777777777777776, 0.015625, 0.05555555555555555, 0.017241379310344827, 0, 0.038461538461538464, 0.017543859649122806, 0.023809523809523808, 0.03225806451612903, 0.03333333333333333, 0.02040816326530612, 0.02857142857142857, 0.03225806451612903, 0.038461538461538464, 0.0625, 0.04, 0, 0.03571428571428571, 0.05263157894736842, 0.045454545454545456, 1, 0.041666666666666664, 0.038461538461538464, 0, 0.02857142857142857, 0.021739130434782608, 0.013333333333333334, 0.02702702702702703, 0.029411764705882353, 0.06451612903225806, 0.16666666666666666, 0.16666666666666666, 0.15789473684210525, 0.12, 0.1111111111111111, 0.16666666666666666, 0.13636363636363635, 0.12, 0.11764705882352941, 0.10526315789473684, 0.13043478260869565, 0.17647058823529413, 0.16666666666666666, 0.08333333333333333, 0.17647058823529413, 0.02702702702702703, 0.03389830508474576, 0.044444444444444446, 0.022727272727272728, 0.02631578947368421, 0.020833333333333332, 0.0196078431372549, 0.02040816326530612, 0.02040816326530612, 0.021739130434782608, 0.03125, 0.02631578947368421, 0.01694915254237288, 0.029411764705882353, 0.09523809523809523, 0.047619047619047616, 0.047619047619047616, 0.047619047619047616, 0.047619047619047616, 0.047619047619047616, 0.047619047619047616, 0.047619047619047616, 0.047619047619047616, 0.047619047619047616, 0.02564102564102564, 0.02631578947368421, 0.02564102564102564, 0.01694915254237288, 0.029411764705882353, 0.16666666666666666 ]
90
0.063146
false
#FLM: Component Dump # Version 2.0 # # Will look through a font and write out a text file that lists any glyph with a # component(s), one glyph per line of the file. On each line, the script writes # the glyph name, the width of the glyph, and then each component name and x, y # offset for that compnent. These values are all semicolon seperated. # # Examples: # Agrave;587.0;A;0;0;grave;70;0 # Aringacute;587.0;A;0;0;ring;155;139;acute;155;312 # # This script was originally written in 2006 for John Hudson at Tiro Typeworks # # Version 2.0: Tested to work in RoboFont, license changed from GPL to MIT, and # put on Github. # # --------------------- # The MIT License (MIT) # # Copyright (c) 2015 Typefounding # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. #Imports from robofab.world import CurrentFont from robofab.interface.all.dialogs import PutFile, Message, ProgressBar #Script font = CurrentFont() filePath = PutFile() file = open(filePath, 'w') tickCount = len(font) bar = ProgressBar('Writing dump file', tickCount) tick = 0 outList = [] for glyph in font: bar.tick(tick) tick = tick+1 if len(glyph.components) != 0: output = glyph.name + ';' + str(glyph.width) componentNumber = 0 while componentNumber < len(glyph.components): x, y = glyph.components[componentNumber].offset output = output + ';' + glyph.components[componentNumber].baseGlyph + ';' + str(x) + ';' + str(y) componentNumber = componentNumber + 1 output = output + '\n' outList.append((glyph.index, output)) outDictionary = dict(outList) outKeys = outDictionary.keys() outKeys.sort() keyCount = 0 while keyCount < len(outKeys): file.write(outDictionary[outKeys[keyCount]]) keyCount = keyCount + 1 bar.close() file.close() Message('Dump file written')
[ "#FLM: Component Dump\n", "# Version 2.0\n", "#\n", "# Will look through a font and write out a text file that lists any glyph with a\n", "# component(s), one glyph per line of the file. On each line, the script writes\n", "# the glyph name, the width of the glyph, and then each component name and x, y\n", "# offset for that compnent. These values are all semicolon seperated.\n", "#\n", "# Examples:\n", "# Agrave;587.0;A;0;0;grave;70;0\n", "# Aringacute;587.0;A;0;0;ring;155;139;acute;155;312\n", "#\n", "# This script was originally written in 2006 for John Hudson at Tiro Typeworks\n", "#\n", "# Version 2.0: Tested to work in RoboFont, license changed from GPL to MIT, and\n", "# put on Github.\n", "#\n", "# ---------------------\n", "# The MIT License (MIT)\n", "# \n", "# Copyright (c) 2015 Typefounding\n", "# \n", "# Permission is hereby granted, free of charge, to any person obtaining a copy\n", "# of this software and associated documentation files (the \"Software\"), to deal\n", "# in the Software without restriction, including without limitation the rights\n", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n", "# copies of the Software, and to permit persons to whom the Software is\n", "# furnished to do so, subject to the following conditions:\n", "# \n", "# The above copyright notice and this permission notice shall be included in all\n", "# copies or substantial portions of the Software.\n", "# \n", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n", "# SOFTWARE.\n", "\n", "\n", "#Imports\n", "from robofab.world import CurrentFont\n", "from robofab.interface.all.dialogs import PutFile, Message, ProgressBar\n", "\n", "#Script\n", "font = CurrentFont()\n", "filePath = PutFile()\n", "file = open(filePath, 'w')\n", "tickCount = len(font)\n", "bar = ProgressBar('Writing dump file', tickCount)\n", "tick = 0\n", "outList = []\n", "for glyph in font:\n", "\tbar.tick(tick)\n", "\ttick = tick+1\n", "\tif len(glyph.components) != 0:\n", "\t\toutput = glyph.name + ';' + str(glyph.width)\n", "\t\tcomponentNumber = 0\n", "\t\twhile componentNumber < len(glyph.components):\n", "\t\t\tx, y = glyph.components[componentNumber].offset\n", "\t\t\toutput = output + ';' + glyph.components[componentNumber].baseGlyph + ';' + str(x) + ';' + str(y)\n", "\t\t\tcomponentNumber = componentNumber + 1\n", "\t\toutput = output + '\\n'\n", "\t\toutList.append((glyph.index, output))\n", "outDictionary = dict(outList)\n", "outKeys = outDictionary.keys()\n", "outKeys.sort()\n", "keyCount = 0\n", "while keyCount < len(outKeys):\n", "\tfile.write(outDictionary[outKeys[keyCount]])\n", "\tkeyCount = keyCount + 1\n", "\n", "bar.close()\n", "file.close()\n", "Message('Dump file written')\n" ]
[ 0.047619047619047616, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.3333333333333333, 0, 0.3333333333333333, 0, 0, 0, 0, 0, 0, 0.3333333333333333, 0.012345679012345678, 0, 0.3333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0.125, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0.06666666666666667, 0.03125, 0.02127659574468085, 0.045454545454545456, 0.02040816326530612, 0.0196078431372549, 0.019801980198019802, 0.024390243902439025, 0.04, 0.025, 0, 0, 0, 0, 0, 0.021739130434782608, 0.04, 0, 0, 0, 0 ]
76
0.027366
false
# -*- coding: utf-8 -*- # ProjectEuler/src/python/problem109.py # # Darts # ===== # Published on Friday, 18th November 2005, 06:00 pm # # In the game of darts a player throws three darts at a target board which is # split into twenty equal sized sections numbered one to twenty. The score # of a dart is determined by the number of the region that the dart lands in. A # dart landing outside the red/green outer ring scores zero. The black and # cream regions inside this ring represent single scores. However, the # red/green outer ring and middle ring score double and treble scores # respectively. At the centre of the board are two concentric circles called # the bull region, or bulls-eye. The outer bull is worth 25 points and the # inner bull is a double, worth 50 points. There are many variations of rules # but in the most popular game the players will begin with a score 301 or 501 # and the first player to reduce their running total to zero is a winner. # However, it is normal to play a "doubles out" system, which means that the # player must land a double (including the double bulls-eye at the centre of # the board) on their final dart to win; any other dart that would reduce their # running total to one or lower means the score for that set of three darts is # "bust". When a player is able to finish on their current score it is called # a "checkout" and the highest checkout is 170: T20 T20 D25 (two treble 20s and # double bull). There are exactly eleven distinct ways to checkout on a score # of 6: D3     D1 D2   S2 D2   D2 D1   S4 D1   S1 S1 D2 S1 T1 D1 # S1 S3 D1 D1 D1 D1 D1 S2 D1 S2 S2 D1 Note that D1 D2 is considered # different to D2 D1 as they finish on different doubles. However, the # combination S1 T1 D1 is considered the same as T1 S1 D1. In addition we shall # not include misses in considering combinations; for example, D3 is the same # as 0 D3 and 0 0 D3. Incredibly there are 42336 distinct ways of checking out # in total. How many distinct ways can a player checkout with a score less than # 100? import projecteuler as pe def main(): pass if __name__ == "__main__": main()
[ "# -*- coding: utf-8 -*-\n", "# ProjectEuler/src/python/problem109.py\n", "#\n", "# Darts\n", "# =====\n", "# Published on Friday, 18th November 2005, 06:00 pm\n", "#\n", "# In the game of darts a player throws three darts at a target board which is\n", "# split into twenty equal sized sections numbered one to twenty. The score\n", "# of a dart is determined by the number of the region that the dart lands in. A\n", "# dart landing outside the red/green outer ring scores zero. The black and\n", "# cream regions inside this ring represent single scores. However, the\n", "# red/green outer ring and middle ring score double and treble scores\n", "# respectively. At the centre of the board are two concentric circles called\n", "# the bull region, or bulls-eye. The outer bull is worth 25 points and the\n", "# inner bull is a double, worth 50 points. There are many variations of rules\n", "# but in the most popular game the players will begin with a score 301 or 501\n", "# and the first player to reduce their running total to zero is a winner.\n", "# However, it is normal to play a \"doubles out\" system, which means that the\n", "# player must land a double (including the double bulls-eye at the centre of\n", "# the board) on their final dart to win; any other dart that would reduce their\n", "# running total to one or lower means the score for that set of three darts is\n", "# \"bust\". When a player is able to finish on their current score it is called\n", "# a \"checkout\" and the highest checkout is 170: T20 T20 D25 (two treble 20s and\n", "# double bull). There are exactly eleven distinct ways to checkout on a score\n", "# of 6: D3     D1 D2   S2 D2   D2 D1   S4 D1   S1 S1 D2 S1 T1 D1\n", "# S1 S3 D1 D1 D1 D1 D1 S2 D1 S2 S2 D1 Note that D1 D2 is considered\n", "# different to D2 D1 as they finish on different doubles. However, the\n", "# combination S1 T1 D1 is considered the same as T1 S1 D1. In addition we shall\n", "# not include misses in considering combinations; for example, D3 is the same\n", "# as 0 D3 and 0 0 D3. Incredibly there are 42336 distinct ways of checking out\n", "# in total. How many distinct ways can a player checkout with a score less than\n", "# 100?\n", "\n", "import projecteuler as pe\n", "\n", "def main():\n", " pass\n", "\n", "if __name__ == \"__main__\":\n", " main()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0, 0, 0.037037037037037035, 0 ]
41
0.002936
false
import cv2 from PyQt5.QtWidgets import QErrorMessage #from socket import socket #from pickle import loads from Socket import Socket class CannotReadFrame(BaseException) : pass class StreamReader : def __init__(self) : self._server = None self._capturedDevice = None pass def connect(self, addr = '127.0.0.1', port = 4242) : try: self._server = Socket() statusCode = self._server.connect_ex((addr, port)) if statusCode != 0 : raise statusCode except : self.connectLocalCamera() def connectLocalCamera(self) : self.close() qem = QErrorMessage() qem.showMessage('Не удаётся подключиться к Raspberry Pi: Будет подключена локальная камера') qem.exec() self._capturedDevice = cv2.VideoCapture(0) def releseLocalCamera(self) : self._capturedDevice.relese() self._capturedDevice = None def __del__(self) : self.close() def getFrame(self) : if self._server is not None : try: return self._getFrameFromRemoteCamera() except: self.connectLocalCamera() if self._capturedDevice is not None : try: return self._getFrameFromLocalCamera() except: raise CannotReadFrame raise CannotReadFrame def _getFrameFromRemoteCamera(self) : self._server.sendObject('get_frame') frame = self._server.recvObject() return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) def _getFrameFromLocalCamera(self) : retVal, frame = self._capturedDevice.read() if retVal == False : raise CannotReadFrame return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) def readable(self) : #заглушка return True def recvall(self, sock, size) : binary = sock.recv(size) diff = size - len(binary) while diff : buf = sock.recv(diff) diff = diff - len(buf) binary = binary + buf return binary def close(self) : if self._capturedDevice is not None : self._capturedDevice.release() self._capturedDevice = None if self._server is not None : try: #self._server.sendObject('close_conection') self._server.sendObject('exit') self._server.close() except: pass finally: self._server = None
[ "import cv2\n", "from PyQt5.QtWidgets import QErrorMessage\n", "#from socket import socket\n", "#from pickle import loads\n", "from Socket import Socket\n", "\n", "class CannotReadFrame(BaseException) :\n", " pass\n", "\n", "class StreamReader :\n", " def __init__(self) :\n", " self._server = None\n", " self._capturedDevice = None\n", " pass\n", "\n", " def connect(self, addr = '127.0.0.1', port = 4242) :\n", " try:\n", " self._server = Socket()\n", " statusCode = self._server.connect_ex((addr, port))\n", " if statusCode != 0 :\n", " raise statusCode\n", " except :\n", " self.connectLocalCamera()\n", "\n", " def connectLocalCamera(self) :\n", " self.close()\n", " qem = QErrorMessage()\n", " qem.showMessage('Не удаётся подключиться к Raspberry Pi: Будет подключена локальная камера')\n", " qem.exec()\n", " self._capturedDevice = cv2.VideoCapture(0) \n", "\n", " def releseLocalCamera(self) :\n", " self._capturedDevice.relese()\n", " self._capturedDevice = None\n", "\n", " def __del__(self) :\n", " self.close()\n", "\n", " def getFrame(self) :\n", " if self._server is not None :\n", " try: \n", " return self._getFrameFromRemoteCamera() \n", " except: \n", " self.connectLocalCamera()\n", "\n", " if self._capturedDevice is not None :\n", " try: \n", " return self._getFrameFromLocalCamera() \n", " except: \n", " raise CannotReadFrame\n", " raise CannotReadFrame\n", "\n", "\n", " def _getFrameFromRemoteCamera(self) :\n", " self._server.sendObject('get_frame')\n", " frame = self._server.recvObject()\n", " return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n", "\n", " def _getFrameFromLocalCamera(self) :\n", " retVal, frame = self._capturedDevice.read()\n", " if retVal == False :\n", " raise CannotReadFrame\n", " return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n", "\n", " def readable(self) :\n", " #заглушка\n", " return True\n", "\n", " def recvall(self, sock, size) :\n", " binary = sock.recv(size)\n", " diff = size - len(binary)\n", " while diff :\n", " buf = sock.recv(diff)\n", " diff = diff - len(buf)\n", " binary = binary + buf\n", " return binary\n", "\n", " def close(self) :\n", " if self._capturedDevice is not None :\n", " self._capturedDevice.release()\n", " self._capturedDevice = None\n", " if self._server is not None :\n", " try:\n", " #self._server.sendObject('close_conection')\n", " self._server.sendObject('exit')\n", " self._server.close()\n", " except:\n", " pass\n", " finally:\n", " self._server = None\n", "\n", "\n", "\n" ]
[ 0, 0, 0.037037037037037035, 0.038461538461538464, 0, 0, 0.05128205128205128, 0, 0, 0.09523809523809523, 0.04, 0, 0, 0, 0, 0.08771929824561403, 0, 0, 0, 0.030303030303030304, 0, 0.11764705882352941, 0, 0, 0.02857142857142857, 0, 0, 0.009900990099009901, 0, 0.01818181818181818, 0, 0.029411764705882353, 0, 0, 0, 0.041666666666666664, 0, 0, 0.04, 0.02631578947368421, 0.05555555555555555, 0.017543859649122806, 0.09523809523809523, 0, 0, 0.021739130434782608, 0.05555555555555555, 0.017857142857142856, 0.09523809523809523, 0, 0, 0, 0, 0.047619047619047616, 0, 0, 0, 0, 0.024390243902439025, 0, 0.06896551724137931, 0, 0, 0, 0.04, 0.05555555555555555, 0, 0, 0.027777777777777776, 0, 0, 0.047619047619047616, 0, 0, 0, 0, 0, 0.045454545454545456, 0.021739130434782608, 0, 0, 0.02631578947368421, 0, 0.016666666666666666, 0, 0, 0.05, 0, 0, 0, 0, 0, 1 ]
93
0.027124
false
#!/usr/bin/env python # coding: UTF-8 print('Hello World!') #如果try中有异常发生时,将执行异常的归属,执行except。 #异常层层比较,看是否是exception1, exception2...,直到找到其归属,执行相应的except中的语句。 #如果except后面没有任何参数,那么表示所有的exception都交给这段程序处理。 re1 = iter(range(5)) try: #当循环进行到第6次的时候,re.next()不会再返回元素,而是抛出(raise)StopIteration的异常。 for i in range(100): print( re1.__next__()) except StopIteration: print ('here is end ',i) except TypeError: print("TypeError") except: print("UnHandled Error") print( '=======with exception handle==') def func(): try: re = iter(range(5)) for i in range(20): print(re.__next__()) except ZeroDivisionError: print("ZeroDivisionError") #如果无法将异常交给合适的对象,异常将继续向上层抛出,直到被捕捉或者造成主程序报错。 #如果try中没有异常,那么except部分将跳过,执行else中的语句。 #finally是无论是否有异常,最后都要做的一些事情。 try: print ('----------------------Call func') func() except StopIteration: print ('func StopIteration') except NameError: print ('func NameError') except: print("func UnHandled Error") else: print ("func No exception") finally: print( 'func running out') #raise exception def func_ex(): print( '----------------------Begin func_ex' ) raise StopIteration() print( 'Finished func_ex' ) try: func_ex() except StopIteration: print ('func_ex StopIteration') try: 1/0 except ZeroDivisionError as reason: print( '========', reason) #自定义异常 class ShortInputException(Exception): '''A user -defined exception class''' def __init__(self, length, atleast): Exception.__init__(self) self.length=length self.atleast=atleast #import traceback try: s=input('Enter soemthing --> ') if len(s) < 3: raise ShortInputException(len(s),3) except ShortInputException as ex: #print(traceback.format_exc()) print("==ShortInputException : the input was of length %d, was excepting at least %d ." % (ex.length, ex.atleast)) else: print( 'Done') # return 是用于函数返回的,不能打断程序执行,所以try中不能用return。 def func_break(x): try: print('-----------Begin func_try_return') return x+3 #return x finally: print('-----------finally func_try_return') return ++x #在Python中++x不会改变x的值x++根本就是错的。。。 #无论如何都会执行finally, try的return没有用。 print (func_break(11)) #在中断点并不会退出,而是继续执行finally后,才退出。 import sys def func_break1(x): try: print('-----------Begin func_try_return') sys.exit() finally: print('-----------finally func_try_return') return ++x #11 print (func_break1(11)) input('Please enter a code to quit:')
[ "#!/usr/bin/env python\n", "# coding: UTF-8\n", "\n", "print('Hello World!')\n", "\n", "#如果try中有异常发生时,将执行异常的归属,执行except。\n", "#异常层层比较,看是否是exception1, exception2...,直到找到其归属,执行相应的except中的语句。\n", "#如果except后面没有任何参数,那么表示所有的exception都交给这段程序处理。\n", "re1 = iter(range(5))\n", "try:\n", " #当循环进行到第6次的时候,re.next()不会再返回元素,而是抛出(raise)StopIteration的异常。\n", " for i in range(100):\n", " print( re1.__next__())\n", "except StopIteration:\n", " print ('here is end ',i)\n", "except TypeError:\n", " print(\"TypeError\")\n", "except:\n", " print(\"UnHandled Error\") \n", "print( '=======with exception handle==')\n", "\n", "\n", "def func():\n", " try:\n", " re = iter(range(5))\n", " for i in range(20):\n", " print(re.__next__()) \n", " except ZeroDivisionError:\n", " print(\"ZeroDivisionError\")\n", "#如果无法将异常交给合适的对象,异常将继续向上层抛出,直到被捕捉或者造成主程序报错。\n", "#如果try中没有异常,那么except部分将跳过,执行else中的语句。\n", "#finally是无论是否有异常,最后都要做的一些事情。\n", "try:\n", " print ('----------------------Call func')\n", " func()\n", "except StopIteration:\n", " print ('func StopIteration')\n", "except NameError:\n", " print ('func NameError')\n", "except:\n", " print(\"func UnHandled Error\") \n", "else:\n", " print (\"func No exception\")\n", "finally:\n", " print( 'func running out')\n", "\n", " \n", "#raise exception\n", "def func_ex():\n", " print( '----------------------Begin func_ex' )\n", " raise StopIteration()\n", " print( 'Finished func_ex' )\n", " \n", "try:\n", " func_ex()\n", "except StopIteration:\n", " print ('func_ex StopIteration')\n", "\n", " \n", "try:\n", " 1/0 \n", "except ZeroDivisionError as reason:\n", " print( '========', reason)\n", " \n", "#自定义异常\n", "class ShortInputException(Exception):\n", " '''A user -defined exception class'''\n", " def __init__(self, length, atleast):\n", " Exception.__init__(self)\n", " self.length=length\n", " self.atleast=atleast\n", "\n", "#import traceback\n", "try:\n", " s=input('Enter soemthing --> ')\n", " if len(s) < 3:\n", " raise ShortInputException(len(s),3)\n", "except ShortInputException as ex:\n", " #print(traceback.format_exc())\n", " print(\"==ShortInputException : the input was of length %d, was excepting at least %d .\" % (ex.length, ex.atleast))\n", "else:\n", " print( 'Done')\n", " \n", " \n", "# return 是用于函数返回的,不能打断程序执行,所以try中不能用return。\n", "def func_break(x):\n", " try:\n", " print('-----------Begin func_try_return')\n", " return x+3\n", " #return x\n", " finally:\n", " print('-----------finally func_try_return')\n", " return ++x #在Python中++x不会改变x的值x++根本就是错的。。。\n", " \n", "#无论如何都会执行finally, try的return没有用。\n", "print (func_break(11))\n", "\n", "#在中断点并不会退出,而是继续执行finally后,才退出。\n", "import sys\n", "def func_break1(x):\n", " try:\n", " print('-----------Begin func_try_return')\n", " sys.exit()\n", " finally:\n", " print('-----------finally func_try_return')\n", " return ++x #11\n", "print (func_break1(11))\n", "\n", "input('Please enter a code to quit:')\n", "\n", "\n", "\n", " \n", " " ]
[ 0, 0, 0, 0, 0, 0.030303030303030304, 0.015873015873015872, 0.022222222222222223, 0, 0, 0.015625, 0, 0.03225806451612903, 0, 0.06896551724137931, 0, 0, 0.125, 0.03125, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0.029411764705882353, 0, 0.05555555555555555, 0.023255813953488372, 0.02631578947368421, 0.034482758620689655, 0.2, 0.021739130434782608, 0, 0.043478260869565216, 0.02857142857142857, 0.05263157894736842, 0.03225806451612903, 0.125, 0.02564102564102564, 0, 0.029411764705882353, 0, 0.030303030303030304, 0, 0.2, 0.058823529411764705, 0, 0.0392156862745098, 0, 0.0625, 0.2, 0.2, 0, 0.043478260869565216, 0.027777777777777776, 0, 0.2, 0, 0.1111111111111111, 0, 0.03225806451612903, 0.2, 0.14285714285714285, 0.02631578947368421, 0, 0, 0, 0.037037037037037035, 0.034482758620689655, 0, 0.05555555555555555, 0.2, 0.027777777777777776, 0, 0.022727272727272728, 0, 0.02857142857142857, 0.008333333333333333, 0, 0.05263157894736842, 0.2, 0.2, 0, 0, 0, 0, 0.05, 0.05555555555555555, 0, 0, 0.0392156862745098, 0.25, 0.030303030303030304, 0.08695652173913043, 0, 0.03225806451612903, 0.09090909090909091, 0.05, 0, 0, 0, 0, 0, 0.041666666666666664, 0.08333333333333333, 0, 0, 0, 0, 0, 0.5, 3 ]
114
0.068821
false
# -*- encoding: utf-8 -*- ############################################################################## # # OrgFileExporter, a python module exporter for exporting WikidPad files to # orgfiles. # Copyright (c) 2012 Josep Mones Teixidor # All rights reserved. # # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the <ORGANIZATION> nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ############################################################################## """ OrgFileExporter.py https://github.com/jmones/wikidpad_orgfile_exporter DESCRIPTION WikidPad exporter to emacs org-mode files (http://orgmode.org). FEATURES This exporter lacks a lot of features. It's just a quick hack to export some data from WikidPad. Feel free to improved. Current supported features: * Exporting data to a unique file, each word in a node. * It uses WikidPad parser classes to get WikidPad data. * It uses PyOrgMode library to generate org files. * It set ups links from wiki words in pages to actual nodes (inserting CUSTOM_ID properties). * It processes bold and italics. * It processes tables (only simple ones). * It processes horizontal rules. * It processes unordered and ordered lists. However this features known to be missing: * Does not support footnotes. * Does not support insertion. * Does not support roman lists. * Does not support alpha lists. * Does not support wikidpad anchors in text. * Only strategy available to layout nodes is "one word, one node". * Doesn't have a clever way to solve presence of headings in words. REQUIREMENTS * WikidPad version >= 2.2. * PyOrgMode (included). INSTALLATION 1. If user_extensions/ folder in WikidPad installation doesn't exist, create it as a sibling of extensions/ 2. Copy OrgFileExporter.py to user_extensions/ 3. Copy PyOrgMode.py to user_extensions/ USAGE 1. Select Extra/Export 2. Select "Org mode file" in "Export to:" dropdown. 3. Select destination file (it will create a single file). 4. Adjust all other settings as desired. 5. Press OK. 
AUTHOR Josep Mones Teixidor < jmones at gmail dot com > """ from pwiki.StringOps import * from pwiki.Exporters import AbstractExporter from pwiki.WikiPyparsing import SyntaxNode import PyOrgMode import string import re from copy import copy WIKIDPAD_PLUGIN = (("Exporters", 1),) LOG = False def describeExportersV01(mainControl): return (OrgFileExporter,) class OrgFileExporter(AbstractExporter): """ Exports org mode files from WikidPad. """ def __init__(self, mainControl): AbstractExporter.__init__(self, mainControl) self.wordList = None self.exportDest = None self.currentContent = [] self.currentLine = "" self.currentIndent = 2 self.currentWord = "" self.niceTitles = {} self.listItems = [] @staticmethod def getExportTypes(mainControl, continuousExport=False): """ Return sequence of tuples with the description of export types provided by this object. A tuple has the form (<exp. type>, <human readable description>) All exporters must provide this as a static method (which can be called without constructing an object first. mainControl -- PersonalWikiFrame object continuousExport -- If True, only types with support for continuous export are listed. """ if continuousExport: # Continuous export not supported return () return ( (u"org_mode", _(u'Org mode file')), ) def getAddOptPanelsForTypes(self, guiparent, exportTypes): """ Construct all necessary GUI panels for additional options for the types contained in exportTypes. Returns sequence of tuples (<exp. type>, <panel for add. options or None>) The panels should use guiparent as parent. If the same panel is used for multiple export types the function can and should include all export types for this panel even if some of them weren't requested. Panel objects must not be shared by different exporter classes. """ if not u"org_mode" in exportTypes: return () return ( (u"org_mode", None), ) def getExportDestinationWildcards(self, exportType): """ If an export type is intended to go to a file, this function returns a (possibly empty) sequence of tuples (wildcard description, wildcard filepattern). If an export type goes to a directory, None is returned """ if exportType == u"org_mode": return ((_(u"Org mode file (*.org)"), "*.org"),) return None def getAddOptVersion(self): """ Returns the version of the additional options information returned by getAddOpt(). If the return value is -1, the version info can't be stored between application sessions. Otherwise, the addopt information can be stored between sessions and can later handled back to the export method of the object without previously showing the export dialog. """ return -1 def getAddOpt(self, addoptpanel): """ Reads additional options from panel addoptpanel. If getAddOptVersion() > -1, the return value must be a sequence of simple string and/or numeric objects. Otherwise, any object can be returned (normally the addoptpanel itself) """ return (1,) def setAddOpt(self, addOpt, addoptpanel): """ Shows content of addOpt in the addoptpanel (must not be None). This function is only called if getAddOptVersion() != -1. 
""" pass def flushLine(self, force=False): if force or len(self.currentLine) > 0: line = (" "*self.currentIndent) + self.currentLine + "\n" self.currentContent.append(line.encode("utf-8")) self.currentLine = "" def shouldExport(self, wikiWord, wikiPage=None): if not wikiPage: try: wikiPage = self.wikiDocument.getWikiPage(wikiWord) except WikiWordNotFoundException: return False return strToBool(wikiPage.getAttributes().get("export", ("True",))[-1]) def getLinkForWikiWord(self, word, default = None): relUnAlias = self.wikiDocument.getWikiPageNameForLinkTerm(word) if relUnAlias is None: return default if not self.shouldExport(word): return default return relUnAlias def processWikiWord(self, astNodeOrWord, fullContent=None): if isinstance(astNodeOrWord, SyntaxNode): wikiWord = astNodeOrWord.wikiWord titleNode = astNodeOrWord.titleNode else: wikiWord = astNodeOrWord titleNode = None if titleNode == None: title = self.niceTitles.get(wikiWord, None) link = self.getLinkForWikiWord(wikiWord) if link: if titleNode is not None: self.currentLine += u"[[#%s][" % link self.processAst(fullContent, titleNode) self.currentLine += u"]]" else: if title is None: self.currentLine += u"[[#%s]]" % (link) else: self.currentLine += u"[[#%s][%s]]" % (link, title) else: if titleNode is not None: self.processAst(fullContent, titleNode) else: if isinstance(astNodeOrWord, SyntaxNode): self.currentLine += astNodeOrWord.getString() else: self.currentLine += astNodeOrWord def processUrlLink(self, fullContent, astNode): link = astNode.url self.currentLine += u"[[%s][" % link if astNode.titleNode is not None: self.processAst(fullContent, astNode.titleNode) else: self.currentLine += astNode.coreNode.getString() self.currentLine += "]]" def processTable(self, content, astNode): """ Write out content of a table as HTML code. astNode -- node of type "table" """ self.flushLine() table = PyOrgMode.OrgTable.Element() table.content = [] for row in astNode.iterFlatByName("tableRow"): orgRow = [] for cell in row.iterFlatByName("tableCell"): orgRow.append(cell.getString().encode("utf-8")) table.content.append(orgRow) self.currentContent.append(table) def processAst(self, content, pageAst): """ Actual token to org-mode converter. May be called recursively. """ for node in pageAst.iterFlatNamed(): tname = node.name # self.currentLine += "{" + tname + "}" if tname is None: continue elif tname == "plainText": if self.removePlainText: # This it the text for the first title in a wikiword, # we use it as a nice title pass else: self.currentLine += node.getString() elif tname == "lineBreak": self.flushLine(True) elif tname == "newParagraph": self.flushLine() self.flushLine(True) elif tname == "whitespace": self.currentLine += " " elif tname == "indentedText": self.flushLine() self.currentIndent += 2 self.processAst(content, node) elif tname == "orderedList": self.flushLine() self.processAst(content, node) self.flushLine() elif tname == "unorderedList": self.flushLine() self.listItems.append(0) self.processAst(content, node) self.listItems.pop() self.flushLine() elif tname == "romanList": self.flushLine() print "[ERROR: romanList is not implemented]" self.processAst(content, node) self.flushLine() elif tname == "alphaList": self.flushLine() print "[ERROR: alphaList is not implemented]" self.processAst(content, node) self.flushLine() elif tname == "bullet": self.currentLine += "- "; elif tname == "number": self.listItems[-1] += 1 self.currentLine += "%d. 
" % self.listItems[-1]; elif tname == "roman": print "[ERROR: roman is not implemented]" elif tname == "alpha": print "[ERROR: alpha is not implemented]" elif tname == "italics": self.currentLine += "/" self.processAst(content, node) self.currentLine += "/" elif tname == "bold": self.currentLine += "*" self.processAst(content, node) self.currentLine += "*" elif tname == "htmlTag" or tname == "htmlEntity": self.currentLine += node.getString() elif tname == "heading": # we ignore the heading, it doesn't fit very well in the # exporting model we are using (every wikiword is a node) self.flushLine() # we use the first heading as a friendly title for the node if self.itemsProcessed == 0: self.removePlainText = True self.processAst(content, node.contentNode) self.removePlainText = False else: self.processAst(content, node.contentNode) elif tname == "horizontalLine": self.flushLine() self.currentLine += "-----" self.flushLine() elif tname == "preBlock": self.flushLine() self.currentLine += "#+BEGIN_EXAMPLE" self.flushLine() for line in string.split(node.findFlatByName("preText").getString(), "\n"): self.currentLine += line self.flushLine() self.currentLine += "#+END_EXAMPLE" elif tname == "todoEntry": # we should create nodes but it's difficult to fit in current "each wiki word is a node scheme" self.flushLine() self.currentLine += "TODO: %s%s" % (node.key, node.delimiter) self.processAst(content, node.valueNode) self.flushLine() elif tname == "script": pass # Hide scripts elif tname == "noExport": pass # Hide no export areas elif tname == "anchorDef": self.currentLine += u"[ERROR: We can't process anchors]" elif tname == "wikiWord": self.processWikiWord(node, content) elif tname == "table": self.processTable(content, node) elif tname == "footnote": self.flushLine() self.currentLine += u"[ERROR: We can't process footnotes]" self.flushLine() elif tname == "urlLink": self.processUrlLink(content, node) elif tname == "stringEnd": pass else: self.flushLine() self.currentLine += u'[Unknown parser node with name "%s" found]' % tname self.flushLine() self.itemsProcessed += 1 # if we have a line to flush do it now self.flushLine() def updateNiceTitle(self, content, word, pageAst): """ This gets Nice title """ item = pageAst.iterFlatNamed().next() if item.name == 'heading': item = item.contentNode.iterFlatNamed().next() if item.name == 'plainText': self.niceTitles[word] = item.getString() def export(self, wikiDocument, wordList, exportType, exportDest, compatFilenames, addopt, progressHandler): """ Run export operation. 
wikiDocument -- WikiDocument object wordList -- Sequence of wiki words to export exportType -- string tag to identify how to export exportDest -- Path to destination directory or file to export to compatFilenames -- Should the filenames be encoded to be lowest level compatible addopt -- additional options returned by getAddOpt() """ self.wikiDocument = wikiDocument self.wordList = wordList self.exportDest = exportDest try: org = PyOrgMode.OrgDataStructure() # capture nice titles for word in self.wordList: wikiPage = self.wikiDocument.getWikiPage(word) word = wikiPage.getWikiWord() content = wikiPage.getLiveText() basePageAst = wikiPage.getLivePageAst() # set default setting self.niceTitles[word] = word self.updateNiceTitle(content, word, basePageAst) for word in self.wordList: wikiPage = self.wikiDocument.getWikiPage(word) word = wikiPage.getWikiWord() formatDetails = wikiPage.getFormatDetails() content = wikiPage.getLiveText() basePageAst = wikiPage.getLivePageAst() self.currentContent = [] self.currentWord = word self.currentLine = "" self.itemsProcessed = 0 self.removePlainText = False self.currentIndent = 2 self.listItems = [] self.processAst(content, basePageAst) node = PyOrgMode.OrgNode.Element() node.level = 1 node.heading = self.niceTitles[word].encode("utf-8") drawer = PyOrgMode.OrgDrawer.Element("PROPERTIES") customId = ":CUSTOM_ID: " + word drawer.content.append(customId.encode("utf-8")) node.content.append(drawer) node.content.extend(self.currentContent) org.root.append_clean(node) org.save_to_file(self.exportDest) except: traceback.print_exc()
[ "# -*- encoding: utf-8 -*-\n", "##############################################################################\n", "#\n", "# OrgFileExporter, a python module exporter for exporting WikidPad files to\n", "# orgfiles.\n", "# Copyright (c) 2012 Josep Mones Teixidor\n", "# All rights reserved.\n", "# \n", "# \n", "# Redistribution and use in source and binary forms, with or without modification,\n", "# are permitted provided that the following conditions are met:\n", "# \n", "# * Redistributions of source code must retain the above copyright notice,\n", "# this list of conditions and the following disclaimer.\n", "# * Redistributions in binary form must reproduce the above copyright notice,\n", "# this list of conditions and the following disclaimer in the documentation\n", "# and/or other materials provided with the distribution.\n", "# * Neither the name of the <ORGANIZATION> nor the names of its contributors\n", "# may be used to endorse or promote products derived from this software\n", "# without specific prior written permission.\n", "# \n", "# \n", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n", "# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n", "# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n", "# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n", "# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n", "# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n", "# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n", "# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n", "# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n", "# POSSIBILITY OF SUCH DAMAGE.\n", "# \n", "##############################################################################\n", "\n", "\"\"\"\n", "\tOrgFileExporter.py\n", " https://github.com/jmones/wikidpad_orgfile_exporter\n", "\n", " DESCRIPTION\n", " WikidPad exporter to emacs org-mode files (http://orgmode.org).\n", " \n", " FEATURES\n", " This exporter lacks a lot of features. It's just a quick hack to export some data\n", " from WikidPad. Feel free to improved. Current supported features:\n", " * Exporting data to a unique file, each word in a node.\n", " * It uses WikidPad parser classes to get WikidPad data.\n", " * It uses PyOrgMode library to generate org files.\n", " * It set ups links from wiki words in pages to actual nodes (inserting CUSTOM_ID properties).\n", " * It processes bold and italics.\n", " * It processes tables (only simple ones).\n", " * It processes horizontal rules.\n", " * It processes unordered and ordered lists.\n", " \n", " However this features known to be missing:\n", " * Does not support footnotes.\n", " * Does not support insertion.\n", " * Does not support roman lists.\n", " * Does not support alpha lists.\n", " * Does not support wikidpad anchors in text.\n", " * Only strategy available to layout nodes is \"one word, one node\".\n", " * Doesn't have a clever way to solve presence of headings in words.\n", " \n", " \n", " REQUIREMENTS\n", " * WikidPad version >= 2.2.\n", " * PyOrgMode (included).\n", " \n", "\n", " INSTALLATION\n", " 1. If user_extensions/ folder in WikidPad installation doesn't exist, create it as a sibling of extensions/\n", " 2. Copy OrgFileExporter.py to user_extensions/\n", " 3. 
Copy PyOrgMode.py to user_extensions/\n", " \n", " USAGE\n", " 1. Select Extra/Export\n", " 2. Select \"Org mode file\" in \"Export to:\" dropdown.\n", " 3. Select destination file (it will create a single file).\n", " 4. Adjust all other settings as desired.\n", " 5. Press OK.\n", " \n", " AUTHOR\n", " Josep Mones Teixidor < jmones at gmail dot com >\n", "\"\"\"\n", "\n", "from pwiki.StringOps import *\n", "from pwiki.Exporters import AbstractExporter\n", "from pwiki.WikiPyparsing import SyntaxNode\n", "import PyOrgMode\n", "import string\n", "import re\n", "from copy import copy\n", "\n", "WIKIDPAD_PLUGIN = ((\"Exporters\", 1),)\n", "LOG = False\n", "\n", "\n", "def describeExportersV01(mainControl):\n", " return (OrgFileExporter,)\n", "\n", "class OrgFileExporter(AbstractExporter):\n", " \"\"\"\n", " Exports org mode files from WikidPad.\n", " \"\"\"\n", " def __init__(self, mainControl):\n", " AbstractExporter.__init__(self, mainControl)\n", " self.wordList = None\n", " self.exportDest = None\n", " self.currentContent = []\n", " self.currentLine = \"\"\n", " self.currentIndent = 2\n", " self.currentWord = \"\"\n", " self.niceTitles = {}\n", " self.listItems = []\n", "\n", " @staticmethod\n", " def getExportTypes(mainControl, continuousExport=False):\n", " \"\"\"\n", " Return sequence of tuples with the description of export types provided\n", " by this object. A tuple has the form (<exp. type>,\n", " <human readable description>)\n", " All exporters must provide this as a static method (which can be called\n", " without constructing an object first.\n", "\n", " mainControl -- PersonalWikiFrame object\n", " continuousExport -- If True, only types with support for continuous export\n", " are listed.\n", " \"\"\"\n", " if continuousExport:\n", " # Continuous export not supported\n", " return ()\n", " return (\n", " (u\"org_mode\", _(u'Org mode file')),\n", " )\n", "\n", " def getAddOptPanelsForTypes(self, guiparent, exportTypes):\n", " \"\"\"\n", " Construct all necessary GUI panels for additional options\n", " for the types contained in exportTypes.\n", " Returns sequence of tuples (<exp. type>, <panel for add. options or None>)\n", "\n", " The panels should use guiparent as parent.\n", " If the same panel is used for multiple export types the function can\n", " and should include all export types for this panel even if some of\n", " them weren't requested. Panel objects must not be shared by different\n", " exporter classes.\n", " \"\"\"\n", " if not u\"org_mode\" in exportTypes:\n", " return ()\n", "\n", " return (\n", " (u\"org_mode\", None),\n", " )\n", "\n", "\n", "\n", " def getExportDestinationWildcards(self, exportType):\n", " \"\"\"\n", " If an export type is intended to go to a file, this function\n", " returns a (possibly empty) sequence of tuples\n", " (wildcard description, wildcard filepattern).\n", " \n", " If an export type goes to a directory, None is returned\n", " \"\"\"\n", " if exportType == u\"org_mode\":\n", " return ((_(u\"Org mode file (*.org)\"), \"*.org\"),) \n", " return None\n", "\n", " def getAddOptVersion(self):\n", " \"\"\"\n", " Returns the version of the additional options information returned\n", " by getAddOpt(). 
If the return value is -1, the version info can't\n", " be stored between application sessions.\n", " \n", " Otherwise, the addopt information can be stored between sessions\n", " and can later handled back to the export method of the object\n", " without previously showing the export dialog.\n", " \"\"\"\n", " return -1\n", "\n", "\n", " def getAddOpt(self, addoptpanel):\n", " \"\"\"\n", " Reads additional options from panel addoptpanel.\n", " If getAddOptVersion() > -1, the return value must be a sequence\n", " of simple string and/or numeric objects. Otherwise, any object\n", " can be returned (normally the addoptpanel itself)\n", " \"\"\"\n", " return (1,)\n", "\n", "\n", " def setAddOpt(self, addOpt, addoptpanel):\n", " \"\"\"\n", " Shows content of addOpt in the addoptpanel (must not be None).\n", " This function is only called if getAddOptVersion() != -1.\n", " \"\"\"\n", " pass\n", " \n", " def flushLine(self, force=False):\n", " if force or len(self.currentLine) > 0:\n", " line = (\" \"*self.currentIndent) + self.currentLine + \"\\n\"\n", " self.currentContent.append(line.encode(\"utf-8\"))\n", " self.currentLine = \"\"\n", " \n", "\n", " def shouldExport(self, wikiWord, wikiPage=None):\n", " if not wikiPage:\n", " try:\n", " wikiPage = self.wikiDocument.getWikiPage(wikiWord)\n", " except WikiWordNotFoundException:\n", " return False\n", "\n", " return strToBool(wikiPage.getAttributes().get(\"export\", (\"True\",))[-1])\n", "\n", " def getLinkForWikiWord(self, word, default = None):\n", " relUnAlias = self.wikiDocument.getWikiPageNameForLinkTerm(word)\n", " if relUnAlias is None:\n", " return default\n", " if not self.shouldExport(word):\n", " return default\n", " \n", " return relUnAlias\n", "\n", " def processWikiWord(self, astNodeOrWord, fullContent=None):\n", " if isinstance(astNodeOrWord, SyntaxNode):\n", " wikiWord = astNodeOrWord.wikiWord\n", " titleNode = astNodeOrWord.titleNode\n", " else:\n", " wikiWord = astNodeOrWord\n", " titleNode = None\n", " \n", " if titleNode == None:\n", " title = self.niceTitles.get(wikiWord, None)\n", " \n", "\n", " link = self.getLinkForWikiWord(wikiWord)\n", "\n", " if link:\n", " if titleNode is not None:\n", " self.currentLine += u\"[[#%s][\" % link\n", " self.processAst(fullContent, titleNode)\n", " self.currentLine += u\"]]\"\n", " else:\n", " if title is None: \n", " self.currentLine += u\"[[#%s]]\" % (link)\n", " else:\n", " self.currentLine += u\"[[#%s][%s]]\" % (link, title)\n", " else:\n", " if titleNode is not None:\n", " self.processAst(fullContent, titleNode)\n", " else:\n", " if isinstance(astNodeOrWord, SyntaxNode):\n", " self.currentLine += astNodeOrWord.getString()\n", " else:\n", " self.currentLine += astNodeOrWord\n", "\n", " def processUrlLink(self, fullContent, astNode):\n", " link = astNode.url\n", " self.currentLine += u\"[[%s][\" % link\n", " if astNode.titleNode is not None:\n", " self.processAst(fullContent, astNode.titleNode)\n", " else:\n", " self.currentLine += astNode.coreNode.getString()\n", " self.currentLine += \"]]\"\n", "\n", "\n", " def processTable(self, content, astNode):\n", " \"\"\"\n", " Write out content of a table as HTML code.\n", " \n", " astNode -- node of type \"table\"\n", " \"\"\"\n", " self.flushLine()\n", " table = PyOrgMode.OrgTable.Element()\n", " table.content = []\n", " \n", " for row in astNode.iterFlatByName(\"tableRow\"):\n", " orgRow = []\n", " for cell in row.iterFlatByName(\"tableCell\"):\n", " orgRow.append(cell.getString().encode(\"utf-8\"))\n", " table.content.append(orgRow)\n", 
" self.currentContent.append(table)\n", " \n", "\n", " def processAst(self, content, pageAst):\n", " \"\"\"\n", " Actual token to org-mode converter. May be called recursively.\n", " \"\"\"\n", " for node in pageAst.iterFlatNamed():\n", " tname = node.name\n", "\n", " # self.currentLine += \"{\" + tname + \"}\"\n", " \n", " if tname is None:\n", " continue \n", " elif tname == \"plainText\":\n", " if self.removePlainText:\n", " # This it the text for the first title in a wikiword,\n", " # we use it as a nice title\n", " pass\n", " else:\n", " self.currentLine += node.getString()\n", " elif tname == \"lineBreak\":\n", " self.flushLine(True)\n", " elif tname == \"newParagraph\":\n", " self.flushLine()\n", " self.flushLine(True)\n", " elif tname == \"whitespace\":\n", " self.currentLine += \" \"\n", " elif tname == \"indentedText\":\n", " self.flushLine()\n", " self.currentIndent += 2\n", " self.processAst(content, node)\n", " elif tname == \"orderedList\":\n", " self.flushLine()\n", " self.processAst(content, node)\n", " self.flushLine()\n", " elif tname == \"unorderedList\":\n", " self.flushLine()\n", " self.listItems.append(0)\n", " self.processAst(content, node)\n", " self.listItems.pop()\n", " self.flushLine()\n", " elif tname == \"romanList\":\n", " self.flushLine()\n", " print \"[ERROR: romanList is not implemented]\"\n", " self.processAst(content, node)\n", " self.flushLine()\n", " elif tname == \"alphaList\":\n", " self.flushLine()\n", " print \"[ERROR: alphaList is not implemented]\"\n", " self.processAst(content, node)\n", " self.flushLine()\n", " elif tname == \"bullet\":\n", " self.currentLine += \"- \";\n", " elif tname == \"number\":\n", " self.listItems[-1] += 1\n", " self.currentLine += \"%d. \" % self.listItems[-1];\n", " elif tname == \"roman\":\n", " print \"[ERROR: roman is not implemented]\"\n", " elif tname == \"alpha\":\n", " print \"[ERROR: alpha is not implemented]\"\n", " elif tname == \"italics\":\n", " self.currentLine += \"/\"\n", " self.processAst(content, node)\n", " self.currentLine += \"/\"\n", " elif tname == \"bold\":\n", " self.currentLine += \"*\"\n", " self.processAst(content, node)\n", " self.currentLine += \"*\"\n", " \n", " elif tname == \"htmlTag\" or tname == \"htmlEntity\":\n", " self.currentLine += node.getString()\n", "\n", " elif tname == \"heading\":\n", " # we ignore the heading, it doesn't fit very well in the\n", " # exporting model we are using (every wikiword is a node)\n", " self.flushLine()\n", " \n", " # we use the first heading as a friendly title for the node\n", " if self.itemsProcessed == 0:\n", " self.removePlainText = True\n", " self.processAst(content, node.contentNode)\n", " self.removePlainText = False\n", " else:\n", " self.processAst(content, node.contentNode)\n", "\n", " elif tname == \"horizontalLine\":\n", " self.flushLine()\n", " self.currentLine += \"-----\"\n", " self.flushLine()\n", "\n", " elif tname == \"preBlock\":\n", " self.flushLine()\n", " self.currentLine += \"#+BEGIN_EXAMPLE\"\n", " self.flushLine()\n", " for line in string.split(node.findFlatByName(\"preText\").getString(), \"\\n\"):\n", " self.currentLine += line\n", " self.flushLine()\n", " self.currentLine += \"#+END_EXAMPLE\"\n", "\n", " elif tname == \"todoEntry\":\n", " # we should create nodes but it's difficult to fit in current \"each wiki word is a node scheme\"\n", " self.flushLine()\n", " self.currentLine += \"TODO: %s%s\" % (node.key, node.delimiter)\n", " self.processAst(content, node.valueNode)\n", " self.flushLine()\n", " elif tname == 
\"script\":\n", " pass # Hide scripts\n", " elif tname == \"noExport\":\n", " pass # Hide no export areas\n", " elif tname == \"anchorDef\":\n", " self.currentLine += u\"[ERROR: We can't process anchors]\"\n", " elif tname == \"wikiWord\":\n", " self.processWikiWord(node, content)\n", " elif tname == \"table\":\n", " self.processTable(content, node)\n", " elif tname == \"footnote\":\n", " self.flushLine()\n", " self.currentLine += u\"[ERROR: We can't process footnotes]\"\n", " self.flushLine()\n", " elif tname == \"urlLink\":\n", " self.processUrlLink(content, node)\n", " elif tname == \"stringEnd\":\n", " pass\n", " else:\n", " self.flushLine()\n", " self.currentLine += u'[Unknown parser node with name \"%s\" found]' % tname\n", " self.flushLine()\n", " \n", " self.itemsProcessed += 1\n", " \n", "\n", " # if we have a line to flush do it now\n", " self.flushLine()\n", " \n", " def updateNiceTitle(self, content, word, pageAst):\n", " \"\"\"\n", " This gets Nice title\n", " \"\"\"\n", " item = pageAst.iterFlatNamed().next()\n", " if item.name == 'heading': \n", " item = item.contentNode.iterFlatNamed().next()\n", " if item.name == 'plainText':\n", " self.niceTitles[word] = item.getString()\n", " \n", " \n", " def export(self, wikiDocument, wordList, exportType, exportDest,\n", " compatFilenames, addopt, progressHandler):\n", " \"\"\"\n", " Run export operation.\n", " \n", " wikiDocument -- WikiDocument object\n", " wordList -- Sequence of wiki words to export\n", " exportType -- string tag to identify how to export\n", " exportDest -- Path to destination directory or file to export to\n", " compatFilenames -- Should the filenames be encoded to be lowest\n", " level compatible\n", " addopt -- additional options returned by getAddOpt()\n", " \"\"\"\n", " self.wikiDocument = wikiDocument\n", " self.wordList = wordList\n", " self.exportDest = exportDest\n", " \n", " try:\n", " org = PyOrgMode.OrgDataStructure()\n", "\n", " # capture nice titles\n", " for word in self.wordList:\n", " wikiPage = self.wikiDocument.getWikiPage(word)\n", "\n", " word = wikiPage.getWikiWord()\n", " content = wikiPage.getLiveText()\n", " basePageAst = wikiPage.getLivePageAst()\n", " # set default setting\n", " self.niceTitles[word] = word\n", " self.updateNiceTitle(content, word, basePageAst)\n", "\n", " for word in self.wordList:\n", " wikiPage = self.wikiDocument.getWikiPage(word)\n", "\n", " word = wikiPage.getWikiWord()\n", " formatDetails = wikiPage.getFormatDetails()\n", " content = wikiPage.getLiveText()\n", " basePageAst = wikiPage.getLivePageAst()\n", " \n", " self.currentContent = []\n", " self.currentWord = word\n", " self.currentLine = \"\"\n", " self.itemsProcessed = 0\n", " self.removePlainText = False\n", " self.currentIndent = 2\n", " self.listItems = []\n", " self.processAst(content, basePageAst)\n", " \n", "\n", " node = PyOrgMode.OrgNode.Element()\n", " node.level = 1\n", " node.heading = self.niceTitles[word].encode(\"utf-8\")\n", " \n", " drawer = PyOrgMode.OrgDrawer.Element(\"PROPERTIES\")\n", " customId = \":CUSTOM_ID: \" + word\n", " drawer.content.append(customId.encode(\"utf-8\"))\n", " node.content.append(drawer)\n", " node.content.extend(self.currentContent)\n", "\n", " org.root.append_clean(node)\n", " org.save_to_file(self.exportDest) \n", " except:\n", " traceback.print_exc()\n", " \n", "\n", "\n", "\n", "\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0.16666666666666666, 0.16666666666666666, 0.011627906976744186, 0, 0.16666666666666666, 0.012195121951219513, 0, 0.011764705882352941, 0.011764705882352941, 0, 0.011904761904761904, 0.012345679012345678, 0, 0.16666666666666666, 0.16666666666666666, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.16666666666666666, 0, 0, 0, 0.05, 0.017857142857142856, 0, 0, 0, 0.2, 0, 0.011627906976744186, 0, 0, 0, 0, 0.01020408163265306, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.2, 0, 0, 0, 0.2, 0, 0, 0.008928571428571428, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0.023255813953488372, 0, 0, 0, 0, 0, 0, 0, 0, 0.017543859649122806, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0.016129032258064516, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0.018867924528301886, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0.03333333333333333, 0, 0.07692307692307693, 0, 0.02040816326530612, 0, 0, 0, 0, 0, 0, 0, 0.02857142857142857, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0.022727272727272728, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0, 0.02702702702702703, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0, 0.015384615384615385, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0.008928571428571428, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0.058823529411764705, 0, 0.07692307692307693, 0, 0.02127659574468085, 0, 0.058823529411764705, 0, 0, 0, 0, 0, 0.02127659574468085, 0, 0, 0, 0.058823529411764705, 0.1111111111111111, 0.014492753623188406, 0.01818181818181818, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0, 0.0196078431372549, 0, 0, 0.058823529411764705, 0, 0, 0, 0, 0, 0, 0, 0.018518518518518517, 0.0625, 0, 0.07692307692307693, 0, 0, 0, 1 ]
496
0.013093
false
# Copyright (C) 2009 Tim Gaggstatter <Tim.Gaggstatter AT gmx DOT net> # Copyright (C) 2010 Eduardo Robles Elvira <edulix AT gmail DOT com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.db import models from django.contrib.auth.models import User from django.utils.translation import ugettext_lazy as _ from djangoratings.fields import RatingField from django.db.models import signals from user.models import Profile from tbmessages.utils import new_transfer_email class Area(models.Model): name = models.CharField(_("Area"), max_length=40) def __unicode__(self): return self.name class Meta: ordering = ["name"] class Category(models.Model): name = models.CharField(_(u"Category"), max_length=45) def __unicode__(self): return self.name class Meta: ordering = ["name"] verbose_name = _(u"Category") verbose_name_plural = _(u"Categories") OFFER_CHOICES = ( (True, _('offer')), (False, _('demand')) ) class Service(models.Model): creator = models.ForeignKey(Profile, related_name="services", verbose_name=_("Creator")) is_offer = models.BooleanField(_("Service type"), choices=OFFER_CHOICES, default=True) pub_date = models.DateTimeField(_(u"Publish date"), auto_now=True, auto_now_add=True) is_active = models.BooleanField(default=True) description = models.TextField(_(u"Description"), max_length=400) category = models.ForeignKey(Category, verbose_name=_('Category')) area = models.ForeignKey(Area, null=True, blank=True, verbose_name=_("Area")) def __unicode__(self): if self.is_offer: msj = _("offered") else: msj = _("demanded") msj = unicode(msj) return "%d: '%s' %s from %s" % (self.id, self.short_name(), msj, self.creator) def short_name(self): if len(self.description) < 53: return self.description return "%s..." 
% self.description[:50] def transfers_count(self): return self.transfers.count() def sorted_transfers(self): return self.transfers.order_by('-request_date') def messages_count(self): from tbmessages.models import Message return Message.objects.filter(service=self).count() def messages(self): from tbmessages.models import Message return Message.objects.filter(service=self) def credits_transfered(self): ret = self.transfers.filter(status='d').aggregate(models.Sum('credits')) return ret['credits__sum'] and ret['credits__sum'] or 0 def credit_hours_transfered(self): credits = self.credits_transfered() if credits % 60 == 0: return credits/60 return credits/60.0 def ongoing_transfers(self, user): if self.is_offer: return Transfer.objects.filter(credits_debtor=user, service=self, status__in=["q", "a"]) else: return Transfer.objects.filter(credits_payee=user, service=self, status__in=["q", "a"]) class Meta: ordering = ('-pub_date', ) TRANSFER_STATUS = ( ('q', _('requested')), # q for reQuest ('a', _('accepted')), # a for Accepted ('r', _('cancelled')), # r for Rejected TODO: (but it actually should be c for cancelled) ('d', _('done')), # d for Done ) class Transfer(models.Model): rating = RatingField(range=5, allow_anonymous=False, can_change_vote=True) def int_rating(self): return int(self.rating.score / self.rating.votes) # will only be set and used when transfer is not associated with a service direct_transfer_creator = models.ForeignKey(Profile, related_name='direct_transfers_created', null=True, blank=True, verbose_name=_("Direct transfer creator")) # Person receiving the credits (and giving the service) credits_payee = models.ForeignKey(Profile, related_name='transfers_received', verbose_name=_("Credits payee")) # Person giving the credits (and receiving the service) credits_debtor = models.ForeignKey(Profile, related_name='transfers_given', verbose_name=_("Credits debtor")) service = models.ForeignKey(Service, related_name='transfers', null=True, blank=True, verbose_name=_("Service")) # Small description for the received service description = models.TextField(_(u"Description"), max_length=300) request_date = models.DateTimeField(_("Transfer request date"), auto_now=True, auto_now_add=True) confirmation_date = models.DateTimeField(_(u"Transfer confirmation date"), null=True) status = models.CharField(_(u"Status"), max_length=1, choices=TRANSFER_STATUS) is_public = models.BooleanField(_(u"Is public"), default=False) # credits in minutes credits = models.PositiveIntegerField(_(u"Credits")) def credit_hours(self): return self.credits/60.0 class meta: ordering = ['-request_date'] def creator(self): ''' Transfer creator ''' if self.service: return self.service.creator == self.credits_debtor and\ self.credits_payee or self.credits_debtor else: return self.direct_transfer_creator def recipient(self): ''' the user which is not the creator ''' if self.service: return self.service.creator != self.credits_debtor and\ self.credits_payee or self.credits_debtor else: return self.direct_transfer_creator == self.credits_debtor and\ self.credits_payee or self.credits_debtor def is_direct(self): return not self.service def status_readable(self): return TRANSFER_STATUS[self.status] def __unicode__(self): return self.description[0:53] + '...' signals.post_save.connect(new_transfer_email, sender=Transfer)
[ "# Copyright (C) 2009 Tim Gaggstatter <Tim.Gaggstatter AT gmx DOT net>\n", "# Copyright (C) 2010 Eduardo Robles Elvira <edulix AT gmail DOT com>\n", "#\n", "# This program is free software: you can redistribute it and/or modify\n", "# it under the terms of the GNU Affero General Public License as published by\n", "# the Free Software Foundation, either version 3 of the License, or\n", "# (at your option) any later version.\n", "#\n", "# This program is distributed in the hope that it will be useful,\n", "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "# GNU Affero General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU Affero General Public License\n", "# along with this program. If not, see <http://www.gnu.org/licenses/>.\n", "\n", "\n", "from django.db import models\n", "from django.contrib.auth.models import User\n", "from django.utils.translation import ugettext_lazy as _\n", "from djangoratings.fields import RatingField\n", "from django.db.models import signals\n", "\n", "from user.models import Profile\n", "from tbmessages.utils import new_transfer_email\n", "\n", "class Area(models.Model):\n", "\n", " name = models.CharField(_(\"Area\"), max_length=40)\n", "\n", " def __unicode__(self):\n", " return self.name\n", "\n", " class Meta:\n", " ordering = [\"name\"]\n", "\n", "\n", "class Category(models.Model):\n", "\n", " name = models.CharField(_(u\"Category\"), max_length=45)\n", "\n", " def __unicode__(self):\n", " return self.name\n", "\n", " class Meta:\n", " ordering = [\"name\"]\n", " verbose_name = _(u\"Category\")\n", " verbose_name_plural = _(u\"Categories\")\n", "\n", "\n", "OFFER_CHOICES = (\n", " (True, _('offer')),\n", " (False, _('demand'))\n", ")\n", "\n", "class Service(models.Model):\n", " creator = models.ForeignKey(Profile, related_name=\"services\",\n", " verbose_name=_(\"Creator\"))\n", " is_offer = models.BooleanField(_(\"Service type\"), choices=OFFER_CHOICES, default=True)\n", " pub_date = models.DateTimeField(_(u\"Publish date\"),\n", " auto_now=True, auto_now_add=True)\n", " is_active = models.BooleanField(default=True)\n", " description = models.TextField(_(u\"Description\"), max_length=400)\n", " category = models.ForeignKey(Category, verbose_name=_('Category'))\n", " area = models.ForeignKey(Area, null=True, blank=True,\n", " verbose_name=_(\"Area\"))\n", "\n", " def __unicode__(self):\n", " if self.is_offer:\n", " msj = _(\"offered\")\n", " else:\n", " msj = _(\"demanded\")\n", " msj = unicode(msj)\n", " return \"%d: '%s' %s from %s\" % (self.id, self.short_name(), msj, self.creator)\n", "\n", " def short_name(self):\n", " if len(self.description) < 53:\n", " return self.description\n", "\n", " return \"%s...\" % self.description[:50]\n", "\n", " def transfers_count(self):\n", " return self.transfers.count()\n", "\n", " def sorted_transfers(self):\n", " return self.transfers.order_by('-request_date')\n", "\n", " def messages_count(self):\n", " from tbmessages.models import Message\n", " return Message.objects.filter(service=self).count()\n", "\n", " def messages(self):\n", " from tbmessages.models import Message\n", " return Message.objects.filter(service=self)\n", "\n", " def credits_transfered(self):\n", " ret = self.transfers.filter(status='d').aggregate(models.Sum('credits'))\n", " return ret['credits__sum'] and ret['credits__sum'] or 0\n", "\n", " def credit_hours_transfered(self):\n", " credits = 
self.credits_transfered()\n", " if credits % 60 == 0:\n", " return credits/60\n", "\n", " return credits/60.0\n", "\n", " def ongoing_transfers(self, user):\n", " if self.is_offer:\n", " return Transfer.objects.filter(credits_debtor=user, service=self,\n", " status__in=[\"q\", \"a\"])\n", " else:\n", " return Transfer.objects.filter(credits_payee=user, service=self,\n", " status__in=[\"q\", \"a\"])\n", "\n", " class Meta:\n", " ordering = ('-pub_date', )\n", "\n", "\n", "TRANSFER_STATUS = (\n", " ('q', _('requested')), # q for reQuest\n", " ('a', _('accepted')), # a for Accepted\n", " ('r', _('cancelled')), # r for Rejected TODO: (but it actually should be c for cancelled)\n", " ('d', _('done')), # d for Done\n", ")\n", "\n", "class Transfer(models.Model):\n", " rating = RatingField(range=5, allow_anonymous=False, can_change_vote=True)\n", "\n", " def int_rating(self):\n", " return int(self.rating.score / self.rating.votes)\n", "\n", " # will only be set and used when transfer is not associated with a service\n", " direct_transfer_creator = models.ForeignKey(Profile,\n", " related_name='direct_transfers_created', null=True, blank=True,\n", " verbose_name=_(\"Direct transfer creator\"))\n", "\n", " # Person receiving the credits (and giving the service)\n", " credits_payee = models.ForeignKey(Profile, related_name='transfers_received',\n", " verbose_name=_(\"Credits payee\"))\n", "\n", " # Person giving the credits (and receiving the service)\n", " credits_debtor = models.ForeignKey(Profile, related_name='transfers_given',\n", " verbose_name=_(\"Credits debtor\"))\n", "\n", " service = models.ForeignKey(Service, related_name='transfers', null=True,\n", " blank=True, verbose_name=_(\"Service\"))\n", "\n", " # Small description for the received service\n", " description = models.TextField(_(u\"Description\"), max_length=300)\n", "\n", " request_date = models.DateTimeField(_(\"Transfer request date\"),\n", " auto_now=True, auto_now_add=True)\n", "\n", " confirmation_date = models.DateTimeField(_(u\"Transfer confirmation date\"),\n", " null=True)\n", "\n", " status = models.CharField(_(u\"Status\"), max_length=1, choices=TRANSFER_STATUS)\n", "\n", " is_public = models.BooleanField(_(u\"Is public\"), default=False)\n", "\n", " # credits in minutes\n", " credits = models.PositiveIntegerField(_(u\"Credits\"))\n", "\n", " def credit_hours(self):\n", " return self.credits/60.0\n", "\n", " class meta:\n", " ordering = ['-request_date']\n", "\n", " def creator(self):\n", " '''\n", " Transfer creator\n", " '''\n", " if self.service:\n", " return self.service.creator == self.credits_debtor and\\\n", " self.credits_payee or self.credits_debtor\n", " else:\n", " return self.direct_transfer_creator\n", "\n", " def recipient(self):\n", " '''\n", " the user which is not the creator\n", " '''\n", " if self.service:\n", " return self.service.creator != self.credits_debtor and\\\n", " self.credits_payee or self.credits_debtor\n", " else:\n", " return self.direct_transfer_creator == self.credits_debtor and\\\n", " self.credits_payee or self.credits_debtor\n", "\n", " def is_direct(self):\n", " return not self.service\n", "\n", " def status_readable(self):\n", " return TRANSFER_STATUS[self.status]\n", "\n", " def __unicode__(self):\n", " return self.description[0:53] + '...'\n", "\n", "signals.post_save.connect(new_transfer_email, sender=Transfer)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0, 0.02857142857142857, 0.01098901098901099, 0, 0.023809523809523808, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0.023255813953488372, 0.023255813953488372, 0.02127659574468085, 0.02857142857142857, 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0, 0, 0.013888888888888888, 0.0196078431372549, 0, 0, 0.012195121951219513, 0.024390243902439025, 0, 0, 0, 0.023809523809523808, 0, 0, 0.02127659574468085, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0, 0.05263157894736842, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015873015873015872 ]
200
0.00296
false
RowNum = int(input("Row: "))
ColumnNum = int(input("Column: "))
MatrixRow = []
MatrixColumn = []
for i in range(0, RowNum):
    for j in range(0, ColumnNum):
        MatrixColumn.append(j)
    MatrixRow.append(MatrixColumn)
    MatrixColumn = []
LineIndex = 0
j = 1
Max = ColumnNum * RowNum
while 1:
    for i in range(LineIndex, ColumnNum-1-LineIndex):
        MatrixRow[LineIndex][i] = j
        j = j + 1
    if j > Max:
        break
    for i in range(LineIndex, RowNum-1-LineIndex):
        MatrixRow[i][ColumnNum-1-LineIndex] = j
        j = j + 1
    if j > Max:
        break
    for i in range(LineIndex+1, ColumnNum-LineIndex):
        MatrixRow[RowNum-1-LineIndex][ColumnNum-i] = j
        j = j + 1
    if j > Max:
        break
    for i in range(LineIndex+1, RowNum-LineIndex):
        MatrixRow[RowNum-i][LineIndex] = j
        j = j + 1
    if j > Max:
        break
    LineIndex = LineIndex + 1
    if j > Max:
        break
for i in range(0, RowNum):
    print(MatrixRow[i])
[ "RowNum = int(input(\"Row: \"))\n", "ColumnNum = int(input(\"Column: \"))\n", "MatrixRow = []\n", "MatrixColumn = []\n", "for i in range(0, RowNum):\n", " for j in range(0, ColumnNum):\n", " MatrixColumn.append(j)\n", " MatrixRow.append(MatrixColumn)\n", " MatrixColumn = []\n", "LineIndex = 0\n", "j = 1\n", "Max = ColumnNum * RowNum\n", "while 1:\n", " for i in range(LineIndex, ColumnNum-1-LineIndex):\n", " MatrixRow[LineIndex][i] = j\n", " j = j + 1\n", " if j > Max:\n", " break\n", " for i in range(LineIndex, RowNum-1-LineIndex):\n", " MatrixRow[i][ColumnNum-1-LineIndex] = j\n", " j = j + 1\n", " if j > Max:\n", " break\n", " for i in range(LineIndex+1, ColumnNum-LineIndex):\n", " MatrixRow[RowNum-1-LineIndex][ColumnNum-i] = j\n", " j = j + 1\n", " if j > Max:\n", " break\n", " for i in range(LineIndex+1, RowNum-LineIndex):\n", " MatrixRow[RowNum-i][LineIndex] = j\n", " j = j + 1\n", " if j > Max:\n", " break\n", " LineIndex = LineIndex + 1\n", " if j > Max:\n", " break\n", "for i in range(0, RowNum):\n", " print(MatrixRow[i])\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
38
0.001012
false
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem428.py
#
# Necklace of circles
# ===================
# Published on Sunday, 19th May 2013, 01:00 am
#
# Let a, b and c be positive numbers. Let W, X, Y, Z be four collinear points
# where |WX| = a, |XY| = b, |YZ| = c and |WZ| = a + b + c. Let Cin be the
# circle having the diameter XY. Let Cout be the circle having the diameter
# WZ. The triplet (a, b, c) is called a necklace triplet if you can place k
# 3 distinct circles C1, C2, ..., Ck such that: Ci has no common interior
# points with any Cj for 1 i, j k and i j, Ci is tangent to both Cin and
# Cout for 1 i k, Ci is tangent to Ci+1 for 1 i < k, and Ck is tangent to
# C1. For example, (5, 5, 5) and (4, 3, 21) are necklace triplets, while it
# can be shown that (2, 2, 5) is not. Let T(n) be the number of necklace
# triplets (a, b, c) such that a, b and c are positive integers, and b n. For
# example, T(1) = 9, T(20) = 732 and T(3000) = 438106. Find T(1 000 000
# 000).

import projecteuler as pe

def main():
    pass

if __name__ == "__main__":
    main()
[ "# -*- coding: utf-8 -*-\n", "# ProjectEuler/src/python/problem428.py\n", "#\n", "# Necklace of circles\n", "# ===================\n", "# Published on Sunday, 19th May 2013, 01:00 am\n", "#\n", "# Let a, b and c be positive numbers. Let W, X, Y, Z be four collinear points\n", "# where |WX| = a, |XY| = b, |YZ| = c and |WZ| = a + b + c. Let Cin be the\n", "# circle having the diameter XY. Let Cout be the circle having the diameter\n", "# WZ. The triplet (a, b, c) is called a necklace triplet if you can place k\n", "# 3 distinct circles C1, C2, ..., Ck such that: Ci has no common interior\n", "# points with any Cj for 1 i, j k and i j, Ci is tangent to both Cin and\n", "# Cout for 1 i k, Ci is tangent to Ci+1 for 1 i < k, and Ck is tangent to\n", "# C1. For example, (5, 5, 5) and (4, 3, 21) are necklace triplets, while it\n", "# can be shown that (2, 2, 5) is not. Let T(n) be the number of necklace\n", "# triplets (a, b, c) such that a, b and c are positive integers, and b n. For\n", "# example, T(1) = 9, T(20) = 732 and T(3000) = 438106. Find T(1 000 000\n", "# 000).\n", "\n", "import projecteuler as pe\n", "\n", "def main():\n", " pass\n", "\n", "if __name__ == \"__main__\":\n", " main()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0, 0, 0.037037037037037035, 0 ]
27
0.004458
false
# This file is part of Plex:CS. # # Plex:CS is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Plex:CS is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Plex:CS. If not, see <http://www.gnu.org/licenses/>. from operator import itemgetter from xml.dom import minidom import unicodedata import plexcs import datetime import fnmatch import shutil import time import sys import re import os import json import xmltodict import math def multikeysort(items, columns): comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns] def comparer(left, right): for fn, mult in comparers: result = cmp(fn(left), fn(right)) if result: return mult * result else: return 0 return sorted(items, cmp=comparer) def checked(variable): if variable: return 'Checked' else: return '' def radio(variable, pos): if variable == pos: return 'Checked' else: return '' def latinToAscii(unicrap): """ From couch potato """ xlate = { 0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A', 0xc6: 'Ae', 0xc7: 'C', 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e', 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I', 0xd0: 'Th', 0xd1: 'N', 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O', 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U', 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss', 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a', 0xe6: 'ae', 0xe7: 'c', 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e', 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i', 0xf0: 'th', 0xf1: 'n', 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o', 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u', 0xfd: 'y', 0xfe: 'th', 0xff: 'y', 0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}', 0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}', 0xa9: '{C}', 0xaa: '{^a}', 0xab: '&lt;&lt;', 0xac: '{not}', 0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}', 0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'", 0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}', 0xb9: '{^1}', 0xba: '{^o}', 0xbb: '&gt;&gt;', 0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?', 0xd7: '*', 0xf7: '/' } r = '' if unicrap: for i in unicrap: if ord(i) in xlate: r += xlate[ord(i)] elif ord(i) >= 0x80: pass else: r += str(i) return r def convert_milliseconds(ms): seconds = ms / 1000 gmtime = time.gmtime(seconds) if seconds > 3600: minutes = time.strftime("%H:%M:%S", gmtime) else: minutes = time.strftime("%M:%S", gmtime) return minutes def convert_milliseconds_to_minutes(ms): if str(ms).isdigit(): seconds = float(ms) / 1000 minutes = round(seconds / 60, 0) return math.trunc(minutes) return 0 def convert_seconds(s): gmtime = time.gmtime(s) if s > 3600: minutes = time.strftime("%H:%M:%S", gmtime) else: minutes = time.strftime("%M:%S", gmtime) return minutes def today(): today = datetime.date.today() yyyymmdd = datetime.date.isoformat(today) return yyyymmdd def now(): now = datetime.datetime.now() return now.strftime("%Y-%m-%d %H:%M:%S") def human_duration(s): hd = '' if str(s).isdigit(): d = int(s / 84600) h = int((s % 84600) / 
3600) m = int(((s % 84600) % 3600) / 60) s = int(((s % 84600) % 3600) % 60) hd_list = [] if d > 0: hd_list.append(str(d) + ' days') if h > 0: hd_list.append(str(h) + ' hrs') if m > 0: hd_list.append(str(m) + ' mins') if s > 0: hd_list.append(str(s) + ' secs') hd = ' '.join(hd_list) return hd else: return hd def get_age(date): try: split_date = date.split('-') except: return False try: days_old = int(split_date[0]) * 365 + int(split_date[1]) * 30 + int(split_date[2]) except IndexError: days_old = False return days_old def bytes_to_mb(bytes): mb = int(bytes) / 1048576 size = '%.1f MB' % mb return size def mb_to_bytes(mb_str): result = re.search('^(\d+(?:\.\d+)?)\s?(?:mb)?', mb_str, flags=re.I) if result: return int(float(result.group(1)) * 1048576) def piratesize(size): split = size.split(" ") factor = float(split[0]) unit = split[1].upper() if unit == 'MiB': size = factor * 1048576 elif unit == 'MB': size = factor * 1000000 elif unit == 'GiB': size = factor * 1073741824 elif unit == 'GB': size = factor * 1000000000 elif unit == 'KiB': size = factor * 1024 elif unit == 'KB': size = factor * 1000 elif unit == "B": size = factor else: size = 0 return size def replace_all(text, dic, normalize=False): if not text: return '' for i, j in dic.iteritems(): if normalize: try: if sys.platform == 'darwin': j = unicodedata.normalize('NFD', j) else: j = unicodedata.normalize('NFC', j) except TypeError: j = unicodedata.normalize('NFC', j.decode(plexcs.SYS_ENCODING, 'replace')) text = text.replace(i, j) return text def replace_illegal_chars(string, type="file"): if type == "file": string = re.sub('[\?"*:|<>/]', '_', string) if type == "folder": string = re.sub('[:\?<>"|]', '_', string) return string def cleanName(string): pass1 = latinToAscii(string).lower() out_string = re.sub('[\.\-\/\!\@\#\$\%\^\&\*\(\)\+\-\"\'\,\;\:\[\]\{\}\<\>\=\_]', '', pass1).encode('utf-8') return out_string def cleanTitle(title): title = re.sub('[\.\-\/\_]', ' ', title).lower() # Strip out extra whitespace title = ' '.join(title.split()) title = title.title() return title def split_path(f): """ Split a path into components, starting with the drive letter (if any). Given a path, os.path.join(*split_path(f)) should be path equal to f. """ components = [] drive, path = os.path.splitdrive(f) # Strip the folder from the path, iterate until nothing is left while True: path, folder = os.path.split(path) if folder: components.append(folder) else: if path: components.append(path) break # Append the drive (if any) if drive: components.append(drive) # Reverse components components.reverse() # Done return components def extract_logline(s): # Default log format pattern = re.compile(r'(?P<timestamp>.*?)\s\-\s(?P<level>.*?)\s*\:\:\s(?P<thread>.*?)\s\:\s(?P<message>.*)', re.VERBOSE) match = pattern.match(s) if match: timestamp = match.group("timestamp") level = match.group("level") thread = match.group("thread") message = match.group("message") return (timestamp, level, thread, message) else: return None def split_string(mystring, splitvar=','): mylist = [] for each_word in mystring.split(splitvar): mylist.append(each_word.strip()) return mylist def create_https_certificates(ssl_cert, ssl_key): """ Create a pair of self-signed HTTPS certificares and store in them in 'ssl_cert' and 'ssl_key'. Method assumes pyOpenSSL is installed. This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard). 
""" from plexcs import logger from OpenSSL import crypto from certgen import createKeyPair, createCertRequest, createCertificate, \ TYPE_RSA, serial # Create the CA Certificate cakey = createKeyPair(TYPE_RSA, 2048) careq = createCertRequest(cakey, CN="Certificate Authority") cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years pkey = createKeyPair(TYPE_RSA, 2048) req = createCertRequest(pkey, CN="Plex:CS") cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years # Save the key and certificate to disk try: with open(ssl_key, "w") as fp: fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) with open(ssl_cert, "w") as fp: fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) except IOError as e: logger.error("Error creating SSL key and certificate: %s", e) return False return True def cast_to_float(s): try: return float(s) except ValueError: return -1 def convert_xml_to_json(xml): o = xmltodict.parse(xml) return json.dumps(o) def convert_xml_to_dict(xml): o = xmltodict.parse(xml) return o def get_percent(value1, value2): if str(value1).isdigit() and str(value2).isdigit(): value1 = cast_to_float(value1) value2 = cast_to_float(value2) else: return 0 if value1 != 0 and value2 != 0: percent = (value1 / value2) * 100 else: percent = 0 return math.trunc(percent) def parse_xml(unparsed=None): from plexcs import logger if unparsed: try: xml_parse = minidom.parseString(unparsed) return xml_parse except Exception as e: logger.warn("Error parsing XML. %s" % e) return [] except: logger.warn("Error parsing XML.") return [] else: logger.warn("XML parse request made but no data received.") return [] """ Validate xml keys to make sure they exist and return their attribute value, return blank value is none found """ def get_xml_attr(xml_key, attribute, return_bool=False, default_return=''): if xml_key.getAttribute(attribute): if return_bool: return True else: return xml_key.getAttribute(attribute) else: if return_bool: return False else: return default_return def process_json_kwargs(json_kwargs): params = {} if json_kwargs: params = json.loads(json_kwargs) return params def sanitize(string): if string: return unicode(string).replace('<','&lt;').replace('>','&gt;') else: return ''
[ "# This file is part of Plex:CS.\n", "#\n", "# Plex:CS is free software: you can redistribute it and/or modify\n", "# it under the terms of the GNU General Public License as published by\n", "# the Free Software Foundation, either version 3 of the License, or\n", "# (at your option) any later version.\n", "#\n", "# Plex:CS is distributed in the hope that it will be useful,\n", "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "# GNU General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU General Public License\n", "# along with Plex:CS. If not, see <http://www.gnu.org/licenses/>.\n", "\n", "from operator import itemgetter\n", "from xml.dom import minidom\n", "\n", "import unicodedata\n", "import plexcs\n", "import datetime\n", "import fnmatch\n", "import shutil\n", "import time\n", "import sys\n", "import re\n", "import os\n", "import json\n", "import xmltodict\n", "import math\n", "\n", "\n", "def multikeysort(items, columns):\n", " comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]\n", "\n", " def comparer(left, right):\n", " for fn, mult in comparers:\n", " result = cmp(fn(left), fn(right))\n", " if result:\n", " return mult * result\n", " else:\n", " return 0\n", "\n", " return sorted(items, cmp=comparer)\n", "\n", "\n", "def checked(variable):\n", " if variable:\n", " return 'Checked'\n", " else:\n", " return ''\n", "\n", "\n", "def radio(variable, pos):\n", "\n", " if variable == pos:\n", " return 'Checked'\n", " else:\n", " return ''\n", "\n", "\n", "def latinToAscii(unicrap):\n", " \"\"\"\n", " From couch potato\n", " \"\"\"\n", " xlate = {\n", " 0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',\n", " 0xc6: 'Ae', 0xc7: 'C',\n", " 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e',\n", " 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',\n", " 0xd0: 'Th', 0xd1: 'N',\n", " 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',\n", " 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',\n", " 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',\n", " 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',\n", " 0xe6: 'ae', 0xe7: 'c',\n", " 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',\n", " 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',\n", " 0xf0: 'th', 0xf1: 'n',\n", " 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',\n", " 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',\n", " 0xfd: 'y', 0xfe: 'th', 0xff: 'y',\n", " 0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',\n", " 0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',\n", " 0xa9: '{C}', 0xaa: '{^a}', 0xab: '&lt;&lt;', 0xac: '{not}',\n", " 0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',\n", " 0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: \"'\",\n", " 0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',\n", " 0xb9: '{^1}', 0xba: '{^o}', 0xbb: '&gt;&gt;',\n", " 0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',\n", " 0xd7: '*', 0xf7: '/'\n", " }\n", "\n", " r = ''\n", " if unicrap:\n", " for i in unicrap:\n", " if ord(i) in xlate:\n", " r += xlate[ord(i)]\n", " elif ord(i) >= 0x80:\n", " pass\n", " else:\n", " r += str(i)\n", "\n", " return r\n", "\n", "\n", "def convert_milliseconds(ms):\n", "\n", " seconds = ms / 1000\n", " gmtime = time.gmtime(seconds)\n", " if seconds > 3600:\n", " minutes = time.strftime(\"%H:%M:%S\", gmtime)\n", " else:\n", " 
minutes = time.strftime(\"%M:%S\", gmtime)\n", "\n", " return minutes\n", "\n", "def convert_milliseconds_to_minutes(ms):\n", "\n", " if str(ms).isdigit():\n", " seconds = float(ms) / 1000\n", " minutes = round(seconds / 60, 0)\n", "\n", " return math.trunc(minutes)\n", "\n", " return 0\n", "\n", "def convert_seconds(s):\n", "\n", " gmtime = time.gmtime(s)\n", " if s > 3600:\n", " minutes = time.strftime(\"%H:%M:%S\", gmtime)\n", " else:\n", " minutes = time.strftime(\"%M:%S\", gmtime)\n", "\n", " return minutes\n", "\n", "\n", "def today():\n", " today = datetime.date.today()\n", " yyyymmdd = datetime.date.isoformat(today)\n", " return yyyymmdd\n", "\n", "\n", "def now():\n", " now = datetime.datetime.now()\n", " return now.strftime(\"%Y-%m-%d %H:%M:%S\")\n", "\n", "def human_duration(s):\n", "\n", " hd = ''\n", "\n", " if str(s).isdigit():\n", " d = int(s / 84600)\n", " h = int((s % 84600) / 3600)\n", " m = int(((s % 84600) % 3600) / 60)\n", " s = int(((s % 84600) % 3600) % 60)\n", "\n", " hd_list = []\n", " if d > 0:\n", " hd_list.append(str(d) + ' days')\n", " if h > 0:\n", " hd_list.append(str(h) + ' hrs')\n", " if m > 0:\n", " hd_list.append(str(m) + ' mins')\n", " if s > 0:\n", " hd_list.append(str(s) + ' secs')\n", "\n", " hd = ' '.join(hd_list)\n", "\n", " return hd\n", " else:\n", " return hd\n", "\n", "def get_age(date):\n", "\n", " try:\n", " split_date = date.split('-')\n", " except:\n", " return False\n", "\n", " try:\n", " days_old = int(split_date[0]) * 365 + int(split_date[1]) * 30 + int(split_date[2])\n", " except IndexError:\n", " days_old = False\n", "\n", " return days_old\n", "\n", "\n", "def bytes_to_mb(bytes):\n", "\n", " mb = int(bytes) / 1048576\n", " size = '%.1f MB' % mb\n", " return size\n", "\n", "\n", "def mb_to_bytes(mb_str):\n", " result = re.search('^(\\d+(?:\\.\\d+)?)\\s?(?:mb)?', mb_str, flags=re.I)\n", " if result:\n", " return int(float(result.group(1)) * 1048576)\n", "\n", "\n", "def piratesize(size):\n", " split = size.split(\" \")\n", " factor = float(split[0])\n", " unit = split[1].upper()\n", "\n", " if unit == 'MiB':\n", " size = factor * 1048576\n", " elif unit == 'MB':\n", " size = factor * 1000000\n", " elif unit == 'GiB':\n", " size = factor * 1073741824\n", " elif unit == 'GB':\n", " size = factor * 1000000000\n", " elif unit == 'KiB':\n", " size = factor * 1024\n", " elif unit == 'KB':\n", " size = factor * 1000\n", " elif unit == \"B\":\n", " size = factor\n", " else:\n", " size = 0\n", "\n", " return size\n", "\n", "\n", "def replace_all(text, dic, normalize=False):\n", "\n", " if not text:\n", " return ''\n", "\n", " for i, j in dic.iteritems():\n", " if normalize:\n", " try:\n", " if sys.platform == 'darwin':\n", " j = unicodedata.normalize('NFD', j)\n", " else:\n", " j = unicodedata.normalize('NFC', j)\n", " except TypeError:\n", " j = unicodedata.normalize('NFC', j.decode(plexcs.SYS_ENCODING, 'replace'))\n", " text = text.replace(i, j)\n", " return text\n", "\n", "\n", "def replace_illegal_chars(string, type=\"file\"):\n", " if type == \"file\":\n", " string = re.sub('[\\?\"*:|<>/]', '_', string)\n", " if type == \"folder\":\n", " string = re.sub('[:\\?<>\"|]', '_', string)\n", "\n", " return string\n", "\n", "\n", "def cleanName(string):\n", "\n", " pass1 = latinToAscii(string).lower()\n", " out_string = re.sub('[\\.\\-\\/\\!\\@\\#\\$\\%\\^\\&\\*\\(\\)\\+\\-\\\"\\'\\,\\;\\:\\[\\]\\{\\}\\<\\>\\=\\_]', '', pass1).encode('utf-8')\n", "\n", " return out_string\n", "\n", "\n", "def cleanTitle(title):\n", "\n", " title = 
re.sub('[\\.\\-\\/\\_]', ' ', title).lower()\n", "\n", " # Strip out extra whitespace\n", " title = ' '.join(title.split())\n", "\n", " title = title.title()\n", "\n", " return title\n", "\n", "\n", "def split_path(f):\n", " \"\"\"\n", " Split a path into components, starting with the drive letter (if any). Given\n", " a path, os.path.join(*split_path(f)) should be path equal to f.\n", " \"\"\"\n", "\n", " components = []\n", " drive, path = os.path.splitdrive(f)\n", "\n", " # Strip the folder from the path, iterate until nothing is left\n", " while True:\n", " path, folder = os.path.split(path)\n", "\n", " if folder:\n", " components.append(folder)\n", " else:\n", " if path:\n", " components.append(path)\n", "\n", " break\n", "\n", " # Append the drive (if any)\n", " if drive:\n", " components.append(drive)\n", "\n", " # Reverse components\n", " components.reverse()\n", "\n", " # Done\n", " return components\n", "\n", "\n", "def extract_logline(s):\n", " # Default log format\n", " pattern = re.compile(r'(?P<timestamp>.*?)\\s\\-\\s(?P<level>.*?)\\s*\\:\\:\\s(?P<thread>.*?)\\s\\:\\s(?P<message>.*)', re.VERBOSE)\n", " match = pattern.match(s)\n", " if match:\n", " timestamp = match.group(\"timestamp\")\n", " level = match.group(\"level\")\n", " thread = match.group(\"thread\")\n", " message = match.group(\"message\")\n", " return (timestamp, level, thread, message)\n", " else:\n", " return None\n", "\n", "\n", "def split_string(mystring, splitvar=','):\n", " mylist = []\n", " for each_word in mystring.split(splitvar):\n", " mylist.append(each_word.strip())\n", " return mylist\n", "\n", "def create_https_certificates(ssl_cert, ssl_key):\n", " \"\"\"\n", " Create a pair of self-signed HTTPS certificares and store in them in\n", " 'ssl_cert' and 'ssl_key'. 
Method assumes pyOpenSSL is installed.\n", "\n", " This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard).\n", " \"\"\"\n", "\n", " from plexcs import logger\n", "\n", " from OpenSSL import crypto\n", " from certgen import createKeyPair, createCertRequest, createCertificate, \\\n", " TYPE_RSA, serial\n", "\n", " # Create the CA Certificate\n", " cakey = createKeyPair(TYPE_RSA, 2048)\n", " careq = createCertRequest(cakey, CN=\"Certificate Authority\")\n", " cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years\n", "\n", " pkey = createKeyPair(TYPE_RSA, 2048)\n", " req = createCertRequest(pkey, CN=\"Plex:CS\")\n", " cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years\n", "\n", " # Save the key and certificate to disk\n", " try:\n", " with open(ssl_key, \"w\") as fp:\n", " fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))\n", " with open(ssl_cert, \"w\") as fp:\n", " fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n", " except IOError as e:\n", " logger.error(\"Error creating SSL key and certificate: %s\", e)\n", " return False\n", "\n", " return True\n", "\n", "\n", "def cast_to_float(s):\n", " try:\n", " return float(s)\n", " except ValueError:\n", " return -1\n", "\n", "\n", "def convert_xml_to_json(xml):\n", " o = xmltodict.parse(xml)\n", " return json.dumps(o)\n", "\n", "\n", "def convert_xml_to_dict(xml):\n", " o = xmltodict.parse(xml)\n", " return o\n", "\n", "\n", "def get_percent(value1, value2):\n", "\n", " if str(value1).isdigit() and str(value2).isdigit():\n", " value1 = cast_to_float(value1)\n", " value2 = cast_to_float(value2)\n", " else:\n", " return 0\n", "\n", " if value1 != 0 and value2 != 0:\n", " percent = (value1 / value2) * 100\n", " else:\n", " percent = 0\n", "\n", " return math.trunc(percent)\n", "\n", "def parse_xml(unparsed=None):\n", " from plexcs import logger\n", "\n", " if unparsed:\n", " try:\n", " xml_parse = minidom.parseString(unparsed)\n", " return xml_parse\n", " except Exception as e:\n", " logger.warn(\"Error parsing XML. %s\" % e)\n", " return []\n", " except:\n", " logger.warn(\"Error parsing XML.\")\n", " return []\n", " else:\n", " logger.warn(\"XML parse request made but no data received.\")\n", " return []\n", "\n", "\"\"\"\n", "Validate xml keys to make sure they exist and return their attribute value, return blank value is none found\n", "\"\"\"\n", "def get_xml_attr(xml_key, attribute, return_bool=False, default_return=''):\n", " if xml_key.getAttribute(attribute):\n", " if return_bool:\n", " return True\n", " else:\n", " return xml_key.getAttribute(attribute)\n", " else:\n", " if return_bool:\n", " return False\n", " else:\n", " return default_return\n", "\n", "def process_json_kwargs(json_kwargs):\n", " params = {}\n", " if json_kwargs:\n", " params = json.loads(json_kwargs)\n", "\n", " return params\n", "\n", "def sanitize(string):\n", " if string:\n", " return unicode(string).replace('<','&lt;').replace('>','&gt;')\n", " else:\n", " return ''\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.007692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.043478260869565216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05263157894736842, 0, 0, 0, 0.08333333333333333, 0, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0547945205479452, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0.019230769230769232, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0.23893805309734514, 0, 0, 0, 0, 0, 0, 0.07547169811320754, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.019230769230769232, 0, 0, 0, 0.019801980198019802, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0, 0, 0, 0, 0, 0, 0.25, 0.009174311926605505, 0, 0.013157894736842105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0.045454545454545456, 0, 0.028169014084507043, 0, 0 ]
438
0.002839
false
""" Represents the Depend settings """ # Always try to import cElementTree since it's faster if it exists try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET import platform from os.path import join, abspath, exists from pylib.logwrapper import LogWrapper from pylib.depend.depsource import DepSource # XML Settings for Download of Depends class DependSettings(object): def __init__(self): """Dependency Settings""" super().__init__() self.log = LogWrapper.getlogger() # Path to the config file self.ConfigPath = None self.platform = None # XML Root Tag self.xmlroot = None # custom properties self.DepsDirectory = None self.ArchiveDirectory = None self.SoxVersion = None self.CMakeGenerator = None # List of Sources self.sources = [] def read_element(self, tag): """Read XML Value Element""" nextval = next(self.xmlroot.iter(tag), None) if nextval == None : raise ValueError('Element not found: ' + tag) return nextval.text def loadxml(self): """Load XML""" # Load in the xml tree = ET.ElementTree(file=self.ConfigPath) self.xmlroot = tree.getroot() if self.xmlroot.tag != 'Settings': raise ValueError('Root Element is not Settings') # Custom Settings self.DepsDirectory = self.read_element('DepsDirectory') self.DepsDirectory = abspath(self.DepsDirectory) self.ArchiveDirectory = self.read_element('ArchiveDirectory') self.ArchiveDirectory = join(self.DepsDirectory, self.ArchiveDirectory) self.SoxVersion = self.read_element('SoxVersion') self.CMakeGenerator = self.read_element('CMakeGenerator') # Set the Archive directory for downloaded sources DepSource.ArchiveDir = self.ArchiveDirectory # Set the root Extract directory for extracting sources DepSource.RootExtractDir = self.DepsDirectory # Load in the list of download sources self.sources = DepSource.parsexml(self.xmlroot) return def getdeps(self): """Download and Extract Sources""" for source in self.sources: self.log.info("") self.log.info("#####################################################") # Skip anything already extracted extractdir = abspath(join(DepSource.RootExtractDir, source.destsubdir)) if exists(extractdir): self.log.warn("Deps Subdir: " + source.destsubdir + " already exists, skipping") continue source.Extracted = False downloaded = source.download() if downloaded == False: self.log.error("Download Failed") else: source.Extracted = source.extract() # Remove the archive file source.remove_archivefile() # Re-jig the directories for those that need it for source in self.sources: if source.Extracted == True: source.movetoparent_multiple() return def get_configpath(self): log = LogWrapper.getlogger() """Determine which config filename / path to use""" self.platform = platform.system() settingsfile = "" if self.platform == "Windows": settingsfile = "Settings_win32.xml" elif self.platform == "Linux": settingsfile = "Settings_linux.xml" else: log.critical("Unsupported platform") self.ConfigPath = None self.log.info("Platform identified as: " + self.platform) self.log.info("Settings file: " + settingsfile) self.ConfigPath = abspath(settingsfile) return self.ConfigPath
[ "\"\"\"\n", "Represents the Depend settings\n", "\"\"\"\n", "\n", "# Always try to import cElementTree since it's faster if it exists\n", "try:\n", " import xml.etree.cElementTree as ET\n", "except ImportError:\n", " import xml.etree.ElementTree as ET\n", "\n", "import platform\n", "from os.path import join, abspath, exists\n", "from pylib.logwrapper import LogWrapper\n", "from pylib.depend.depsource import DepSource\n", "\n", "# XML Settings for Download of Depends\n", "class DependSettings(object):\n", "\n", " def __init__(self):\n", " \"\"\"Dependency Settings\"\"\"\n", " super().__init__()\n", " self.log = LogWrapper.getlogger()\n", "\n", " # Path to the config file\n", " self.ConfigPath = None\n", " self.platform = None\n", "\n", " # XML Root Tag\n", " self.xmlroot = None\n", "\n", " # custom properties\n", " self.DepsDirectory = None\n", " self.ArchiveDirectory = None\n", " self.SoxVersion = None\n", " self.CMakeGenerator = None\n", "\n", " # List of Sources\n", " self.sources = []\n", "\n", " def read_element(self, tag):\n", " \"\"\"Read XML Value Element\"\"\"\n", " nextval = next(self.xmlroot.iter(tag), None)\n", " if nextval == None : raise ValueError('Element not found: ' + tag)\n", " return nextval.text\n", "\n", " def loadxml(self):\n", " \"\"\"Load XML\"\"\"\n", " # Load in the xml\n", " tree = ET.ElementTree(file=self.ConfigPath)\n", " self.xmlroot = tree.getroot()\n", " if self.xmlroot.tag != 'Settings':\n", " raise ValueError('Root Element is not Settings')\n", "\n", " # Custom Settings\n", " self.DepsDirectory = self.read_element('DepsDirectory')\n", " self.DepsDirectory = abspath(self.DepsDirectory)\n", " self.ArchiveDirectory = self.read_element('ArchiveDirectory')\n", " self.ArchiveDirectory = join(self.DepsDirectory, self.ArchiveDirectory)\n", " self.SoxVersion = self.read_element('SoxVersion')\n", " self.CMakeGenerator = self.read_element('CMakeGenerator')\n", "\n", " # Set the Archive directory for downloaded sources\n", " DepSource.ArchiveDir = self.ArchiveDirectory\n", " # Set the root Extract directory for extracting sources\n", " DepSource.RootExtractDir = self.DepsDirectory\n", "\n", " # Load in the list of download sources\n", " self.sources = DepSource.parsexml(self.xmlroot)\n", " return\n", "\n", " def getdeps(self):\n", " \"\"\"Download and Extract Sources\"\"\"\n", " for source in self.sources:\n", " self.log.info(\"\")\n", " self.log.info(\"#####################################################\")\n", "\n", " # Skip anything already extracted\n", " extractdir = abspath(join(DepSource.RootExtractDir, source.destsubdir))\n", " if exists(extractdir):\n", " self.log.warn(\"Deps Subdir: \" + source.destsubdir + \" already exists, skipping\")\n", " continue\n", "\n", " source.Extracted = False\n", " downloaded = source.download()\n", " if downloaded == False:\n", " self.log.error(\"Download Failed\")\n", " else:\n", " source.Extracted = source.extract()\n", "\n", " # Remove the archive file\n", " source.remove_archivefile()\n", "\n", " # Re-jig the directories for those that need it\n", " for source in self.sources:\n", " if source.Extracted == True:\n", " source.movetoparent_multiple()\n", " return\n", "\n", " def get_configpath(self):\n", " log = LogWrapper.getlogger()\n", " \"\"\"Determine which config filename / path to use\"\"\"\n", " self.platform = platform.system()\n", " settingsfile = \"\"\n", " if self.platform == \"Windows\":\n", " settingsfile = \"Settings_win32.xml\"\n", " elif self.platform == \"Linux\":\n", " settingsfile = 
\"Settings_linux.xml\"\n", " else:\n", " log.critical(\"Unsupported platform\")\n", " self.ConfigPath = None\n", " self.log.info(\"Platform identified as: \" + self.platform)\n", " self.log.info(\"Settings file: \" + settingsfile)\n", " self.ConfigPath = abspath(settingsfile)\n", " return self.ConfigPath\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0.011904761904761904, 0, 0.010309278350515464, 0, 0, 0, 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02127659574468085, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
114
0.001588
false
# coding: UTF-8
# Name: 配置信息
# Author: LYC
# Created: 2014-04-08

import re

class Configuration(object):
    """
    配置
    """
    UserDeclarator = ":"
    VarPrefix = "$"
    FuncPrefix = "#"
    ConstantPrefix = "$$"

    VarRealPrefix = "_"
    FuncRealPrefix = "func_"

    UnknownNumber = "$$"
    AnswerConstant = "$$ans"

    AutoNumFunc = "_"

    UserVarRegex = re.compile("^\s*(\$[a-z]+\d*)\s*$")
    UserFuncRegex = re.compile("^\s*(#[a-z]+\d*)\s*$")
    HexRegex = re.compile("0x[0-9a-f]+")
    OctRegex = re.compile("0o[0-8]+")

class OPLEVEL(object):
    """
    运算符优先级与权值
    """
    LBK = 00,
    CMM = 10,
    ADD = 20,
    SUB = 20,
    MUL = 30,
    DIV = 30,
    MOD = 40,
    POW = 40,
    UOP = 50,


class OPRegex(object):
    """
    运算符正则
    """

    UOPRegex = re.compile(r"^(\-|\+|[a-z]\w*)$")
    NUMRegex = re.compile(r"^(\.|\d)+[jl]?$")
    BOPRegex = re.compile(r"^([^\w\(\)\[\]]+|[a-z]+)$")
    VARRegex = re.compile(r"^_[a-z_0-9]+$")
    LBKRegex = re.compile(r"^[\(\[]$")
    RBKRegex = re.compile(r"^[\)\]]$")
    NONRegex = re.compile(r"^$")
[ "# coding: UTF-8\n", "# Name: 配置信息\n", "# Author: LYC\n", "# Created: 2014-04-08\n", "\n", "import re\n", "\n", "class Configuration(object):\n", " \"\"\"\n", " 配置\n", " \"\"\"\n", " UserDeclarator = \":\"\n", " VarPrefix = \"$\"\n", " FuncPrefix = \"#\"\n", " ConstantPrefix = \"$$\"\n", "\n", " VarRealPrefix = \"_\"\n", " FuncRealPrefix = \"func_\"\n", "\n", " UnknownNumber = \"$$\"\n", " AnswerConstant = \"$$ans\"\n", "\n", " AutoNumFunc = \"_\"\n", "\n", " UserVarRegex = re.compile(\"^\\s*(\\$[a-z]+\\d*)\\s*$\")\n", " UserFuncRegex = re.compile(\"^\\s*(#[a-z]+\\d*)\\s*$\")\n", " HexRegex = re.compile(\"0x[0-9a-f]+\")\n", " OctRegex = re.compile(\"0o[0-8]+\")\n", "\n", "class OPLEVEL(object):\n", " \"\"\"\n", " 运算符优先级与权值\n", " \"\"\"\n", " LBK = 00,\n", " CMM = 10,\n", " ADD = 20,\n", " SUB = 20,\n", " MUL = 30,\n", " DIV = 30,\n", " MOD = 40,\n", " POW = 40,\n", " UOP = 50,\n", "\n", "\n", "class OPRegex(object):\n", " \"\"\"\n", " 运算符正则\n", " \"\"\"\n", "\n", " UOPRegex = re.compile(r\"^(\\-|\\+|[a-z]\\w*)$\")\n", " NUMRegex = re.compile(r\"^(\\.|\\d)+[jl]?$\")\n", " BOPRegex = re.compile(r\"^([^\\w\\(\\)\\[\\]]+|[a-z]+)$\")\n", " VARRegex = re.compile(r\"^_[a-z_0-9]+$\")\n", " LBKRegex = re.compile(r\"^[\\(\\[]$\")\n", " RBKRegex = re.compile(r\"^[\\)\\]]$\")\n", " NONRegex = re.compile(r\"^$\")" ]
[ 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07272727272727272, 0.05454545454545454, 0, 0, 0, 0.043478260869565216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125 ]
56
0.004223
false
# -*- coding: utf-8 -*- """ Region - descendant class of BaseRegion. """ import datetime import os import time from common_exceptions import FailExit, FindFailed from Location import Location from BaseRegion import BaseRegion, logger, DELAY_BETWEEN_CV_ATTEMPT from Match import Match from Pattern import Pattern from Screen import Screen from Settings import settings class Region(BaseRegion): def offset(self, *args, **kwargs): """ Return area moved relates to self. Option 1 (Sikuli-like): loc_offs := args[0] - Location type; where to move; (w,h) don't change Option 2: x_offs := args[0] - int; where to move by x y_offs := args[1] - int; where to move by y (w,h) don't change """ if len(kwargs) != 0: raise FailExit('Unknown keys in kwargs = %s' % str(kwargs)) offset_title = 'Offset of {}'.format(self.title) if len(args) == 2 and \ (isinstance(args[0], int) or isinstance(args[0], float)) and \ (isinstance(args[1], int) or isinstance(args[1], float)): return Region(self.x + int(args[0]), self.y + int(args[1]), self.w, self.h, find_timeout=self._find_timeout, title=offset_title) elif len(args) == 1 and isinstance(args[0], Location): return Region(self.x + args[0].x, self.y + args[0].y, self.w, self.h, find_timeout=self._find_timeout, title=offset_title) else: raise FailExit('Incorrect offset() method call:\n\targs = {}'.format(args)) def right(self, length=None): """ Return right area relates to self, do not including self. Height of new area is equal to height of self. Width of new area is equal to 'length' or till the end of the screen """ right_title = 'Region right of {}'.format(self.title) try: if length is None: scr = Region(*Screen(self.screen_number).area) reg = Region(self.x + self.w, self.y, (scr.x + scr.w - 1) - (self.x + self.w) + 1, self.h, find_timeout=self._find_timeout, title=right_title) elif isinstance(length, int) and length > 0: reg = Region(self.x + self.w, self.y, length, self.h, find_timeout=self._find_timeout, title=right_title) else: raise FailExit('Incorrect length: type is {type}; value is {length}'.format( typr=str(type(length)), length=str(length))) except FailExit: raise FailExit('Incorrect right() method call:\n\tlength = {length}'.format( length=length)) return reg def left(self, length=None): """ Return left area relates to self, do not including self. Height of new area is equal to height of self. Width of new area is equal to 'length' or till the end of the screen if 'length' is not set """ left_title = 'Region left of {}'.format(self.title) try: if length is None: scr = Region(*Screen(self.screen_number).area) reg = Region(scr.x, self.y, (self.x - 1) - scr.x + 1, self.h, find_timeout=self._find_timeout, title=left_title) elif isinstance(length, int) and length > 0: reg = Region(self.x - length, self.y, length, self.h, find_timeout=self._find_timeout, title=left_title) else: raise FailExit('Incorrect length: type is {type}; value is {length}'.format( typr=str(type(length)), length=str(length))) except FailExit: raise FailExit('Incorrect left() method call:\n\tlength = {length}'.format( length=length)) return reg def above(self, length=None): """ Return top area relates to self, do not including self. Width of new area is equal to width of self. 
Height of new area is equal to 'length' or till the end of the screen if 'length' is not set """ try: if length is None: scr = Region(*Screen(self.screen_number).area) reg = Region(self.x, scr.y, self.w, (self.y - 1) - scr.y + 1, find_timeout=self._find_timeout, title='Region top of %s' % self.title) elif isinstance(length, int) and length > 0: reg = Region(self.x, self.y - length, self.w, length, find_timeout=self._find_timeout, title='Region top of %s' % self.title) else: raise FailExit('Incorrect length: type is {type}; value is {length}'.format( typr=str(type(length)), length=str(length))) except FailExit: raise FailExit('Incorrect above() method call:\n\tlength = {length}'.format( length=length)) return reg def below(self, length=None): """ Return bottom area relates to self, do not including self. Width of new area is equal to width of self. Height of new area is equal to 'length' or till the end of the screen if 'length' is not set """ try: if length is None: scr = Region(*Screen(self.screen_number).area) reg = Region(self.x, self.y + self.h, self.w, (scr.y + scr.h - 1) - (self.y + self.h) + 1, find_timeout=self._find_timeout, title='Region bottom of %s' % self.title) elif isinstance(length, int) and length > 0: reg = Region(self.x, self.y + self.h, self.w, length, find_timeout=self._find_timeout, title='Region bottom of %s' % self.title) else: raise FailExit('Incorrect length: type is {type}; value is {length}'.format( typr=str(type(length)), length=str(length))) except FailExit: raise FailExit('Incorrect below() method call:\n\tlength = {length}'.format( length=length)) return reg def nearby(self, length=0): """ Return area around self, including self. """ try: if isinstance(length, int) and ((length >= 0) or (length < 0 and (-2 * length) < self.w and (-2 * length) < self.h)): reg = Region(self.x - length, self.y - length, self.w + 2 * length, self.h + 2 * length, find_timeout=self._find_timeout, title='Nearby region of {}'.format(self.title)) else: raise FailExit('Incorrect length: type is {type}; value is {length}'.format( typr=str(type(length)), length=str(length))) except FailExit: raise FailExit('Incorrect nearby() method call:\n\tlength = {length}'.format( length=length)) return reg def find_all(self, pattern, delay_before=0): err_msg = 'Incorrect find_all() method call:' \ '\n\tpattern = {pattern}\n\tdelay_before = {delay}'.format( pattern=str(pattern).split(os.pathsep)[-1], delay=delay_before) try: delay_before = float(delay_before) except ValueError: raise FailExit(err_msg) if isinstance(pattern, str): pattern = Pattern(pattern) if not isinstance(pattern, Pattern): raise FailExit(err_msg) time.sleep(delay_before) results = self._find(pattern, self.search_area) self._last_match = map(lambda pt: Match(pt[0], pt[1], pattern.get_w, pattern.get_h, pt[2], pattern), results) logger.info('total found {count} matches of "{file}"'.format( count=len(self._last_match), file=pattern.get_filename(full_path=False))) return self._last_match def _wait_for_appear_or_vanish(self, pattern, timeout, condition): """ pattern - could be String or List. If isinstance(pattern, list), the first element will return. 
It can be used when it's necessary to find one of the several images """ fail_exit_text = 'bad "pattern" argument; it should be a string (path to image file) or Pattern object: {}' if not isinstance(pattern, list): pattern = [pattern] for (_i, p) in enumerate(pattern): if isinstance(p, str): pattern[_i] = Pattern(p) elif not isinstance(p, Pattern): raise FailExit(fail_exit_text.format(p)) if timeout is None: timeout = self._find_timeout else: try: timeout = float(timeout) if timeout < 0: raise ValueError except ValueError: raise FailExit('Incorrect argument: timeout = {}'.format(timeout)) prev_field = None elaps_time = 0 while True: if prev_field is None or (prev_field != self.search_area).all(): for ptn in pattern: results = self._find(ptn, self.search_area) if condition == 'appear': if len(results) != 0: # Found something. Choose one result with best 'score'. # If several results has the same 'score' a first found result will choose res = max(results, key=lambda x: x[2]) logger.info(' "%s" has been found in(%i, %i)' % ( ptn.get_filename(full_path=False), res[0], res[1])) return Match(int(res[0] / self.scaling_factor), int(res[1] / self.scaling_factor), int(ptn.get_w / self.scaling_factor), int(ptn.get_h / self.scaling_factor), res[2], ptn) elif condition == 'vanish': if len(results) == 0: logger.info('"{}" has vanished'.format(ptn.get_filename(full_path=False))) return else: raise FailExit('unknown condition: "{}"'.format(condition)) time.sleep(DELAY_BETWEEN_CV_ATTEMPT) elaps_time += DELAY_BETWEEN_CV_ATTEMPT if elaps_time >= timeout: logger.warning('{} hasn`t been found'.format(ptn.get_filename(full_path=False))) failed_images = ', '.join(map(lambda _p: _p.get_filename(full_path=False), pattern)) raise FindFailed('Unable to find "{file}" in {region}'.format( file=failed_images, region=str(self))) def find(self, image_path, timeout=None, similarity=settings.min_similarity, exception_on_find_fail=True): """ Waits for pattern appear during timeout (in seconds) if timeout = 0 - one search iteration will perform if timeout = None - default value will use Returns Region if pattern found. If pattern did not found returns None if exception_on_find_fail is False else raises FindFailed exception """ logger.info(' try to find "{img}" with similarity {s}'.format( img=str(image_path).split(os.path.sep)[-1], s=similarity)) try: self._last_match = self._wait_for_appear_or_vanish(Pattern(image_path, similarity), timeout, 'appear') except FailExit: self._last_match = None raise except FindFailed as ex: if exception_on_find_fail: self.save_as_jpg(os.path.join( settings.find_failed_dir, '%s_%s.jpg' % (datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), str(image_path).split('/')[-1]))) raise ex else: return None else: return self._last_match def wait_vanish(self, image_path, timeout=None, similarity=settings.min_similarity): """ Waits for pattern vanish during timeout (in seconds). 
If pattern already vanished before method call it return True if timeout = 0 - one search iteration will perform if timeout = None - default value will use """ logger.info('Check if "{file}" vanish during {t} with similarity {s}'.format( file=str(image_path).split(os.path.sep)[-1], t=timeout if timeout else str(self._find_timeout), s=similarity)) try: self._wait_for_appear_or_vanish(Pattern(image_path, similarity), timeout, 'vanish') except FailExit: raise FailExit('Incorrect wait_vanish() method call:' '\n\tregion = {region}\n\timage_path = {path}\n\ttimeout = {t}'.format( region=str(self), path=image_path, t=timeout)) except FindFailed: logger.info('"{}" not vanished'.format(str(image_path).split(os.path.sep)[-1])) return False else: logger.info('"{}" vanished'.format(str(image_path).split(os.path.sep)[-1])) return True finally: self._last_match = None def exists(self, image_path): self._last_match = None try: self._last_match = self._wait_for_appear_or_vanish(image_path, 0, 'appear') except FailExit: raise FailExit('Incorrect exists() method call:' '\n\tregion = {region}\n\timage_path = {path}'.format( region=str(self), path=image_path)) except FindFailed: return False else: return True def wait(self, image_path=None, timeout=None): """ For compatibility with Sikuli. Wait for pattern appear or just wait """ if image_path is None: if timeout: time.sleep(timeout) else: try: self._last_match = self._wait_for_appear_or_vanish(image_path, timeout, 'appear') except FailExit: self._last_match = None raise FailExit('Incorrect wait() method call:' '\n\tregion = {region}\n\timage_path = {path}\n\ttimeout = {t}'.format( region=str(self), path=image_path, t=timeout)) else: return self._last_match
[ "# -*- coding: utf-8 -*-\r\n", "\r\n", "\"\"\"\r\n", " Region - descendant class of BaseRegion.\r\n", "\"\"\"\r\n", "\r\n", "import datetime\r\n", "import os\r\n", "import time\r\n", "\r\n", "from common_exceptions import FailExit, FindFailed\r\n", "from Location import Location\r\n", "from BaseRegion import BaseRegion, logger, DELAY_BETWEEN_CV_ATTEMPT\r\n", "from Match import Match\r\n", "from Pattern import Pattern\r\n", "from Screen import Screen\r\n", "from Settings import settings\r\n", "\r\n", "\r\n", "class Region(BaseRegion):\r\n", " def offset(self, *args, **kwargs):\r\n", " \"\"\"\r\n", " Return area moved relates to self.\r\n", " Option 1 (Sikuli-like):\r\n", " loc_offs := args[0] - Location type; where to move; (w,h) don't change\r\n", " Option 2:\r\n", " x_offs := args[0] - int; where to move by x\r\n", " y_offs := args[1] - int; where to move by y\r\n", " (w,h) don't change\r\n", " \"\"\"\r\n", " if len(kwargs) != 0:\r\n", " raise FailExit('Unknown keys in kwargs = %s' % str(kwargs))\r\n", "\r\n", " offset_title = 'Offset of {}'.format(self.title)\r\n", " if len(args) == 2 and \\\r\n", " (isinstance(args[0], int) or isinstance(args[0], float)) and \\\r\n", " (isinstance(args[1], int) or isinstance(args[1], float)):\r\n", " return Region(self.x + int(args[0]),\r\n", " self.y + int(args[1]),\r\n", " self.w, self.h, find_timeout=self._find_timeout,\r\n", " title=offset_title)\r\n", " elif len(args) == 1 and isinstance(args[0], Location):\r\n", " return Region(self.x + args[0].x,\r\n", " self.y + args[0].y,\r\n", " self.w, self.h, find_timeout=self._find_timeout,\r\n", " title=offset_title)\r\n", " else:\r\n", " raise FailExit('Incorrect offset() method call:\\n\\targs = {}'.format(args))\r\n", "\r\n", " def right(self, length=None):\r\n", " \"\"\"\r\n", " Return right area relates to self, do not including self.\r\n", " Height of new area is equal to height of self.\r\n", " Width of new area is equal to 'length' or till the end of the screen\r\n", " \"\"\"\r\n", " right_title = 'Region right of {}'.format(self.title)\r\n", " try:\r\n", " if length is None:\r\n", " scr = Region(*Screen(self.screen_number).area)\r\n", " reg = Region(self.x + self.w, self.y,\r\n", " (scr.x + scr.w - 1) - (self.x + self.w) + 1,\r\n", " self.h, find_timeout=self._find_timeout,\r\n", " title=right_title)\r\n", " elif isinstance(length, int) and length > 0:\r\n", " reg = Region(self.x + self.w, self.y,\r\n", " length,\r\n", " self.h, find_timeout=self._find_timeout,\r\n", " title=right_title)\r\n", " else:\r\n", " raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n", " typr=str(type(length)), length=str(length)))\r\n", " except FailExit:\r\n", " raise FailExit('Incorrect right() method call:\\n\\tlength = {length}'.format(\r\n", " length=length))\r\n", " return reg\r\n", "\r\n", " def left(self, length=None):\r\n", " \"\"\"\r\n", " Return left area relates to self, do not including self.\r\n", " Height of new area is equal to height of self.\r\n", " Width of new area is equal to 'length' or till the end of the screen\r\n", " if 'length' is not set\r\n", " \"\"\"\r\n", " left_title = 'Region left of {}'.format(self.title)\r\n", " try:\r\n", " if length is None:\r\n", " scr = Region(*Screen(self.screen_number).area)\r\n", " reg = Region(scr.x, self.y, (self.x - 1) - scr.x + 1,\r\n", " self.h, find_timeout=self._find_timeout,\r\n", " title=left_title)\r\n", " elif isinstance(length, int) and length > 0:\r\n", " reg = Region(self.x - length, self.y, length,\r\n", " self.h, 
find_timeout=self._find_timeout,\r\n", " title=left_title)\r\n", " else:\r\n", " raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n", " typr=str(type(length)), length=str(length)))\r\n", " except FailExit:\r\n", " raise FailExit('Incorrect left() method call:\\n\\tlength = {length}'.format(\r\n", " length=length))\r\n", " return reg\r\n", "\r\n", " def above(self, length=None):\r\n", " \"\"\"\r\n", " Return top area relates to self, do not including self.\r\n", " Width of new area is equal to width of self.\r\n", " Height of new area is equal to 'length' or till the end of the screen\r\n", " if 'length' is not set\r\n", " \"\"\"\r\n", " try:\r\n", " if length is None:\r\n", " scr = Region(*Screen(self.screen_number).area)\r\n", " reg = Region(self.x, scr.y, self.w, (self.y - 1) - scr.y + 1,\r\n", " find_timeout=self._find_timeout,\r\n", " title='Region top of %s' % self.title)\r\n", " elif isinstance(length, int) and length > 0:\r\n", " reg = Region(self.x, self.y - length, self.w, length,\r\n", " find_timeout=self._find_timeout,\r\n", " title='Region top of %s' % self.title)\r\n", " else:\r\n", " raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n", " typr=str(type(length)), length=str(length)))\r\n", " except FailExit:\r\n", " raise FailExit('Incorrect above() method call:\\n\\tlength = {length}'.format(\r\n", " length=length))\r\n", " return reg\r\n", "\r\n", " def below(self, length=None):\r\n", " \"\"\"\r\n", " Return bottom area relates to self, do not including self.\r\n", " Width of new area is equal to width of self.\r\n", " Height of new area is equal to 'length' or till the end of the screen\r\n", " if 'length' is not set\r\n", " \"\"\"\r\n", " try:\r\n", " if length is None:\r\n", " scr = Region(*Screen(self.screen_number).area)\r\n", " reg = Region(self.x, self.y + self.h, self.w, (scr.y + scr.h - 1) - (self.y + self.h) + 1,\r\n", " find_timeout=self._find_timeout,\r\n", " title='Region bottom of %s' % self.title)\r\n", " elif isinstance(length, int) and length > 0:\r\n", " reg = Region(self.x, self.y + self.h, self.w, length,\r\n", " find_timeout=self._find_timeout,\r\n", " title='Region bottom of %s' % self.title)\r\n", " else:\r\n", " raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n", " typr=str(type(length)), length=str(length)))\r\n", " except FailExit:\r\n", " raise FailExit('Incorrect below() method call:\\n\\tlength = {length}'.format(\r\n", " length=length))\r\n", " return reg\r\n", "\r\n", " def nearby(self, length=0):\r\n", " \"\"\"\r\n", " Return area around self, including self.\r\n", " \"\"\"\r\n", " try:\r\n", " if isinstance(length, int) and ((length >= 0) or\r\n", " (length < 0 and\r\n", " (-2 * length) < self.w and\r\n", " (-2 * length) < self.h)):\r\n", " reg = Region(self.x - length, self.y - length, self.w + 2 * length,\r\n", " self.h + 2 * length, find_timeout=self._find_timeout,\r\n", " title='Nearby region of {}'.format(self.title))\r\n", " else:\r\n", " raise FailExit('Incorrect length: type is {type}; value is {length}'.format(\r\n", " typr=str(type(length)), length=str(length)))\r\n", " except FailExit:\r\n", " raise FailExit('Incorrect nearby() method call:\\n\\tlength = {length}'.format(\r\n", " length=length))\r\n", " return reg\r\n", "\r\n", " def find_all(self, pattern, delay_before=0):\r\n", " err_msg = 'Incorrect find_all() method call:' \\\r\n", " '\\n\\tpattern = {pattern}\\n\\tdelay_before = {delay}'.format(\r\n", " 
pattern=str(pattern).split(os.pathsep)[-1], delay=delay_before)\r\n", " try:\r\n", " delay_before = float(delay_before)\r\n", " except ValueError:\r\n", " raise FailExit(err_msg)\r\n", "\r\n", " if isinstance(pattern, str):\r\n", " pattern = Pattern(pattern)\r\n", " if not isinstance(pattern, Pattern):\r\n", " raise FailExit(err_msg)\r\n", "\r\n", " time.sleep(delay_before)\r\n", " results = self._find(pattern, self.search_area)\r\n", " self._last_match = map(lambda pt: Match(pt[0], pt[1],\r\n", " pattern.get_w, pattern.get_h,\r\n", " pt[2], pattern), results)\r\n", " logger.info('total found {count} matches of \"{file}\"'.format(\r\n", " count=len(self._last_match), file=pattern.get_filename(full_path=False)))\r\n", " return self._last_match\r\n", "\r\n", " def _wait_for_appear_or_vanish(self, pattern, timeout, condition):\r\n", " \"\"\"\r\n", " pattern - could be String or List.\r\n", " If isinstance(pattern, list), the first element will return.\r\n", " It can be used when it's necessary to find one of the several images\r\n", " \"\"\"\r\n", " fail_exit_text = 'bad \"pattern\" argument; it should be a string (path to image file) or Pattern object: {}'\r\n", "\r\n", " if not isinstance(pattern, list):\r\n", " pattern = [pattern]\r\n", "\r\n", " for (_i, p) in enumerate(pattern):\r\n", " if isinstance(p, str):\r\n", " pattern[_i] = Pattern(p)\r\n", " elif not isinstance(p, Pattern):\r\n", " raise FailExit(fail_exit_text.format(p))\r\n", "\r\n", " if timeout is None:\r\n", " timeout = self._find_timeout\r\n", " else:\r\n", " try:\r\n", " timeout = float(timeout)\r\n", " if timeout < 0:\r\n", " raise ValueError\r\n", " except ValueError:\r\n", " raise FailExit('Incorrect argument: timeout = {}'.format(timeout))\r\n", "\r\n", " prev_field = None\r\n", " elaps_time = 0\r\n", " while True:\r\n", " if prev_field is None or (prev_field != self.search_area).all():\r\n", " for ptn in pattern:\r\n", " results = self._find(ptn, self.search_area)\r\n", " if condition == 'appear':\r\n", " if len(results) != 0:\r\n", " # Found something. 
Choose one result with best 'score'.\r\n", " # If several results has the same 'score' a first found result will choose\r\n", " res = max(results, key=lambda x: x[2])\r\n", " logger.info(' \"%s\" has been found in(%i, %i)' % (\r\n", " ptn.get_filename(full_path=False), res[0], res[1]))\r\n", " return Match(int(res[0] / self.scaling_factor),\r\n", " int(res[1] / self.scaling_factor),\r\n", " int(ptn.get_w / self.scaling_factor),\r\n", " int(ptn.get_h / self.scaling_factor),\r\n", " res[2], ptn)\r\n", " elif condition == 'vanish':\r\n", " if len(results) == 0:\r\n", " logger.info('\"{}\" has vanished'.format(ptn.get_filename(full_path=False)))\r\n", " return\r\n", " else:\r\n", " raise FailExit('unknown condition: \"{}\"'.format(condition))\r\n", "\r\n", " time.sleep(DELAY_BETWEEN_CV_ATTEMPT)\r\n", " elaps_time += DELAY_BETWEEN_CV_ATTEMPT\r\n", " if elaps_time >= timeout:\r\n", " logger.warning('{} hasn`t been found'.format(ptn.get_filename(full_path=False)))\r\n", " failed_images = ', '.join(map(lambda _p: _p.get_filename(full_path=False), pattern))\r\n", " raise FindFailed('Unable to find \"{file}\" in {region}'.format(\r\n", " file=failed_images, region=str(self)))\r\n", "\r\n", " def find(self, image_path, timeout=None, similarity=settings.min_similarity,\r\n", " exception_on_find_fail=True):\r\n", " \"\"\"\r\n", " Waits for pattern appear during timeout (in seconds)\r\n", " if timeout = 0 - one search iteration will perform\r\n", " if timeout = None - default value will use\r\n", "\r\n", " Returns Region if pattern found.\r\n", " If pattern did not found returns None if exception_on_find_fail is False\r\n", " else raises FindFailed exception\r\n", " \"\"\"\r\n", " logger.info(' try to find \"{img}\" with similarity {s}'.format(\r\n", " img=str(image_path).split(os.path.sep)[-1], s=similarity))\r\n", " try:\r\n", " self._last_match = self._wait_for_appear_or_vanish(Pattern(image_path, similarity), timeout, 'appear')\r\n", " except FailExit:\r\n", " self._last_match = None\r\n", " raise\r\n", " except FindFailed as ex:\r\n", " if exception_on_find_fail:\r\n", " self.save_as_jpg(os.path.join(\r\n", " settings.find_failed_dir,\r\n", " '%s_%s.jpg' % (datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"),\r\n", " str(image_path).split('/')[-1])))\r\n", " raise ex\r\n", " else:\r\n", " return None\r\n", " else:\r\n", " return self._last_match\r\n", "\r\n", " def wait_vanish(self, image_path, timeout=None, similarity=settings.min_similarity):\r\n", " \"\"\"\r\n", " Waits for pattern vanish during timeout (in seconds).\r\n", " If pattern already vanished before method call it return True\r\n", "\r\n", " if timeout = 0 - one search iteration will perform\r\n", " if timeout = None - default value will use\r\n", " \"\"\"\r\n", " logger.info('Check if \"{file}\" vanish during {t} with similarity {s}'.format(\r\n", " file=str(image_path).split(os.path.sep)[-1],\r\n", " t=timeout if timeout else str(self._find_timeout),\r\n", " s=similarity))\r\n", " try:\r\n", " self._wait_for_appear_or_vanish(Pattern(image_path, similarity), timeout, 'vanish')\r\n", " except FailExit:\r\n", " raise FailExit('Incorrect wait_vanish() method call:'\r\n", " '\\n\\tregion = {region}\\n\\timage_path = {path}\\n\\ttimeout = {t}'.format(\r\n", " region=str(self), path=image_path, t=timeout))\r\n", " except FindFailed:\r\n", " logger.info('\"{}\" not vanished'.format(str(image_path).split(os.path.sep)[-1]))\r\n", " return False\r\n", " else:\r\n", " logger.info('\"{}\" 
vanished'.format(str(image_path).split(os.path.sep)[-1]))\r\n", " return True\r\n", " finally:\r\n", " self._last_match = None\r\n", "\r\n", " def exists(self, image_path):\r\n", " self._last_match = None\r\n", " try:\r\n", " self._last_match = self._wait_for_appear_or_vanish(image_path, 0, 'appear')\r\n", " except FailExit:\r\n", " raise FailExit('Incorrect exists() method call:'\r\n", " '\\n\\tregion = {region}\\n\\timage_path = {path}'.format(\r\n", " region=str(self), path=image_path))\r\n", " except FindFailed:\r\n", " return False\r\n", " else:\r\n", " return True\r\n", "\r\n", " def wait(self, image_path=None, timeout=None):\r\n", " \"\"\"\r\n", " For compatibility with Sikuli.\r\n", " Wait for pattern appear or just wait\r\n", " \"\"\"\r\n", " if image_path is None:\r\n", " if timeout:\r\n", " time.sleep(timeout)\r\n", " else:\r\n", " try:\r\n", " self._last_match = self._wait_for_appear_or_vanish(image_path, timeout, 'appear')\r\n", " except FailExit:\r\n", " self._last_match = None\r\n", " raise FailExit('Incorrect wait() method call:'\r\n", " '\\n\\tregion = {region}\\n\\timage_path = {path}\\n\\ttimeout = {t}'.format(\r\n", " region=str(self), path=image_path, t=timeout))\r\n", " else:\r\n", " return self._last_match\r\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009259259259259259, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0.011904761904761904, 0, 0, 0.010638297872340425, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0.011904761904761904, 0.010869565217391304, 0, 0.008547008547008548, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0.009615384615384616, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0.009615384615384616, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0.01020408163265306, 0.00980392156862745, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0.008620689655172414, 0, 0, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0.01, 0, 0, 0.010752688172043012, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0.009615384615384616, 0.012048192771084338, 0, 0 ]
343
0.001366
false
# Used for perf measurements. Defines a simple function and makes 100 calls in a row. # I ran 10K calls, total 1 million invocations, total time is around 250ms on my laptop (64bit 2.7GHz, Release mode, running .exe directly, no debugger); # 0.4 microseconds per call. m = 5 def addm(x, y): x + y + m x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11) x = addm(1, 2) x = addm(2, 3) x = addm(3, 4) x = addm(4, 5) x = addm(5, 6) x = addm(6, 7) x = addm(7, 8) x = addm(8, 9) x = addm(9, 10) x = addm(10, 11)
[ "# Used for perf measurements. Defines a simple function and makes 100 calls in a row.\n", "# I ran 10K calls, total 1 million invocations, total time is around 250ms on my laptop (64bit 2.7GHz, Release mode, running .exe directly, no debugger);\n", "# 0.4 microseconds per call. \n", "\n", "m = 5\n", "\n", "def addm(x, y):\n", " x + y + m\n", " \n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n", "x = addm(1, 2)\n", "x = addm(2, 3)\n", "x = addm(3, 4)\n", "x = addm(4, 5)\n", "x = addm(5, 6)\n", "x = addm(6, 7)\n", "x = addm(7, 8)\n", "x = addm(8, 9)\n", "x = addm(9, 10)\n", "x = addm(10, 11)\n", "\n" ]
[ 0.011627906976744186, 0.006493506493506494, 0.03333333333333333, 0, 0, 0, 0.0625, 0.08333333333333333, 0.3333333333333333, 0.06666666666666667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ]
119
0.013423
false
from django import template register = template.Library() """ Will be included in 1.4 """ @register.filter() def truncatechars(value, length): """ truncate after a certain number of characters, if the last character is not space truncate at the next space Edit by Taco: Now adds three dots at the end, and takes those dots in to account when checking where to truncate and add the dots. """ le = length-3 if value == None: return value if len(value) > length: try: if value[le] == " ": return '{0}...'.format(value[:le]) else: while value[le] != " ": le += 1 else: return '{0}...'.format(value[:le]) except IndexError: return value[:length] else: return value def truncatewords(value, length): """ truncate after a certain number of characters, if the last character is not space truncate at the next space Edit by Taco: Now adds three dots at the end, and takes those dots in to account when checking where to truncate and add the dots. """ if value == None or value == '': return value value_array = [] try: value_array = str(value).lsplit(' ') except Exception: return value try: return_value = value_array[0] le = 0 while le < length: return_value += value_array[le] le += 1 return_value += '...' return return_value except IndexError: return value
[ "from django import template\n", "\n", "register = template.Library()\n", "\n", "\"\"\"\n", " Will be included in 1.4\n", "\"\"\"\n", "\n", "@register.filter()\n", "def truncatechars(value, length):\n", " \"\"\"\n", " truncate after a certain number of characters,\n", " if the last character is not space truncate at the next space\n", " \n", " Edit by Taco: Now adds three dots at the end, and takes those dots\n", " in to account when checking where to truncate and add the dots.\n", " \"\"\"\n", " le = length-3\n", " if value == None:\n", " return value\n", " if len(value) > length:\n", " try:\n", " if value[le] == \" \":\n", " return '{0}...'.format(value[:le])\n", " else:\n", " while value[le] != \" \":\n", " le += 1\n", " else:\n", " return '{0}...'.format(value[:le])\n", "\n", " except IndexError:\n", " return value[:length]\n", " else:\n", " return value\n", "\n", "def truncatewords(value, length):\n", " \"\"\"\n", " truncate after a certain number of characters,\n", " if the last character is not space truncate at the next space\n", " \n", " Edit by Taco: Now adds three dots at the end, and takes those dots\n", " in to account when checking where to truncate and add the dots.\n", " \"\"\"\n", " if value == None or value == '':\n", " return value\n", " value_array = []\n", " try:\n", " value_array = str(value).lsplit(' ')\n", " except Exception:\n", " return value\n", " \n", " try:\n", " return_value = value_array[0]\n", " le = 0\n", " while le < length:\n", " return_value += value_array[le]\n", " le += 1\n", " return_value += '...'\n", " return return_value\n", " except IndexError:\n", " return value" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0.05263157894736842, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.029411764705882353, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0.02702702702702703, 0, 0, 0, 0, 0, 0, 0.2, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0, 0.05 ]
61
0.010695
false
#!/usr/bin/env python #coding:utf-8 # Author: mozman # Purpose: a hack to generate XML containing CDATA by ElementTree # Created: 26.05.2012 # Copyright (C) 2012, Manfred Moitzi # License: GPLv3 # usage: # # from svgwrite.etree import etree, CDATA # # element = etree.Element('myTag') # element.append(CDATA("< and >")) # # assert etree.tostring(element) == "<myTag><![CDATA[< and >]]></myTag>" import sys PY3 = sys.version_info[0] > 2 import xml.etree.ElementTree as etree CDATA_TPL = "<![CDATA[%s]]>" CDATA_TAG = CDATA_TPL def CDATA(text): element = etree.Element(CDATA_TAG) element.text = text return element try: original_serialize_xml = etree._serialize_xml except AttributeError, e: print 'etree patch error', str(e) if PY3: def _serialize_xml_with_CDATA_support(write, elem, qnames, namespaces): if elem.tag == CDATA_TAG: write(CDATA_TPL % elem.text) else: original_serialize_xml(write, elem, qnames, namespaces) else: def _serialize_xml_with_CDATA_support(write, elem, encoding, qnames, namespaces): if elem.tag == CDATA_TAG: write(CDATA_TPL % elem.text.encode(encoding)) else: original_serialize_xml(write, elem, encoding, qnames, namespaces) # ugly, ugly, ugly patching try: etree._serialize_xml = _serialize_xml_with_CDATA_support except AttributeError, e: print 'etree patch error', str(e)
[ "#!/usr/bin/env python\r\n", "#coding:utf-8\r\n", "# Author: mozman\r\n", "# Purpose: a hack to generate XML containing CDATA by ElementTree\r\n", "# Created: 26.05.2012\r\n", "# Copyright (C) 2012, Manfred Moitzi\r\n", "# License: GPLv3\r\n", "\r\n", "# usage:\r\n", "#\r\n", "# from svgwrite.etree import etree, CDATA\r\n", "#\r\n", "# element = etree.Element('myTag')\r\n", "# element.append(CDATA(\"< and >\"))\r\n", "#\r\n", "# assert etree.tostring(element) == \"<myTag><![CDATA[< and >]]></myTag>\"\r\n", "\r\n", "\r\n", "import sys\r\n", "PY3 = sys.version_info[0] > 2\r\n", "\r\n", "import xml.etree.ElementTree as etree\r\n", "\r\n", "CDATA_TPL = \"<![CDATA[%s]]>\"\r\n", "CDATA_TAG = CDATA_TPL\r\n", "\r\n", "\r\n", "def CDATA(text):\r\n", " element = etree.Element(CDATA_TAG)\r\n", " element.text = text\r\n", " return element\r\n", "\r\n", "try:\r\n", " original_serialize_xml = etree._serialize_xml\r\n", "except AttributeError, e:\r\n", " print 'etree patch error', str(e)\r\n", "\r\n", "if PY3:\r\n", " def _serialize_xml_with_CDATA_support(write, elem, qnames, namespaces):\r\n", " if elem.tag == CDATA_TAG:\r\n", " write(CDATA_TPL % elem.text)\r\n", " else:\r\n", " original_serialize_xml(write, elem, qnames, namespaces)\r\n", "else:\r\n", " def _serialize_xml_with_CDATA_support(write, elem, encoding, qnames, namespaces):\r\n", " if elem.tag == CDATA_TAG:\r\n", " write(CDATA_TPL % elem.text.encode(encoding))\r\n", " else:\r\n", " original_serialize_xml(write, elem, encoding, qnames, namespaces)\r\n", "\r\n", "# ugly, ugly, ugly patching\r\n", "try:\r\n", " etree._serialize_xml = _serialize_xml_with_CDATA_support\r\n", "except AttributeError, e:\r\n", " print 'etree patch error', str(e)\r\n", " \r\n" ]
[ 0, 0.06666666666666667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.16666666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.16666666666666666 ]
56
0.007806
false
