commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
0728e6a4f8f06e1d4d137259f76796d1dbfa1a9d | add a wsgi.py that eagerly reads in POSTdata | edx/edx-ora,edx/edx-ora,edx/edx-ora,edx/edx-ora | edx_ora/wsgi_eager.py | edx_ora/wsgi_eager.py | """
WSGI config for ora project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edx_ora.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
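# A minimal sketch of that settings entry (path assumed from this module's
# location): WSGI_APPLICATION = "edx_ora.wsgi_eager.application"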
from django.core.wsgi import WSGIHandler
class ForceReadPostHandler(WSGIHandler):
"""WSGIHandler that forces reading POST data before forwarding to the
application.
nginx as a proxy expects the backend to respond only after the
whole body of the request has been read. In some cases (see below)
the backend starts responding before reading the request. This
causes nginx to return a 502 error, instead of forwarding the
proper response to the client, which makes it very hard to debug
problems with the backend.
Cases where the backend responds early:
- Early errors from django, for example errors from view decorators.
- POST request with large payloads, which may get chunked by nginx.
django sends a 100 Continue response before reading the whole body.
For more information:
http://kudzia.eu/b/2012/01/switching-from-apache2-to-nginx-as-reverse-proxy
"""
def get_response(self, request):
data = request.POST.copy() # force the POST data to be read before the request is passed on; the copy itself is discarded
return super(ForceReadPostHandler, self).get_response(request)
application = ForceReadPostHandler()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| agpl-3.0 | Python |
|
fb336764f1a95d591e04f0061009c555b7217274 | Create FoodDiversity.py | LAMakerspce/Economics | FoodDiversity.py | FoodDiversity.py | import csv
import math
import collections
from collections import Counter
# EntFunc calculates the Shannon index for the diversity of venues in a given zip code.
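# Shannon index: H = -sum(p_i * log(p_i)), where p_i is the share of venues with name i.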
def EntFunc(names, zipcode):
k = len(names)
Entropy = 0
# Counter gives each unique name's frequency directly, avoiding the
# repeated .count() scans and the index-boundary bug of comparing
# names[0] against names[-1]
for count in Counter(names).values():
p = float(count) / float(k)
Entropy = -1.0 * math.log(p) * p + Entropy
if Entropy != 0: print zipcode, k, Entropy
#Take in data from ESRI business lists by zip code.
#The entry matrix takes in values by zip code then the business name within the zip code.
#The BusinessName list is there simply to take in business names and determine how often unique values repeat for diversity calculations.
ReadFile = 'SIC581208.csv'
inf = csv.reader(open(ReadFile, "rU"))
i = 0
entry=[[],[]]
BusinessName=[]
#Store zip and business name data from ESRI file.
for row in inf:
i = i + 1
if i > 1:
entry[0].append(long(row[6]))
entry[1].append(row[1])
#Build the sorted list of unique zip codes.
zip_codes = sorted(set(entry[0]), key=float)
#Sort all stored information by zip code, keeping zip/name pairs aligned
#(sorting the two sublists independently would misalign them).
#Output business diversity by zip code.
j = 0
pairs = sorted(zip(entry[0], entry[1]))
entry = [[p[0] for p in pairs], [p[1] for p in pairs]]
for i in range(0, len(entry[0])):
if entry[0][i] == zip_codes[j]:
BusinessName.append(entry[1][i])
else:
EntFunc(BusinessName, zip_codes[j])
j = j + 1
# start the new zip code's list with the current name instead of dropping it
BusinessName = [entry[1][i]]
# flush the final zip code group, which the loop above never reaches
EntFunc(BusinessName, zip_codes[j])
| cc0-1.0 | Python |
|
388a7eea596fdbcd79005b06d26d54b182b75696 | add package | kokokele/packageSpineAtlas,kokokele/packageSpineAtlas,kokokele/packageSpineAtlas,kokokele/packageSpineAtlas | package.py | package.py | #!/usr/bin/python
# coding=UTF-8
'''
Folder structure --zip
animate
unzip.py
release
'''
import os, sys, zipfile
import shutil
import commands
# Config
dir = "equip"
output = "output"
def texturePacker(folder, oriName):
'''
Pack the images into a spine-format atlas
'''
target = "../" + folder + "/"
#pwd
#--content-protection 64baaa7a545a4eb7a90f7a452a703f13
## pack the atlas
cmd = "TexturePacker --size-constraints NPOT --force-squared --multipack --force-word-aligned --algorithm MaxRects --premultiply-alpha --format spine --opt RGBA4444 --dither-atkinson-alpha --data " + target + oriName + ".atlas --texture-format png --sheet " + target + oriName + ".png " + "./"
#cmd = "TexturePacker --size-constraints NPOT --force-squared --content-protection 64baaa7a545a4eb7a90f7a452a703f13 --multipack --force-word-aligned --algorithm MaxRects --reduce-border-artifacts --format cocos2d --opt RGBA4444 --dither-fs-alpha --data " + target + "animate.plist --texture-format pvr2ccz --sheet " + target + "animate.pvr.ccz " + target + "texture/"
print commands.getoutput(cmd)
print "-------" + oriName + " is success-----"
def run():
nameConfig = {"horse": ["ma-bz", "ma-jiao-q", "ma-dt-q", "ma-xt-q", "ma-pg", "ma-yao", "ma-xiong", "ma-jiao-h", "ma-dt-h", "ma-xt-h", "ma-wb-s", "ma-tou", "ma-jiao-h-01", "ma-dt-h-01", "ma-xt-h-01", "ma-jiao-q-01", "ma-dt-q-01", "ma-xt-q-01"],
"helmet":["tou-tk", "tou-tk-s", "tou-tf-r", "tou-tf-l", "tou-tk-h"],
"armour":["db-r-s", "xb-r", "yd", "qz-r", "qz-l", "qz-m", "xt-l", "dt-r", "yao-q", "xiong", "bozi", "pg", "yao", "shou-l", "shou-r", "xb-l", "db-l-s", "db-l", "pf", "xiong-q"],
"weapon":["wq-r", "wq-l"],
"face": ["tou"],
"face1": ["tou1"],
"face2": ["tou2"],
"face3": ["tou3"],
"face4": ["tou4"],
"face5": ["tou5"],
"face6": ["tou6"],
"face7": ["tou7"],
"face8": ["tou8"],
"face9": ["tou9"]
}
entries = os.listdir(dir)
if os.path.exists(output):
shutil.rmtree(output)
os.mkdir(output)
for l in entries:
tf = os.path.join(dir, l)
if os.path.isdir(tf):
#os.chdir(tf)
#pngList = os.listdir(os.getcwd())
for (key , value) in nameConfig.items():
print key, value
tmp = os.path.join(output, "tmp")
if os.path.exists(tmp) == False:
os.mkdir(tmp)
for e in value :
copyFile = os.path.join(os.getcwd(), tf, e + ".png")
if os.path.exists(copyFile):
shutil.copy(copyFile, tmp)
tmpLen = len(os.listdir(tmp))
# only step into tmp when there is something to pack, so the relative
# rmtree(tmp) below never runs from inside tmp itself
if tmpLen > 0:
os.chdir(os.path.join(output, "tmp"))
texturePacker(l, key)
os.chdir("../../")
shutil.rmtree(tmp)
return
#main fun
run()
| apache-2.0 | Python |
|
40d687be843e3de56eb00a028e07866391593315 | Add defaults.py | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/defaults.py | salt/defaults.py | # -*- coding: utf-8 -*-
'''
Default values, to be imported elsewhere in Salt code
Do NOT import any salt modules (salt.utils, salt.config, etc.) into this file,
as this may result in circular imports.
'''
# Default delimiter for multi-level traversal in targeting
DEFAULT_TARGET_DELIM = ':'
| apache-2.0 | Python |
|
26db82fd8560d54b1f385814b8871e6fba42fa91 | Add Linked List (#71) | yangshun/tech-interview-handbook,yangshun/tech-interview-handbook,yangshun/tech-interview-handbook,yangshun/tech-interview-handbook,yangshun/tech-interview-handbook | utilities/python/linked_list.py | utilities/python/linked_list.py | # Singly-Linked List
#
# The linked list is passed around as a variable pointing to the
# root node of the linked list, or None if the list is empty.
class LinkedListNode:
def __init__(self, value):
self.value = value
self.next = None
def linked_list_append(linked_list, value):
'''Appends a value to the end of the linked list'''
node = linked_list
insert_node = LinkedListNode(value)
if not node:
return insert_node
while node.next:
node = node.next
node.next = insert_node
return linked_list
def linked_list_insert_index(linked_list, value, index):
'''Inserts a value at a particular index'''
node = linked_list
insert_node = LinkedListNode(value)
# Check if inserting at head
if index == 0:
insert_node.next = node
return insert_node
# Skip ahead
for _ in range(index - 1):
node = node.next
if not node:
raise ValueError
insert_node.next = node.next
node.next = insert_node
return linked_list
def linked_list_delete(linked_list, value):
'''Deletes the first occurrence of a value in the linked list'''
node = linked_list
# Check if deleting at head
if node.value == value:
return node.next
# Skip ahead
while node.next:
if node.next.value == value:
node.next = node.next.next
return linked_list
node = node.next
raise ValueError
def linked_list_delete_index(linked_list, index):
'''Deletes the element at a particular index in the linked list'''
node = linked_list
# Check if deleting at head
if index == 0:
return node.next
# Skip ahead
for _ in range(index - 1):
node = node.next
if not node:
raise ValueError
if not node.next:
raise ValueError
node.next = node.next.next
return linked_list
def linked_list_iter(linked_list):
'''Lazy iterator over each node in the linked list'''
node = linked_list
while node is not None:
yield node
node = node.next
# Append to back
linked_list = None # Start with an empty linked list
linked_list = linked_list_append(linked_list, 1)
linked_list = linked_list_append(linked_list, 2)
linked_list = linked_list_append(linked_list, 4)
print([node.value for node in linked_list_iter(linked_list)])
# Insert by index
linked_list = linked_list_insert_index(linked_list, 0, 0) # Front
print([node.value for node in linked_list_iter(linked_list)])
linked_list = linked_list_insert_index(linked_list, 3, 3) # Back
print([node.value for node in linked_list_iter(linked_list)])
# Delete "3"
linked_list = linked_list_delete(linked_list, 3)
print([node.value for node in linked_list_iter(linked_list)])
# Delete by index
linked_list = linked_list_delete_index(linked_list, 0)
print([node.value for node in linked_list_iter(linked_list)])
linked_list = linked_list_delete_index(linked_list, 1)
print([node.value for node in linked_list_iter(linked_list)])
# Delete until empty
linked_list = linked_list_delete_index(linked_list, 0)
linked_list = linked_list_delete_index(linked_list, 0)
print([node.value for node in linked_list_iter(linked_list)])
| mit | Python |
|
43510585fcf2d9bd3953f3a4948f3aaebbc00e10 | Add pyglet.info | Alwnikrotikz/pyglet,gdkar/pyglet,xshotD/pyglet,gdkar/pyglet,qbektrix/pyglet,arifgursel/pyglet,mpasternak/michaldtz-fixes-518-522,google-code-export/pyglet,odyaka341/pyglet,mpasternak/michaldtz-fix-552,qbektrix/pyglet,Alwnikrotikz/pyglet,cledio66/pyglet,qbektrix/pyglet,mpasternak/pyglet-fix-issue-552,mpasternak/michaldtz-fixes-518-522,qbektrix/pyglet,mpasternak/michaldtz-fixes-518-522,mpasternak/michaldtz-fix-552,kmonsoor/pyglet,arifgursel/pyglet,Austin503/pyglet,kmonsoor/pyglet,google-code-export/pyglet,Austin503/pyglet,kmonsoor/pyglet,mpasternak/michaldtz-fix-552,cledio66/pyglet,mpasternak/michaldtz-fixes-518-522,gdkar/pyglet,google-code-export/pyglet,gdkar/pyglet,odyaka341/pyglet,Austin503/pyglet,shaileshgoogler/pyglet,mpasternak/pyglet-fix-issue-552,xshotD/pyglet,mpasternak/pyglet-fix-issue-518-522,xshotD/pyglet,mpasternak/pyglet-fix-issue-518-522,qbektrix/pyglet,mpasternak/pyglet-fix-issue-518-522,shaileshgoogler/pyglet,mpasternak/pyglet-fix-issue-552,arifgursel/pyglet,mpasternak/pyglet-fix-issue-518-522,mpasternak/michaldtz-fix-552,Austin503/pyglet,Alwnikrotikz/pyglet,xshotD/pyglet,odyaka341/pyglet,shaileshgoogler/pyglet,google-code-export/pyglet,Alwnikrotikz/pyglet,odyaka341/pyglet,cledio66/pyglet,mpasternak/pyglet-fix-issue-552,kmonsoor/pyglet,google-code-export/pyglet,xshotD/pyglet,shaileshgoogler/pyglet,kmonsoor/pyglet,cledio66/pyglet,arifgursel/pyglet,arifgursel/pyglet,odyaka341/pyglet,cledio66/pyglet,Alwnikrotikz/pyglet,gdkar/pyglet,Austin503/pyglet,shaileshgoogler/pyglet | pyglet/info.py | pyglet/info.py | #!/usr/bin/env python
'''Get environment information useful for debugging.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
_first_heading = True
def _heading(heading):
global _first_heading
if not _first_heading:
print
else:
_first_heading = False
print heading
print '-' * 78
def dump_python():
'''Dump Python version and environment to stdout.'''
import os
import sys
print 'sys.version:', sys.version
print 'sys.platform:', sys.platform
print 'os.getcwd():', os.getcwd()
for key, value in os.environ.items():
if key.startswith('PYGLET_'):
print "os.environ['%s']: %s" % (key, value)
def dump_pyglet():
'''Dump pyglet version and options.'''
import pyglet
print 'pyglet.version:', pyglet.version
print 'pyglet.__file__:', pyglet.__file__
for key, value in pyglet.options.items():
print "pyglet.options['%s'] = %r" % (key, value)
def dump_window():
'''Dump display, window, screen and default config info.'''
import pyglet.window
platform = pyglet.window.get_platform()
print 'platform:', repr(platform)
display = platform.get_default_display()
print 'display:', repr(display)
screens = display.get_screens()
for i, screen in enumerate(screens):
print 'screens[%d]: %r' % (i, screen)
window = pyglet.window.Window()
for key, value in window.config.get_gl_attributes():
print "config['%s'] = %r" % (key, value)
print 'context:', repr(window.context)
window.close()
def dump_gl():
'''Dump GL info.'''
from pyglet.gl import gl_info
print 'gl_info.get_version():', gl_info.get_version()
print 'gl_info.get_vendor():', gl_info.get_vendor()
print 'gl_info.get_renderer():', gl_info.get_renderer()
print 'gl_info.get_extensions():'
extensions = list(gl_info.get_extensions())
extensions.sort()
for name in extensions:
print ' ', name
def dump_glu():
'''Dump GLU info.'''
from pyglet.gl import glu_info
print 'glu_info.get_version():', glu_info.get_version()
print 'glu_info.get_extensions():'
extensions = list(glu_info.get_extensions())
extensions.sort()
for name in extensions:
print ' ', name
def dump_media():
'''Dump pyglet.media info.'''
import pyglet.media
print 'driver:', pyglet.media.driver.__name__
def dump_avbin():
'''Dump AVbin info.'''
try:
import pyglet.media.avbin
print 'Library:', pyglet.media.avbin.av
print 'AVbin version:', pyglet.media.avbin.av.avbin_get_version()
print 'FFmpeg revision:', \
pyglet.media.avbin.av.avbin_get_ffmpeg_revision()
except:
print 'AVbin not available.'
def _try_dump(heading, func):
_heading(heading)
try:
func()
except:
import traceback
traceback.print_exc()
def dump():
'''Dump all information to stdout.'''
_try_dump('Python', dump_python)
_try_dump('pyglet', dump_pyglet)
_try_dump('pyglet.window', dump_window)
_try_dump('pyglet.gl.gl_info', dump_gl)
_try_dump('pyglet.gl.glu_info', dump_glu)
_try_dump('pyglet.media', dump_media)
_try_dump('pyglet.media.avbin', dump_avbin)
if __name__ == '__main__':
dump()
| bsd-3-clause | Python |
|
52dc608438940e098900e1380f11ee3094c118ae | Add log file in script higher_education and add download by year. | DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site | scripts/data_download/higher_education/create_files.py | scripts/data_download/higher_education/create_files.py | # -*- coding: utf-8 -*-
'''
python scripts/data_download/higher_education/create_files.py en scripts/data/higher_education/en/ 2009
'''
from collections import namedtuple
from datetime import datetime
import pandas as pd
import os
import bz2
import sys
import logging
import imp
def local_imports():
global common, dictionary
f, filename, desc = imp.find_module('common', ['./scripts/data_download/'])
common = imp.load_module('common', f, filename, desc)
f, filename, desc = imp.find_module('dictionary', ['./scripts/data_download/'])
dictionary = imp.load_module('common', f, filename, desc)
def select_table(conditions):
s = 'y'
# 0 year, 1 location, 2 major
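# Suffix examples, derived from the checks below: location filter only or no
# extra filter -> 'hedu_yb'; major filter only -> 'hedu_yc'; both -> 'hedu_ybc'.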
if conditions[1] != ' 1 = 1 ':
s += 'b'
if conditions[1] == ' 1 = 1 ' and conditions[2] == ' 1 = 1 ':
s += 'b'
if conditions[2] != ' 1 = 1 ':
s += 'c'
return 'hedu_' + s
def save(year, locations, majors, lang, output_path):
conditions = [' 1 = 1', ' 1 = 1', ' 1 = 1'] # 3 conditions
table_columns = {}
columns_deleted=['bra_id_len', 'course_hedu_id_len', 'enrolled_rca']
if lang == 'en':
dic_lang = dictionary.en
else:
dic_lang = dictionary.pt
conditions[0] = year.condition
for location in locations:
conditions[1] = location.condition
for major in majors:
conditions[2] = major.condition
if location.condition == ' 1 = 1 ' and major.condition == ' 1 = 1 ':
continue;
table = select_table(conditions)
name_file = 'hedu'+str(year.name)+str(location.name)+str(major.name)
new_file_path = os.path.join(output_path, name_file+".csv.bz2")
if table not in table_columns.keys():
table_columns[table] = [ i+" as '"+dic_lang[i]+"'" for i in common.get_colums(table, columns_deleted)]
query = 'SELECT '+','.join(table_columns[table])+' FROM '+table+' WHERE '+' and '.join(conditions) + ' LIMIT 5'
logging.info('Query for file ('+str(datetime.now().hour)+':'+str(datetime.now().minute)+':'+str(datetime.now().second)+'): \n '+name_file+'\n'+query)
print "Gerando ... " + new_file_path
f = pd.read_sql_query(query, common.engine)
f.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep=",", index=False, float_format="%.3f", encoding='utf-8')
logging.info("\nError:\n"+str(sys.stderr)+"\n-----------------------------------------------\n")
Condition = namedtuple('Condition', ['condition', 'name'])
locations = [
Condition(' 1 = 1 ', ''),
Condition('bra_id_len=1', '-regions'),
Condition('bra_id_len=3', '-states'),
Condition('bra_id_len=5', '-mesoregions'),
Condition('bra_id_len=7', '-microregions'),
Condition('bra_id_len=9', '-municipalities')]
majors = [
Condition(' 1 = 1 ', ''),
Condition('course_hedu_id_len=2', '-field'),
Condition('course_hedu_id_len=6', '-majors')]
if len(sys.argv) != 4 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! use :\npython scripts/data_download/higher_education/create_files.py en/pt output_path year"
exit()
output_path = os.path.abspath(sys.argv[2])
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],'higher-education-data-download.log' )),level=logging.DEBUG)
year = Condition('year='+str(sys.argv[3]), '-'+str(sys.argv[3]))
local_imports()
save(year=year, locations=locations, majors=majors, lang=sys.argv[1], output_path=output_path)
| mit | Python |
|
3de2e625af9047b64cc2718e6e79be0c428b6ae7 | Solve Code Fights extract each kth problem | HKuz/Test_Code | CodeFights/extractEachKth.py | CodeFights/extractEachKth.py | #!/usr/local/bin/python
# Code Fights Extract Each Kth Problem
def extractEachKth(inputArray, k):
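# keep the elements whose 1-based position is not a multiple of k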
return [e for i, e in enumerate(inputArray) if (i + 1) % k != 0]
def main():
tests = [
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3, [1, 2, 4, 5, 7, 8, 10]],
[[1, 1, 1, 1, 1], 1, []],
[[1, 2, 1, 2, 1, 2, 1, 2], 2, [1, 1, 1, 1]]
]
for t in tests:
res = extractEachKth(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: extractEachKth({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: extractEachKth({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
| mit | Python |
|
0f1f96ce23ab89c8de3cf24645c4ea77fa2a9196 | add first test with random data | MaxNoe/cta_event_viewer | test_window.py | test_window.py | from telescope import LST
from windows import TelescopeEventView
import tkinter as tk
import numpy as np
lst = LST(0, 0, 0)
root = tk.Tk()
viewer1 = TelescopeEventView(root, lst, np.random.normal(size=lst.n_pixel))
viewer2 = TelescopeEventView(root, lst, np.random.normal(size=lst.n_pixel))
viewer1.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
viewer2.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
root.mainloop()
| mit | Python |
|
60b5948508a67cb213ca04b5faacb77e27d8f84c | Add fields expicitly declared in form | gems-uff/labsys,gems-uff/labsys,gems-uff/labsys,gcrsaldanha/fiocruz,gcrsaldanha/fiocruz | samples/forms.py | samples/forms.py | import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
from fiocruz.settings.base import DATE_INPUT_FORMATS
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
date_applied = forms.DateField(input_formats=DATE_INPUT_FORMATS)
class Meta:
model = FluVaccine
fields = ['was_applied', 'date_applied', ]
| import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
from fiocruz.settings.base import DATE_INPUT_FORMATS
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
date_applied = forms.DateField(input_formats=DATE_INPUT_FORMATS)
class Meta:
model = FluVaccine
exclude = ['admission_note', ]
| mit | Python |
5ca9e468b9709ae2c7358551a19e668e580ea396 | add deserialized json object validation functions | JDReutt/BayesDB,poppingtonic/BayesDB,mit-probabilistic-computing-project/crosscat,JDReutt/BayesDB,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,fivejjs/crosscat,probcomp/crosscat,JDReutt/BayesDB,probcomp/crosscat,fivejjs/crosscat,fivejjs/crosscat,fivejjs/crosscat,poppingtonic/BayesDB,poppingtonic/BayesDB,probcomp/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,fivejjs/crosscat,poppingtonic/BayesDB,JDReutt/BayesDB,probcomp/crosscat,fivejjs/crosscat,probcomp/crosscat,fivejjs/crosscat,JDReutt/BayesDB,mit-probabilistic-computing-project/crosscat,poppingtonic/BayesDB | src/validate.py | src/validate.py | from collections import Counter
modeltypes = set(["asymmetric_beta_bernoulli", "normal_inverse_gamma", "pitmanyor_atom", "symmetric_dirichlet_discrete", "poisson_gamma"])
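# assert_map_consistency checks that two dicts are exact inverses of each
# other, e.g. {"a": 0} and {0: "a"}.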
def assert_map_consistency(map_1, map_2):
assert(len(map_1)==len(map_2))
for key in map_1:
assert(key == map_2[map_1[key]])
def assert_mc_consistency(mc):
assert_map_consistency(mc["name_to_idx"], mc["idx_to_name"])
assert(len(mc["name_to_idx"])==len(mc["column_metadata"]))
for column_metadata_i in mc["column_metadata"]:
assert(column_metadata_i["modeltype"] in modeltypes)
assert_map_consistency(column_metadata_i["value_to_code"],
column_metadata_i["code_to_value"])
def assert_mr_consistency(mr):
assert_map_consistency(mr["name_to_idx"], mr["idx_to_name"])
| apache-2.0 | Python |
|
827828dc479f295e6051d69c919f5f1c97dcb6e2 | Add management command to verify MOTECH connection certificates. | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/motech/management/commands/verify_motech_connection_certs.py | corehq/motech/management/commands/verify_motech_connection_certs.py | from urllib.parse import urlparse, urlunparse
import requests
from django.core.management.base import BaseCommand
from requests.exceptions import SSLError
from corehq.motech.models import ConnectionSettings
from corehq.util.log import with_progress_bar
IMPLICIT_HTTPS_PORT = 443
class Command(BaseCommand):
help = "Verify MOTECH connection certificates by performing an HTTP HEAD " \
"request to all unique domains where the URL method == HTTPS and " \
"SSL validation is enabled."
def add_arguments(self, parser):
parser.add_argument("-c", "--ca-bundle", metavar="FILE",
help="Use a custom CA trust store for SSL verifications.")
parser.add_argument("--connect-timeout", metavar="SECONDS", type=float,
default=None, help="Use custom HTTP connection timeout value.")
def handle(self, *args, **options):
verbose = options["verbosity"] > 1
timeout = options["connect_timeout"]
castore = options["ca_bundle"]
def debug(msg):
if verbose:
self.stdout.write(msg)
netlocs = {}
for connection in ConnectionSettings.objects.all():
if connection.skip_cert_verify:
debug(f"skipping (verify disabled): {connection.url}")
continue
urlparts = urlparse(connection.url)
if urlparts.scheme == "https":
hostname, _, port = urlparts.netloc.partition(":")
if not port:
# Key URL dict by explicit port numbers to avoid duplicate
# hits on domains where multiple URLs exist, some with the
# port implied and others with port 443 set explicitly.
port = IMPLICIT_HTTPS_PORT
root_url = urlunparse(("https", urlparts.netloc, "/", "", "", ""))
netlocs.setdefault((hostname, int(port)), root_url)
elif urlparts.scheme == "http":
debug(f"skipping (non-SSL): {connection.url}")
else:
debug(f"skipping (unknown scheme): {connection.url}")
errors = []
failures = []
urls = [v for (k, v) in sorted(netlocs.items())]
for url in with_progress_bar(urls, oneline=(not verbose)):
try:
debug(f"HEAD {url}")
requests.head(url, verify=(castore or True), timeout=timeout)
except SSLError:
failures.append(url)
except requests.RequestException as exc:
errors.append((url, str(exc)))
if errors:
self.stdout.write(f"{len(errors)} HTTP error(s):")
for url, msg in errors:
self.stderr.write(f"WARNING: {url} {msg}", self.style.NOTICE)
if failures:
self.stdout.write(f"{len(failures)} SSL verification failure(s):")
for url in failures:
self.stdout.write(f"FAIL: {url}", self.style.ERROR)
total = len(urls)
successes = total - (len(failures) + len(errors))
final_msg = f"\nSuccessfully verified {successes} of {total} domain(s)"
if total and not successes:
style = self.style.ERROR
elif total > successes:
style = self.style.WARNING
else:
style = self.style.SUCCESS
self.stdout.write(final_msg, style)
| bsd-3-clause | Python |
|
fc017a578a402b3d24523d1a41b7a4fdc0b107ef | add a starter proxy script | kmacrow/jquery.instagram.js,kmacrow/jquery.instagram.js | scripts/proxy.py | scripts/proxy.py | #!/usr/bin/env python
'''
Copyright (C) Kalan MacRow, 2013
This code is distributed with jquery.instagram.js
under the MIT license.
https://github.com/kmacrow/jquery.instagram.js
'''
import os
import cgi
import sys
import cgitb
import urllib2
# Base URL for Instagram API endpoints
INSTAGRAM_BASE = 'https://api.instagram.com/v1/'
# Add acceptable origins here...
ACCEPT_ORIGINS = ['http://localhost',
'http://localhost:8888',
'http://localhost:8080']
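# Usage sketch (assuming a standard CGI deployment):
#   GET /cgi-bin/proxy.py?client_id=<your_client_id>&tag=<tag_name>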
# Initialize CGI with JSON output
cgitb.enable()
form = cgi.FieldStorage()
print "Content-Type: application/json"
# Support cross origin resource sharing
origin = os.environ.get('HTTP_ORIGIN')
if origin in ACCEPT_ORIGINS:
print "Access-Control-Allow-Origin: %s" % origin
# empty line after headers
print
client_id = form.getfirst('client_id', None)
tag_name = form.getfirst('tag', None)
if not client_id or not tag_name:
print '{"error":"client_id and tag required."}'
sys.exit(0)
# Get the data from Instagram
stream = urllib2.urlopen(INSTAGRAM_BASE + 'tags/' + tag_name \
+ '/media/recent/?client_id=' + client_id)
print stream.read()
| mit | Python |
|
a319f2f1606a5c4d33e846b496e555140607c98d | Add track_name script | haldean/midigen | track_names.py | track_names.py | import midi
import sys
def track_name(track):
for ev in track:
if isinstance(ev, midi.TrackNameEvent):
return ''.join(map(chr, ev.data))
name = 'no name, first 6 events:'
for ev in track[:6]:
name += '\n %s' % ev
return name
def main(argv):
if len(argv) < 2:
print 'usage: track_names.py file.mid'
return
mid = midi.read_midifile(argv[1])
print '%d tracks' % len(mid)
for i, t in enumerate(mid):
print ' %03d: %s' % (i, track_name(t))
if __name__ == '__main__':
main(sys.argv)
| mit | Python |
|
877a7b7449a1d88c14633376a2dfaca8c619c26a | Add solution to exercis 3.6. | HenrikSamuelsson/python-crash-course | exercises/chapter_03/exercise_03_06/exercise_03_06.py | exercises/chapter_03/exercise_03_06/exercise_03_06.py | # 3-6 Guest List
guest_list = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]
message = "Hi " + guest_list[0] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[1] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[2] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[3] + " you are invited to dinner at 7 on saturday."
print(message)
cancelation_message = guest_list[1] + " can not attend the dinner."
print(cancelation_message)
guest_list[1] = "Charles Darwin"
message = "Hi " + guest_list[0] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[1] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[2] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[3] + " you are invited to dinner at 7 on saturday."
print(message)
message = "I have a bigger table now so three more people will be invited."
print(message)
guest_list.insert(0, "Stephen Hawking")
guest_list.insert(2, "Louis Pasteur")
guest_list.append("Nicolaus Copernicus")
message = "Hi " + guest_list[0] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[1] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[2] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[3] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[4] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[5] + " you are invited to dinner at 7 on saturday."
print(message)
message = "Hi " + guest_list[6] + " you are invited to dinner at 7 on saturday."
print(message)
| mit | Python |
|
5c7538ca1e43eb4529c04169a9a15c513bc3e659 | Add segment_tangent_angle tests module | danforthcenter/plantcv,danforthcenter/plantcv,danforthcenter/plantcv | tests/plantcv/morphology/test_segment_tangent_angle.py | tests/plantcv/morphology/test_segment_tangent_angle.py | import pytest
import cv2
from plantcv.plantcv import outputs
from plantcv.plantcv.morphology import segment_tangent_angle
@pytest.mark.parametrize("size", [3, 100])
def test_segment_tangent_angle(size, morphology_test_data):
# Clear previous outputs
outputs.clear()
skel = cv2.imread(morphology_test_data.skel_img, -1)
leaf_obj = morphology_test_data.load_segments(morphology_test_data.segments_file, "leaves")
_ = segment_tangent_angle(segmented_img=skel, objects=leaf_obj, size=size)
assert len(outputs.observations['default']['segment_tangent_angle']['value']) == 4
| mit | Python |
|
6d2d224d246569a35a7b4ae5d8086e83bbb67155 | move server.py to project dir | youngam/LearningAndroid,youngam/LearningAndroid,youngam/LearningAndroid | server/server.py | server/server.py | from datetime import datetime
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
SERVER_PORT = 90
HOST_ADDRESS = ''
def save_data(user_email):
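# appends one "<email>, <YYYY-MM-DD HH:MM:SS>" line per registration to users.txt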
file = open('users.txt', 'a+')
current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
file.write("{}, {}".format(user_email, current_time))
file.write("\n")
print("save {}".format(user_email))
def get_json(data):
try:
return json.loads(data)
except ValueError:
# if user send not json --> ignore all that he sent
return []
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_GET(self):
self._set_headers()
self.wfile.write("{\"hello\":\"friend\"}".encode("utf-8"))
def do_HEAD(self):
self._set_headers()
def do_POST(self):
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data_str = self.rfile.read(content_length).decode()
post_data_json = get_json(post_data_str)
email_key = "email"
# if client didn't send email as param
user_email = post_data_json[email_key] if email_key in post_data_json else None
self._set_headers()
if user_email is not None:
save_data(user_email)
self.wfile.write("{\"successfully\":\"registered\"}".encode("utf-8"))
else:
self.wfile.write("{\"error\":\"invalid request\"}".encode("utf-8"))
def run(server_class=HTTPServer, handler_class=S, port=SERVER_PORT):
server_address = (HOST_ADDRESS, port)
httpd = server_class(server_address, handler_class)
print('Starting httpd...')
httpd.serve_forever()
if __name__ == '__main__':
run()
| mit | Python |
|
a8db8c0448d98e2de0e662581542bd644e673c7c | Add migration removing generated objects with factories | makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin | geotrek/core/migrations/0018_remove_other_objects_from_factories.py | geotrek/core/migrations/0018_remove_other_objects_from_factories.py | # Generated by Django 2.0.13 on 2020-04-06 13:40
from django.db import migrations
def remove_generated_objects_factories(apps, schema_editor):
ComfortModel = apps.get_model('core', 'Comfort')
PathSourceModel = apps.get_model('core', 'PathSource')
StakeModel = apps.get_model('core', 'Stake')
ComfortModel.objects.filter(paths__isnull=True, comfort__icontains="Comfort ").delete()
PathSourceModel.objects.filter(paths__isnull=True, source__icontains="PathSource ").delete()
StakeModel.objects.filter(paths__isnull=True, stake__icontains="Stake ").delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0017_remove_path_from_factories'),
]
operations = [
migrations.RunPython(remove_generated_objects_factories)
]
| bsd-2-clause | Python |
|
3ddf1f4a2bcae247978b66fd63848b3ed9782234 | add donwloader | Menooker/MistMusic | MistDownloader.py | MistDownloader.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import os
import time
import sys
import traceback
cnt=0
least_cnt=0
if len(sys.argv)==2:
least_cnt=int(sys.argv[1])
print least_cnt
if not os.path.exists("mp3"):
os.mkdir("mp3")
for path,dirname,filenames in os.walk("outdir"):
for filename in filenames:
if filename.startswith("mp3_url_"):
cnt+=1
if cnt%100==0:
print ("Has already downloaded %d songs!" % cnt)
f=open("outdir/"+filename)
for line in f:
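# each line is expected to look like: "<song_id> <play_count> <mp3_url>"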
values=line.split()
if len(values)!=3:
sys.stderr.write("Bad line '%s' in file %s\n" % (line,filename))
continue
sid=values[0]
play_cnt=int(values[1])
url=values[2]
if play_cnt<least_cnt:
continue
fn="mp3/%s.mp3" % sid
if not os.path.exists(fn):
try:
urllib.urlretrieve(url, fn)
print(sid)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,limit=None, file=sys.stderr)
time.sleep(2)
f.close()
| apache-2.0 | Python |
|
54e3d3147feb33f21c5bc78a8f3b4721574fcbb9 | Create A.py | Pouf/CodingCompetition,Pouf/CodingCompetition | Google-Code-Jam/2017-1B/A.py | Google-Code-Jam/2017-1B/A.py | import os
import sys
script = __file__
scriptPath = os.path.dirname(script)
scriptFile = os.path.basename(script)[0]
files = [f for f in os.listdir(scriptPath) if scriptFile in f and '.in' in f]
if '{}-large'.format(scriptFile) in str(files):
size = 'large'
elif '{}-small'.format(scriptFile) in str(files):
size = 'small'
elif '{}-test'.format(scriptFile) in str(files):
size = 'test'
else:
print('{}-test not found'.format(scriptFile))
sys.exit()
latest = sorted(f for f in files if size in f)[-1][:-3]
F = '{}/{}'.format(scriptPath, latest)
I = open(F + '.in', 'r')
O = open(F + '.out', 'w')
print(F)
T = int(I.readline()) # nb of test cases
# https://code.google.com/codejam/contest/8294486/dashboard
# Problem A.
for x in range(T):
D, N = map(int, I.readline().rstrip().split())
horses = [tuple(map(int, I.readline().split())) for _ in range(N)]
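# the slowest horse ahead reaches D after max((D-K)/S) hours, so the best
# cruise speed is simply D divided by that time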
slowpoke = max((D-K)/S for K, S in horses)
y = D/slowpoke
result = '{}Case #{}: {}'.format('\n' if x else '', x + 1, y)
print(result)
O.write(result)
I.close()
O.close()
| mit | Python |
|
28e113daa5bd9080f772a30807a930070871aaea | clone del main | meraz1990/TheIoTLearningInitiative | InternetOfThings101/main1.py | InternetOfThings101/main1.py | #!/usr/bin/python
import psutil
import signal
import sys
import time
from threading import Thread
def interruptHandler(signal, frame):
sys.exit(0)
def dataNetwork():
netdata = psutil.net_io_counters()
return netdata.packets_sent + netdata.packets_recv
def dataNetworkHandler():
idDevice = "IoT101Device"
while True:
packets = dataNetwork()
message = idDevice + " " + str(packets)
print "dataNetworkHandler " + message
time.sleep(1)
if __name__ == '__main__':
signal.signal(signal.SIGINT, interruptHandler)
threadx = Thread(target=dataNetworkHandler)
threadx.daemon = True # let Ctrl-C end the process without waiting on the worker thread
threadx.start()
while True:
print "Hello Internet of Things 101"
time.sleep(5)
# End of File
| apache-2.0 | Python |
|
607bd42a40b8f9909e3d889b6b9011b4d14a4e52 | add nexpose.py | liberza/python-nexpose | nexpose.py | nexpose.py | #!/usr/bin/python3
import xml.etree.ElementTree as etree
import urllib.request
import urllib.parse
import sys
import ssl
__author__ = 'Nick Levesque <nick@portcanary.com>'
# Nexpose API wrapper.
class Nexpose:
def __init__(self, hostname, port):
self.hostname = hostname
self.port = port
self.url = 'https://%s:%s/api/1.2/xml' % (self.hostname, self.port)
self.session_id = None
# Often the Nexpose Console is run with a self-signed cert. We allow for that here.
self.ctx = ssl.create_default_context()
self.ctx.check_hostname = False
self.ctx.verify_mode = ssl.CERT_NONE
# Generic API request, feed it an xml string and off it goes.
def api_request(self, xml_string):
# Encode the xml so that urllib will accept it.
post_data = (xml_string).encode('utf-8')
# Prepare the request.
request = urllib.request.Request(self.url)
request.add_header("Content-type", "text/xml")
# Get a response.
response = urllib.request.urlopen(request, post_data, context=self.ctx).read()
xml_response = etree.fromstring(response)
# Check for errors and return response.
if not xml_response.tag == 'Failure':
return response
else:
for exception in xml_response.iter('Exception'):
for message in exception.iter('Message'):
return str("Failure: " + message.text)
# Login function, we must capture the session-id contained in the response if successful.
def login(self, username, password):
# Encode the login request string so that urllib will accept it.
xml_string = "<LoginRequest user-id=\"%s\" password=\"%s\" />" % (username, password)
post_data = (xml_string).encode('utf-8')
# Prepare the request
request = urllib.request.Request(self.url)
request.add_header("Content-type", "text/xml")
# Get a response
response = urllib.request.urlopen(request, post_data, context=self.ctx).read()
xml_response = etree.fromstring(response)
# Check for errors and set session-id.
if not xml_response.tag == 'Failure':
self.session_id = xml_response.attrib.get('session-id')
else:
for exception in xml_response.iter('Exception'):
for message in exception.iter('Message'):
return str("Failure: " + message.text)
def logout(self):
# Encode the logout request string so that urllib will accept it.
xml_string = "<LogoutRequest session-id=\"%s\" />" % (self.session_id)
post_data = (xml_string).encode('utf-8')
# Prepare the request.
request = urllib.request.Request(self.url)
request.add_header("Content-type", "text/xml")
# Get a response.
response = urllib.request.urlopen(request, post_data, context=self.ctx).read()
xml_response = etree.fromstring(response)
# Check for errors.
if not xml_response.tag == 'Failure':
pass
else:
for exception in xml_response.iter('Exception'):
for message in exception.iter('Message'):
return str("Failure: " + message.text)
if __name__ == '__main__':
# Usage: ./nexpose.py hostname port username password
nexpose = Nexpose(sys.argv[1], sys.argv[2])
result = nexpose.login(sys.argv[3], sys.argv[4])
if result:
print(result)
if nexpose.session_id:
print(nexpose.session_id)
nexpose.logout()
| apache-2.0 | Python |
|
a067c18f8534d79a85538eaf11e34e99f9e17286 | develop update to pair master, going to rename master now | rkk09c/Broadcast | oh_shit.py | oh_shit.py | from app import app, db
from app.mod_sms.models import *
ug1 = UserGroup(name='Canyon Time', phone='+17868378095', active=True)
ug2 = UserGroup(name='test', phone='+18503783607', active=True)
ryan = User(fname='Ryan', lname='Kuhl', phone='+13058985985', active=True)
simon = User(fname='Simon', lname='', phone='+13109264989', active=True)
dan = User(fname='Dan' , lname='Malik', phone='+14152718694', active=True)
tom = User(fname='Tom' , lname='Scorer', phone='+13109022760', active=True)
steve = User(fname='Steve', lname='Makuch', phone='+16164609893', active=True)
chris = User(fname='Chris', lname='', phone='+16269882527', active=True)
ben = User(fname='Ben' , lname='Eisenbise', phone='+13234017625', active=True)
alex = User(fname='Alex', lname='Thorpe', phone='+14243869550', active=True)
ug1.groups_to_users.append(ryan)
ug1.groups_to_users.append(simon)
ug1.groups_to_users.append(dan)
ug1.groups_to_users.append(tom)
ug1.groups_to_users.append(steve)
ug1.groups_to_users.append(chris)
ug1.groups_to_users.append(ben)
ug1.groups_to_users.append(alex)
ug2.groups_to_users.append(ryan)
db.session.add(ug1)
db.session.add(ug2)
db.session.add(ryan)
db.session.add(simon)
db.session.add(dan)
db.session.add(tom)
db.session.add(steve)
db.session.add(chris)
db.session.add(ben)
db.session.add(alex)
db.session.commit()
| apache-2.0 | Python |
|
01c98087541828421da49295abedd3d894cdb3b5 | Create luz.py | bettocr/rpi-proyecto-final,bettocr/rpi-proyecto-final,bettocr/rpi-proyecto-final,bettocr/rpi-proyecto-final | opt/luz.py | opt/luz.py | #!/usr/bin/env python
# Written by: Roberto Arias (@bettocr)
#
# Turns LED lights on and off
#
import RPi.GPIO as GPIO, time, os
GPIO.setmode(GPIO.BCM)
on = 0 # lights currently on
MAX=5200 # maximum light reading before the LED turns on; higher means darker
PIN=23 # pin wired to the relay
PINRC=24 # pin that reads the photocell
GPIO.setup(PIN,GPIO.OUT)
def RCtime (RCpin):
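# RC charge-timing: drive the pin low to discharge the capacitor, then count
# loop iterations until the photocell charges it past the HIGH threshold.
# Darker conditions -> higher resistance -> larger reading.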
reading = 0
GPIO.setup(RCpin, GPIO.OUT)
GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(RCpin, GPIO.IN)
while (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
return reading
while True:
#print RCtime(24)
luz = RCtime(PINRC)
if luz > MAX:
GPIO.output(PIN,True)
on = 1
if luz < MAX and on == 1:
GPIO.output(PIN,False)
on = 0
| bsd-3-clause | Python |
|
87af377fab216e3db9ad700e124b356b15da492f | add form_register.py | EscapeLife/web_crawler | 7.验证码处理/1.form_register.py | 7.验证码处理/1.form_register.py | #!/usr/bin/env python
# coding:utf-8
import csv
import string
import urllib
import urllib2
import cookielib
import lxml.html
import pytesseract
from PIL import Image
from io import BytesIO
REGISTER_URL = 'http://example.webscraping.com/places/default/user/register'
def parse_form(html):
"""从表单中找到所有的隐匿的input变量属性
"""
tree = lxml.html.fromstring(html)
data = {}
for e in tree.cssselect('form input'):
if e.get('name'):
data[e.get('name')] = e.get('value')
return data
def extract_image(html):
"""处理表单中嵌入的图片,解码之后保存img"""
tree = lxml.html.fromstring(html)
# 获取嵌入的图片数据
img_data = tree.cssselect('div#recaptcha img')[0].get('src')
# remove data:image/png;base64, header
img_data = img_data.partition(',')[-1]
# open('test_.png', 'wb').write(data.decode('base64'))
binary_img_data = img_data.decode('base64')
file_like = BytesIO(binary_img_data)
img = Image.open(file_like)
# img.save('test.png')
return img
def ocr(img):
"""使用开源的Tesseract OCR引擎对图片进行处理和识别
pytesseract.image_to_string(Image.open('xxx.png'))
"""
# original CAPTCHA image: img.save('captcha_original.png')
# threshold the image to drop the background noise and keep the text; greyscale first
gray = img.convert('L')
# greyscale image after conversion: gray.save('captcha_greyscale.png')
# only pixels with a value below 1 (pure black) survive the threshold
bw = gray.point(lambda x: 0 if x < 1 else 255, '1')
# thresholded image: bw.save('captcha_threshold.png')
word = pytesseract.image_to_string(bw)
# the CAPTCHA uses only lowercase letters, so lowercase the result to raise the hit rate
ascii_word = ''.join(c for c in word if c in string.letters).lower()
return ascii_word
def register(first_name, last_name, email, password, captcha_fn):
"""实现自动注册
:param first_name: 注册填写的名字
:param last_name: 注册填写的姓氏
:param email: 注册填写的邮箱
:param password: 注册填写的密码
:param captcha_fn: 识别验证码的函数
:return: 是否登录成功
"""
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
html = opener.open(REGISTER_URL).read()
form = parse_form(html)
form['first_name'] = first_name
form['last_name'] = last_name
form['email'] = email
form['password'] = form['password_two'] = password
img = extract_image(html)
captcha = captcha_fn(img)
form['recaptcha_response_field'] = captcha
encoded_data = urllib.urlencode(form)
request = urllib2.Request(REGISTER_URL, encoded_data)
response = opener.open(request)
success = '/user/register' not in response.geturl()
return success
def test_samples():
"""测试精度的OCR图像样本
"""
correct = total = 0
for filename, text in csv.reader(open('samples/samples.csv')):
img = Image.open('samples/' + filename)
if ocr(img) == text:
correct += 1
total += 1
print 'Accuracy: %d/%d' % (correct, total)
if __name__ == '__main__':
print register(first_name='Test', last_name='Test', email='Test@webscraping.com', password='Test', captcha_fn=ocr)
| mit | Python |
|
fe186bf85472cf4e683d9838e36e60c680e6dc77 | Add test | github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql | python/ql/test/library-tests/PointsTo/new/code/w_function_values.py | python/ql/test/library-tests/PointsTo/new/code/w_function_values.py | def test_conditoinal_function(cond):
def foo():
return "foo"
def bar():
return "bar"
if cond:
f = foo
else:
f = bar
sink = f()
return sink
f_false = test_conditoinal_function(False)
f_true = test_conditoinal_function(True)
def foo():
return "foo"
def test_redefinition():
f = foo
def foo():
return "refined"
sink = f()
return sink | mit | Python |
|
1afb7bb7b1f3e8ef3070f1100dac683b2b8254ee | remove unused table | macarthur-lab/seqr,ssadedin/seqr,ssadedin/seqr,macarthur-lab/xbrowse,macarthur-lab/seqr,macarthur-lab/seqr,macarthur-lab/seqr,macarthur-lab/xbrowse,ssadedin/seqr,macarthur-lab/xbrowse,ssadedin/seqr,macarthur-lab/xbrowse,macarthur-lab/xbrowse,ssadedin/seqr,macarthur-lab/xbrowse,macarthur-lab/seqr | xbrowse_server/base/migrations/0003_delete_xhmmfile.py | xbrowse_server/base/migrations/0003_delete_xhmmfile.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0002_auto_20160117_1843'),
]
operations = [
migrations.DeleteModel(
name='XHMMFile',
),
]
| agpl-3.0 | Python |
|
49c673c5c8374867fc9bf026717fe137bdba84bc | Add test file for graph.py and add test of Greengraph class constructor | MikeVasmer/GreenGraphCoursework | greengraph/test/test_graph.py | greengraph/test/test_graph.py | from greengraph.map import Map
from greengraph.graph import Greengraph
from mock import patch
import geopy
from nose.tools import assert_equal
start = "London"
end = "Durham"
def test_Greengraph_init():
with patch.object(geopy.geocoders,'GoogleV3') as mock_GoogleV3:
test_Greengraph = Greengraph(start,end)
#Test that GoogleV3 is called with the correct parameters
mock_GoogleV3.assert_called_with(domain="maps.google.co.uk")
#Test that the start and end fields are initialised correctly
assert_equal(test_Greengraph.start,start)
assert_equal(test_Greengraph.end,end)
| mit | Python |
|
54553efa024d74ec60647ea7616191a52fe9948f | Add a command to create collaborator organisations | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/management/commands/create_collaborator_organisation.py | akvo/rsr/management/commands/create_collaborator_organisation.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
"""Create a collaborator organisation for a given organisation.
Usage:
python manage.py create_collaborator_organisation <org-id>
"""
import sys
from django.core.management.base import BaseCommand
from akvo.rsr.models import Organisation
class Command(BaseCommand):
help = __doc__
def add_arguments(self, parser):
parser.add_argument('org_id', type=int)
def handle(self, *args, **options):
org_id = options['org_id']
try:
organisation = Organisation.objects.get(id=org_id)
except Organisation.DoesNotExist:
sys.exit('Could not find organisation with ID: {}'.format(org_id))
collaborator, _ = Organisation.objects.get_or_create(
content_owner=organisation,
original=organisation,
defaults=dict(
name='Collaborator: {}'.format(organisation.name),
long_name='Collaborator: {}'.format(organisation.long_name),
)
)
print('Collaborator Organisation created with ID: {}'.format(collaborator.id))
| agpl-3.0 | Python |
|
6318c1dd7d3c942bc7702402eb6ae50a86c023b7 | add hastad | JustHitTheCore/ctf_workshops,disconnect3d/jhtc_ctf_workshops,GrosQuildu/jhtc_ctf_workshops,JustHitTheCore/ctf_workshops,GrosQuildu/jhtc_ctf_workshops | lab3/hastad/code.py | lab3/hastad/code.py | # https://id0-rsa.pub/problem/11/
import gmpy2
def crt(a, n):
"""Chinese remainder theorem
from: http://rosettacode.org/wiki/Chinese_remainder_theorem#Python
x = a[0] % n[0]
x = a[1] % n[1]
x = a[2] % n[2]
Args:
a(list): remainders
n(list): moduli
Returns:
long: solution to crt
"""
if len(a) != len(n):
raise ValueError("Different number of remainders({}) and moduli({})".format(len(a), len(n)))
sum = 0
prod = reduce(lambda x, y: x * y, n)
for n_i, a_i in zip(n, a):
p = prod / n_i
sum += a_i * gmpy2.invert(p, n_i) * p
return long(sum % prod)
e = 3
C1 = 0x94f145679ee247b023b09f917beea7e38707452c5f4dc443bba4d089a18ec42de6e32806cc967e09a28ea6fd2e683d5bb7258bce9e6f972d6a30d7e5acbfba0a85610261fb3e0aac33a9e833234a11895402bc828da3c74ea2979eb833cd644b8ab9e3b1e46515f47a49ee602c608812241e56b94bcf76cfbb13532d9f4ff8ba
N1 = 0xa5d1c341e4837bf7f2317024f4436fb25a450ddabd7293a0897ebecc24e443efc47672a6ece7f9cac05661182f3abbb0272444ce650a819b477fd72bf01210d7e1fbb7eb526ce77372f1aa6c9ce570066deee1ea95ddd22533cbc68b3ba20ec737b002dfc6f33dcb19e6f9b312caa59c81bb80cda1facf16536cb3c184abd1d5
C2 = 0x5ad248df283350558ba4dc22e5ec8325364b3e0b530b143f59e40c9c2e505217c3b60a0fae366845383adb3efe37da1b9ae37851811c4006599d3c1c852edd4d66e4984d114f4ea89d8b2aef45cc531cfa1ab16c7a2e04d8884a071fed79a8d30af66edf1bbbf695ff8670b9fccf83860a06e017d67b1788b19b72d597d7d8d8
N2 = 0xaf4ed50f72b0b1eec2cde78275bcb8ff59deeeb5103ccbe5aaef18b4ddc5d353fc6dc990d8b94b3d0c1750030e48a61edd4e31122a670e5e942ae224ecd7b5af7c13b6b3ff8bcc41591cbf2d8223d32eeb46ba0d7e6d9ab52a728be56cd284842337db037e1a1da246ed1da0fd9bdb423bbe302e813f3c9b3f9414b25e28bda5
C3 = 0x8a9315ee3438a879f8af97f45df528de7a43cd9cf4b9516f5a9104e5f1c7c2cdbf754b1fa0702b3af7cecfd69a425f0676c8c1f750f32b736c6498cac207aa9d844c50e654ceaced2e0175e9cfcc2b9f975e3183437db73111a4a139d48cc6ce4c6fac4bf93b98787ed8a476a9eb4db4fd190c3d8bf4d5c4f66102c6dd36b73
N3 = 0x5ca9a30effc85f47f5889d74fd35e16705c5d1a767004fec7fdf429a205f01fd7ad876c0128ddc52caebaa0842a89996379ac286bc96ebbb71a0f8c3db212a18839f7877ebd76c3c7d8e86bf6ddb17c9c93a28defb8c58983e11304d483fd7caa19b4b261fc40a19380abae30f8d274481a432c8de488d0ea7b680ad6cf7776b
n_all = [N1, N2, N3]
ciphertext_all = [C1, C2, C3]
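# Hastad's broadcast attack: the same message encrypted with e=3 under three
# coprime moduli yields m^3 via CRT (since m^3 < N1*N2*N3), so an integer
# cube root recovers m directly.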
c_e = crt(ciphertext_all, n_all)
c = gmpy2.iroot(c_e, e)
print c
c = long(c[0])
print hex(c)[2:].strip('L').decode('hex')
| mit | Python |
|
0fce5f54490d9ae9014280a5e0e96fd53128d299 | Add kubevirt_preset module (#52498) | thaim/ansible,thaim/ansible | lib/ansible/modules/cloud/kubevirt/kubevirt_preset.py | lib/ansible/modules/cloud/kubevirt/kubevirt_preset.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_preset
short_description: Manage KubeVirt virtual machine presets
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machine presets.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Create or delete virtual machine presets.
default: "present"
choices:
- present
- absent
type: str
name:
description:
- Name of the virtual machine preset.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine preset exists.
required: true
type: str
selector:
description:
- "Selector is a label query over a set of virtual machine preset."
type: dict
extends_documentation_fragment:
- k8s_auth_options
- k8s_resource_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create virtual machine preset 'vmi-preset-small'
kubevirt_preset:
state: present
name: vmi-preset-small
namespace: vms
memory: 64M
selector:
matchLabels:
kubevirt.io/vmPreset: vmi-preset-small
- name: Remove virtual machine preset 'vmi-preset-small'
kubevirt_preset:
state: absent
name: vmi-preset-small
namespace: vms
'''
RETURN = '''
kubevirt_preset:
description:
- The virtual machine preset managed by the user.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstancepreset)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC
)
KIND = 'VirtualMachineInstancePreset'
VMP_ARG_SPEC = {
'selector': {'type': 'dict'},
}
class KubeVirtVMPreset(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
argument_spec.update(VM_COMMON_ARG_SPEC)
argument_spec.update(VMP_ARG_SPEC)
return argument_spec
def execute_module(self):
# Parse parameters specific for this module:
definition = virtdict()
selector = self.params.get('selector')
if selector:
definition['spec']['selector'] = selector
# FIXME: Devices must be set, but we don't yet support any
# attributes there, remove when we do:
definition['spec']['domain']['devices'] = dict()
# Execute the CURD of VM:
dummy, definition = self.construct_vm_definition(KIND, definition, definition)
result_crud = self.execute_crud(KIND, definition)
changed = result_crud['changed']
result = result_crud.pop('result')
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_preset': result,
'result': result_crud,
})
def main():
module = KubeVirtVMPreset()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| mit | Python |
|
a3d837afe6662edb10395baa8851de551d0915a5 | add email templates tests | auth0/auth0-python,auth0/auth0-python | auth0/v3/test/management/test_email_endpoints.py | auth0/v3/test/management/test_email_endpoints.py | import unittest
import mock
from ...management.email_templates import EmailTemplates
class TestClients(unittest.TestCase):
@mock.patch('auth0.v3.management.email_templates.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
c = EmailTemplates(domain='domain', token='jwttoken')
c.create({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/email-templates',
data={'a': 'b', 'c': 'd'}
)
@mock.patch('auth0.v3.management.email_templates.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
c = EmailTemplates(domain='domain', token='jwttoken')
c.get('this-template-name')
mock_instance.get.assert_called_with(
'https://domain/api/v2/email-templates/this-template-name'
)
@mock.patch('auth0.v3.management.email_templates.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
c = EmailTemplates(domain='domain', token='jwttoken')
c.update('this-template-name', {'a': 'b', 'c': 'd'})
mock_instance.patch.assert_called_with(
'https://domain/api/v2/email-templates/this-template-name',
data={'a': 'b', 'c': 'd'}
)
| mit | Python |
|
9fe4ce918456a3470cf4eb50212af9a487c03ce4 | add tests for utils | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/auditcare/tests/test_auditcare_migration.py | corehq/apps/auditcare/tests/test_auditcare_migration.py | from django.test.testcases import TransactionTestCase
from corehq.apps.auditcare.models import AuditcareMigrationMeta
from datetime import datetime
from unittest.mock import patch
from django.test import SimpleTestCase
from corehq.apps.auditcare.management.commands.copy_events_to_sql import Command
from corehq.apps.auditcare.utils.migration import AuditCareMigrationUtil, get_formatted_datetime_string
from django.core.cache import cache
class TestCopyEventsToSQL(SimpleTestCase):
start_time = datetime(2020, 6, 1)
@classmethod
def setUpClass(cls):
return super().setUpClass()
@patch('corehq.apps.auditcare.management.commands.copy_events_to_sql.AuditCareMigrationUtil.get_next_batch_start', return_value=start_time)
def test_generate_batches(self, _):
batches = Command().generate_batches(2, 'h')
expected_batches = [
[datetime(2020, 6, 1), datetime(2020, 6, 1, 1)],
[datetime(2020, 6, 1, 1), datetime(2020, 6, 1, 2)]
]
        self.assertEqual(batches, expected_batches)
batches = Command().generate_batches(2, 'd')
expected_batches = [
[datetime(2020, 6, 1), datetime(2020, 6, 2)],
[datetime(2020, 6, 2), datetime(2020, 6, 3)]
]
        self.assertEqual(batches, expected_batches)
class TestAuditcareMigrationUtil(TransactionTestCase):
util = AuditCareMigrationUtil()
start_time = datetime(2020, 6, 1)
@classmethod
def setUpClass(cls):
cls.key = get_formatted_datetime_string(datetime.now()) + '_' + get_formatted_datetime_string(datetime.now())
cache.set(cls.util.start_key, cls.start_time)
return super().setUpClass()
def test_get_next_batch_start(self):
start_time = self.util.get_next_batch_start()
self.assertEqual(start_time, self.start_time)
def test_locking_functionality(self):
self.util.acquire_read_lock()
self.assertRaises(Exception, self.util.get_next_batch_start)
self.util.release_read_lock()
start_time = self.util.get_next_batch_start()
self.assertEqual(start_time, self.start_time)
def test_log_batch_start(self):
self.util.log_batch_start(self.key)
self.util.log_batch_start(self.key)
expected_log = AuditcareMigrationMeta.objects.filter(key=self.key)
self.assertEqual(len(expected_log), 1)
self.assertEqual(expected_log[0].key, self.key)
expected_log[0].delete()
def test_set_batch_as_finished(self):
AuditcareMigrationMeta.objects.create(key=self.key, state=AuditcareMigrationMeta.STARTED)
self.util.set_batch_as_finished(self.key, 30)
expected_log = AuditcareMigrationMeta.objects.filter(key=self.key)
self.assertEqual(expected_log[0].state, AuditcareMigrationMeta.FINISHED)
expected_log[0].delete()
def test_set_batch_as_errored(self):
AuditcareMigrationMeta.objects.create(key=self.key, state=AuditcareMigrationMeta.STARTED)
self.util.set_batch_as_errored(self.key)
expected_log = AuditcareMigrationMeta.objects.filter(key=self.key)
self.assertEqual(expected_log[0].state, AuditcareMigrationMeta.ERRORED)
expected_log[0].delete()
def test_get_errored_keys(self):
start_time = datetime(2020, 6, 20)
end_time = datetime(2020, 6, 21)
key = get_formatted_datetime_string(start_time) + '_' + get_formatted_datetime_string(end_time)
obj = AuditcareMigrationMeta.objects.create(key=key, state=AuditcareMigrationMeta.ERRORED)
keys = self.util.get_errored_keys(1)
self.assertEqual([[start_time, end_time]], keys)
obj.delete()
@classmethod
def tearDownClass(cls):
cache.delete(cls.util.start_key)
cache.delete(cls.util.start_lock_key)
AuditcareMigrationMeta.objects.all().delete()
return super().tearDownClass()
| bsd-3-clause | Python |
|
cee2683d3c0a60739b8e4f1c1dbaa74981a42392 | add class skeleton for schedule generator | ahoskins/Winston,rosshamish/classtime,rosshamish/classtime,ahoskins/Winston | angular_flask/classtime/scheduler.py | angular_flask/classtime/scheduler.py | class Scheduler(object):
"""
Helper class which builds optimal schedules out of
class listings.
Use static methods only - do not create instances of
the class.
"""
def __init__(self):
pass
@staticmethod
def generate_schedule(classtimes):
"""
Generates one good schedule based on the classtimes
provided.
classtimes should be in the following format:
[
{
'course_name' : 'somename',
'course_attr_a' : 'someattr',
...
'day' : '<daystring>',
'startTime' : '<time>',
'endTime' : '<time>'
},
...
{
...
}
]
Where <daystring> is a string containing the days the
class is scheduled on:
- UMTWRFS is Sunday...Saturday
- eg 'MWF' or 'TR'
And <time> is a time of format 'HH:MM XM'
- eg '08:00 AM'
"""
pass
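    @staticmethod
    def _parse_classtime(classtime):
        """Illustrative sketch only, not part of the skeleton's contract:
        expand one classtime dict into (day, start, end) tuples using the
        <daystring>/<time> conventions documented above."""
        from datetime import datetime
        expanded = []
        for day in classtime['day']:
            start = datetime.strptime(classtime['startTime'], '%I:%M %p').time()
            end = datetime.strptime(classtime['endTime'], '%I:%M %p').time()
            expanded.append((day, start, end))
        return expanded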
| agpl-3.0 | Python |
|
82e4c67bd7643eed06e7cd170ca1d0de41c70912 | Add a data analyzer class. | berendkleinhaneveld/Registrationshop,berendkleinhaneveld/Registrationshop | core/data/DataAnalyzer.py | core/data/DataAnalyzer.py | """
DataAnalyzer
:Authors:
Berend Klein Haneveld
"""
class DataAnalyzer(object):
"""
DataAnalyzer
"""
def __init__(self):
super(DataAnalyzer, self).__init__()
@classmethod
def histogramForData(cls, data, nrBins):
"""
Samples the image data in order to create bins
for making a histogram of the data.
"""
dims = data.GetDimensions()
minVal, maxVal = data.GetScalarRange()
bins = [0 for x in range(nrBins)]
stepSize = 3
for z in range(0, dims[2], stepSize):
for y in range(0, dims[1], stepSize):
for x in range(0, dims[0], stepSize):
element = data.GetScalarComponentAsFloat(x, y, z, 0)
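                    # map the scalar linearly onto a bin index in [0, nrBins - 1]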
index = int(((element - minVal) / float(maxVal - minVal)) * (nrBins-1))
bins[index] += 1
return bins
| mit | Python |
|
3f85610873d88592970c64661e526b2a576e300f | Add new sms message generator | bsharif/SLOT,nhshd-slot/SLOT,bsharif/SLOT,nhshd-slot/SLOT,nhshd-slot/SLOT,bsharif/SLOT | sms_generator.py | sms_generator.py | def generate_new_procedure_message(procedure, ward, timeframe, doctor):
unique_reference = str(1)
message = str.format("{0} is available on {1}. Attend the ward in {2} and meet {3} in the junior doctors' office. "
"To accept this opportunity reply with {4}",
procedure,
ward,
timeframe,
doctor,
unique_reference)
print(message)
return message
def generate_success_response_message(procedure, ward, timeframe, doctor):
message = str.format("Please attend {0} in {1} and ask for {2} to complete this supervised "
"procedure. This learning opportunity has been reserved exclusively for you, please make "
"every effort to attend.",
ward,
timeframe,
doctor)
print(message)
return message
def generate_not_success_response_message():
message = str.format("Sorry - procedure already taken this time.")
print(message)
return message | mit | Python |
|
926631d068a223788714cd645ae5336881c6853f | Update messageable.py | gschizas/praw,gschizas/praw,praw-dev/praw,praw-dev/praw | praw/models/reddit/mixins/messageable.py | praw/models/reddit/mixins/messageable.py | """Provide the MessageableMixin class."""
from ....const import API_PATH
class MessageableMixin:
"""Interface for classes that can be messaged."""
def message(self, subject, message, from_subreddit=None):
"""
Send a message to a redditor or a subreddit's moderators (mod mail).
:param subject: The subject of the message.
:param message: The message content.
:param from_subreddit: A :class:`~.Subreddit` instance or string to
send the message from. When provided, messages are sent from
the subreddit rather than from the authenticated user.
Note that the authenticated user must be a moderator of the
subreddit and have the ``mail`` moderator permission.
For example, to send a private message to ``u/spez``, try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from PRAW')
To send a message to ``u/spez`` from the moderators of ``r/test`` try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from r/test',
from_subreddit='test')
To send a message to the moderators of ``r/test``, try:
.. code:: python
reddit.subreddit('test').message('TEST', 'test PM from PRAW')
"""
data = {
"subject": subject,
"text": message,
"to": "{}{}".format(
getattr(self.__class__, "MESSAGE_PREFIX", ""), self
),
}
if from_subreddit:
data["from_sr"] = str(from_subreddit)
self._reddit.post(API_PATH["compose"], data=data)
| """Provide the MessageableMixin class."""
from ....const import API_PATH
class MessageableMixin:
"""Interface for classes that can be messaged."""
def message(self, subject, message, from_subreddit=None):
"""
Send a message to a redditor or a subreddit's moderators (mod mail).
:param subject: The subject of the message.
:param message: The message content.
:param from_subreddit: A :class:`~.Subreddit` instance or string to send the
message from. When provided, messages are sent from the subreddit
rather than from the authenticated user. Note that the
authenticated user must be a moderator of the subreddit and have
the ``mail`` moderator permission.
For example, to send a private message to ``/u/spez``, try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from PRAW')
To send a message to ``u/spez`` from the moderators of ``r/test`` try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from r/test',
from_subreddit='test')
To send a message to the moderators of ``/r/test``, try:
.. code:: python
reddit.subreddit('test').message('TEST', 'test PM from PRAW')
"""
data = {
"subject": subject,
"text": message,
"to": "{}{}".format(
getattr(self.__class__, "MESSAGE_PREFIX", ""), self
),
}
if from_subreddit:
data["from_sr"] = str(from_subreddit)
self._reddit.post(API_PATH["compose"], data=data)
| bsd-2-clause | Python |
73bd8200f6ad23c60a05831e3b79497b830f19cd | Update old lithium comments about llvm-symbolizer 3.6 to 3.8 versions. | nth10sd/lithium,nth10sd/lithium,MozillaSecurity/lithium,MozillaSecurity/lithium | interestingness/envVars.py | interestingness/envVars.py | #!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import copy
import os
import platform
isLinux = (platform.system() == 'Linux')
isMac = (platform.system() == 'Darwin')
isWin = (platform.system() == 'Windows')
ENV_PATH_SEPARATOR = ';' if os.name == 'nt' else ':'
def envWithPath(path, runningEnv=os.environ):
"""Append the path to the appropriate library path on various platforms."""
if isLinux:
libPath = 'LD_LIBRARY_PATH'
elif isMac:
libPath = 'DYLD_LIBRARY_PATH'
elif isWin:
libPath = 'PATH'
env = copy.deepcopy(runningEnv)
if libPath in env:
if path not in env[libPath]:
env[libPath] += ENV_PATH_SEPARATOR + path
else:
env[libPath] = path
return env
def findLlvmBinPath():
"""Return the path to compiled LLVM binaries, which differs depending on compilation method."""
if isLinux:
# Assumes clang was installed through apt-get. Works with version 3.6.2,
# assumed to work with clang 3.8.0.
# Create a symlink at /usr/bin/llvm-symbolizer for: /usr/bin/llvm-symbolizer-3.8
if os.path.isfile('/usr/bin/llvm-symbolizer'):
return ''
else:
print 'WARNING: Please install clang via `apt-get install clang` if using Ubuntu.'
print 'then create a symlink at /usr/bin/llvm-symbolizer for: /usr/bin/llvm-symbolizer-3.8.'
print 'Try: `ln -s /usr/bin/llvm-symbolizer-3.8 /usr/bin/llvm-symbolizer`'
return ''
if isMac:
# Assumes LLVM was installed through Homebrew. Works with at least version 3.6.2.
brewLLVMPath = '/usr/local/opt/llvm/bin'
if os.path.isdir(brewLLVMPath):
return brewLLVMPath
else:
print 'WARNING: Please install llvm from Homebrew via `brew install llvm`.'
print 'ASan stacks will not have symbols as Xcode does not install llvm-symbolizer.'
return ''
# https://developer.mozilla.org/en-US/docs/Building_Firefox_with_Address_Sanitizer#Manual_Build
if isWin:
return None # The harness does not yet support Clang on Windows
| #!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import copy
import os
import platform
isLinux = (platform.system() == 'Linux')
isMac = (platform.system() == 'Darwin')
isWin = (platform.system() == 'Windows')
ENV_PATH_SEPARATOR = ';' if os.name == 'nt' else ':'
def envWithPath(path, runningEnv=os.environ):
"""Append the path to the appropriate library path on various platforms."""
if isLinux:
libPath = 'LD_LIBRARY_PATH'
elif isMac:
libPath = 'DYLD_LIBRARY_PATH'
elif isWin:
libPath = 'PATH'
env = copy.deepcopy(runningEnv)
if libPath in env:
if path not in env[libPath]:
env[libPath] += ENV_PATH_SEPARATOR + path
else:
env[libPath] = path
return env
def findLlvmBinPath():
"""Return the path to compiled LLVM binaries, which differs depending on compilation method."""
if isLinux:
# Assumes clang was installed through apt-get. Works with version 3.6.2.
# Create a symlink at /usr/bin/llvm-symbolizer for: /usr/bin/llvm-symbolizer-3.6
if os.path.isfile('/usr/bin/llvm-symbolizer'):
return ''
else:
print 'WARNING: Please install clang via `apt-get install clang` if using Ubuntu.'
print 'then create a symlink at /usr/bin/llvm-symbolizer for: /usr/bin/llvm-symbolizer-3.6.'
print 'Try: `ln -s /usr/bin/llvm-symbolizer-3.6 /usr/bin/llvm-symbolizer`'
return ''
if isMac:
# Assumes LLVM was installed through Homebrew. Works with at least version 3.6.2.
brewLLVMPath = '/usr/local/opt/llvm/bin'
if os.path.isdir(brewLLVMPath):
return brewLLVMPath
else:
print 'WARNING: Please install llvm from Homebrew via `brew install llvm`.'
print 'ASan stacks will not have symbols as Xcode does not install llvm-symbolizer.'
return ''
# https://developer.mozilla.org/en-US/docs/Building_Firefox_with_Address_Sanitizer#Manual_Build
if isWin:
return None # The harness does not yet support Clang on Windows
| mpl-2.0 | Python |
378cb69d413eb8ffaf811b607fc037be923a2aba | Write tests for SSLRedirectMiddleware | praekelt/molo-iogt,praekelt/molo-iogt,praekelt/molo-iogt | iogt/tests/test_middleware.py | iogt/tests/test_middleware.py | from django.test import (
TestCase,
Client,
RequestFactory,
override_settings,
)
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import Main
from iogt.middleware import SSLRedirectMiddleware
PERMANENT_REDIRECT_STATUS_CODE = 301
@override_settings(HTTPS_PATHS=['admin'])
class TestSSLRedirectMiddleware(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.main = Main.objects.all().first()
self.factory = RequestFactory()
def test_no_redirect_for_home_page(self):
request = self.factory.get('/')
middleware = SSLRedirectMiddleware()
response = middleware.process_request(request)
self.assertEqual(response, None)
def test_no_redirect_with_https(self):
headers = {'HTTP_X_FORWARDED_PROTO': 'https'}
request = self.factory.get('/', **headers)
middleware = SSLRedirectMiddleware()
response = middleware.process_request(request)
self.assertEqual(response, None)
def test_no_redirect_when_secure(self):
headers = {'HTTP_X_FORWARDED_PROTO': 'https'}
request = self.factory.get('/admin/', **headers)
middleware = SSLRedirectMiddleware()
response = middleware.process_request(request)
self.assertEqual(response, None)
def test_redirect_when_not_secure(self):
request = self.factory.get('/admin/')
middleware = SSLRedirectMiddleware()
response = middleware.process_request(request)
self.assertEqual(response.status_code,
PERMANENT_REDIRECT_STATUS_CODE)
| bsd-2-clause | Python |
|
e0ac456eae45a1b7e1482ff712be600b384f94b3 | Include new example to show group circle connectivity. | pravsripad/jumeg,jdammers/jumeg | examples/connectivity/plot_custom_grouped_connectivity_circle.py | examples/connectivity/plot_custom_grouped_connectivity_circle.py | #!/usr/bin/env python
"""
Example how to create a custom label groups and plot grouped connectivity
circle with these labels.
Author: Praveen Sripad <pravsripad@gmail.com>
Christian Kiefer <ch.kiefer@fz-juelich.de>
"""
import matplotlib.pyplot as plt
from jumeg import get_jumeg_path
from jumeg.connectivity import (plot_grouped_connectivity_circle,
generate_random_connectivity_matrix)
import yaml
labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
replacer_dict_fname = get_jumeg_path() + '/data/replacer_dictionaries.yaml'
with open(labels_fname, 'r') as f:
label_names = yaml.safe_load(f)['label_names']
with open(replacer_dict_fname, 'r') as f:
replacer_dict = yaml.safe_load(f)['replacer_dict_aparc']
# make a random matrix with 68 nodes
# use simple seed for reproducibility
con = generate_random_connectivity_matrix(size=(68, 68), symmetric=True)
# make groups based on lobes
occipital = ['lateraloccipital', 'lingual', 'cuneus', 'pericalcarine']
parietal = ['superiorparietal', 'inferiorparietal', 'precuneus',
'postcentral', 'supramarginal']
temporal = ['bankssts', 'temporalpole', 'superiortemporal', 'middletemporal',
'transversetemporal', 'inferiortemporal', 'fusiform',
'entorhinal', 'parahippocampal']
insula = ['insula']
cingulate = ['rostralanteriorcingulate', 'caudalanteriorcingulate',
'posteriorcingulate', 'isthmuscingulate']
frontal = ['superiorfrontal', 'rostralmiddlefrontal', 'caudalmiddlefrontal',
'parsopercularis', 'parsorbitalis', 'parstriangularis',
'lateralorbitofrontal', 'medialorbitofrontal', 'precentral',
'paracentral', 'frontalpole']
# we need a list of dictionaries, one dict for each group to denote grouping
label_groups = [{'occipital': occipital}, {'parietal': parietal},
{'temporal': temporal}, {'insula': insula},
{'cingulate': cingulate},
{'frontal': frontal}]
n_colors = len(label_groups)
cmap = plt.get_cmap('Pastel1')
cortex_colors = cmap.colors[:n_colors] + cmap.colors[:n_colors][::-1]
# plot simple connectivity circle with cortex based grouping and colors
plot_grouped_connectivity_circle(label_groups, con, label_names,
labels_mode='replace',
replacer_dict=replacer_dict,
cortex_colors=cortex_colors, vmin=0., vmax=1.,
out_fname='fig_grouped_con_circle_cortex.png',
colorbar_pos=(0.1, 0.1), n_lines=50, colorbar=True,
colormap='viridis')
| bsd-3-clause | Python |
|
f50efeb78d9b503a7d6e97db8b1cd68b429aa2c4 | allow to run tox as 'python -m tox', which is handy on Windoze | jcb91/tox | tox/__main__.py | tox/__main__.py | from tox._cmdline import main
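# Running "python -m tox" executes this module, which simply delegates to the CLI entry point.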
main()
| mit | Python |
|
f7e504652707b09c0a0b7e7b1691094ef6d35509 | add proper tomography example | aringh/odl,odlgroup/odl,kohr-h/odl,aringh/odl,kohr-h/odl,odlgroup/odl | examples/solvers/conjugate_gradient_tomography.py | examples/solvers/conjugate_gradient_tomography.py | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Total variation tomography using the `conjugate_gradient_normal` solver.
Solves the inverse problem
A(x) = g
Where ``A`` is a parallel beam forward projector, ``x`` the result and
``g`` is given noisy data.
"""
import numpy as np
import odl
# --- Set up the forward operator (ray transform) --- #
# Discrete reconstruction space: discretized functions on the rectangle
# [-20, 20]^2 with 300 samples per dimension.
reco_space = odl.uniform_discr(
min_corner=[-20, -20], max_corner=[20, 20], nsamples=[300, 300],
dtype='float32')
# Make a parallel beam geometry with flat detector
# Angles: uniformly spaced, n = 360, min = 0, max = 2 * pi
angle_partition = odl.uniform_partition(0, 2 * np.pi, 360)
# Detector: uniformly sampled, n = 558, min = -30, max = 30
detector_partition = odl.uniform_partition(-30, 30, 558)
geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)
# The implementation of the ray transform to use, options:
# 'scikit' Requires scikit-image (can be installed by
# running ``pip install scikit-image``).
# 'astra_cpu', 'astra_cuda' Require astra tomography to be installed.
# Astra is much faster than scikit. Webpage:
# https://github.com/astra-toolbox/astra-toolbox
impl = 'scikit'
# Ray transform aka forward projection.
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl)
# --- Generate artificial data --- #
# Create phantom
discr_phantom = odl.util.shepp_logan(reco_space, modified=True)
# Create sinogram of forward projected phantom with noise
data = ray_trafo(discr_phantom)
data += odl.util.white_noise(ray_trafo.range) * np.mean(data) * 0.1
# Optionally pass partial to the solver to display intermediate results
partial = (odl.solvers.PrintIterationPartial() &
odl.solvers.ShowPartial())
# Choose a starting point
x = ray_trafo.domain.zero()
# Run the algorithm
odl.solvers.conjugate_gradient_normal(
ray_trafo, x, data, niter=20, partial=partial)
# Display images
discr_phantom.show(title='original image')
data.show(title='sinogram')
x.show(title='reconstructed image', show=True)
| mpl-2.0 | Python |
|
236d7d885dadcb681357212a5c6b53c28eac0aa1 | Create d1-1.py | chazzam/adventofcode | 2018/d1-1.py | 2018/d1-1.py | with open("completed/input_c1-1.txt", "r") as f:
line = "0"
sum = 0
while line:
sum += int(line)
line = f.readline()
print("Final Frequency: {}", sum)
| apache-2.0 | Python |
|
8de92e74317a74b53991bdcbb3594f0e94e4cf17 | Add Monty Hall simulation | martindisch/pytry | montyhall.py | montyhall.py | import random
import sys
def game():
# Place car behind one door
car = random.randint(1, 3)
# Player selects a door
first_choice = random.randint(1, 3)
reveal_options = [1, 2, 3]
# Don't reveal the car
reveal_options.remove(car)
# Don't reveal the player's choice
if first_choice in reveal_options: reveal_options.remove(first_choice)
# Reveal a door with a goat
reveal = random.choice(reveal_options)
second_options = [1, 2, 3]
# Don't select your first choice
second_options.remove(first_choice)
# Don't select the revealed door
second_options.remove(reveal)
# Choose the remaining door
second_choice = second_options[0]
# Collect and return result
first_succ = 1 if first_choice == car else 0
second_succ = 1 if second_choice == car else 0
return (first_succ, second_succ)
def simulate(rounds):
first, second = 0, 0
for i in range(rounds):
res = game()
first += res[0]
second += res[1]
print("First choice wins {:.1f}% of cases".format(first / rounds * 100))
print("Second choice wins {:.1f}% of cases".format(second / rounds * 100))
if __name__ == '__main__':
simulate(int(sys.argv[1]))
| mit | Python |
|
b177a0f2e9b42347f56c4499aaa080af97e0e530 | add validity check | Yokan-Study/study,Yokan-Study/study,Yokan-Study/study | 2018/04.10/python/jya_gAPIclass.2.py | 2018/04.10/python/jya_gAPIclass.2.py | import requests, base64
import config
id = config.GAPI_CONFIG['client_id']
secret = config.GAPI_CONFIG['client_secret']
type = config.GAPI_CONFIG['grant_type']
class GapiClass:
def __init__(self, host='https://gapi.gabia.com'):
self.__host = host
self.__headers = self.__encoded_token()
self.__max_retry = 5
self.__p = 1
def __Requests_get(self, url):
r = requests.get('{0}{1}'.format(self.__host, url), headers = self.__headers)
# print(r.status_code)
if (r.status_code == 401):
print("유효하지 않은 토큰입니다")
while self.__p < self.__max_retry:
self.__p += 1
self.__headers = self.__encoded_token()
self.__Requests_get(url)
elif (r.status_code == 200):
j = r.json()
return j
else:
print("다음 기회에")
def __Requests_post(self, url, data):
r = requests.post('{0}{1}'.format(self.__host, url), data = data)
j = r.json()
return j
def __getToken(self):
j = self.__Requests_post('/oauth/token', {'client_id': id, 'client_secret': secret, 'grant_type': type})
token_1 = j['access_token']
token_2 = 'www_front:{0}'.format(token_1)
return token_2
def __makeHeadersAuth(self, token):
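        # HTTP Basic auth: base64-encode the "user:token" pair for the Authorization header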
encoded_text = token.encode()
k = base64.b64encode(encoded_text)
l = k.decode()
return {'Authorization': 'Basic {0}'.format(l)}
def __encoded_token(self):
return self.__makeHeadersAuth(self.__getToken())
def getMember(self, id):
j = self.__Requests_get('/members?user_id={0}'.format(id))
hanname = j['client_info']['hanadmin']
return hanname
# api1 = GapiClass()
# a = api1.getMember('planning_d')
# if __name__ == "__main__":
# print(a) | mit | Python |
|
8e8e11990e430302eca24d32ba0b88dcc66233d6 | Add connect2 wifi via pyobjc | clburlison/scripts,clburlison/scripts,clburlison/scripts | clburlison_scripts/connect2_wifi_pyobjc/connect2_wifi_pyobjc.py | clburlison_scripts/connect2_wifi_pyobjc/connect2_wifi_pyobjc.py | #!/usr/bin/python
"""
I didn't create this but I'm storing it so I can reuse it.
http://stackoverflow.com/a/34967364/4811765
"""
import objc
SSID = "MyWifiNetwork"
PASSWORD = "MyWifiPassword"
objc.loadBundle('CoreWLAN',
bundle_path='/System/Library/Frameworks/CoreWLAN.framework',
module_globals=globals())
iface = CWInterface.interface()
networks, err = iface.scanForNetworksWithName_err_(SSID, None)
network = networks.anyObject()
success, err = iface.associateToNetwork_password_err_(network, PASSWORD, None)
| mit | Python |
|
a6d6b833e33dc465b0fa828018e2cbba748f8282 | Add utility class for evaluation | studiawan/pygraphc | pygraphc/evaluation/EvaluationUtility.py | pygraphc/evaluation/EvaluationUtility.py |
class EvaluationUtility(object):
@staticmethod
def convert_to_text(graph, clusters):
# convert clustering result from graph to text
new_clusters = {}
for cluster_id, nodes in clusters.iteritems():
for node in nodes:
members = graph.node[node]['member']
for member in members:
new_clusters.setdefault(cluster_id, []).append(member)
return new_clusters
| mit | Python |
|
a2a2d6ab7edaa6fab9d2fb95586fde8f1f74b1cc | add new package (#24672) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-aniso8601/package.py | var/spack/repos/builtin/packages/py-aniso8601/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAniso8601(PythonPackage):
"""A library for parsing ISO 8601 strings."""
homepage = "https://bitbucket.org/nielsenb/aniso8601"
pypi = "aniso8601/aniso8601-9.0.1.tar.gz"
version('9.0.1', sha256='72e3117667eedf66951bb2d93f4296a56b94b078a8a95905a052611fb3f1b973')
depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python |
|
0ee3990781488c54b3d45c722b843a06a28da235 | Add test that explores exploitability of tilted agents | JakubPetriska/poker-cfr,JakubPetriska/poker-cfr | verification/action_tilted_agents_exploitability_test.py | verification/action_tilted_agents_exploitability_test.py | import unittest
import numpy as np
import os
import matplotlib.pyplot as plt
import acpc_python_client as acpc
from tools.constants import Action
from weak_agents.action_tilted_agent import create_agent_strategy_from_trained_strategy, TiltType
from tools.io_util import read_strategy_from_file
from evaluation.exploitability import Exploitability
FIGURES_FOLDER = 'verification/action_tilted_agents'
KUHN_EQUILIBRIUM_STRATEGY_PATH = 'strategies/kuhn.limit.2p-equilibrium.strategy'
LEDUC_EQUILIBRIUM_STRATEGY_PATH = 'strategies/leduc.limit.2p-equilibrium.strategy'
TILT_TYPES = [
('fold-add', Action.FOLD, TiltType.ADD),
('call-add', Action.CALL, TiltType.ADD),
('raise-add', Action.RAISE, TiltType.ADD),
('fold-multiply', Action.FOLD, TiltType.MULTIPLY),
('call-multiply', Action.CALL, TiltType.MULTIPLY),
('raise-multiply', Action.RAISE, TiltType.MULTIPLY),
]
class ActionTiltedAgentsExploitabilityTest(unittest.TestCase):
def test_plot_kuhn_agent_exploitabilities(self):
self.create_agents_and_plot_exploitabilities({
'title': 'Kuhn poker action tilted agents exploitability',
'figure_filename': 'kuhn_action_tilted_agents',
'base_strategy_path': KUHN_EQUILIBRIUM_STRATEGY_PATH,
'game_file_path': 'games/kuhn.limit.2p.game',
'tilt_probabilities': np.arange(-1, 1, 0.01),
})
def test_plot_leduc_agent_exploitabilities(self):
self.create_agents_and_plot_exploitabilities({
'title': 'Leduc Hold\'em action tilted agents exploitability',
'figure_filename': 'leduc_action_tilted_agents',
'base_strategy_path': LEDUC_EQUILIBRIUM_STRATEGY_PATH,
'game_file_path': 'games/leduc.limit.2p.game',
'tilt_probabilities': np.arange(-1, 1, 0.1),
})
def create_agents_and_plot_exploitabilities(self, test_spec):
base_strategy = read_strategy_from_file(
test_spec['game_file_path'],
test_spec['base_strategy_path'])
game = acpc.read_game_file(test_spec['game_file_path'])
exploitability = Exploitability(game)
equilibrium_exploitability = exploitability.evaluate(base_strategy)
tilt_probabilities = test_spec['tilt_probabilities']
exploitability_values = np.zeros([len(TILT_TYPES), len(tilt_probabilities)])
for i, tilt_type in enumerate(TILT_TYPES):
for j, tilt_probability in enumerate(tilt_probabilities):
tilted_agent = create_agent_strategy_from_trained_strategy(
test_spec['game_file_path'],
base_strategy,
tilt_type[1],
tilt_type[2],
tilt_probability)
exploitability_values[i, j] = exploitability.evaluate(tilted_agent)
plt.figure(dpi=160)
for j in range(i + 1):
plt.plot(
tilt_probabilities,
exploitability_values[j],
label=TILT_TYPES[j][0],
linewidth=0.8)
plt.plot(
tilt_probabilities,
[equilibrium_exploitability] * len(tilt_probabilities),
'r--',
label='Equilibrium',
linewidth=1.5)
plt.title(test_spec['title'])
plt.xlabel('Tilt probability')
plt.ylabel('Agent exploitability [mbb/g]')
plt.grid()
plt.legend()
figure_output_path = '%s/%s.png' % (FIGURES_FOLDER, test_spec['figure_filename'])
figures_directory = os.path.dirname(figure_output_path)
if not os.path.exists(figures_directory):
os.makedirs(figures_directory)
plt.savefig(figure_output_path)
| mit | Python |
|
0c9c3a801077c241cc32125ab520746935b04f89 | Create LAElectionResults.py | ahplummer/ElectionUtilities | LAElectionResults.py | LAElectionResults.py | # The MIT License (MIT)
# Copyright (C) 2014 Allen Plummer, https://www.linkedin.com/in/aplummer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import feedparser
import argparse
from BeautifulSoup import BeautifulSoup
class CandidateIssue:
def __init__(self,name):
self.Name = name
self.TotalNumber = ''
self.Percentage = ''
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
template = "{0:40}{1:10}{2:15}"
printstring = template.format(self.Name, self.Percentage, self.TotalNumber)
#return self.Name + " " + self.Percentage + ", Number of Votes: " + self.TotalNumber
return printstring
class Election:
def __init__(self,title):
self.Title = title
self.Progress = ''
self.CandidateIssues = []
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
printstring = '======' + self.Title
printstring += '\nProgress: ' + self.Progress
for c in self.CandidateIssues:
printstring += '\n' + str(c)
return printstring
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Gets Election Results')
parser.add_argument('-url','--url',help='Base URL',required=True)
parser.add_argument('-election', '--election', help='Election to show', required=False)
args = vars(parser.parse_args())
url = args['url']
efilter = args['election']
d = feedparser.parse(url)
elections = []
for item in d.entries:
title = item['title_detail']['value']
election = Election(title)
soup = BeautifulSoup(item['summary'])
tables = soup.findChildren('table')
# This will get the first (and only) table. Your page may have more.
my_table = tables[0]
# You can find children with multiple tags by passing a list of strings
rows = my_table.findChildren(['th', 'tr'])
i = 0
for row in rows:
i += 1
cells = row.findChildren('td')
if i == 1:
election.Progress = cells[0].text.strip()
else:
candidate = CandidateIssue(cells[0].text.strip())
candidate.Percentage = cells[1].text.strip()
candidate.TotalNumber = cells[2].text.strip()
election.CandidateIssues.append(candidate)
elections.append(election)
for e in elections:
if efilter != None:
if e.Title.upper().find(efilter.upper()) >= 0:
print(e)
else:
print(e)
| mit | Python |
|
6a6abadc2395810076b89fb38c759f85426a0304 | Add framework for own SVM from scratch | a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms | supportVectorMachine/howItWorksSupportVectorMachine.py | supportVectorMachine/howItWorksSupportVectorMachine.py | # -*- coding: utf-8 -*-
"""Support Vector Machine (SVM) classification for machine learning.
SVM is a binary classifier. The objective of the SVM is to find the best
separating hyperplane in vector space which is also referred to as the
decision boundary. And it decides what separating hyperplane is the 'best'
because the distance from it and the associating data it is separating is the
greatest at the plane in question.
This is the file where I create the algorithm from scratch.
dataset is breast cancer data from: http://archive.ics.uci.edu/ml/datasets.html
Example:
$ python howItWorksSupportVectorMachine.py
Todo:
* Sketch out the framework
"""
# minimize magnitude(w) and maximize b
# with constraint y_i*(x_i*w+b)>=1
# or Class*(KnownFeatures*w+b)>=1
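# Below is a minimal illustrative sketch of one training step, assuming a
# hinge-loss subgradient-descent formulation; the names (w, b, lr, lam) and
# the update rule are assumptions for illustration, not the final implementation.
import numpy as np
def sgd_step(w, b, x_i, y_i, lr=0.01, lam=0.01):
    # update w and b only when the margin constraint y_i*(x_i.w + b) >= 1 is violated
    if y_i * (np.dot(x_i, w) + b) < 1:
        w = w - lr * (lam * w - y_i * x_i)
        b = b + lr * y_i
    else:
        # otherwise only apply the regularization shrinkage to w
        w = w - lr * lam * w
    return w, b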
| mit | Python |
|
2e1c257c0215f398e4ac5cc7d2d20ffa62492817 | Create NewChatAlert.pyw | Grezzo/TeamSupportChatAlert | NewChatAlert.pyw | NewChatAlert.pyw | # TODO: Check for cookie expiration
# TODO: Check for failed request
# TODO: Check for rejected cookie
# TODO: Get Cookie from other browsers (IE and Firefox)
# - See https://bitbucket.org/richardpenman/browser_cookie (and perhaps contribute)?
from os import getenv
from sqlite3 import connect
from win32crypt import CryptUnprotectData
from requests import post
from ctypes import windll
from time import sleep, ctime
# Function that displays a message box
def MsgBox(title, text, style):
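    # style selects the icon: 16 = error, 48 = warning, 64 = info (Win32 MB_ICON* flags)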
windll.user32.MessageBoxW(0, text, title, style)
# Function that returns session cookie from chrome
def GetSecureCookie(name):
# Connect to Chrome's cookies db
cookies_database_path = getenv(
"APPDATA") + r"\..\Local\Google\Chrome\User Data\Default\Cookies"
conn = connect(cookies_database_path)
cursor = conn.cursor()
# Get the encrypted cookie
cursor.execute(
"SELECT encrypted_value FROM cookies WHERE name IS \"" + name + "\"")
results = cursor.fetchone()
# Close db
conn.close()
if results == None:
decrypted = None
else:
decrypted = CryptUnprotectData(results[0], None, None, None, 0)[
1].decode("utf-8")
return decrypted
# Function that returns chat status using a provided session cookie
def GetChatRequestCount(cookie):
# Ask TeamSupport for the chat status using cookie
response = post(
"https://app.teamsupport.com/chatstatus",
cookies={"TeamSupport_Session": cookie},
data='{"lastChatMessageID": -1, "lastChatRequestID": -1}'
)
return response.json()["ChatRequestCount"]
def main():
# Loop forever - checking for new chat requests
while True:
cookie = GetSecureCookie("TeamSupport_Session")
if cookie == None:
MsgBox("Session cookie not found",
"""TeamSupport session cookie could not be found in Chrome store
New chat notifications will not work until this is resolved
Log in to TeamSupport using Chrome to fix this""",
16)
# Pause for 30 seconds before trying again
sleep(30)
else:
chat_request_count = GetChatRequestCount(cookie)
# Alert if there are new chat requests or log if none
if chat_request_count == 0:
print(ctime() + " - No new chat requests")
elif chat_request_count == 1:
MsgBox("New Chat Request", "There is 1 new chat request", 64)
else:
MsgBox("New Chat Requests", "There are " +
str(chat_request_count) + " chat requests", 48)
# Pause for 10 seconds before checking again
sleep(10)
if __name__ == "__main__":
main()
| mit | Python |
|
1d4938232aa103ea2a919796e9fa35e2699d41d9 | Create PythonAnswer2.py | GBarrett18/dt211-cloud-repo | PythonAnswer2.py | PythonAnswer2.py | def fibonacci(x):
a = 0 #first number
b = 1 #second number
for x in range(x - 1):
a, b = b, a + b #a becomes b and b becomes a and b added together
    return a #returns the Fibonacci number at this position in the sequence
print "Fibonacci Answer"
for x in range(1, 35): #number of times I need the sequence to run to reach 4million
print fibonacci(x)
| mit | Python |
|
5c60be411e61d5edfbf658509b437973d596a3ba | Create server.py | adrien-bellaiche/Interceptor,adrien-bellaiche/Interceptor,adrien-bellaiche/Interceptor | Networking/server.py | Networking/server.py | # -*- coding: utf-8 -*-
import socket, math
# start the server
server = "127.0.0.1"
port = 55042
mysock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
mysock.bind((server, port))
JOG_IP = [None,None,None,None,None,None,None,None,None,None]
JOG_coordinates= [None,None,None,None,None,None,None,None,None,None]
ennemy_coordinates = [0.0,0.0]
ennemy_velocity = [0.0,0.0]
# may only be called once all of the tables above have been filled in
def update_coordinates() :
global JOG_IP, JOG_coordinates, ennemy_coordinates
for e in JOG_IP :
        # find the two nearest neighbours
JOG_ID = JOG_IP.index(e)
current_coordinates = JOG_coordinates[JOG_ID]
distances = [float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf"),float("inf")]
for c in JOG_coordinates :
if c != current_coordinates :
distances[JOG_coordinates.index(c)] = math.sqrt( (c[0]-current_coordinates[0])**2 + (c[1]-current_coordinates[1])**2 )
neighbour1_ID = distances.index(min(distances))
distances[distances.index(min(distances))] = max(distances)
neighbour2_ID = distances.index(min(distances))
        # format and send the message
        msg_coordinates = 'C'+' '+'A'+str(JOG_coordinates[neighbour1_ID][0])+' '+'A'+str(JOG_coordinates[neighbour1_ID][1])+' '+'B'+str(JOG_coordinates[neighbour2_ID][0])+' '+'B'+str(JOG_coordinates[neighbour2_ID][1])+' '+'T'+str(ennemy_coordinates[0])+'T'+str(ennemy_coordinates[1])+' '+'T'+str(ennemy_velocity[0])+'V'+str(ennemy_velocity[1])
mysock.sendto(msg_coordinates, e)
while True :
msg, client = mysock.recvfrom(255)
if msg :
        msg_parts = msg.split()
        JOG_ID = int(msg_parts[0])
        JOG_IP[JOG_ID] = client
        if msg_parts[1] == 'C' : # case: the received message is a position update
            JOG_coordinates[JOG_ID] = [float(msg_parts[2]), float(msg_parts[3])]
        elif msg_parts[1] == 'E' : # case: the received message is an error
# TODO
pass
        if not ((None in JOG_IP) or (None in JOG_coordinates)) :
update_coordinates()
| apache-2.0 | Python |
|
92ec849fc18d7cb610839abe2213ce30ceced46b | Add ci settings file for postgresql database | inventree/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree | InvenTree/InvenTree/ci_postgresql.py | InvenTree/InvenTree/ci_postgresql.py | """
Configuration file for running tests against a PostgreSQL database.
"""
from InvenTree.settings import *
# Override the 'test' database
if 'test' in sys.argv:
    eprint('InvenTree: Running tests - Using PostgreSQL test database')
DATABASES['default'] = {
# Ensure postgresql backend is being used
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'inventree_test_db',
'USER': 'postgres',
'PASSWORD': '',
}
| mit | Python |
|
f7db5d9cac80432a7016043a1b2781fbaa7f040e | Create new package. (#6891) | matthiasdiener/spack,EmreAtes/spack,matthiasdiener/spack,iulian787/spack,iulian787/spack,EmreAtes/spack,LLNL/spack,LLNL/spack,matthiasdiener/spack,tmerrick1/spack,tmerrick1/spack,mfherbst/spack,LLNL/spack,mfherbst/spack,krafczyk/spack,matthiasdiener/spack,iulian787/spack,krafczyk/spack,krafczyk/spack,iulian787/spack,tmerrick1/spack,krafczyk/spack,mfherbst/spack,EmreAtes/spack,EmreAtes/spack,tmerrick1/spack,tmerrick1/spack,matthiasdiener/spack,krafczyk/spack,mfherbst/spack,LLNL/spack,LLNL/spack,EmreAtes/spack,mfherbst/spack,iulian787/spack | var/spack/repos/builtin/packages/r-rappdirs/package.py | var/spack/repos/builtin/packages/r-rappdirs/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRappdirs(RPackage):
"""An easy way to determine which directories on the users computer
you should use to save data, caches and logs. A port of Python's
'Appdirs' to R."""
homepage = "https://cran.r-project.org/package=rappdirs"
url = "https://cran.rstudio.com/src/contrib/rappdirs_0.3.1.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/rappdirs"
version('0.3.1', 'fbbdceda2aa49374e61c7d387bf9ea21')
depends_on('r@2.14:', type=('build', 'run'))
| lgpl-2.1 | Python |
|
6d2efcea281775c31cd1df29eac63054e3fe51df | Create solution.py | lilsweetcaligula/Algorithms,lilsweetcaligula/Algorithms,lilsweetcaligula/Algorithms | data_structures/linked_list/problems/delete_n_after_m/py/solution.py | data_structures/linked_list/problems/delete_n_after_m/py/solution.py | import LinkedList
# Problem description: traverse the list, repeatedly keeping m nodes and then deleting the next n nodes.
# Solution time complexity: O(L), where L is the number of nodes in the list.
# Comments:
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def DeleteNAfterMNodes(head: LinkedList.Node, n: int, m: int) -> LinkedList.Node:
if head == None:
return None
slow = head
while slow != None:
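        # keep m nodes: advance slow to the last node of the kept segment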
for _ in range(m - 1):
if slow == None:
break
else:
slow = slow.nxt
if slow == None:
break
else:
fast = slow.nxt
for _ in range(n):
if fast == None:
break
else:
fast = fast.nxt
slow.nxt = fast
slow = slow.nxt
return head
| mit | Python |
|
c84ce4b2494771c48890c122420e4665828ac4f8 | Solve Code Fights different rightmost bit problem | HKuz/Test_Code | CodeFights/differentRightmostBit.py | CodeFights/differentRightmostBit.py | #!/usr/local/bin/python
# Code Different Right-most Bit (Core) Problem
def differentRightmostBit(n, m):
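    # n ^ m sets exactly the bits where n and m differ; x & -x isolates the lowest set bit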
return (n ^ m) & -(n ^ m)
def main():
tests = [
[11, 13, 2],
[7, 23, 16],
[1, 0, 1],
[64, 65, 1],
[1073741823, 1071513599, 131072],
[42, 22, 4]
]
for t in tests:
res = differentRightmostBit(t[0], t[1])
if t[2] == res:
print("PASSED: differentRightmostBit({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: differentRightmostBit({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
| mit | Python |
|
8d0d564eae53a10b98b488b8c13eb952134cfc5e | Create 0408_country_body_part.py | boisvert42/npr-puzzle-python | 2018/0408_country_body_part.py | 2018/0408_country_body_part.py | #!/usr/bin/python
'''
NPR 2018-04-08
http://www.npr.org/puzzle
Name part of the human body, insert a speech hesitation, and you'll name a country — what is it?
'''
from nltk.corpus import gazetteers
import nprcommontools as nct
#%%
BODY_PARTS = nct.get_category_members('body_part')
# COUNTRIES
COUNTRIES = frozenset([x.lower() for x in gazetteers.words('countries.txt')])
#%%
for c in COUNTRIES:
for b in BODY_PARTS:
if c.startswith(b[0]) and c.endswith(b[-1]):
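            # try every split of the body part: prefix + inserted hesitation + suffix must frame the country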
for i in range(1,len(b)-1):
if c.startswith(b[:i]) and c.endswith(b[i:]):
print b,c
| cc0-1.0 | Python |
|
61ec74a685deec0b1ddc0a9274e5df0a597c6b6b | Create TweetStreamer.py | nremynse/Automation-Scripts | TweetStreamer.py | TweetStreamer.py | import tweepy
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import json
from elasticsearch import Elasticsearch
import datetime
from watson_developer_cloud import NaturalLanguageUnderstandingV1
import watson_developer_cloud.natural_language_understanding.features.v1 as Features
"""
This twitter code uses a user's numerical ID and will track their tweets live as they come in. Runs through Watson's NLU
API and then uploads to ES.
"""
consumer_key="YBFMgErZkiN8MWqBGcHXm2dCp"
consumer_secret="fmuMKwya4XyyjegvSyYAwBalZYI8heom3Ds56hkxVZmBuRNQ6t"
access_token="918660934528155648-InbzRO92y5NFmhGEmiGI7NGc0wxZhAO"
access_token_secret="mn3PehlsuJwJnQ4dlMC3cASwMyqlC0GHPT2uok8KbJltt"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Setup elasticsearch
es = Elasticsearch("10.0.2.81:9200")
# Setup watson NLU API
natural_language_understanding = NaturalLanguageUnderstandingV1(
version='2017-05-19',
username='3efc3d64-d9ee-43b3-a289-e530bad6347b',
password='uDs5p3a4CPyd')
def natural_language(tweet):
response = natural_language_understanding.analyze(
text=tweet,
features=[Features.Sentiment(), Features.Emotion()])
return response
def fix_tstamp(tstamp):
# Mon Oct 16 12:57:50 +0000 2017
date = tstamp.replace(" +0000", "")
date = datetime.datetime.strptime(date, '%a %b %d %H:%M:%S %Y')
return str(date)
class listener(StreamListener):
def on_data(self, data):
print(data)
data = json.loads(data)
if not data['retweeted'] and '@realDonaldTrump' not in data['text']:
data["created_at"] = fix_tstamp(data["created_at"])
indexdate = data["created_at"][:7]
try:
data["watson_natural_lang"] = (natural_language(data["text"]))
except:
print data["text"]
pass
print data
#es.index(index='presidentialtweets-' + indexdate, doc_type='twitter', id=data["id"], body=data)
return(True)
def on_error(self, status):
        print(status)
def main():
twitterStream = Stream(auth, listener())
twitterStream.filter(follow=['25073877'])
if __name__ == '__main__':
main()
| mit | Python |
|
3345dc2f1ac15f06d3e95b5ead894ee9d3a27d9e | Add file writer utility script | rbuchmann/pivicl | piwrite.py | piwrite.py | #!/bin/env python
import argparse
import sys
import os
parser = argparse.ArgumentParser(description="Write multiple svgs from stdin to files")
parser.add_argument('-o', '--outfile', metavar='OUTFILE', default='output.svg')
args = parser.parse_args()
base, extension = os.path.splitext(args.outfile)
def write_files(collection):
for i,s in enumerate(collection):
f = open(base + "%06d" % i + extension, 'w')
f.write(s)
f.close()
write_files(sys.stdin.readlines())
| epl-1.0 | Python |
|
7147dfc237acb64a8e655e63681a387282043994 | Add lc0031_next_permutation.py | bowen0701/algorithms_data_structures | lc0031_next_permutation.py | lc0031_next_permutation.py | """Leetcode 31. Next Permutation
Medium
URL: https://leetcode.com/problems/next-permutation/
Implement next permutation, which rearranges numbers into the lexicographically
next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest possible
order (ie, sorted in ascending order).
The replacement must be in-place and use only constant extra memory.
Here are some examples. Inputs are in the left-hand column and its corresponding
outputs are in the right-hand column.
1,2,3 -> 1,3,2
3,2,1 -> 1,2,3
1,1,5 -> 1,5,1
"""
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
46d5b197e022815c2074fbc94ca324d31d470dd0 | Implement a fasttext example (#3446) | dolaameng/keras,keras-team/keras,DeepGnosis/keras,nebw/keras,keras-team/keras,kemaswill/keras,kuza55/keras | examples/imdb_fasttext.py | examples/imdb_fasttext.py | '''This example demonstrates the use of fasttext for text classification
Based on Joulin et al's paper:
Bags of Tricks for Efficient Text Classification
https://arxiv.org/abs/1607.01759
Can achieve accuracy around 88% after 5 epochs in 70s.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Embedding
from keras.layers import AveragePooling1D
from keras.datasets import imdb
from keras import backend as K
# set parameters:
max_features = 20000
maxlen = 400
batch_size = 32
embedding_dims = 20
nb_epoch = 5
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
# we add a AveragePooling1D, which will average the embeddings
# of all words in the document
model.add(AveragePooling1D(pool_length=model.output_shape[1]))
# We flatten the output of the AveragePooling1D layer
model.add(Flatten())
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, y_test))
| apache-2.0 | Python |
|
dde0efeec1aca8ed3ec2e444bbb4c179be89fec5 | Create MooreNeightbourhood.py | Oscarbralo/TopBlogCoder,Oscarbralo/TopBlogCoder,Oscarbralo/TopBlogCoder | Checkio/MooreNeightbourhood.py | Checkio/MooreNeightbourhood.py | def count_neighbours(grid, row, col):
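    # count live cells among the up-to-8 Moore neighbours of (row, col), with bounds checks at the edges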
neig = 0
if (col - 1 >= 0):
if (grid[row][col - 1] == 1):
neig += 1
if (col - 1 >= 0 and row - 1 >= 0):
if (grid[row - 1][col -1] == 1):
neig += 1
if (row - 1 >= 0):
if (grid[row - 1][col] == 1):
neig += 1
if (col + 1 < len(grid[0]) and row - 1 >= 0):
if (grid[row - 1][col + 1] == 1):
neig += 1
if (col + 1 < len(grid[0])):
if (grid[row][col + 1] == 1):
neig += 1
if (col + 1 < len(grid[0]) and row + 1 < len(grid)):
if (grid[row + 1][col + 1] == 1):
neig += 1
if (row + 1 < len(grid)):
if (grid[row + 1][col] == 1):
neig += 1
if (col - 1 >= 0 and row + 1 < len(grid)):
if (grid[row + 1][col - 1] == 1):
neig += 1
return neig
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert count_neighbours(((1, 0, 0, 1, 0),
(0, 1, 0, 0, 0),
(0, 0, 1, 0, 1),
(1, 0, 0, 0, 0),
(0, 0, 1, 0, 0),), 1, 2) == 3, "1st example"
assert count_neighbours(((1, 0, 0, 1, 0),
(0, 1, 0, 0, 0),
(0, 0, 1, 0, 1),
(1, 0, 0, 0, 0),
(0, 0, 1, 0, 0),), 0, 0) == 1, "2nd example"
assert count_neighbours(((1, 1, 1),
(1, 1, 1),
(1, 1, 1),), 0, 2) == 3, "Dense corner"
assert count_neighbours(((0, 0, 0),
(0, 1, 0),
(0, 0, 0),), 1, 1) == 0, "Single"
| mit | Python |
|
a3df0567c295f0b2879c9a4f095a31108359d531 | Add missing migration for invoice status | opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor | nodeconductor/billing/migrations/0003_invoice_status.py | nodeconductor/billing/migrations/0003_invoice_status.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('billing', '0002_pricelist'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='status',
field=models.CharField(max_length=80, blank=True),
preserve_default=True,
),
]
| mit | Python |
|
269a34e87797a3271013e23d504a6f6a159ae48e | Index testgroup_test.test_id | bowlofstew/changes,dropbox/changes,wfxiang08/changes,dropbox/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes,dropbox/changes,bowlofstew/changes,bowlofstew/changes,wfxiang08/changes,wfxiang08/changes | migrations/versions/3a3366fb7822_index_testgroup_test.py | migrations/versions/3a3366fb7822_index_testgroup_test.py | """Index testgroup_test.test_id
Revision ID: 3a3366fb7822
Revises: 139e272152de
Create Date: 2014-01-02 22:20:55.132222
"""
# revision identifiers, used by Alembic.
revision = '3a3366fb7822'
down_revision = '139e272152de'
from alembic import op
def upgrade():
op.create_index('idx_testgroup_test_test_id', 'testgroup_test', ['test_id'])
def downgrade():
op.drop_index('idx_testgroup_test_test_id', 'testgroup_test')
| apache-2.0 | Python |
|
7b40a4902d1dc43c73a7858fc9286a641b3a9666 | Add validation function removed from main script. | COMBINE-lab/piquant,lweasel/piquant,lweasel/piquant | assess_isoform_quantification/options.py | assess_isoform_quantification/options.py | from schema import Schema
def validate_file_option(file_option, msg):
msg = "{msg} '{file}'.".format(msg=msg, file=file_option)
return Schema(open, error=msg).validate(file_option)
| mit | Python |
|
d04118acc5421d4b48e31c78874a740eb469c3d7 | fix boan1244 'Boëng' | clld/glottolog3,clld/glottolog3 | migrations/versions/506dcac7d75_fix_boan1244_mojibake.py | migrations/versions/506dcac7d75_fix_boan1244_mojibake.py | # coding=utf-8
"""fix boan1244 mojibake
Revision ID: 506dcac7d75
Revises: 4513ba6253e1
Create Date: 2015-04-15 19:20:59.059000
"""
# revision identifiers, used by Alembic.
revision = '506dcac7d75'
down_revision = '4513ba6253e1'
import datetime
from alembic import op
import sqlalchemy as sa
def upgrade():
id, before, after = 'boan1244', u'Bo\xc3\xabng', u'Bo\xebng'
update_name = sa.text('UPDATE language SET updated = now(), '
'name = :after WHERE id = :id AND name = :before')
update_ident = sa.text('UPDATE identifier SET updated = now(), '
'name = :after WHERE type = :type AND name = :before ')
op.execute(update_name.bindparams(id=id, before=before, after=after))
op.execute(update_ident.bindparams(type='name', before=before, after=after))
def downgrade():
pass
| mit | Python |
|
85e4a327ba641fbe9c275b4760c60683ca215d61 | Add unit tests. | gthank/pto,gthank/pto | test_pto.py | test_pto.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for PTO."""
import unittest
import pto
import time
_TIMEOUT = 5
_FUZZ_FACTOR = 1
class SlowClass(object):
@pto.timeout(_TIMEOUT)
def slow_instance_method(self):
cut_off = time.time() + _TIMEOUT
while time.time() < cut_off + _FUZZ_FACTOR:
pass
return True
@classmethod
@pto.timeout(_TIMEOUT)
def slow_class_method(cls):
cut_off = time.time() + _TIMEOUT
while time.time() < cut_off + _FUZZ_FACTOR:
pass
return True
@staticmethod
@pto.timeout(_TIMEOUT)
def slow_static_method():
cut_off = time.time() + _TIMEOUT
while time.time() < cut_off + _FUZZ_FACTOR:
pass
return True
class PtoTestCase(unittest.TestCase):
def setUp(self):
self.slowInstance = SlowClass()
def tearDown(self):
pass
def test_function(self):
@pto.timeout(_TIMEOUT)
def slow_func():
cut_off = time.time() + _TIMEOUT
while time.time() < cut_off + _FUZZ_FACTOR:
pass
return True
self.assertRaises(pto.TimedOutException, slow_func)
def test_instance_method(self):
self.assertRaises(pto.TimedOutException, self.slowInstance.slow_instance_method)
def test_class_method(self):
self.assertRaises(pto.TimedOutException, self.slowInstance.slow_class_method)
def test_static_method(self):
self.assertRaises(pto.TimedOutException, SlowClass.slow_static_method)
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
2067bdb9d0f9947a674cb94d0c988049f3038ea4 | create test stubs | BMJHayward/navtome,BMJHayward/navtome,BMJHayward/navtome,BMJHayward/navtome | test_viz.py | test_viz.py | def test_create_distance_matrix():
pass
def test_get_translation_table():
pass
def test_naive_backtranslate():
pass
def test_get_peptide_index():
pass
def test_demo_dna_features_viewer():
pass
def test_ngrams():
pass
def test_make_trigrams():
pass
def test_nucleotide_distribution():
pass
def test_get_peptide_toplot():
pass
def test_peptide_distribution():
pass
def test_plot_ABI():
pass
def test_get_genbank_sequence():
pass
def test_get_fasta_sequence():
pass
def test_calc_sequence_similarity():
pass
def test_make_parser():
pass
def test_main():
pass
| mit | Python |
|
e304aae71617cdba0ffcb720a24406375fb866a1 | Copy of Ryan's PCMToWave component. | sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia | Sketches/MH/audio/ToWAV.py | Sketches/MH/audio/ToWAV.py | from Axon.Component import component
import string
import struct
from Axon.Ipc import producerFinished, shutdown
class PCMToWave(component):
def __init__(self, bytespersample, samplingfrequency):
super(PCMToWave, self).__init__()
self.bytespersample = bytespersample
self.samplingfrequency = samplingfrequency
if self.bytespersample not in [2,4]:
print "Currently bytespersample must be 2 or 4"
raise ValueError
bytestofunction = { 2: self.sample2Byte, 4: self.sample4Byte }
self.pack = bytestofunction[self.bytespersample]
    def sample2Byte(self, value):
        # Scale to the signed 16-bit range; cap at 32767 so value == 1.0
        # does not overflow struct.pack.
        return struct.pack("<h", min(32767, int(value * 32768.0)))
    def sample4Byte(self, value):
        # Scale to the signed 32-bit range; cap likewise for value == 1.0.
        return struct.pack("<l", min(2147483647, int(value * 2147483648.0)))
def main(self):
#we don't know the length yet, so we say the file lasts an arbitrary (long) time
riffchunk = "RIFF" + struct.pack("<L", 0xEFFFFFFF) + "WAVE"
bytespersecond = self.bytespersample * self.samplingfrequency
formatchunk = "fmt "
formatchunk += struct.pack("<L", 0x10) #16 for PCM
formatchunk += struct.pack("<H", 0x01) #PCM/Linear quantization
formatchunk += struct.pack("<H", 0x01) #mono
formatchunk += struct.pack("<L", self.samplingfrequency)
formatchunk += struct.pack("<L", bytespersecond)
formatchunk += struct.pack("<H", self.bytespersample)
formatchunk += struct.pack("<H", self.bytespersample * 8)
self.send(riffchunk, "outbox")
self.send(formatchunk, "outbox")
datachunkheader = "data" + struct.pack("<L", 0xEFFFFFFF) #again, an arbitrary (large) value
self.send(datachunkheader, "outbox")
running = True
while running:
yield 1
codedsamples = []
while self.dataReady("inbox"): # we accept lists of floats
samplelist = self.recv("inbox")
for sample in samplelist:
if sample < -1:
sample = -1
elif sample > 1:
sample = 1
codedsamples.append(self.pack(sample))
del samplelist
if codedsamples:
self.send(string.join(codedsamples, ""), "outbox")
while self.dataReady("control"): # we accept lists of floats
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
return
self.pause()
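# Illustrative wiring (an assumption -- Pipeline and SimpleFileWriter are
# stock Kamaelia components, not defined in this file): feed lists of float
# samples through PCMToWave and write the stream to disk.
#
#   from Kamaelia.Chassis.Pipeline import Pipeline
#   from Kamaelia.File.Writing import SimpleFileWriter
#   Pipeline(someFloatSource, PCMToWave(2, 44100),
#            SimpleFileWriter("out.wav")).run()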
| apache-2.0 | Python |
|
16275938769c16c79b89349612e8e7b2891de815 | Add migration for user manager | benjaoming/kolibri,jonboiser/kolibri,christianmemije/kolibri,lyw07/kolibri,mrpau/kolibri,mrpau/kolibri,DXCanas/kolibri,mrpau/kolibri,benjaoming/kolibri,lyw07/kolibri,mrpau/kolibri,christianmemije/kolibri,learningequality/kolibri,jonboiser/kolibri,learningequality/kolibri,lyw07/kolibri,indirectlylit/kolibri,DXCanas/kolibri,lyw07/kolibri,christianmemije/kolibri,jonboiser/kolibri,benjaoming/kolibri,DXCanas/kolibri,christianmemije/kolibri,learningequality/kolibri,learningequality/kolibri,jonboiser/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,DXCanas/kolibri,benjaoming/kolibri | kolibri/auth/migrations/0008_auto_20180222_1244.py | kolibri/auth/migrations/0008_auto_20180222_1244.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-02-22 20:44
from __future__ import unicode_literals
import kolibri.auth.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kolibriauth', '0007_auto_20171226_1125'),
]
operations = [
migrations.AlterModelManagers(
name='facilityuser',
managers=[
('objects', kolibri.auth.models.FacilityUserModelManager()),
],
),
]
| mit | Python |
|
74450edae5d327659ec618f7e160bc5dd37bd512 | Add the Python file used by Python2DWrapper | MR-algorithms/YAP,MR-algorithms/YAP,MR-algorithms/YAP | PluginSDK/BasicRecon/Python/Py2C.py | PluginSDK/BasicRecon/Python/Py2C.py | from PIL import Image
import numpy as np
import matplotlib
"""
This is Module Py2C for c++
"""
class A: pass
class B: pass
def ShowImage(image, width, height):
    # Build the 2D image row by row from the flat, row-major sample list.
    img = [[] for _ in range(width)]
    for x in range(width):
        for y in range(height):
            img[x] = img[x] + [image[x*height + y]]
    # Normalise in float (plain / is integer division on int arrays under
    # Python 2), then convert to uint8 so PIL can render it.
    npimg = np.array(img, dtype=float)
    npimg = npimg / npimg.max() * 255
    pil_image = Image.fromarray(npimg.astype(np.uint8))
pil_image.show()
return 'success!'
## image is 2d list
def ShowImage2D(image, width, height):
pil_image = Image.fromarray(np.array(image))
pil_image2 = Image.fromarray(np.array(image)*2)
pil_image.show()
pil_image2.show()
return np.array(image)
if __name__=='__main__':
width = 256
height = 256
li = [i for i in range(width*height)]
image = ShowImage(li, width, height)
li2d = [[i for j in range(height)] for i in range(width)] # *width+j)*255/(width*height)
image2d = ShowImage2D(li2d,width, height)
| mit | Python |
|
883aac8a282d4525e82d3eb151ea293c5577424c | Add data migration to create gesinv | tic-ull/portal-del-investigador,tic-ull/portal-del-investigador,tic-ull/portal-del-investigador,tic-ull/portal-del-investigador | core/migrations/0002_auto_20141008_0853.py | core/migrations/0002_auto_20141008_0853.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_extra_users(apps, schema_editor):
user = apps.get_model("auth.User").objects.create(username='GesInv-ULL')
apps.get_model("core", "UserProfile").objects.create(user=user,
documento='00000000A')
def delete_extra_users(apps, schema_editor):
user = apps.get_model("auth.User").objects.get(username='GesInv-ULL')
apps.get_model("core", "UserProfile").objects.get(user=user).delete()
user.delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RunPython(create_extra_users, delete_extra_users),
]
| agpl-3.0 | Python |
|
e6181c5d7c95af23ee6d51d125642104782f5cf1 | Add solution for 136_Single Number with XOR operation. | comicxmz001/LeetCode,comicxmz001/LeetCode | Python/136_SingleNumber.py | Python/136_SingleNumber.py | class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
#Using XOR to find the single number.
#Because every number appears twice, while N^N=0, 0^N=N,
        #XOR is commutative, so the order of elements does not matter.
#Finally, it will be res = 0 ^ singlenumber ==> res = singlenumber
res = 0
for num in nums:
res ^= num
return res
nums = [1,1,5,5,3,4,4,9,9,8,8,7,7]
foo = Solution()
print foo.singleNumber(nums)
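# The same identity written as a fold (illustrative equivalent):
from operator import xor
print reduce(xor, nums, 0)  # prints 3 as well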
| mit | Python |
|
83ebfe1ff774f8d5fb5ae610590ca8fca1c87100 | add migration for on_delete changes | bcgov/gwells,bcgov/gwells,bcgov/gwells,bcgov/gwells | app/backend/wells/migrations/0034_auto_20181127_0230.py | app/backend/wells/migrations/0034_auto_20181127_0230.py | # Generated by Django 2.1.3 on 2018-11-27 02:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wells', '0033_auto_20181119_1857'),
]
operations = [
migrations.AlterField(
model_name='activitysubmission',
name='decommission_method',
field=models.ForeignKey(blank=True, db_column='decommission_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.DecommissionMethodCode', verbose_name='Method of Decommission'),
),
migrations.AlterField(
model_name='productiondata',
name='well_yield_unit',
field=models.ForeignKey(blank=True, db_column='well_yield_unit_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellYieldUnitCode'),
),
migrations.AlterField(
model_name='well',
name='aquifer',
field=models.ForeignKey(blank=True, db_column='aquifer_id', null=True, on_delete=django.db.models.deletion.PROTECT, to='aquifers.Aquifer', verbose_name='Aquifer ID Number'),
),
migrations.AlterField(
model_name='well',
name='bcgs_id',
field=models.ForeignKey(blank=True, db_column='bcgs_id', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.BCGS_Numbers', verbose_name='BCGS Mapsheet Number'),
),
migrations.AlterField(
model_name='well',
name='decommission_method',
            field=models.ForeignKey(blank=True, db_column='decommission_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.DecommissionMethodCode', verbose_name='Method of Decommission'),
),
migrations.AlterField(
model_name='well',
name='observation_well_status',
            field=models.ForeignKey(blank=True, db_column='obs_well_status_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.ObsWellStatusCode', verbose_name='Observation Well Status'),
),
]
| apache-2.0 | Python |
|
59c62bb0f13be7910bf2280126a0909ffbe716f0 | Create simple_trie.py | nik-hil/scripts | simple_trie.py | simple_trie.py | class Trie:
def __init__(self):
self.node = {}
self.word = None
def add(self,string):
node = self.node
currentNode = None
for char in string:
currentNode = node.get(char, None)
if not currentNode:
node[char] = Trie()
currentNode = node[char]
node = currentNode.node
currentNode.word = string
def find(self, query):
node = self
result = []
for char in query:
currentNode = node.node.get(char, None)
if not currentNode:
return result
node = currentNode
return self.findall(node, result)
def findall(self, node, result):
if node.word:
result.append(node.word)
for value in node.node.values():
self.findall(value, result)
return result
t = Trie()
t.add("cat")
t.add("cats")
t.add("cow")
t.add("camp")
print t.find('c')
print t.find('ca')
print t.find("abcde")
print t.find("cows")
| mit | Python |
|
983f041b25b0de77f3720378e12b22e7d8f2e040 | Create same_first_last.py | dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey | Python/CodingBat/same_first_last.py | Python/CodingBat/same_first_last.py | # http://codingbat.com/prob/p179078
def same_first_last(nums):
return ( len(nums) >= 1 and nums[0] == nums[-1] )
| mit | Python |
|
ae6c6f3aa0863b919e0f00543cab737ae9e94129 | Add bubblesort as a placeholder for a refactored implementation | jonspeicher/blinkyfun | bubblesort.py | bubblesort.py | #!/usr/bin/env python
# TBD: Sort animation could take a pattern that is assumed to be "final",
# shuffle it, then take a sort generator that produces a step in the sort
# algorithm at every call. It would be sorting shuffled indices that the
# animation would use to construct each frame (see the sketch at the end
# of this file).
from blinkytape import blinkytape, blinkycolor, blinkyplayer
from patterns import gradient
import random, sys, time
tape = blinkytape.BlinkyTape.find_first()
start_color = blinkycolor.BlinkyColor.from_string(sys.argv[1])
end_color = blinkycolor.BlinkyColor.from_string(sys.argv[2])
pattern = gradient.Gradient(tape.pixel_count, start_color, end_color)
indexes = range(0, tape.pixel_count)
random.shuffle(indexes)
pixels = [pattern.pixels[index] for index in indexes]
tape.update(pixels)
time.sleep(5)
swap_occurred = True
while swap_occurred:
swap_occurred = False
for i in range(1, tape.pixel_count):
if indexes[i - 1] > indexes[i]:
temp = indexes[i - 1]
indexes[i - 1] = indexes[i]
indexes[i] = temp
swap_occurred = True
pixels = [pattern.pixels[index] for index in indexes]
tape.update(pixels)
| mit | Python |
|
3852ae6fcf6271ef19a182e5dfb199e4539536a1 | Create 6kyu_spelling_bee.py | Orange9000/Codewars,Orange9000/Codewars | Solutions/6kyu/6kyu_spelling_bee.py | Solutions/6kyu/6kyu_spelling_bee.py | from itertools import zip_longest as zlo
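# Count 'bee' in each row for horizontal hits, then transpose the hive with
# zip_longest and count again for vertical hits; 'eeb' catches the words
# spelled right-to-left / bottom-up.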
def how_many_bees(hive):
return sum(''.join(x).count('bee') + ''.join(x).count('eeb') for x in hive) + \
sum(''.join(y).count('bee') + ''.join(y).count('eeb') for y in zlo(*hive, fillvalue = '')) if hive else 0
| mit | Python |
|
ddb58206a52ef46f5194bf6f5c11ac68b16ab9a8 | Create minimum-window-subsequence.py | tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode | Python/minimum-window-subsequence.py | Python/minimum-window-subsequence.py | # Time: O(S * T)
# Space: O(S)
class Solution(object):
def minWindow(self, S, T):
"""
:type S: str
:type T: str
:rtype: str
"""
dp = [[None for _ in xrange(len(S))] for _ in xrange(2)]
for i, c in enumerate(S):
if c == T[0]:
dp[0][i] = i
for j in xrange(1, len(T)):
prev = None
dp[j%2] = [None] * len(S)
for i, c in enumerate(S):
if prev is not None and c == T[j]:
dp[j%2][i] = prev
if dp[(j-1)%2][i] is not None:
prev = dp[(j-1)%2][i]
start, end = 0, len(S)
for j, i in enumerate(dp[(len(T)-1)%2]):
if i >= 0 and j-i < end-start:
start, end = i, j
return S[start:end+1] if end < len(S) else ""
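# Illustrative check (example values assumed, not from the original file):
#   print Solution().minWindow("abcdebdde", "bde")  # -> "bdeb"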
| mit | Python |
|
eaca815937ebd1fbdd6ec5804dc52257d2775181 | Create time-gui.py | oriwen/time-check | time-gui.py | time-gui.py | from tkinter import *
import time
import sys
import datetime as dt
from datetime import timedelta
from tkinter import messagebox as tkMessageBox
# The Entry widgets below hold at most two characters. The original script
# never defined this constant (a NameError at runtime), so the value here
# is an assumption chosen to match the two-character Entry widths.
TEXT_MAXINPUTSIZE = 3
def validateTextInputSize(event):
""" Method to Validate Entry text input size """
global TEXT_MAXINPUTSIZE
if (event.widget.index(END) >= TEXT_MAXINPUTSIZE - 1):
event.widget.delete(TEXT_MAXINPUTSIZE - 1)
def displayText():
    """ Display the Entry text value. """
    global entryWidget
    # NOTE: nothing below assigns `entryWidget`; bind one of the Entry
    # widgets to it before calling this helper.
    if entryWidget.get().strip() == "":
        tkMessageBox.showerror("Tkinter Entry Widget", "Enter a text value")
    else:
        tkMessageBox.showinfo("Tkinter Entry Widget", "Text value =" + entryWidget.get().strip())
if __name__ == "__main__":
main = Tk()
main.title("Main Widget")
main["padx"] = 40
main["pady"] = 2
# Create a text frame to hold the text Label and the Entry widget
textFrame = Frame(main)
#Create a Label in textFrame
l1 = Label(textFrame)
l1["text"] = "Enter time of arrival:"
l1.grid(row=0, column=0)
# Create an Entry Widget in textFrame
e1 = Entry(textFrame)
e1.bind("<Key>", validateTextInputSize)
e1["width"] = 2
e1.grid(row=0, column=1)
e1.insert(0, "6")
e1.config(bg="white")
l2 = Label(textFrame)
l2["text"] = ":"
l2.grid(row=0, column=2)
e2 = Entry(textFrame)
e2.bind("<Key>", validateTextInputSize)
e2["width"] = 2
e2.grid(row=0, column=3)
e2.insert(0, "00")
e2.config(bg="white")
#Create a Label in textFrame
l3 = Label(textFrame)
l3["text"] = "How long will you work?:"
l3.grid(row=1, column=0)
# Create an Entry Widget in textFrame
e3 = Entry(textFrame)
e3.bind("<Key>", validateTextInputSize)
e3["width"] = 2
e3.grid(row=1, column=1)
e3.insert(0, "8")
e3.config(bg="white")
l4 = Label(textFrame)
l4["text"] = ":"
l4.grid(row=1, column=2)
e4 = Entry(textFrame)
e4.bind("<Key>", validateTextInputSize)
e4["width"] = 2
e4.grid(row=1, column=3)
e4.insert(0, "00")
e4.config(bg="white")
l5 = Label(textFrame)
l5["text"] = "And lunch? :"
l5.grid(row=1, column=4)
e5 = Entry(textFrame)
e5.bind("<Key>", validateTextInputSize)
e5["width"] = 2
e5.grid(row=1, column=5)
e5.insert(0, "30")
e5.config(bg="white")
l6 = Label(textFrame)
l6["text"] = "minutes"
l6.grid(row=1, column=6)
textFrame.pack()
clock = Label(main, font=('times', 20, 'bold'), bg='green')
clock.pack(fill=BOTH, expand=1)
def tick():
s = time.strftime('%H:%M:%S')
if s != clock["text"]:
clock["text"] = s
clock.after(200, tick)
tick()
main.mainloop()
| unlicense | Python |
|
077d4b8954918ed51c43429efd74b4911083c4f4 | Add instance_id field. | indirectlylit/kolibri,jtamiace/kolibri,lyw07/kolibri,mrpau/kolibri,learningequality/kolibri,rtibbles/kolibri,benjaoming/kolibri,mrpau/kolibri,jtamiace/kolibri,indirectlylit/kolibri,MingDai/kolibri,benjaoming/kolibri,jayoshih/kolibri,jtamiace/kolibri,christianmemije/kolibri,rtibbles/kolibri,jonboiser/kolibri,benjaoming/kolibri,ralphiee22/kolibri,indirectlylit/kolibri,lyw07/kolibri,jonboiser/kolibri,learningequality/kolibri,MingDai/kolibri,learningequality/kolibri,DXCanas/kolibri,mrpau/kolibri,66eli77/kolibri,lyw07/kolibri,jayoshih/kolibri,lyw07/kolibri,jamalex/kolibri,whitzhu/kolibri,jamalex/kolibri,66eli77/kolibri,aronasorman/kolibri,66eli77/kolibri,DXCanas/kolibri,MingDai/kolibri,christianmemije/kolibri,aronasorman/kolibri,jayoshih/kolibri,mrpau/kolibri,ralphiee22/kolibri,jonboiser/kolibri,whitzhu/kolibri,aronasorman/kolibri,jamalex/kolibri,whitzhu/kolibri,ralphiee22/kolibri,indirectlylit/kolibri,ralphiee22/kolibri,whitzhu/kolibri,rtibbles/kolibri,jamalex/kolibri,benjaoming/kolibri,jayoshih/kolibri,DXCanas/kolibri,jonboiser/kolibri,christianmemije/kolibri,learningequality/kolibri,jtamiace/kolibri,christianmemije/kolibri,DXCanas/kolibri,MingDai/kolibri,66eli77/kolibri,rtibbles/kolibri,aronasorman/kolibri | kolibri/content/migrations/0002_auto_20160630_1959.py | kolibri/content/migrations/0002_auto_20160630_1959.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-06-30 19:59
from __future__ import unicode_literals
from django.db import migrations, models
import kolibri.content.models
import uuid
class Migration(migrations.Migration):
dependencies = [
('content', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='contentnode',
options={},
),
migrations.AddField(
model_name='contentnode',
name='instance_id',
field=kolibri.content.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32, unique=True),
),
migrations.AlterField(
model_name='contentnode',
name='kind',
field=models.CharField(blank=True, choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('image', 'Image')], max_length=200),
),
migrations.AlterField(
model_name='file',
name='extension',
field=models.CharField(blank=True, choices=[('mp4', 'mp4'), ('vtt', 'vtt'), ('srt', 'srt'), ('mp3', 'mp3'), ('pdf', 'pdf')], max_length=40),
),
migrations.AlterField(
model_name='file',
name='preset',
            field=models.CharField(blank=True, choices=[('high_res_video', 'High resolution video'), ('low_res_video', 'Low resolution video'), ('vector_video', 'Vector video'), ('thumbnail', 'Thumbnail'), ('thumbnail', 'Thumbnail'), ('caption', 'Caption')], max_length=150),
),
]
| mit | Python |
|
48172fa94043fb004dfaf564afac42e632be2bc0 | add test for DataManager | missionpinball/mpf,missionpinball/mpf | mpf/tests/test_DataManager.py | mpf/tests/test_DataManager.py | """Test the bonus mode."""
import time
from unittest.mock import mock_open, patch
from mpf.file_interfaces.yaml_interface import YamlInterface
from mpf.core.data_manager import DataManager
from mpf.tests.MpfTestCase import MpfTestCase
class TestDataManager(MpfTestCase):
def testSaveAndLoad(self):
YamlInterface.cache = False
open_mock = mock_open(read_data="")
with patch('mpf.file_interfaces.yaml_interface.open', open_mock, create=True):
manager = DataManager(self.machine, "machine_vars")
self.assertTrue(open_mock.called)
self.assertNotIn("hallo", manager.get_data())
open_mock = mock_open(read_data="")
with patch('mpf.file_interfaces.yaml_interface.open', open_mock, create=True):
with patch('mpf.core.file_manager.os.rename') as move_mock:
manager.save_key("hallo", "world")
while not move_mock.called:
time.sleep(.00001)
open_mock().write.assert_called_once_with('hallo: world\n')
self.assertTrue(move_mock.called)
open_mock = mock_open(read_data='hallo: world\n')
with patch('mpf.file_interfaces.yaml_interface.open', open_mock, create=True):
manager2 = DataManager(self.machine, "machine_vars")
self.assertTrue(open_mock.called)
self.assertEqual("world", manager2.get_data()["hallo"])
YamlInterface.cache = True
| mit | Python |
|
8d0f6ed81377e516c5bb266894a8cf39b6852383 | add multiple rsi sample | xclxxl414/rqalpha,xclxxl414/rqalpha | examples/multi_rsi.py | examples/multi_rsi.py | # You can import any third-party Python modules supported by the platform, such as pandas, numpy, etc.
import talib
import numpy as np
import math
import pandas
# Write any initialization logic in this method. The context object will be passed between all the methods of your strategy.
def init(context):
    #Pick the stocks we are interested in
context.s1 = "000001.XSHE"
context.s2 = "601988.XSHG"
context.s3 = "000068.XSHE"
context.stocks = [context.s1,context.s2,context.s3]
update_universe(context.stocks)
context.TIME_PERIOD = 14
context.HIGH_RSI = 85
context.LOW_RSI = 30
context.ORDER_PERCENT = 0.3
# A data update for the securities you selected will trigger this logic, e.g. daily or minute history bars, or real-time bar updates
def handle_bar(context, bar_dict):
    # Start writing your main algorithm logic here
    # bar_dict[order_book_id] gives you the bar data of a given security
    # context.portfolio gives you the current state of your portfolio
    # Use the order_shares(id_or_ins, amount) method to place orders
    # TODO: start writing your algorithm!
    # Loop over our selected stocks and compute the RSI value of each one
for stock in context.stocks:
        # Read the historical data
        prices = history(context.TIME_PERIOD+1,'1d','close')[stock].values
        # Compute the RSI value with TA-Lib
        rsi_data = talib.RSI(prices,timeperiod=context.TIME_PERIOD)[-1]
curPosition = context.portfolio.positions[stock].quantity
        #Use 30% of the remaining cash to buy more of the stock
        target_available_cash = context.portfolio.cash * context.ORDER_PERCENT
        #When RSI is above the configured upper threshold, close out the position in this stock
if rsi_data > context.HIGH_RSI and curPosition > 0:
order_target_value(stock,0)
        #When RSI is below the configured lower threshold, add to the position with a share of the remaining cash
        if rsi_data < context.LOW_RSI:
            logger.info("target available cash calculated: " + str(target_available_cash))
            #If the remaining cash is not enough for one board lot (100 shares), the order will be rejected by RiceQuant's order management system
order_value(stock, target_available_cash)
| apache-2.0 | Python |
|
859b3de112549f070e7b56901b86d40e8b8c1f51 | update scorer | MultimediaSemantics/entity2vec,MultimediaSemantics/entity2vec | lib/scorer.py | lib/scorer.py | import numpy as np
import pandas as pd
import json
#from SPARQLWrapper import SPARQLWrapper, JSON
from collections import defaultdict
from operator import itemgetter
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
import optparse
from ranking import ndcg_at_k, average_precision
def scorer(embeddings, gold_standard,N, similarity):
similarity = similarity
gold_standard = pd.read_table(gold_standard, header = None)
candidate_scores = defaultdict(list)
sorted_candidate_scores = defaultdict(list)
e2v_embeddings = get_e2v_embedding(embeddings)
ndcg = {}
AP = {}
for i in gold_standard.values:
query_wiki_id = int(i[2])
candidate_wiki_id = int(i[4])
truth_value = int(i[5])
print query_wiki_id, candidate_wiki_id, truth_value
        # Column 0 of the embeddings table holds the entity id, so drop it
        # and compare only the embedding dimensions.
        query_e2v = e2v_embeddings[e2v_embeddings[0] == query_wiki_id].values[:, 1:] #query vector = [0.2,-0.3,0.1,0.7 ...]
        candidate_e2v = e2v_embeddings[e2v_embeddings[0] == candidate_wiki_id].values[:, 1:]
print query_e2v, candidate_e2v
candidate_scores[query_wiki_id].append((similarity_function(query_e2v,candidate_e2v, similarity),truth_value))
for q in candidate_scores.keys():
sorted_candidate_scores[q] = sorted(candidate_scores[q], key = itemgetter(0), reverse = True)
relevance = []
for score, rel in sorted_candidate_scores[q]:
relevance.append(rel)
ndcg[q] = ndcg_at_k(relevance,N)
AP[q] = average_precision(relevance)
print sorted_candidate_scores
print np.mean(ndcg.values()), np.mean(AP.values())
def similarity_function(vec1,vec2, similarity):
#compute cosine similarity or other similarities
v1 = np.array(vec1)
v2 = np.array(vec2)
if len(v1)*len(v2) == 0: #any of the two is 0
global count
count +=1
return 0
else:
if similarity == 'cosine':
return cosine_similarity(v1,v2)[0][0] #returns an double array [[sim]]
        elif similarity == 'L1':
            return 0  # L1 distance not implemented yet
        elif similarity == 'L2':
            return 0  # L2 distance not implemented yet
elif similarity == 'linear_kernel':
return linear_kernel(v1,v2)[0][0]
else:
raise NameError('Choose a valid similarity function')
def get_e2v_embedding(embeddings):
    #read the embedding file into a pandas DataFrame
emb = pd.read_table(embeddings, skiprows = 1, header = None, sep = ' ')
return emb
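# The embeddings file is assumed to be in word2vec text format: a header
# line (skipped above) followed by "<entity> <dim_1> ... <dim_n>" rows.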
def wiki_to_local(wiki_id):
url = get_url_from_id(wiki_id)
url = '<'+url+'>'
json_open = open('dictionaries/dictionary_dbpedia2015.json')
json_string = json_open.read()
json_open.close()
json_dict = json.loads(json_string)
local_id = json_dict[url]
return local_id
def get_url_from_id(wiki_id):
    # NOTE: needs the SPARQLWrapper import that is commented out at the top.
    sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX db: <http://dbpedia.org/resource/>
SELECT *
WHERE { ?s dbo:wikiPageID %d }
""" %wiki_id)
sparql.setReturnFormat(JSON)
return str(sparql.query().convert()['results']['bindings'][0]['s']['value'])
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-i','--input', dest = 'file_name', help = 'file_name')
parser.add_option('-g','--gold', dest = 'gold_standard_name', help = 'gold_standard_name')
parser.add_option('-N','--number', dest = 'N', help = 'cutting threshold scorers',type = int)
parser.add_option('-s','--similarity', dest = 'similarity', help = 'similarity measure')
(options, args) = parser.parse_args()
if options.file_name is None:
options.file_name = raw_input('Enter file name:')
if options.gold_standard_name is None:
options.gold_standard_name = raw_input('Enter gold standard name:')
if options.N is None:
options.N = 10
if options.similarity is None:
options.similarity = 'cosine'
file_name = options.file_name
gold_standard_name = options.gold_standard_name
N = options.N
similarity = options.similarity
count = 0
scorer(file_name, gold_standard_name,N, similarity)
print count | apache-2.0 | Python |
|
3dcfc2f7e9a2ed696a2b4a006e4d8a233a494f2f | move sitemap to core | agepoly/DjangoBB,slav0nic/DjangoBB,slav0nic/DjangoBB,hsoft/DjangoBB,saifrahmed/DjangoBB,agepoly/DjangoBB,saifrahmed/DjangoBB,agepoly/DjangoBB,hsoft/DjangoBB,hsoft/DjangoBB,hsoft/slimbb,hsoft/slimbb,slav0nic/DjangoBB,hsoft/slimbb,saifrahmed/DjangoBB | djangobb_forum/sitemap.py | djangobb_forum/sitemap.py | from django.contrib.sitemaps import Sitemap
from djangobb_forum.models import Forum, Topic
class SitemapForum(Sitemap):
priority = 0.5
def items(self):
return Forum.objects.all()
class SitemapTopic(Sitemap):
priority = 0.5
def items(self):
return Topic.objects.all() | bsd-3-clause | Python |
|
a6f26893189376f64b6be5121e840acc4cfeebae | ADD utils.py : model_to_json / expand_user_object methods | OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft | packages/syft/src/syft/core/node/common/tables/utils.py | packages/syft/src/syft/core/node/common/tables/utils.py | # grid relative
from .groups import Group
from .usergroup import UserGroup
from .roles import Role
def model_to_json(model):
"""Returns a JSON representation of an SQLAlchemy-backed object."""
json = {}
for col in model.__mapper__.attrs.keys():
if col != "hashed_password" and col != "salt":
if col == "date" or col == "created_at" or col == "destroyed_at":
# Cast datetime object to string
json[col] = str(getattr(model, col))
else:
json[col] = getattr(model, col)
return json
def expand_user_object(user, db):
def get_group(user_group):
query = db.session().query
group = user_group.group
group = query(Group).get(group)
group = model_to_json(group)
return group
query = db.session().query
user = model_to_json(user)
user["role"] = query(Role).get(user["role"])
user["role"] = model_to_json(user["role"])
user["groups"] = query(UserGroup).filter_by(user=user["id"]).all()
user["groups"] = [get_group(user_group) for user_group in user["groups"]]
return user
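# Illustrative usage (hypothetical names; assumes a SQLAlchemy-backed `db`
# with a session factory and a User model):
#   user = db.session().query(User).first()
#   payload = expand_user_object(user, db)  # plain, JSON-serialisable dict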
def seed_db(db):
new_role = Role(
name="User",
can_triage_requests=False,
can_edit_settings=False,
can_create_users=False,
can_create_groups=False,
can_edit_roles=False,
can_manage_infrastructure=False,
can_upload_data=False,
)
db.add(new_role)
new_role = Role(
name="Compliance Officer",
can_triage_requests=True,
can_edit_settings=False,
can_create_users=False,
can_create_groups=False,
can_edit_roles=False,
can_manage_infrastructure=False,
can_upload_data=False,
)
db.add(new_role)
new_role = Role(
name="Administrator",
can_triage_requests=True,
can_edit_settings=True,
can_create_users=True,
can_create_groups=True,
can_edit_roles=False,
can_manage_infrastructure=False,
can_upload_data=True,
)
db.add(new_role)
new_role = Role(
name="Owner",
can_triage_requests=True,
can_edit_settings=True,
can_create_users=True,
can_create_groups=True,
can_edit_roles=True,
can_manage_infrastructure=True,
can_upload_data=True,
)
db.add(new_role)
db.commit()
| apache-2.0 | Python |
|
d7d0af678a52b357ecf479660ccee1eab43c443f | Add gender choices model | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/models/gender_choices.py | accelerator/models/gender_choices.py | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from accelerator_abstract.models import BaseGenderChoices
class GenderChoices(BaseGenderChoices):
class Meta(BaseGenderChoices.Meta):
swappable = swapper.swappable_setting(
BaseGenderChoices.Meta.app_label, "GenderChoices")
| mit | Python |
|
95edeaa711e8c33e1b431f792e0f2638126ed461 | Add test case for dynamic ast | cornell-brg/pymtl,cornell-brg/pymtl,cornell-brg/pymtl | pymtl/tools/translation/dynamic_ast_test.py | pymtl/tools/translation/dynamic_ast_test.py | #=======================================================================
# verilog_from_ast_test.py
#=======================================================================
# This is the test case that verifies the dynamic AST support of PyMTL.
# This test was contributed by Zhuanhao Wu through #169, #170 of PyMTL v2.
#
# Author : Zhuanhao Wu, Peitian Pan
# Date : Jan 23, 2019
import pytest
import random
from ast import *
from pymtl import *
from pclib.test import run_test_vector_sim
from verilator_sim import TranslationTool
pytestmark = requires_verilator
class ASTRTLModel(Model):
def __init__( s ):
s.a = InPort(2)
s.b = InPort(2)
s.out = OutPort(2)
# a simple clocked adder
# @s.posedge_clk
# def logic():
# s.out.next = s.a + s.b
# generate the model from ast
tree = Module(body=[
FunctionDef(name='logic', args=arguments(args=[], defaults=[]),
body= [
Assign(targets=[
Attribute(value=Attribute(value=Name(id='s', ctx=Load()), attr='out', ctx=Load()), attr='next', ctx=Store())
],
value=BinOp(left=Attribute(value=Name(id='s', ctx=Load()), attr='a', ctx=Load()), op=Add(), right=Attribute(value=Name(id='s', ctx=Load()), attr='b', ctx=Load()))
)
],
decorator_list=[
Attribute(value=Name(id='s', ctx=Load()), attr='posedge_clk', ctx=Load())
],
returns=None)
])
tree = fix_missing_locations(tree)
        # Specify the union of globals() and locals() so the free
# variables in the closure can be captured.
exec(compile(tree, filename='<ast>', mode='exec')) in globals().update( locals() )
# As with #175, the user needs to supplement the dynamic AST to
# the .ast field of the generated function object.
logic.ast = tree
def test_ast_rtl_model_works_in_simulation():
mod = ASTRTLModel()
test_vector_table = [('a', 'b', 'out*')]
last_result = '?'
for i in xrange(3):
rv1 = Bits(2, random.randint(0, 3))
rv2 = Bits(2, random.randint(0, 3))
test_vector_table.append( [ rv1, rv2, last_result ] )
last_result = Bits(2, rv1 + rv2)
run_test_vector_sim(mod, test_vector_table)
def test_ast_rtl_model_to_verilog():
mod = ASTRTLModel()
# TranslationTool should successfully compile ASTRTLModel
tool = TranslationTool(mod)
| bsd-3-clause | Python |
|
49cab51aa8697a56c7cf74e45b77d9a20ad1a178 | add topk/gen.py | chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes | topk/gen.py | topk/gen.py | #!/usr/bin/python
import random
word_len = 5
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-'
output = open('word_count', 'w')
words = set()
N = 1000*1000
for x in xrange(N):
arr = [random.choice(alphabet) for i in range(word_len)]
words.add(''.join(arr))
print len(words)
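# The set silently drops random collisions, so this count is typically a
# little below N.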
for word in words:
output.write(word)
output.write('\t')
output.write(str(random.randint(1, 2*N)))
output.write('\n')
| bsd-3-clause | Python |
|
c5da52c38d280873066288977f021621cb9653d0 | Apply orphaned migration | barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore-django,dbinetti/barberscore,barberscore/barberscore-api,dbinetti/barberscore,barberscore/barberscore-api | project/apps/api/migrations/0010_remove_chart_song.py | project/apps/api/migrations/0010_remove_chart_song.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0009_auto_20150722_1041'),
]
operations = [
migrations.RemoveField(
model_name='chart',
name='song',
),
]
| bsd-2-clause | Python |
|
ab5d1fd5728b9c2f27d74c2896e05a94b061f3f9 | add config.py | Plantain/sms-mailinglist,Plantain/sms-mailinglist,Plantain/sms-mailinglist,Plantain/sms-mailinglist | config.py | config.py | # twilio account details
account = ""
token = ""
| apache-2.0 | Python |
|
42cc2864f03480e29e55bb0ef0b30e823c11eb2f | complete check online window | misterlihao/network-programming-project | check_online_window.py | check_online_window.py | import tkinter as tk
import tkinter.messagebox as tkmb
from online_check import CheckSomeoneOnline
class open_check_online_window():
def __init__(self, x, y):
self.co = tk.Tk()
self.co.title('enter ip to check')
self.co.resizable(False, False)
self.co.wm_attributes("-toolwindow", 1)
self.entry = tk.Entry(self.co, width=15)
        self.entry.pack(side = 'left', fill = 'both')
        # Let the Return key trigger the check via on_return below.
        self.entry.bind('<Return>', self.on_return)
check = tk.Button(self.co,
text='check',
relief = 'flat',
command=self.check_online)
check.pack(side = 'right', fill = 'both')
self.co.geometry('+%d+%d'% (x,y))
self.co.mainloop()
def on_return(self, event):
self.check_online()
def check_online(self):
ip = self.entry.get()
try:
            if CheckSomeoneOnline(ip):
                tkmb.showinfo('online check', ip + ' is online')
            else:
                tkmb.showinfo('online check', ip + ' is offline')
except Exception as err:
tkmb.showerror('Error', err)
self.co.quit()
if __name__ == '__main__':
open_check_online_window(600, 300) | mit | Python |
|
05b9859fb7d4577dfa95ec9edd3a6f16bf0fd86e | Create __init__.py | rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python | fade/fade/__init__.py | fade/fade/__init__.py | bsd-3-clause | Python |
||
1b6fecb5819fbead0aadcc1a8669e915542c5ea0 | Add script for gameifying testing | spotify/testing-game,spotify/testing-game,spotify/testing-game,spotify/testing-game | other/testing-game.py | other/testing-game.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import subprocess
import re
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', help='The directory to search for files in', required=False, default=os.getcwd())
args = parser.parse_args()
names = {}
for root, dirs, files in os.walk(args.directory):
for name in files:
filename, fileextension = os.path.splitext(name)
absfile = os.path.join(root, name)
if fileextension == '.m' or fileextension == '.mm':
try:
with open(absfile) as sourcefile:
source = sourcefile.read()
if source.find('XCTestCase') != -1:
p = subprocess.Popen(['git', 'blame', absfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
for blame_line in out.splitlines():
if blame_line.replace(' ', '').find('-(void)test') != -1:
blame_info = blame_line[blame_line.find('(')+1:]
blame_info = blame_info[:blame_info.find(')')]
blame_components = blame_info.split()
name_components = blame_components[:len(blame_components)-4]
name = ' '.join(name_components)
name_count = names.get(name, 0)
names[name] = name_count + 1
            except:
                print 'Could not open file: ' + absfile
print names | apache-2.0 | Python |
|
908013aa5e64589b6c1c6495812a13109244a69a | add doctests testconfig, but leave it deactivated for now; lots of import failures, since no imports are declared explicitly | nylas/icalendar,geier/icalendar,untitaker/icalendar | src/icalendar/tests/XXX_test_doctests.py | src/icalendar/tests/XXX_test_doctests.py | from interlude import interact
import doctest
import os.path
import unittest
OPTIONFLAGS = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
DOCFILES = [
'example.txt',
'groupscheduled.txt',
'multiple.txt',
'recurrence.txt',
'small.txt'
]
DOCMODS = [
'icalendar.caselessdict',
'icalendar.cal',
'icalendar.parser',
'icalendar.prop',
]
def test_suite():
suite = unittest.TestSuite()
suite.addTests([
doctest.DocFileSuite(
os.path.join(os.path.dirname(__file__), docfile),
module_relative=False,
optionflags=OPTIONFLAGS,
globs={'interact': interact}
) for docfile in DOCFILES
])
suite.addTests([
doctest.DocTestSuite(
docmod,
optionflags=OPTIONFLAGS,
globs={'interact': interact}
) for docmod in DOCMODS
])
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| bsd-2-clause | Python |
|
4a99dcd629a830ad1ec0c658f312a4793dec240b | add basic file for parser | SamuraiT/RedBlue,SamuraiT/RedBlue | RedBlue/Parser3.py | RedBlue/Parser3.py |
class Parser(object):
@classmethod
def read_html(cls, html):
pass
| bsd-3-clause | Python |
|
0648ca26ba195e4d5ce55d801975a161907e655f | Add test for translation | czpython/aldryn-faq,czpython/aldryn-faq,czpython/aldryn-faq,czpython/aldryn-faq | aldryn_faq/tests/test_aldryn_faq.py | aldryn_faq/tests/test_aldryn_faq.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase # , TransactionTestCase
# from django.utils import translation
from hvad.test_utils.context_managers import LanguageOverride
from aldryn_faq.models import Category, Question
EN_CAT_NAME = "Example"
EN_CAT_SLUG = "example"
EN_QUE_TITLE = "Test Question"
EN_QUE_ANSWER_TEXT = "Test Answer"
DE_CAT_NAME = "Beispiel"
DE_CAT_SLUG = "beispiel"
DE_QUE_TITLE = "Testfrage"
DE_QUE_ANSWER_TEXT = "Test Antwort"
class AldrynFaqTestMixin(object):
@staticmethod
def reload(object):
"""Simple convenience method for re-fetching an object from the ORM."""
return object.__class__.objects.get(id=object.id)
def mktranslation(self, obj, lang, **kwargs):
"""Simple method of adding a translation to an existing object."""
obj.translate(lang)
for k, v in kwargs.iteritems():
setattr(obj, k, v)
obj.save()
def setUp(self):
"""Setup a prebuilt and translated Question with Category
for testing."""
with LanguageOverride("en"):
self.category = Category(**{
"name": EN_CAT_NAME,
"slug": EN_CAT_SLUG
})
self.category.save()
self.question = Question(**{
"title": EN_QUE_TITLE,
"answer_text": EN_QUE_ANSWER_TEXT,
})
self.question.category = self.category
self.question.save()
# Make a DE translation of the category
self.mktranslation(self.category, "de", **{
"name": DE_CAT_NAME,
"slug": DE_CAT_SLUG,
})
# Make a DE translation of the question
self.mktranslation(self.question, "de", **{
"title": DE_QUE_TITLE,
"answer_text": DE_QUE_ANSWER_TEXT,
})
class TestFAQTranslations(AldrynFaqTestMixin, TestCase):
def test_fetch_faq_translations(self):
"""Test we can fetch arbitrary translations of the question and
its category."""
# Can we target the EN values?
with LanguageOverride("en"):
question = self.reload(self.question)
category = self.reload(self.question.category)
self.assertEqual(question.title, EN_QUE_TITLE)
self.assertEqual(question.answer_text, EN_QUE_ANSWER_TEXT)
self.assertEqual(category.name, EN_CAT_NAME)
self.assertEqual(category.slug, EN_CAT_SLUG)
# And the DE values?
with LanguageOverride("de"):
question = self.reload(self.question)
category = self.reload(self.question.category)
self.assertEqual(question.title, DE_QUE_TITLE)
self.assertEqual(question.answer_text, DE_QUE_ANSWER_TEXT)
self.assertEqual(category.name, DE_CAT_NAME)
self.assertEqual(category.slug, DE_CAT_SLUG)
| bsd-3-clause | Python |
|
d1f71e1c6468799247d07d810a6db7d0ad5f89b0 | add support for jinja2 template engine | PythonZone/PyAlaOCL,megaplanet/PyAlaOCL | alaocl/jinja2.py | alaocl/jinja2.py | from alaocl import *
#__all__ = (
# 'addOCLtoEnvironment',
#)
_FILTERS = {
'asSet': asSet,
'asBag': asBag,
'asSeq': asSeq,
}
_GLOBALS = {
'floor': floor,
'isUndefined': isUndefined,
'oclIsUndefined': oclIsUndefined,
'oclIsKindOf': oclIsKindOf,
'oclIsTypeOf': oclIsTypeOf,
'isCollection': isCollection,
'asSet': asSet,
'asBag': asBag,
    'asSeq': asSeq
}
try:
# noinspection PyUnresolvedReferences
from org.modelio.api.modelio import Modelio
WITH_MODELIO = True
except:
WITH_MODELIO = False
if WITH_MODELIO:
# TODO: in fact, this piece of code should be in modelio
# and it should be possible to import global stuff at once
# - at the top level script
# - as jinja global
# - in any python module
# Lambda expressions cannot be defined directly in the loop. See below:
# http://stackoverflow.com/questions/841555/
# whats-going-on-with-the-lambda-expression-in-this-python-function?rq=1
def _newIsInstanceFun(metaInterface):
return lambda e: isinstance(e, metaInterface)
from alaocl.modelio import allMetaInterfaces
for m_interface in allMetaInterfaces():
metaName = m_interface.metaName
_GLOBALS[metaName] = m_interface
isFunction = _newIsInstanceFun(m_interface)
_GLOBALS['is' + metaName] = isFunction
globals()['is' + metaName] = isFunction
def addOCLtoEnvironment(jinja2Environment):
"""
Add OCL functions to a jinja2 environment so that OCL can be
used in jinja2 templates.
:param jinja2Environment: Jinja2 environment to be instrumented.
:type jinja2Environment: jinja2.Environment
:return: The modified environment.
:rtype: jinja2.Environment
"""
jinja2Environment.filters.update(_FILTERS)
    jinja2Environment.globals.update(_GLOBALS)
    return jinja2Environment
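# Illustrative usage (assumes a standard jinja2 Environment; kept as a
# comment because this module is itself named ``jinja2``):
#   env = addOCLtoEnvironment(jinja2.Environment())
#   env.from_string("{{ values | asSet }}").render(values=[1, 2, 2])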
| mit | Python |