""" During World War I, the German army used a very clever pen and paper cipher called the ADFGVX cipher [http://en.wikipedia.org/wiki/ADFGVX_cipher], and your task today is to implement functions to both encrypt and decrypt messages using this cipher. What follows is a rather lengthy description of how it works (you can also find a description in that wikipedia link), but in essence it is actually quite simple. Here is how it works: The cleartext (the message that is to be encrypted) could consist of characters selected from an alphabet of 36 characters. For the purposes of today's problem, that alphabet will be: "ABCDEFGHIKLMNOPQRSTUVWXYZ0123456789 " That is, it will be the regular uppercase alphabet except for the letter J (if there's a J in the cleartext, replace it with an I), ten numbers, and a space character. That makes 25 + 10 + 1 = 36 different characters. The ciphertext will consist of only 6 different characters, namely the characters "ADFGVX". Supposedly these were selected because they are quite unlike each other in morse code, lessening the risk for errors in transmission. The key for the encryption and decryption consists of two parts: first one scrambled version of the cleartext alphabet (i.e. some permutation of "ABCDEFGHIKLMNOPQRSTUVWXYZ0123456789 "), called the substitution key. Second we need a transposition key which is a just a codeword of some sort. Lets illustrate the encryption of the cleartext "Brake me out of jail on the 21st." using the substitution key "R3FLMX7KWQ69D4Y5NOZ STV2EH8AP1ICBGU0" and the transposition key "PROGRAMMER" Encryption proceeds as follows: we begin by putting the cleartext in A suitable format, so that it only contains characters from the alphabet. Our cleartext then becomes "BRAKE ME OUT OF IAIL ON THE 21ST". As you can see, all characters have been put into uppercase, the "J" have been replaced by an "I", and all characters not in the alphabet (in this example, only the period ".") have been removed. Next we put the substitution key into a 6x6 table with the cipher chars "ADFGVX" as row and column headers, like this: A D F G V X +------------ A | R 3 F L M X D | 7 K W Q 6 9 F | D 4 Y 5 N O G | Z S T V 2 V | E H 8 A P 1 X | I C B G U 0 Each letter of the cleartext now gets replaced by two letters representing the row and column of the character in this square. So for instance, 'A' becomes 'VG' (because it's in the V row and the G column), 'B' becomes 'XF', 'C' becomes 'XD', etc. This is called "fractioning" the text. If we convert our cleartext using this method it becomes: B R A K E M E O U T O F XF AA VG DD VA GD AV VA GD FX XV GG GD FX AF GD I A I L O N T H E 2 1 S T XA VG XA AG GD FX FV GD GG VD VA GD GX VX GF GG Note that the space character is encoded as GD. Next, this fractioned text is put into a table with the transposition key as headers, as follows: P R O G R A M M E R ------------------- X F A A V G D D V A G D A V V A G D F X X V G G G D F X A F G D X A V G X A A G G D F X F V G D G G V D V A G D G X V X G F G G F G F A D F The last row didn't quite fit (it was six letters short), so we add in some random characters, in this case "FGFADF", to fill it out. Now the columns are sorted in alphabetical order of the header characters: A E G M M O P R R R ------------------- G V A D D A X F V A A F V G D A G D V X D A G F X G X V G F G A A X A X G D V G V G X G D F G D F G D V A G X V V D G X G D G F A G G F F F As you can see, the sorting is "stable", i.e. 
when there are two or more characters are identical in the transposition key, they keep the original order they had. So in this example, there are three R's and two M's, and they are in the same order relative to each other both before and after the transposition. Now, finally, we simply read off the table column by column to get our ciphertext. This is the final result: GADGVDGVFAAGVDAVGAXAGDGFXGGFDDXADXAAAGXFVGXGXGGVGFDVDDDFVVGVFGFAXFGGXF To decrypt, reverse the operations described here. EDIT: so, I just realized that I misspelled "Break out of jail" as "Brake out of jail", but it would be too much work to fix it now :) I apologize for the silly mistake. Then again, hardened criminals aren't known for being great spellers! """ import numpy as np from collections import defaultdict def encrypt_adfgvx(cleartext, subs_key, trans_key): text = adfgvx_formatter(cleartext, subs_key) subs_arr = subs_array(subs_key) code = adfgvx_encode(text, subs_arr) code = reshape_encode(code, trans_key) code = reorder_encode(code, trans_key) return code def subs_array(subs_key): subs_arr = np.array(list(subs_key)) subs_arr.resize(6, 6) return subs_arr def adfgvx_formatter(cleartext, subs_key): cleartext = cleartext.upper().replace('J', 'I') key_set = set(subs_key) return ''.join([t for t in cleartext if t in key_set]) def adfgvx_encode(text, subs_arr): code_dict = 'ADFGVX' enc = [] for t in text: index = np.asarray(np.where(subs_arr == t)).T[0] enc.append(code_dict[index[0]]) enc.append(code_dict[index[1]]) return enc def reshape_encode(enc, trans_key): code_dict = list('ADFGVX') length = len(trans_key) diff = length - (len(enc) % length) enc.extend(np.random.choice(code_dict, diff)) return np.array(enc).reshape(-1, length) def reorder_encode(enc, trans_key): trans_list = [(k, n) for n, k in enumerate(trans_key)] sorted_list = sorted(trans_list, key=lambda x: x[0]) res = enc[:, [s[1] for s in sorted_list]] return ''.join(res.T.reshape(1, -1)[0]) def decrypt_adfgvx(code, subs_key, trans_key): subs_arr = subs_array(subs_key) answer = reorder_decode(code, trans_key) answer = ''.join(answer.reshape(1, -1)[0]) answer = adfgvx_decode(answer, subs_arr) return answer def reorder_decode(code, trans_key): code = np.array(list(code)) code = code.reshape(len(trans_key), -1) code = code.T trans_list = [(k, n) for n, k in enumerate(trans_key)] sorted_list = sorted(trans_list, key=lambda x: x[0]) sorter = [s[1] for s in sorted_list] sorter = [n for n, s in sorted(enumerate(sorter), key=lambda x: x[1])] return code[:, sorter] def adfgvx_decode(code, subs_arr): code_dict = 'ADFGVX' res = '' for s in range(0, len(code), 2): res += subs_arr[code_dict.index(code[s]), code_dict.index(code[s+1])] return res def generate_substitution_key(): subs_key = list("ABCDEFGHIKLMNOPQRSTUVWXYZ0123456789 ") np.random.shuffle(subs_key) return ''.join(subs_key) def main(): cleartext = "Brake me out of jail on the 21st." # subs_key = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ") # np.random.shuffle(subs_key) # subs_key = ''.join(subs_key) subs_key = generate_substitution_key() print(subs_key) trans_key = "PROGRAMMER" code = encrypt_adfgvx(cleartext, subs_key, trans_key) answer = decrypt_adfgvx(code, subs_key, trans_key) print(cleartext) print(code) print(answer) if __name__ == "__main__": main()
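To tie the walkthrough above to the implementation, here is a minimal round-trip sketch; it assumes the functions from the listing are in scope as written, and since the transposition padding is random, only the prefix of the decrypted text is checked.

subs_key = generate_substitution_key()   # random permutation of the 36-character alphabet
trans_key = "PROGRAMMER"
cleartext = "Brake me out of jail on the 21st."

ciphertext = encrypt_adfgvx(cleartext, subs_key, trans_key)
recovered = decrypt_adfgvx(ciphertext, subs_key, trans_key)

# decrypt_adfgvx also decodes the random padding added during transposition,
# so the recovered text may carry a few extra trailing characters.
assert recovered.startswith("BRAKE ME OUT OF IAIL ON THE 21ST")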
{ "content_hash": "baa04c1afe717f0fc609bc896652a204", "timestamp": "", "source": "github", "line_count": 198, "max_line_length": 120, "avg_line_length": 36.07575757575758, "alnum_prop": 0.6911661766764665, "repo_name": "DayGitH/Python-Challenges", "id": "2e17582ddcc755e29d3a6c53236f0b0df4aefcfa", "size": "7143", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "DailyProgrammer/DP20120626B.py", "mode": "33188", "license": "mit", "language": [ { "name": "OpenEdge ABL", "bytes": "5002" }, { "name": "Python", "bytes": "2471582" } ], "symlink_target": "" }
import os import sys import setuptools from quantumclient.openstack.common import setup Name = 'python-quantumclient' Url = "https://launchpad.net/quantum" Version = setup.get_post_version('quantumclient') License = 'Apache License 2.0' Author = 'OpenStack Quantum Project' AuthorEmail = 'openstack-dev@lists.launchpad.net' Maintainer = '' Summary = 'CLI and python client library for OpenStack Quantum' ShortDescription = Summary Description = Summary dependency_links = setup.parse_dependency_links() tests_require = setup.parse_requirements(['tools/test-requires']) EagerResources = [ ] ProjectScripts = [ ] PackageData = { } setuptools.setup( name=Name, version=Version, url=Url, author=Author, author_email=AuthorEmail, description=ShortDescription, long_description=Description, license=License, classifiers=[ 'Environment :: OpenStack', 'Intended Audience :: Developers', 'Intended Audience :: Information Technology', 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', ], scripts=ProjectScripts, dependency_links=dependency_links, install_requires=setup.parse_requirements(), tests_require=tests_require, cmdclass=setup.get_cmdclass(), include_package_data=False, packages=setuptools.find_packages('.'), package_data=PackageData, eager_resources=EagerResources, entry_points={ 'console_scripts': [ 'quantum = quantumclient.shell:main', ] }, )
{ "content_hash": "64c1df086f52ce1e3b7b6118a926a56a", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 65, "avg_line_length": 26.40625, "alnum_prop": 0.6869822485207101, "repo_name": "redhat-openstack/python-neutronclient", "id": "c3d851cbd7ec31b643056f4890ad1aa542fbdc92", "size": "2365", "binary": false, "copies": "1", "ref": "refs/heads/stable/folsom", "path": "setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "248243" }, { "name": "Shell", "bytes": "5282" } ], "symlink_target": "" }
from unittest import TestCase import h5py import numpy as np import pandas as pd from exatomic import Universe from exatomic.base import resource from exatomic.molcas.output import Output, Orb, HDF # TODO : change df.shape[0] == num to len(df.index) == num everywhere class TestOutput(TestCase): """Test the Molcas output file editor.""" def setUp(self): self.cdz = Output(resource('mol-carbon-dz.out')) self.uo2sp = Output(resource('mol-uo2-anomb.out')) self.mamcart = Output(resource('mol-ch3nh2-631g.out')) self.mamsphr = Output(resource('mol-ch3nh2-anovdzp.out')) self.c2h6 = Output(resource('mol-c2h6-basis.out')) def test_add_orb(self): """Test adding orbital file functionality.""" self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb')) self.assertTrue(hasattr(self.mamcart, 'momatrix')) self.assertTrue(hasattr(self.mamcart, 'orbital')) with self.assertRaises(ValueError): self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb')) self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'), mocoefs='same') self.assertTrue('same' in self.mamcart.momatrix.columns) self.assertTrue('same' in self.mamcart.orbital.columns) self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'), mocoefs='diff', orbocc='diffocc') self.assertTrue('diff' in self.mamcart.momatrix.columns) self.assertTrue('diffocc' in self.mamcart.orbital.columns) uni = self.mamcart.to_universe() self.assertTrue(hasattr(uni, 'momatrix')) self.assertTrue(hasattr(uni, 'orbital')) def test_add_overlap(self): """Test adding an overlap matrix.""" self.cdz.add_overlap(resource('mol-carbon-dz.overlap')) self.assertTrue(hasattr(self.cdz, 'overlap')) uni = self.cdz.to_universe() self.assertTrue(hasattr(uni, 'overlap')) def test_parse_atom(self): """Test the atom table parser.""" self.uo2sp.parse_atom() self.assertEqual(self.uo2sp.atom.shape[0], 3) self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2sp.atom)))) self.mamcart.parse_atom() self.assertEqual(self.mamcart.atom.shape[0], 7) self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamcart.atom)))) self.mamsphr.parse_atom() self.assertEqual(self.mamsphr.atom.shape[0], 7) self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamsphr.atom)))) def test_parse_basis_set_order(self): """Test the basis set order table parser.""" self.uo2sp.parse_basis_set_order() self.assertEqual(self.uo2sp.basis_set_order.shape[0], 69) cols = list(set(self.uo2sp.basis_set_order._columns)) test = pd.DataFrame(self.uo2sp.basis_set_order[cols]) self.assertTrue(np.all(pd.notnull(test))) self.mamcart.parse_basis_set_order() self.assertEqual(self.mamcart.basis_set_order.shape[0], 28) cols = list(set(self.mamcart.basis_set_order._columns)) test = pd.DataFrame(self.mamcart.basis_set_order[cols]) self.assertTrue(np.all(pd.notnull(test))) self.mamsphr.parse_basis_set_order() self.assertEqual(self.mamsphr.basis_set_order.shape[0], 53) cols = list(set(self.mamsphr.basis_set_order._columns)) test = pd.DataFrame(self.mamsphr.basis_set_order[cols]) self.assertTrue(np.all(pd.notnull(test))) def test_parse_basis_set(self): """Test the gaussian basis set table parser.""" self.uo2sp.parse_basis_set() self.assertEqual(self.uo2sp.basis_set.shape[0], 451) self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2sp.basis_set)))) self.mamcart.parse_basis_set() self.assertEqual(self.mamcart.basis_set.shape[0], 84) self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamcart.basis_set)))) self.mamsphr.parse_basis_set() self.assertEqual(self.mamsphr.basis_set.shape[0], 148) self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamsphr.basis_set)))) 
self.c2h6.parse_basis_set() self.assertTrue(hasattr(self.c2h6, 'basis_set')) def test_to_universe(self): """Test that the Outputs can be converted to universes.""" uni = self.uo2sp.to_universe() self.assertIs(type(uni), Universe) uni = self.mamcart.to_universe() self.assertIs(type(uni), Universe) uni = self.mamsphr.to_universe() self.assertIs(type(uni), Universe) class TestOrb(TestCase): """Test the Molcas Orb file parser.""" def test_parse_old_uhf(self): sym = Orb(resource('mol-c2h6-old-sym.uhforb')) nym = Orb(resource('mol-c2h6-old-nosym.uhforb')) sym.parse_momatrix() nym.parse_momatrix() self.assertTrue(sym.momatrix.shape[0] == 274) self.assertTrue(nym.momatrix.shape[0] == 900) def test_parse_old_orb(self): sym = Orb(resource('mol-c2h6-old-sym.scforb')) nym = Orb(resource('mol-c2h6-old-nosym.scforb')) sym.parse_momatrix() nym.parse_momatrix() self.assertTrue(sym.momatrix.shape[0] == 274) self.assertTrue(nym.momatrix.shape[0] == 900) def test_parse_uhf(self): sym = Orb(resource('mol-c2h6-sym.uhforb')) nym = Orb(resource('mol-c2h6-nosym.uhforb')) sym.parse_momatrix() nym.parse_momatrix() self.assertTrue(sym.momatrix.shape[0] == 274) self.assertTrue(nym.momatrix.shape[0] == 900) def test_parse_orb(self): sym = Orb(resource('mol-c2h6-sym.scforb')) nym = Orb(resource('mol-c2h6-nosym.scforb')) sym.parse_momatrix() nym.parse_momatrix() self.assertTrue(sym.momatrix.shape[0] == 274) self.assertTrue(nym.momatrix.shape[0] == 900) def test_parse_momatrix(self): """Test the momatrix table parser.""" uo2sp = Orb(resource('mol-uo2-anomb.scforb')) uo2sp.parse_momatrix() self.assertEqual(uo2sp.momatrix.shape[0], 4761) self.assertTrue(np.all(pd.notnull(pd.DataFrame(uo2sp.momatrix)))) self.assertTrue(np.all(pd.notnull(pd.DataFrame(uo2sp.orbital)))) mamcart = Orb(resource('mol-ch3nh2-631g.scforb')) mamcart.parse_momatrix() self.assertEqual(mamcart.momatrix.shape[0], 784) self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamcart.momatrix)))) self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamcart.orbital)))) mamsphr = Orb(resource('mol-ch3nh2-anovdzp.scforb')) mamsphr.parse_momatrix() self.assertEqual(mamsphr.momatrix.shape[0], 2809) self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamsphr.momatrix)))) self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamsphr.orbital)))) class TestHDF(TestCase): def setUp(self): self.nym = HDF(resource('mol-c2h6-nosym-scf.hdf5')) self.sym = HDF(resource('mol-c2h6-sym-scf.hdf5')) def test_parse_atom(self): self.sym.parse_atom() self.nym.parse_atom() self.assertTrue(self.sym.atom.shape[0] == 8) self.assertTrue(self.nym.atom.shape[0] == 8) def test_parse_basis_set_order(self): self.sym.parse_basis_set_order() self.nym.parse_basis_set_order() self.assertTrue(self.sym.basis_set_order.shape[0] == 30) self.assertTrue(self.nym.basis_set_order.shape[0] == 30) def test_parse_orbital(self): self.sym.parse_orbital() self.nym.parse_orbital() self.assertTrue(self.sym.orbital.shape[0] == 30) self.assertTrue(self.nym.orbital.shape[0] == 30) def test_parse_overlap(self): self.sym.parse_overlap() self.nym.parse_overlap() self.assertTrue(self.sym.overlap.shape[0]) self.assertTrue(self.nym.overlap.shape[0]) def test_parse_momatrix(self): self.sym.parse_momatrix() self.nym.parse_momatrix() self.assertTrue(self.nym.momatrix.shape[0] == 900) with self.assertRaises(AttributeError): self.assertTrue(self.sym.momatrix) def test_to_universe(self): self.sym.to_universe() self.nym.to_universe()
{ "content_hash": "41170424a1063976edf4f1c9d3488f29", "timestamp": "", "source": "github", "line_count": 196, "max_line_length": 81, "avg_line_length": 42.05612244897959, "alnum_prop": 0.6418779570544705, "repo_name": "exa-analytics/atomic", "id": "978e5521a6a019d97f3dfa4a885679b36d250c36", "size": "8381", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "exatomic/molcas/tests/test_output.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "76185" }, { "name": "Python", "bytes": "82565" } ], "symlink_target": "" }
""" Django settings for {{cookiecutter.project_slug}} project. Generated by 'django-admin startproject' using Django 1.10.5. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os from .celery import app # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '4$8s8xl^ka1ukqbm)w^vy1%lqr4xhsnitvl4jo%z8#kfb35cal' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ["127.0.0.1", "localhost"] # Send an email when some view fails ADMINS = [ ('{{cookiecutter.full_name}}', '{{cookiecutter.email}}'), ] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'whitenoise.runserver_nostatic', 'django.contrib.staticfiles', # Vendor apps 'crispy_forms', 'registration', 'debug_toolbar', 'djcelery', # https://github.com/mikeumus/django-celery-example # My apps '{{cookiecutter.app_name}}', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] ROOT_URLCONF = '{{cookiecutter.project_slug}}.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, "templates")], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = '{{cookiecutter.project_slug}}.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'es-mx' TIME_ZONE = 'America/Mexico_City' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, "static_production") MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, "media_production") STATICFILES_DIRS = [ 
os.path.join(BASE_DIR, "static"), ] # Registration redux # https://django-registration-redux.readthedocs.io/en/latest/quickstart.html ACCOUNT_ACTIVATION_DAYS = 7 REGISTRATION_AUTO_LOGIN = True SITE_ID = 1 LOGIN_REDIRECT_URL = '/' # EMAIL # https://accounts.google.com/displayunlockcaptcha EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = 'tu_email@gmail.com' EMAIL_HOST_PASSWORD = 'tupassword' EMAIL_PORT = 587 EMAIL_USE_TLS = True # Crispy forms # http://django-crispy-forms.readthedocs.io/en/latest/ CRISPY_TEMPLATE_PACK = 'bootstrap3' # Debug toolbar INTERNAL_IPS = ['127.0.0.1'] app.conf.update( CELERY_RESULT_BACKEND='djcelery.backends.database:DatabaseBackend', ) CELERYBEAT_SCHEDULER='djcelery.schedulers.DatabaseScheduler' # Rest Framework REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated users. 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ] } # Whitenoise # http://whitenoise.evans.io/en/stable/django.html STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
{ "content_hash": "27a4936880275ce84dbdd09afe5b6fff", "timestamp": "", "source": "github", "line_count": 191, "max_line_length": 91, "avg_line_length": 26.54973821989529, "alnum_prop": 0.7032143561427726, "repo_name": "oca159/cookiecutter-django", "id": "ae9e55594c4dfa09aab178b5cd097be24ca8efba", "size": "5071", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "30" }, { "name": "HTML", "bytes": "7776" }, { "name": "JavaScript", "bytes": "484" }, { "name": "Python", "bytes": "9562" }, { "name": "Shell", "bytes": "304" } ], "symlink_target": "" }
sitesettings = call(settings_manager .get_whitelabel_settings_by_site_id(myuser.site_id))
{ "content_hash": "a4b62f506b315681f018c9113536f242", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 56, "avg_line_length": 46.5, "alnum_prop": 0.7741935483870968, "repo_name": "romankagan/DDBWorkbench", "id": "c4f7e7edfeefb56210dbf6440cc10ae6243d26b0", "size": "93", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/testData/formatter/wrapOnDot_after.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "AspectJ", "bytes": "182" }, { "name": "C", "bytes": "174330" }, { "name": "C#", "bytes": "390" }, { "name": "C++", "bytes": "85270" }, { "name": "CSS", "bytes": "102018" }, { "name": "Erlang", "bytes": "10" }, { "name": "FLUX", "bytes": "57" }, { "name": "Groovy", "bytes": "1899897" }, { "name": "J", "bytes": "5050" }, { "name": "Java", "bytes": "128604770" }, { "name": "JavaScript", "bytes": "123045" }, { "name": "Objective-C", "bytes": "19702" }, { "name": "Perl", "bytes": "6549" }, { "name": "Python", "bytes": "17759911" }, { "name": "Ruby", "bytes": "1213" }, { "name": "Shell", "bytes": "45691" }, { "name": "TeX", "bytes": "60798" }, { "name": "XSLT", "bytes": "113531" } ], "symlink_target": "" }
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('poll', '0008_option_requested_by'),
    ]

    operations = [
        migrations.AlterField(
            model_name='vote',
            name='email',
            field=models.EmailField(blank=True, default='', max_length=254),
        ),
    ]
{ "content_hash": "8c5e5a11d56c390fc9c7d4d1244fd3e3", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 76, "avg_line_length": 22.375, "alnum_prop": 0.5726256983240223, "repo_name": "DrDos0016/z2", "id": "e1a4c5663120fff647bed6f6b6cd3a8f45102759", "size": "407", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "poll/migrations/0009_alter_vote_email.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "46276" }, { "name": "HTML", "bytes": "198985" }, { "name": "JavaScript", "bytes": "120902" }, { "name": "Python", "bytes": "297554" } ], "symlink_target": "" }
import numpy import cPickle import theano import theano.tensor as T from mlp.logistic_sgd import LogisticRegression from dA.AutoEncoder import AutoEncoder from SdA import SdA from numpy.linalg import norm from theano.tensor.shared_randomstreams import RandomStreams from extract_datasets import extract_unlabeled_chunkrange from load_shared import load_data_unlabeled from tables import openFile import os import sys import time from datetime import datetime from optparse import OptionParser def test_pickle_SdA(num_epochs=10, pretrain_lr=0.00001, lr_decay = 0.98, batch_size=20): """ Pretrain an SdA model for the given number of training epochs. The model is either initialized from scratch, or is reconstructed from a previously pickled model. :type num_epochs: int :param num_epochs: number of epoch to do pretraining :type pretrain_lr: float :param pretrain_lr: learning rate to be used during pre-training :type batch_size: int :param batch_size: train in mini-batches of this size """ layer_types=['Gaussian','Bernoulli'] current_dir = os.getcwd() os.chdir(options.dir) today = datetime.today() day = str(today.date()) hour = str(today.time()) output_filename = "test_pickle_sda_." + '_'.join([elem for elem in layer_types]) + day + "." + hour output_file = open(output_filename,'w') os.chdir(current_dir) print >> output_file, "Run on " + str(datetime.now()) # Get the training data sample from the input file data_set_file = openFile(str(options.inputfile), mode = 'r') datafiles = extract_unlabeled_chunkrange(data_set_file, num_files = 10) train_set_x = load_data_unlabeled(datafiles, features = (5,20)) data_set_file.close() # compute number of minibatches for training, validation and testing n_train_batches, n_features = train_set_x.get_value(borrow=True).shape n_train_batches /= batch_size # numpy random generator numpy_rng = numpy.random.RandomState(89677) print '... building the model' # Set the initial value of the learning rate learning_rate = theano.shared(numpy.asarray(pretrain_lr, dtype=theano.config.floatX)) # Function to decrease the learning rate decay_learning_rate = theano.function(inputs=[], outputs=learning_rate, updates={learning_rate: learning_rate * lr_decay}) sda_model = SdA(numpy_rng=numpy_rng, n_ins=n_features, hidden_layers_sizes=[5, 5], corruption_levels = [0.25, 0.25], layer_types=layer_types) ######################### # PRETRAINING THE MODEL # ######################### print '... getting the pretraining functions' pretraining_fns = sda_model.pretraining_functions(train_set_x=train_set_x, batch_size=batch_size, learning_rate=learning_rate) #print '... dumping pretraining functions to output file pre pickling' #print >> output_file, 'Pretraining functions, pre pickling' #for i in xrange(sda.n_layers): #theano.printing.debugprint(pretraining_fns[i], file = output_file, print_type=True) print '... 
pre-training the model' start_time = time.clock() ## Pre-train layer-wise corruption_levels = [float(options.corruption), float(options.corruption)] for i in xrange(sda_model.n_layers): for epoch in xrange(num_epochs): # go through the training set c = [] for batch_index in xrange(n_train_batches): c.append(pretraining_fns[i](index=batch_index, corruption=corruption_levels[i])) print >> output_file, 'Pre-training layer %i, epoch %d, cost ' % (i, epoch), print >> output_file, numpy.mean(c) print >> output_file, 'Learning rate ' print >> output_file, learning_rate.get_value(borrow=True) decay_learning_rate() end_time = time.clock() print >> output_file, ('Pretraining time for file ' + os.path.split(__file__)[1] + ' was %.2fm to go through %i epochs' % (((end_time - start_time) / 60.), (num_epochs / 2))) # Pickle the SdA print >> output_file, 'Pickling the model...' f = file(options.savefile, 'wb') cPickle.dump(sda_model, f, protocol=cPickle.HIGHEST_PROTOCOL) f.close() # Unpickle the SdA print >> output_file, 'Unpickling the model...' f = file(options.savefile, 'rb') pickled_sda = cPickle.load(f) f.close() # Test that the W-matrices and biases for the dA layers in sda are all close to the W-matrices # and biases freshly unpickled for i in xrange(pickled_sda.n_layers): pickled_dA_params = pickled_sda.dA_layers[i].get_params() fresh_dA_params = sda_model.dA_layers[i].get_params() if not numpy.allclose(pickled_dA_params[0].get_value(), fresh_dA_params[0].get_value()): print >> output_file, ("numpy says that Ws in layer %i are not close" % (i)) print >> output_file, "Norm for pickled dA " + pickled_dA_params[0].name + ": " print >> output_file, norm(pickled_dA_params[0].get_value()) print >> output_file, "Values for pickled dA " + pickled_dA_params[0].name + ": " print >> output_file, numpy.array_repr(pickled_dA_params[0].get_value()) print >> output_file, "Norm for fresh dA " + fresh_dA_params[0].name + ": " print >> output_file, norm(fresh_dA_params[0].get_value()) print >> output_file, "Values for fresh dA " + fresh_dA_params[0].name + ": " print >> output_file, numpy.array_repr(fresh_dA_params[0].get_value()) if not numpy.allclose(pickled_dA_params[1].get_value(), fresh_dA_params[1].get_value()): print >> output_file, ("numpy says that the biases in layer %i are not close" % (i)) print >> output_file, "Norm for pickled dA " + pickled_dA_params[1].name + ": " print >> output_file, norm(pickled_dA_params[1].get_value()) print >> output_file, "Values for pickled dA " + pickled_dA_params[1].name + ": " print >> output_file, numpy.array_repr(pickled_dA_params[1].get_value()) print >> output_file, "Norm for fresh dA " + fresh_dA_params[1].name + ": " print >> output_file, norm(fresh_dA_params[1].get_value()) print >> output_file, "Values for fresh dA " + pickled_dA_params[1].name + ": " print >> output_file, numpy.array_repr(pickled_dA_params[1].get_value()) output_file.close() def test_unpickle_SdA(num_epochs=10, pretrain_lr=0.001, batch_size=10, lr_decay = 0.98): """ Unpickle an SdA from file, continue pre-training for a given number of epochs. 
:type num_epochs: int :param num_epochs: number of epoch to do pretraining :type pretrain_lr: float :param pretrain_lr: learning rate to be used during pre-training :type batch_size: int :param batch_size: train in mini-batches of this size :type lr_decay: float :param lr_decay: decay the learning rate by this proportion each epoch """ current_dir = os.getcwd() os.chdir(options.dir) today = datetime.today() day = str(today.date()) hour = str(today.time()) output_filename = "test_unpickle_sda_pretrain." + day + "." + hour output_file = open(output_filename,'w') os.chdir(current_dir) print >> output_file, "Run on " + str(datetime.now()) # Get the training data sample from the input file data_set_file = openFile(str(options.inputfile), mode = 'r') datafiles = extract_unlabeled_chunkrange(data_set_file, num_files = 10) train_set_x = load_data_unlabeled(datafiles, features = (5,20)) data_set_file.close() # compute number of minibatches for training, validation and testing n_train_batches, n_features = train_set_x.get_value(borrow=True).shape n_train_batches /= batch_size learning_rate = theano.shared(numpy.asarray(pretrain_lr, dtype=theano.config.floatX)) # Function to decrease the learning rate decay_learning_rate = theano.function(inputs=[], outputs=learning_rate, updates={learning_rate: learning_rate * lr_decay}) # Unpickle the SdA print >> output_file, 'Unpickling the model...' f = file(options.savefile, 'rb') pickled_sda = cPickle.load(f) f.close() # Train for the remaining pretraining_fns = pickled_sda.pretraining_functions(train_set_x=train_set_x, batch_size=batch_size, learning_rate=learning_rate) print >> output_file, 'Resume training...' start_time = time.clock() ## Pre-train layer-wise ## corruption_levels = [float(options.corruption), float(options.corruption)] for i in xrange(pickled_sda.n_layers): for epoch in xrange(num_epochs): # go through the training set c = [] for batch_index in xrange(n_train_batches): c.append(pretraining_fns[i](index=batch_index, corruption=corruption_levels[i])) print >> output_file, 'Pre-training layer %i, epoch %d, cost ' % (i, epoch), print >> output_file, numpy.mean(c) decay_learning_rate() print >> output_file, 'Learning rate ' print >> output_file, learning_rate.get_value(borrow=True) end_time = time.clock() print >> output_file, ('Pretraining time for file ' + os.path.split(__file__)[1] + ' was %.2fm to go through the remaining %i epochs' % (((end_time - start_time) / 60.), (num_epochs / 2))) output_file.close() if __name__ == '__main__': parser = OptionParser() parser.add_option("-d", "--dir", dest="dir", help="test output directory") parser.add_option("-s","--savefile",dest = "savefile", help = "Save the model to this pickle file") parser.add_option("-r","--restorefile",dest = "restorefile", help = "Restore the model from this pickle file") parser.add_option("-i", "--inputfile", dest="inputfile", help="the data (hdf5 file) prepended with an absolute path") parser.add_option("-c", "--corruption", dest="corruption", help="use this amount of corruption for the dA s") (options, args) = parser.parse_args() test_pickle_SdA() test_unpickle_SdA()
{ "content_hash": "332af1f95dca818b02c67ad25c07a113", "timestamp": "", "source": "github", "line_count": 254, "max_line_length": 135, "avg_line_length": 43.06299212598425, "alnum_prop": 0.601115377582739, "repo_name": "lzamparo/SdA_reduce", "id": "717f7440074be278de795e8332a4def61faf56ef", "size": "10938", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "theano_models/SdA/test_pickle_SdA.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "548284" }, { "name": "R", "bytes": "27371" }, { "name": "Shell", "bytes": "36047" } ], "symlink_target": "" }
""" Words/Ladder Graph ------------------ Generate an undirected graph over the 5757 5-letter words in the datafile words_dat.txt.gz. Two words are connected by an edge if they differ in one letter, resulting in 14,135 edges. This example is described in Section 1.1 in Knuth's book [1]_,[2]_. References ---------- .. [1] Donald E. Knuth, "The Stanford GraphBase: A Platform for Combinatorial Computing", ACM Press, New York, 1993. .. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html """ # Authors: Aric Hagberg (hagberg@lanl.gov), # Brendt Wohlberg, # hughdbrown@yahoo.com # Copyright (C) 2004-2016 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. import networkx as nx #------------------------------------------------------------------- # The Words/Ladder graph of Section 1.1 #------------------------------------------------------------------- def generate_graph(words): from string import ascii_lowercase as lowercase G = nx.Graph(name="words") lookup = dict((c,lowercase.index(c)) for c in lowercase) def edit_distance_one(word): for i in range(len(word)): left, c, right = word[0:i], word[i], word[i+1:] j = lookup[c] # lowercase.index(c) for cc in lowercase[j+1:]: yield left + cc + right candgen = ((word, cand) for word in sorted(words) for cand in edit_distance_one(word) if cand in words) G.add_nodes_from(words) for word, cand in candgen: G.add_edge(word, cand) return G def words_graph(): """Return the words example graph from the Stanford GraphBase""" import gzip fh=gzip.open('words_dat.txt.gz','r') words=set() for line in fh.readlines(): line = line.decode() if line.startswith('*'): continue w=str(line[0:5]) words.add(w) return generate_graph(words) if __name__ == '__main__': from networkx import * G=words_graph() print("Loaded words_dat.txt containing 5757 five-letter English words.") print("Two words are connected if they differ in one letter.") print("Graph has %d nodes with %d edges" %(number_of_nodes(G),number_of_edges(G))) print("%d connected components" % number_connected_components(G)) for (source,target) in [('chaos','order'), ('nodes','graph'), ('moron','smart'), ('pound','marks')]: print("Shortest path between %s and %s is"%(source,target)) try: sp=shortest_path(G, source, target) for n in sp: print(n) except nx.NetworkXNoPath: print("None") for c in sorted(nx.connected_components(G)): print c
{ "content_hash": "1c2198b1228531d93f15663d01d9eebd", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 76, "avg_line_length": 34.36904761904762, "alnum_prop": 0.5656390717007274, "repo_name": "elawson668/CSCI-2963", "id": "3cf815d5f984d588416bec8174cc496a96126549", "size": "2887", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Lab 6/words.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "17830" }, { "name": "C++", "bytes": "27034" }, { "name": "CMake", "bytes": "39713" }, { "name": "Makefile", "bytes": "125969" }, { "name": "Objective-C", "bytes": "93" }, { "name": "Python", "bytes": "10039" }, { "name": "R", "bytes": "264" }, { "name": "Tcl", "bytes": "2883" } ], "symlink_target": "" }
"""Tests for GNU Compiler Collection""" def tests_compiler(): """Test Compiler""" import platform import subprocess from compilertools.compilers._core import _get_arch_and_cpu from compilertools.compilers.gcc import Compiler cmd = { "python": "", "--version": "", "-dumpversion": "", "not_found": False, } def dummy_compiler(): """Force version""" return cmd["python"] def run(*popenargs, check=False, **_): """Mocked subprocess.run""" args = popenargs[0] if cmd["not_found"]: raise FileNotFoundError try: stdout = cmd[args[1]] return_code = 0 except KeyError: stdout = "" return_code = 1 if check: raise subprocess.CalledProcessError(return_code, args) return subprocess.CompletedProcess(args, return_code, stdout) platform_python_compiler = platform.python_compiler platform.python_compiler = dummy_compiler subprocess_run = subprocess.run subprocess.run = run try: compiler = Compiler(current_compiler=True) # Check not existing version assert compiler.python_build_version == 0.0 assert compiler.version == 0.0 # Check existing version cmd["python"] = "GCC 6.3.1 64bit" cmd["--version"] = "gcc (GCC) 6.3.1\n..." cmd["-dumpversion"] = "6.3.1" del compiler["python_build_version"] del compiler["version"] assert compiler.python_build_version == 6.3 assert compiler.version == 6.3 cmd["--version"] = "gcc (GCC) 10.2.1 20200723 (Red Hat 10.2.1-1)\n..." cmd["-dumpversion"] = "10" cmd["-dumpfullversion"] = "10.2.1" del compiler["version"] assert compiler.version == 10.2 # Not current compiler assert Compiler().version == 0.0 # Test Error del compiler["version"] cmd["not_found"] = True assert compiler.version == 0.0 # Initialize system configurations compiler["version"] = 6.3 arch_x86, cpu_x86 = _get_arch_and_cpu("x86_32") arch_amd64, cpu_amd64 = _get_arch_and_cpu("x86_64") # Test API/Options assert len(compiler.api) > 0 assert len(compiler.option) > 0 # Test _compile_args_matrix assert compiler._compile_args_matrix(arch_x86, cpu_x86) assert compiler._compile_args_matrix(arch_amd64, cpu_amd64) # Test _compile_args_current_machine with x86 args = compiler._compile_args_current_machine(arch_x86, cpu_x86) assert args assert "-march=native" in args # Check return a result also with amd64 assert compiler._compile_args_current_machine(arch_amd64, cpu_amd64) # Check -mfpmath with or without SSE cpu_x86["features"] = ["SSE"] args = compiler._compile_args_current_machine(arch_x86, cpu_x86) assert "-mfpmath=sse" in args cpu_x86["features"] = [] args = compiler._compile_args_current_machine(arch_x86, cpu_x86) assert "-mfpmath=sse" not in args finally: platform.python_compiler = platform_python_compiler subprocess.run = subprocess_run def tests_compiler_gcc_command(): """Test Compiler if CC/GCC command available""" from subprocess import Popen, PIPE try: version_str = ( Popen(["gcc", "--version"], stdout=PIPE, universal_newlines=True) .stdout.read() .lower() ) except OSError: from pytest import skip version_str = "" skip("GCC not available") from compilertools.compilers.gcc import Compiler assert Compiler(current_compiler=True).version != 0.0 or "gcc" not in version_str
{ "content_hash": "36868f405b74199f63aff19abf956fa3", "timestamp": "", "source": "github", "line_count": 125, "max_line_length": 85, "avg_line_length": 30.752, "alnum_prop": 0.5907908428720083, "repo_name": "JGoutin/compilertools", "id": "32beade2899c094cbe0fcef953e3d2d4edcc81da", "size": "3844", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_compilers_gcc.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "144292" } ], "symlink_target": "" }
import base64 import logging import os import re import zlib from babelfish import Language, language_converters from guessit import guess_episode_info, guess_movie_info from six.moves.xmlrpc_client import ServerProxy from . import Provider, TimeoutSafeTransport, get_version from .. import __version__ from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded, ProviderError from ..subtitle import Subtitle, fix_line_ending, guess_matches, sanitized_string_equal from ..video import Episode, Movie logger = logging.getLogger(__name__) class OpenSubtitlesSubtitle(Subtitle): provider_name = 'opensubtitles' series_re = re.compile('^"(?P<series_name>.*)" (?P<series_title>.*)$') def __init__(self, language, hearing_impaired, page_link, subtitle_id, matched_by, movie_kind, hash, movie_name, movie_release_name, movie_year, movie_imdb_id, series_season, series_episode, encoding): super(OpenSubtitlesSubtitle, self).__init__(language, hearing_impaired, page_link, encoding) self.subtitle_id = subtitle_id self.matched_by = matched_by self.movie_kind = movie_kind self.hash = hash self.movie_name = movie_name self.movie_release_name = movie_release_name self.movie_year = movie_year self.movie_imdb_id = movie_imdb_id self.series_season = series_season self.series_episode = series_episode @property def id(self): return str(self.subtitle_id) @property def series_name(self): return self.series_re.match(self.movie_name).group('series_name') @property def series_title(self): return self.series_re.match(self.movie_name).group('series_title') def get_matches(self, video, hearing_impaired=False): matches = super(OpenSubtitlesSubtitle, self).get_matches(video, hearing_impaired=hearing_impaired) # episode if isinstance(video, Episode) and self.movie_kind == 'episode': # series if video.series and sanitized_string_equal(self.series_name, video.series): matches.add('series') # season if video.season and self.series_season == video.season: matches.add('season') # episode if video.episode and self.series_episode == video.episode: matches.add('episode') # title if video.title and sanitized_string_equal(self.series_title, video.title): matches.add('title') # guess matches |= guess_matches(video, guess_episode_info(self.movie_release_name + '.mkv')) # movie elif isinstance(video, Movie) and self.movie_kind == 'movie': # title if video.title and sanitized_string_equal(self.movie_name, video.title): matches.add('title') # year if video.year and self.movie_year == video.year: matches.add('year') # guess matches |= guess_matches(video, guess_movie_info(self.movie_release_name + '.mkv')) else: logger.info('%r is not a valid movie_kind', self.movie_kind) return matches # hash if 'opensubtitles' in video.hashes and self.hash == video.hashes['opensubtitles']: matches.add('hash') # imdb_id if video.imdb_id and self.movie_imdb_id == video.imdb_id: matches.add('imdb_id') return matches class OpenSubtitlesProvider(Provider): languages = {Language.fromopensubtitles(l) for l in language_converters['opensubtitles'].codes} def __init__(self, username=None, password=None): self.server = ServerProxy('https://api.opensubtitles.org/xml-rpc', TimeoutSafeTransport(10)) if username and not password or not username and password: raise ConfigurationError('Username and password must be specified') # None values not allowed for logging in, so replace it by '' self.username = username or '' self.password = password or '' self.token = None def initialize(self): logger.info('Logging in') response = 
checked(self.server.LogIn(self.username, self.password, 'eng', 'subliminal v%s' % get_version(__version__))) self.token = response['token'] logger.debug('Logged in with token %r', self.token) def terminate(self): logger.info('Logging out') checked(self.server.LogOut(self.token)) self.server.close() self.token = None logger.debug('Logged out') def no_operation(self): logger.debug('No operation') checked(self.server.NoOperation(self.token)) def query(self, languages, hash=None, size=None, imdb_id=None, query=None, season=None, episode=None): # fill the search criteria criteria = [] if hash and size: criteria.append({'moviehash': hash, 'moviebytesize': str(size)}) if imdb_id: criteria.append({'imdbid': imdb_id}) if query and season and episode: criteria.append({'query': query.replace('\'', ''), 'season': season, 'episode': episode}) elif query: criteria.append({'query': query.replace('\'', '')}) if not criteria: raise ValueError('Not enough information') # add the language for criterion in criteria: criterion['sublanguageid'] = ','.join(sorted(l.opensubtitles for l in languages)) # query the server logger.info('Searching subtitles %r', criteria) response = checked(self.server.SearchSubtitles(self.token, criteria)) subtitles = [] # exit if no data if not response['data']: logger.debug('No subtitles found') return subtitles # loop over subtitle items for subtitle_item in response['data']: # read the item language = Language.fromopensubtitles(subtitle_item['SubLanguageID']) hearing_impaired = bool(int(subtitle_item['SubHearingImpaired'])) page_link = subtitle_item['SubtitlesLink'] subtitle_id = int(subtitle_item['IDSubtitleFile']) matched_by = subtitle_item['MatchedBy'] movie_kind = subtitle_item['MovieKind'] hash = subtitle_item['MovieHash'] movie_name = subtitle_item['MovieName'] movie_release_name = subtitle_item['MovieReleaseName'] movie_year = int(subtitle_item['MovieYear']) if subtitle_item['MovieYear'] else None movie_imdb_id = int(subtitle_item['IDMovieImdb']) series_season = int(subtitle_item['SeriesSeason']) if subtitle_item['SeriesSeason'] else None series_episode = int(subtitle_item['SeriesEpisode']) if subtitle_item['SeriesEpisode'] else None encoding = subtitle_item.get('SubEncoding') or None subtitle = OpenSubtitlesSubtitle(language, hearing_impaired, page_link, subtitle_id, matched_by, movie_kind, hash, movie_name, movie_release_name, movie_year, movie_imdb_id, series_season, series_episode, encoding) logger.debug('Found subtitle %r', subtitle) subtitles.append(subtitle) return subtitles def list_subtitles(self, video, languages): query = season = episode = None if isinstance(video, Episode): query = video.series season = video.season episode = video.episode elif ('opensubtitles' not in video.hashes or not video.size) and not video.imdb_id: query = video.name.split(os.sep)[-1] return self.query(languages, hash=video.hashes.get('opensubtitles'), size=video.size, imdb_id=video.imdb_id, query=query, season=season, episode=episode) def download_subtitle(self, subtitle): logger.info('Downloading subtitle %r', subtitle) response = checked(self.server.DownloadSubtitles(self.token, [str(subtitle.subtitle_id)])) subtitle.content = fix_line_ending(zlib.decompress(base64.b64decode(response['data'][0]['data']), 47)) class OpenSubtitlesError(ProviderError): """Base class for non-generic :class:`OpenSubtitlesProvider` exceptions.""" pass class Unauthorized(OpenSubtitlesError, AuthenticationError): """Exception raised when status is '401 Unauthorized'.""" pass class 
NoSession(OpenSubtitlesError, AuthenticationError): """Exception raised when status is '406 No session'.""" pass class DownloadLimitReached(OpenSubtitlesError, DownloadLimitExceeded): """Exception raised when status is '407 Download limit reached'.""" pass class InvalidImdbid(OpenSubtitlesError): """Exception raised when status is '413 Invalid ImdbID'.""" pass class UnknownUserAgent(OpenSubtitlesError, AuthenticationError): """Exception raised when status is '414 Unknown User Agent'.""" pass class DisabledUserAgent(OpenSubtitlesError, AuthenticationError): """Exception raised when status is '415 Disabled user agent'.""" pass class ServiceUnavailable(OpenSubtitlesError): """Exception raised when status is '503 Service Unavailable'.""" pass def checked(response): """Check a response status before returning it. :param response: a response from a XMLRPC call to OpenSubtitles. :return: the response. :raise: :class:`OpenSubtitlesError` """ status_code = int(response['status'][:3]) if status_code == 401: raise Unauthorized if status_code == 406: raise NoSession if status_code == 407: raise DownloadLimitReached if status_code == 413: raise InvalidImdbid if status_code == 414: raise UnknownUserAgent if status_code == 415: raise DisabledUserAgent if status_code == 503: raise ServiceUnavailable if status_code != 200: raise OpenSubtitlesError(response['status']) return response
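For illustration, the provider above could be exercised on its own roughly as follows. This is a sketch only: it assumes network access, that an anonymous login is accepted by the service, and the query string is made up.

from babelfish import Language

provider = OpenSubtitlesProvider()          # anonymous session (no credentials)
provider.initialize()                       # logs in and stores the token
try:
    # Query by title only; hash/size or imdb_id criteria work the same way.
    subtitles = provider.query({Language('eng')}, query='some movie title')
    if subtitles:
        provider.download_subtitle(subtitles[0])   # fills subtitle.content
finally:
    provider.terminate()                    # logs out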
{ "content_hash": "9a89c5ce5f253327c5337f1d367c6af6", "timestamp": "", "source": "github", "line_count": 261, "max_line_length": 120, "avg_line_length": 38.980842911877396, "alnum_prop": 0.6370159229408295, "repo_name": "t4lwh/subliminal", "id": "5a06a23b84cdc0f59a175f9e2789b1a1ee06aa28", "size": "10198", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "subliminal/providers/opensubtitles.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "227146" } ], "symlink_target": "" }
""" Numba Extensions #################### The `numba`_ package provides a mechanism for compiling Python code. With appropriate options, compilation can provide massive speedups to standard Python code. This module provides reasonable compilation options as well as a utility for compiling symbolic (or string) functions to 'numbafied' functions. See Also: `sympy`_, `symengine`_, and the compilation engine `numba`_ .. _sympy: http://docs.sympy.org/latest/index.html .. _symengine: https://github.com/symengine/symengine .. _numba: http://numba.pydata.org/ """ import numpy as np import sympy as sy import numba as nb from warnings import warn from platform import system from sympy.utilities.lambdify import NUMPY_TRANSLATIONS, NUMPY_DEFAULT npvars = vars(np) npvars.update(NUMPY_DEFAULT) npvars.update({k: getattr(np, v) for k, v in NUMPY_TRANSLATIONS.items()}) if "linux" in system().lower(): jitkwargs = dict(nopython=True, nogil=True, parallel=True) veckwargs = dict(nopython=True, target="parallel") else: jitkwargs = dict(nopython=True, nogil=True, parallel=False, cache=True) veckwargs = dict(nopython=True, target="cpu") def numbafy(fn, args, compiler="jit", **nbkws): """ Compile a string, sympy expression or symengine expression using numba. Not all functions are supported by Python's numerical package (numpy). For difficult cases, valid Python code (as string) may be more suitable than symbolic expressions coming from sympy, symengine, etc. When compiling vectorized functions, include valid signatures (see `numba`_ documentation). Args: fn: Symbolic expression as sympy/symengine expression or string args (iterable): Symbolic arguments compiler: String name or callable numba compiler nbkws: Compiler keyword arguments (if none provided, smart defaults are used) Returns: func: Compiled function Warning: For vectorized functions, valid signatures are (almost always) required. """ kwargs = {} # Numba kwargs to be updated by user if not isinstance(args, (tuple, list)): args = (args, ) # Parameterize compiler if isinstance(compiler, str): compiler_ = getattr(nb, compiler, None) if compiler is None: raise AttributeError("No numba function with name {}.".format(compiler)) compiler = compiler_ if compiler in (nb.jit, nb.njit): kwargs.update(jitkwargs) sig = nbkws.pop("signature", None) else: kwargs.update(veckwargs) sig = nbkws.pop("signatures", None) if sig is None: warn("Vectorization without 'signatures' can lead to wrong results!") kwargs.update(nbkws) # Expand sympy expressions and create string for eval if isinstance(fn, sy.Expr): fn = sy.expand_func(fn) func = sy.lambdify(args, fn, modules='numpy') # Machine code compilation if sig is None: try: func = compiler(**kwargs)(func) except RuntimeError: kwargs['cache'] = False func = compiler(**kwargs)(func) else: try: func = compiler(sig, **kwargs)(func) except RuntimeError: kwargs['cache'] = False func = compiler(sig, **kwargs)(func) return func
{ "content_hash": "62172de7d860f7cd36c8fea89d116200", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 85, "avg_line_length": 36.108695652173914, "alnum_prop": 0.6709813365442504, "repo_name": "exa-analytics/exatomic", "id": "c4b9daf010306429c9d0bbf0943425fb9bc646b5", "size": "3460", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "exatomic/exa/util/nbvars.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "762" }, { "name": "JavaScript", "bytes": "121101" }, { "name": "Jupyter Notebook", "bytes": "13176" }, { "name": "Python", "bytes": "1084816" }, { "name": "Shell", "bytes": "711" }, { "name": "TypeScript", "bytes": "953" } ], "symlink_target": "" }
from sanic import Sanic
from sanic.response import text

app = Sanic(__name__)


@app.route("/")
async def hello(request):
    return text("Hello world!")

app.run(host="0.0.0.0", port=8080)
{ "content_hash": "ca61193e78f195ab0041e44dfc7c3087", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 34, "avg_line_length": 17.363636363636363, "alnum_prop": 0.675392670157068, "repo_name": "squeaky-pl/japronto", "id": "d2e8272896a4540263b1056cef14ffc46f9d152a", "size": "191", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "benchmarks/sanic/micro.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "149858" }, { "name": "Dockerfile", "bytes": "97" }, { "name": "Go", "bytes": "668" }, { "name": "JavaScript", "bytes": "387" }, { "name": "Lua", "bytes": "915" }, { "name": "Python", "bytes": "143655" }, { "name": "Shell", "bytes": "2420" } ], "symlink_target": "" }
from sft.drivers.tcp.driver import TCPDriver
{ "content_hash": "965c9af5b7a1b1da14eea41bde9028f7", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 44, "avg_line_length": 45, "alnum_prop": 0.8444444444444444, "repo_name": "AlexeiBuzuma/LocalComputeNetworks", "id": "2932975897374d3784f1673a45046f0eef005a66", "size": "45", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sft/drivers/tcp/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "95802" } ], "symlink_target": "" }
import unittest

import_error = False
try:
    from ...tables.base import Base
except ImportError:
    import_error = True
    Base = None


class TestCase00(unittest.TestCase):
    def test_import(self):
        self.assertFalse(import_error)


class TestCase01(unittest.TestCase):
    def setUp(self):
        self.base = Base()

    def test_init(self):
        pass
{ "content_hash": "c09b303a0085ca789a42ba5692094642", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 38, "avg_line_length": 17.61904761904762, "alnum_prop": 0.6621621621621622, "repo_name": "AdamGagorik/pydarkstar", "id": "748f150351462b80a0468fee6d82c98c1689d919", "size": "370", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pydarkstar/tests/tables/test_base.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "532" }, { "name": "Python", "bytes": "119228" } ], "symlink_target": "" }
""" maxminddb.reader ~~~~~~~~~~~~~~~~ This module contains the pure Python database reader and related classes. """ from __future__ import unicode_literals try: import mmap except ImportError: # pylint: disable=invalid-name mmap = None import struct from maxminddb.compat import (byte_from_int, compat_ip_address, string_type, string_type_name) from maxminddb.const import MODE_AUTO, MODE_MMAP, MODE_FILE, MODE_MEMORY, MODE_FD from maxminddb.decoder import Decoder from maxminddb.errors import InvalidDatabaseError from maxminddb.file import FileBuffer class Reader(object): """ Instances of this class provide a reader for the MaxMind DB format. IP addresses can be looked up using the ``get`` method. """ _DATA_SECTION_SEPARATOR_SIZE = 16 _METADATA_START_MARKER = b"\xAB\xCD\xEFMaxMind.com" _ipv4_start = None def __init__(self, database, mode=MODE_AUTO): """Reader for the MaxMind DB file format Arguments: database -- A path to a valid MaxMind DB file such as a GeoIP2 database file, or a file descriptor in the case of MODE_FD. mode -- mode to open the database with. Valid mode are: * MODE_MMAP - read from memory map. * MODE_FILE - read database as standard file. * MODE_MEMORY - load database into memory. * MODE_AUTO - tries MODE_MMAP and then MODE_FILE. Default. * MODE_FD - the param passed via database is a file descriptor, not a path. This mode implies MODE_MEMORY. """ if (mode == MODE_AUTO and mmap) or mode == MODE_MMAP: with open(database, 'rb') as db_file: self._buffer = mmap.mmap( db_file.fileno(), 0, access=mmap.ACCESS_READ) self._buffer_size = self._buffer.size() filename = database elif mode in (MODE_AUTO, MODE_FILE): self._buffer = FileBuffer(database) self._buffer_size = self._buffer.size() filename = database elif mode == MODE_MEMORY: with open(database, 'rb') as db_file: self._buffer = db_file.read() self._buffer_size = len(self._buffer) filename = database elif mode == MODE_FD: self._buffer = database.read() self._buffer_size = len(self._buffer) filename = database.name else: raise ValueError( 'Unsupported open mode ({0}). Only MODE_AUTO, MODE_FILE, ' 'MODE_MEMORY and MODE_FD are supported by the pure Python ' 'Reader'.format(mode)) metadata_start = self._buffer.rfind( self._METADATA_START_MARKER, max(0, self._buffer_size - 128 * 1024)) if metadata_start == -1: self.close() raise InvalidDatabaseError('Error opening database file ({0}). ' 'Is this a valid MaxMind DB file?' ''.format(filename)) metadata_start += len(self._METADATA_START_MARKER) metadata_decoder = Decoder(self._buffer, metadata_start) (metadata, _) = metadata_decoder.decode(metadata_start) self._metadata = Metadata(**metadata) # pylint: disable=bad-option-value self._decoder = Decoder(self._buffer, self._metadata.search_tree_size + self._DATA_SECTION_SEPARATOR_SIZE) self.closed = False def metadata(self): """Return the metadata associated with the MaxMind DB file""" return self._metadata def get(self, ip_address): """Return the record for the ip_address in the MaxMind DB Arguments: ip_address -- an IP address in the standard string notation """ if not isinstance(ip_address, string_type): raise TypeError('argument 1 must be %s, not %s' % (string_type_name, type(ip_address).__name__)) address = compat_ip_address(ip_address) if address.version == 6 and self._metadata.ip_version == 4: raise ValueError( 'Error looking up {0}. 
You attempted to look up ' 'an IPv6 address in an IPv4-only database.'.format(ip_address)) pointer = self._find_address_in_tree(address) return self._resolve_data_pointer(pointer) if pointer else None def _find_address_in_tree(self, ip_address): packed = bytearray(ip_address.packed) bit_count = len(packed) * 8 node = self._start_node(bit_count) for i in range(bit_count): if node >= self._metadata.node_count: break bit = 1 & (packed[i >> 3] >> 7 - (i % 8)) node = self._read_node(node, bit) if node == self._metadata.node_count: # Record is empty return 0 elif node > self._metadata.node_count: return node raise InvalidDatabaseError('Invalid node in search tree') def _start_node(self, length): if self._metadata.ip_version != 6 or length == 128: return 0 # We are looking up an IPv4 address in an IPv6 tree. Skip over the # first 96 nodes. if self._ipv4_start: return self._ipv4_start node = 0 for _ in range(96): if node >= self._metadata.node_count: break node = self._read_node(node, 0) self._ipv4_start = node return node def _read_node(self, node_number, index): base_offset = node_number * self._metadata.node_byte_size record_size = self._metadata.record_size if record_size == 24: offset = base_offset + index * 3 node_bytes = b'\x00' + self._buffer[offset:offset + 3] elif record_size == 28: (middle, ) = struct.unpack( b'!B', self._buffer[base_offset + 3:base_offset + 4]) if index: middle &= 0x0F else: middle = (0xF0 & middle) >> 4 offset = base_offset + index * 4 node_bytes = byte_from_int(middle) + self._buffer[offset:offset + 3] elif record_size == 32: offset = base_offset + index * 4 node_bytes = self._buffer[offset:offset + 4] else: raise InvalidDatabaseError( 'Unknown record size: {0}'.format(record_size)) return struct.unpack(b'!I', node_bytes)[0] def _resolve_data_pointer(self, pointer): resolved = pointer - self._metadata.node_count + \ self._metadata.search_tree_size if resolved > self._buffer_size: raise InvalidDatabaseError( "The MaxMind DB file's search tree is corrupt") (data, _) = self._decoder.decode(resolved) return data def close(self): """Closes the MaxMind DB file and returns the resources to the system""" # pylint: disable=unidiomatic-typecheck if type(self._buffer) not in (str, bytes): self._buffer.close() self.closed = True def __exit__(self, *args): self.close() def __enter__(self): if self.closed: raise ValueError('Attempt to reopen a closed MaxMind DB') return self class Metadata(object): """Metadata for the MaxMind DB reader .. attribute:: binary_format_major_version The major version number of the binary format used when creating the database. :type: int .. attribute:: binary_format_minor_version The minor version number of the binary format used when creating the database. :type: int .. attribute:: build_epoch The Unix epoch for the build time of the database. :type: int .. attribute:: database_type A string identifying the database type, e.g., "GeoIP2-City". :type: str .. attribute:: description A map from locales to text descriptions of the database. :type: dict(str, str) .. attribute:: ip_version The IP version of the data in a database. A value of "4" means the database only supports IPv4. A database with a value of "6" may support both IPv4 and IPv6 lookups. :type: int .. attribute:: languages A list of locale codes supported by the databse. :type: list(str) .. attribute:: node_count The number of nodes in the database. :type: int .. attribute:: record_size The bit size of a record in the search tree. 
:type: int """ # pylint: disable=too-many-instance-attributes def __init__(self, **kwargs): """Creates new Metadata object. kwargs are key/value pairs from spec""" # Although I could just update __dict__, that is less obvious and it # doesn't work well with static analysis tools and some IDEs self.node_count = kwargs['node_count'] self.record_size = kwargs['record_size'] self.ip_version = kwargs['ip_version'] self.database_type = kwargs['database_type'] self.languages = kwargs['languages'] self.binary_format_major_version = kwargs[ 'binary_format_major_version'] self.binary_format_minor_version = kwargs[ 'binary_format_minor_version'] self.build_epoch = kwargs['build_epoch'] self.description = kwargs['description'] @property def node_byte_size(self): """The size of a node in bytes :type: int """ return self.record_size // 4 @property def search_tree_size(self): """The size of the search tree :type: int """ return self.node_count * self.node_byte_size def __repr__(self): args = ', '.join('%s=%r' % x for x in self.__dict__.items()) return '{module}.{class_name}({data})'.format( module=self.__module__, class_name=self.__class__.__name__, data=args)
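# Usage sketch for the pure-Python Reader defined above. The .mmdb path and the
# looked-up IP address are placeholder values; any valid MaxMind DB file works.
if __name__ == '__main__':
    reader = Reader('GeoLite2-City.mmdb', mode=MODE_AUTO)  # hypothetical database path
    record = reader.get('128.101.101.101')  # record dict, or None if the IP is not in the database
    print(reader.metadata().database_type)
    reader.close()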
{ "content_hash": "6d9d5d2628caec12d291ce6abd51b436", "timestamp": "", "source": "github", "line_count": 309, "max_line_length": 81, "avg_line_length": 32.9126213592233, "alnum_prop": 0.5749262536873156, "repo_name": "m-lab/mlab-ns", "id": "3761b7d7f7ef42b979354927a51c0ab091315926", "size": "10170", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "server/mlabns/third_party/geoip2/maxminddb/reader.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "14542" }, { "name": "Dockerfile", "bytes": "877" }, { "name": "HTML", "bytes": "36040" }, { "name": "JavaScript", "bytes": "59657" }, { "name": "Python", "bytes": "1475831" }, { "name": "Shell", "bytes": "747" } ], "symlink_target": "" }
""" json_print.utils ---------------- Shared utilities and functions for json_print """ from collections import Mapping from json import load def deep_update(base_dictn, update_dictn): """Nested merge of 2 Python dictionaries.""" for k, v in update_dictn.items(): if isinstance(base_dictn, Mapping): if isinstance(v, Mapping): r = deep_update(base_dictn.get(k, {}), v) base_dictn[k] = r else: base_dictn[k] = update_dictn[k] else: base_dictn = {k: update_dictn[k]} return base_dictn def dictionary_from_json(json_filename): """Read the referenced JSON file and return as a Python dictionary.""" with open(json_filename, encoding='utf-8 ') as f: trello_dictionary = load(f) return trello_dictionary
{ "content_hash": "5827db4397b7a235bc94147cf4d670ef", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 74, "avg_line_length": 29.785714285714285, "alnum_prop": 0.6019184652278178, "repo_name": "Kilo59/json_printer", "id": "3aba10b4758226712a39f71f3a0a2f29a7eaf480", "size": "834", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "json_print/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "8948" } ], "symlink_target": "" }
""" This modules provides functions to calculate correlations between spike trains. :copyright: Copyright 2015-2016 by the Elephant team, see `doc/authors.rst`. :license: Modified BSD, see LICENSE.txt for details. """ from __future__ import division, print_function, unicode_literals import warnings import neo import numpy as np import quantities as pq import scipy.signal from scipy import integrate from elephant.utils import deprecated_alias __all__ = [ "covariance", "correlation_coefficient", "cross_correlation_histogram", "spike_time_tiling_coefficient", "spike_train_timescale" ] # The highest sparsity of the `BinnedSpikeTrain` matrix for which # memory-efficient (sparse) implementation of `covariance()` is faster than # with the corresponding numpy dense array. _SPARSITY_MEMORY_EFFICIENT_THR = 0.1 class _CrossCorrHist(object): """ Cross-correlation histogram for `BinnedSpikeTrain`s. This class is used inside :func:`cross_correlation_histogram` function and is not meant to be used outside of it. Parameters ---------- binned_spiketrain_i, binned_spiketrain_j : elephant.conversion.BinnedSpikeTrain Binned spike trains to cross-correlate. The two spike trains must have the same `t_start` and `t_stop`. window : list or tuple List of integers - (left_edge, right_edge). Refer to the docs of `cross_correlation_histogram()`. """ def __init__(self, binned_spiketrain_i, binned_spiketrain_j, window): self.binned_spiketrain_i = binned_spiketrain_i self.binned_spiketrain_j = binned_spiketrain_j self.window = window @staticmethod def get_valid_lags(binned_spiketrain_i, binned_spiketrain_j): """ Computes the lags at which the cross-correlation of the input spiketrains can be calculated with full overlap. Parameters ---------- binned_spiketrain_i, binned_spiketrain_j : elephant.conversion.BinnedSpikeTrain Binned spike trains to cross-correlate. The input spike trains can have any `t_start` and `t_stop`. Returns ------- lags : np.ndarray Array of lags at which the cross-correlation can be computed at full overlap (valid mode). """ bin_size = binned_spiketrain_i._bin_size # see cross_correlation_histogram for the examples if binned_spiketrain_i.n_bins < binned_spiketrain_j.n_bins: # ex. 1) lags range: [-2, 5] ms # ex. 2) lags range: [1, 2] ms left_edge = (binned_spiketrain_j._t_start - binned_spiketrain_i._t_start) / bin_size right_edge = (binned_spiketrain_j._t_stop - binned_spiketrain_i._t_stop) / bin_size else: # ex. 3) lags range: [-1, 3] ms left_edge = (binned_spiketrain_j._t_stop - binned_spiketrain_i._t_stop) / bin_size right_edge = (binned_spiketrain_j._t_start - binned_spiketrain_i._t_start) / bin_size right_edge = int(right_edge) left_edge = int(left_edge) lags = np.arange(left_edge, right_edge + 1, dtype=np.int32) return lags def correlate_memory(self, cch_mode): """ Slow, but memory-safe mode. Return ------- cross_corr : np.ndarray Cross-correlation of `self.binned_spiketrain1` and `self.binned_spiketrain2`. 
""" st1_spmat = self.binned_spiketrain_i.sparse_matrix st2_spmat = self.binned_spiketrain_j.sparse_matrix left_edge, right_edge = self.window # extract the nonzero column indices of 1-d matrices st1_bin_idx_unique = st1_spmat.nonzero()[1] st2_bin_idx_unique = st2_spmat.nonzero()[1] # 'valid' mode requires bins correction due to the shift in t_starts # 'full' and 'pad' modes don't need this correction if cch_mode == "valid": if self.binned_spiketrain_i.n_bins > \ self.binned_spiketrain_j.n_bins: st2_bin_idx_unique += right_edge else: st2_bin_idx_unique += left_edge st1_spmat = st1_spmat.data st2_spmat = st2_spmat.data # Initialize the counts to an array of zeros, # and the bin IDs to integers # spanning the time axis nr_lags = right_edge - left_edge + 1 cross_corr = np.zeros(nr_lags) # Compute the CCH at lags in left_edge,...,right_edge only for idx, i in enumerate(st1_bin_idx_unique): il = np.searchsorted(st2_bin_idx_unique, left_edge + i) ir = np.searchsorted(st2_bin_idx_unique, right_edge + i, side='right') timediff = st2_bin_idx_unique[il:ir] - i assert ((timediff >= left_edge) & ( timediff <= right_edge)).all(), 'Not all the ' 'entries of cch lie in the window' cross_corr[timediff - left_edge] += ( st1_spmat[idx] * st2_spmat[il:ir]) st2_bin_idx_unique = st2_bin_idx_unique[il:] st2_spmat = st2_spmat[il:] return cross_corr def correlate_speed(self, cch_mode): """ Fast, but might require a lot of memory. Parameters ---------- cch_mode : str Cross-correlation mode. Returns ------- cross_corr : np.ndarray Cross-correlation of `self.binned_spiketrain1` and `self.binned_spiketrain2`. """ # Retrieve the array of the binned spike trains st1_arr = self.binned_spiketrain_i.to_array()[0] st2_arr = self.binned_spiketrain_j.to_array()[0] left_edge, right_edge = self.window if cch_mode == 'pad': # Zero padding to stay between left_edge and right_edge pad_width = min(max(-left_edge, 0), max(right_edge, 0)) st2_arr = np.pad(st2_arr, pad_width=pad_width, mode='constant') cch_mode = 'valid' # Cross correlate the spike trains cross_corr = scipy.signal.fftconvolve(st2_arr, st1_arr[::-1], mode=cch_mode) # convolution of integers is integers cross_corr = np.round(cross_corr) return cross_corr def border_correction(self, cross_corr): """ Parameters ---------- cross_corr : np.ndarray Cross-correlation array. The output of `self.correlate_speed()` or `self.correlate_memory()`. Returns ------- np.ndarray Cross-correlation array with the border correction applied. """ min_num_bins = min(self.binned_spiketrain_i.n_bins, self.binned_spiketrain_j.n_bins) left_edge, right_edge = self.window valid_lags = _CrossCorrHist.get_valid_lags(self.binned_spiketrain_i, self.binned_spiketrain_j) lags_to_compute = np.arange(left_edge, right_edge + 1) outer_subtraction = np.subtract.outer(lags_to_compute, valid_lags) min_distance_from_window = np.abs(outer_subtraction).min(axis=1) n_values_fall_in_window = min_num_bins - min_distance_from_window correction = float(min_num_bins) / n_values_fall_in_window return cross_corr * correction def cross_correlation_coefficient(self, cross_corr): """ Normalizes the CCH to obtain the cross-correlation coefficient function, ranging from -1 to 1. Parameters ---------- cross_corr : np.ndarray Cross-correlation array. The output of `self.correlate_speed()` or `self.correlate_memory()`. Notes ----- See Notes in `cross_correlation_histogram()`. Returns ------- np.ndarray Normalized cross-correlation array in range `[-1, 1]`. 
""" max_num_bins = max(self.binned_spiketrain_i.n_bins, self.binned_spiketrain_j.n_bins) n_spikes1 = self.binned_spiketrain_i.get_num_of_spikes() n_spikes2 = self.binned_spiketrain_j.get_num_of_spikes() data1 = self.binned_spiketrain_i.sparse_matrix.data data2 = self.binned_spiketrain_j.sparse_matrix.data ii = data1.dot(data1) jj = data2.dot(data2) cov_mean = n_spikes1 * n_spikes2 / max_num_bins std_xy = np.sqrt((ii - n_spikes1 ** 2. / max_num_bins) * ( jj - n_spikes2 ** 2. / max_num_bins)) cross_corr_normalized = (cross_corr - cov_mean) / std_xy return cross_corr_normalized def kernel_smoothing(self, cross_corr_array, kernel): """ Performs 1-d convolution with the `kernel`. Parameters ---------- cross_corr_array : np.ndarray Cross-correlation array. The output of `self.correlate_speed()` or `self.correlate_memory()`. kernel : list 1-d kernel. Returns ------- np.ndarray Smoothed array. """ left_edge, right_edge = self.window kern_len_max = abs(left_edge) + abs(right_edge) + 1 # Define the kern for smoothing as an ndarray if len(kernel) > kern_len_max: raise ValueError( 'The length of the kernel {} cannot be larger than the ' 'length {} of the resulting CCH.'.format(len(kernel), kern_len_max)) kernel = np.divide(kernel, kernel.sum()) # Smooth the cross-correlation histogram with the kern return np.convolve(cross_corr_array, kernel, mode='same') @deprecated_alias(binned_sts='binned_spiketrain') def covariance(binned_spiketrain, binary=False, fast=True): r""" Calculate the NxN matrix of pairwise covariances between all combinations of N binned spike trains. For each pair of spike trains :math:`(i,j)`, the covariance :math:`C[i,j]` is obtained by binning :math:`i` and :math:`j` at the desired bin size. Let :math:`b_i` and :math:`b_j` denote the binned spike trains and :math:`\mu_i` and :math:`\mu_j` their respective averages. Then .. math:: C[i,j] = <b_i-\mu_i, b_j-\mu_j> / (L-1) where `<., .>` is the scalar product of two vectors, and :math:`L` is the number of bins. For an input of N spike trains, an N x N matrix is returned containing the covariances for each combination of input spike trains. If binary is True, the binned spike trains are clipped to 0 or 1 before computing the covariance, so that the binned vectors :math:`b_i` and :math:`b_j` are binary. Parameters ---------- binned_spiketrain : (N, ) elephant.conversion.BinnedSpikeTrain A binned spike train containing the spike trains to be evaluated. binary : bool, optional If True, the spikes of a particular spike train falling in the same bin are counted as 1, resulting in binary binned vectors :math:`b_i`. If False, the binned vectors :math:`b_i` contain the spike counts per bin. Default: False. fast : bool, optional If `fast=True` and the sparsity of `binned_spiketrain` is `> 0.1`, use `np.cov()`. Otherwise, use memory efficient implementation. See Notes [2]. Default: True. Returns ------- C : (N, N) np.ndarray The square matrix of covariances. The element :math:`C[i,j]=C[j,i]` is the covariance between `binned_spiketrain[i]` and `binned_spiketrain[j]`. Raises ------ MemoryError When using `fast=True` and `binned_spiketrain` shape is large. Warns -------- UserWarning If at least one row in `binned_spiketrain` is empty (has no spikes). See Also -------- correlation_coefficient : Pearson correlation coefficient Notes ----- 1. The spike trains in the binned structure are assumed to cover the complete time span `[t_start, t_stop)` of `binned_spiketrain`. 2. Using `fast=True` might lead to `MemoryError`. 
If it's the case, switch to `fast=False`. Examples -------- Generate two Poisson spike trains >>> import neo >>> from quantities import s, Hz, ms >>> from elephant.spike_train_generation import homogeneous_poisson_process >>> from elephant.conversion import BinnedSpikeTrain >>> st1 = homogeneous_poisson_process( ... rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s) >>> st2 = homogeneous_poisson_process( ... rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s) >>> cov_matrix = covariance(BinnedSpikeTrain([st1, st2], bin_size=5*ms)) >>> print(cov_matrix[0, 1]) -0.001668334167083546 """ if binary: binned_spiketrain = binned_spiketrain.binarize() if fast and binned_spiketrain.sparsity > _SPARSITY_MEMORY_EFFICIENT_THR: array = binned_spiketrain.to_array() return np.cov(array) return _covariance_sparse( binned_spiketrain, corrcoef_norm=False) @deprecated_alias(binned_sts='binned_spiketrain') def correlation_coefficient(binned_spiketrain, binary=False, fast=True): r""" Calculate the NxN matrix of pairwise Pearson's correlation coefficients between all combinations of N binned spike trains. For each pair of spike trains :math:`(i,j)`, the correlation coefficient :math:`C[i,j]` is obtained by binning :math:`i` and :math:`j` at the desired bin size. Let :math:`b_i` and :math:`b_j` denote the binned spike trains and :math:`\mu_i` and :math:`\mu_j` their respective means. Then .. math:: C[i,j] = <b_i-\mu_i, b_j-\mu_j> / \sqrt{<b_i-\mu_i, b_i-\mu_i> \cdot <b_j-\mu_j, b_j-\mu_j>} where `<., .>` is the scalar product of two vectors. For an input of N spike trains, an N x N matrix is returned. Each entry in the matrix is a real number ranging between -1 (perfectly anti-correlated spike trains) and +1 (perfectly correlated spike trains). However, if k-th spike train is empty, k-th row and k-th column of the returned matrix are set to np.nan. If binary is True, the binned spike trains are clipped to 0 or 1 before computing the correlation coefficients, so that the binned vectors :math:`b_i` and :math:`b_j` are binary. Parameters ---------- binned_spiketrain : (N, ) elephant.conversion.BinnedSpikeTrain A binned spike train containing the spike trains to be evaluated. binary : bool, optional If True, two spikes of a particular spike train falling in the same bin are counted as 1, resulting in binary binned vectors :math:`b_i`. If False, the binned vectors :math:`b_i` contain the spike counts per bin. Default: False. fast : bool, optional If `fast=True` and the sparsity of `binned_spiketrain` is `> 0.1`, use `np.corrcoef()`. Otherwise, use memory efficient implementation. See Notes[2] Default: True. Returns ------- C : (N, N) np.ndarray The square matrix of correlation coefficients. The element :math:`C[i,j]=C[j,i]` is the Pearson's correlation coefficient between `binned_spiketrain[i]` and `binned_spiketrain[j]`. If `binned_spiketrain` contains only one `neo.SpikeTrain`, C=1.0. Raises ------ MemoryError When using `fast=True` and `binned_spiketrain` shape is large. Warns -------- UserWarning If at least one row in `binned_spiketrain` is empty (has no spikes). See Also -------- covariance Notes ----- 1. The spike trains in the binned structure are assumed to cover the complete time span `[t_start, t_stop)` of `binned_spiketrain`. 2. Using `fast=True` might lead to `MemoryError`. If it's the case, switch to `fast=False`. 
Examples -------- Generate two Poisson spike trains >>> import neo >>> from quantities import s, Hz, ms >>> from elephant.spike_train_generation import homogeneous_poisson_process >>> from elephant.conversion import BinnedSpikeTrain >>> st1 = homogeneous_poisson_process( ... rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s) >>> st2 = homogeneous_poisson_process( ... rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s) >>> cc_matrix = correlation_coefficient(BinnedSpikeTrain([st1, st2], ... bin_size=5*ms)) >>> print(cc_matrix[0, 1]) 0.015477320222075359 """ if binary: binned_spiketrain = binned_spiketrain.binarize() if fast and binned_spiketrain.sparsity > _SPARSITY_MEMORY_EFFICIENT_THR: array = binned_spiketrain.to_array() return np.corrcoef(array) return _covariance_sparse( binned_spiketrain, corrcoef_norm=True) def corrcoef(*args, **kwargs): warnings.warn("'corrcoef' is deprecated; use 'correlation_coefficient'", DeprecationWarning) return correlation_coefficient(*args, **kwargs) def _covariance_sparse(binned_spiketrain, corrcoef_norm): r""" Memory efficient helper function for `covariance()` and `corrcoef()` that performs the complete calculation for either the covariance (`corrcoef_norm=False`) or correlation coefficient (`corrcoef_norm=True`). Both calculations differ only by the denominator. For any two `BinnedSpikeTrain`s :math:`\hat{b_x}` and :math:`\hat{b_y}` with mean :math:`\vec{\mu_x}` and :math:`\vec{mu_y}` respectively computes the dot product .. math:: <\hat{b_x} - \vec{\mu_x}, \hat{b_y} - \vec{\mu_y}>_{ij} = (\hat{b_x} \cdot \hat{b_y}^T)_{ij} - \frac{(\vec{N_x}^T \cdot \vec{N_y})_{ij}}{L} where :math:`N_x^i = \sum_j{b_x^{ij}}` - the number of spikes in `i`th row of :math:`\hat{b_x}`, :math:`L` - the number of bins, and :math:`\vec{\mu_x} = \frac{\vec{N_x}}{L}`. Parameters ---------- binned_spiketrain : (N, ) elephant.conversion.BinnedSpikeTrain See `covariance()` or `corrcoef()`, respectively. corrcoef_norm : bool Use normalization factor for the correlation coefficient rather than for the covariance. Warns -------- UserWarning If at least one row in `binned_spiketrain` is empty (has no spikes). Returns ------- (N, N) np.ndarray Pearson correlation or covariance matrix. """ spmat = binned_spiketrain.sparse_matrix n_bins = binned_spiketrain.n_bins # Check for empty spike trains n_spikes_per_row = spmat.sum(axis=1) if n_spikes_per_row.min() == 0: warnings.warn( 'Detected empty spike trains (rows) in the binned_spiketrain.') res = spmat.dot(spmat.T) - n_spikes_per_row * n_spikes_per_row.T / n_bins res = np.asarray(res) if corrcoef_norm: stdx = np.sqrt(res.diagonal()) stdx = np.expand_dims(stdx, axis=0) res /= (stdx.T * stdx) else: res /= (n_bins - 1) res = np.squeeze(res) return res @deprecated_alias(binned_st1='binned_spiketrain_i', binned_st2='binned_spiketrain_j', cross_corr_coef='cross_correlation_coefficient') def cross_correlation_histogram( binned_spiketrain_i, binned_spiketrain_j, window='full', border_correction=False, binary=False, kernel=None, method='speed', cross_correlation_coefficient=False): """ Computes the cross-correlation histogram (CCH) between two binned spike trains `binned_spiketrain_i` and `binned_spiketrain_j`. Parameters ---------- binned_spiketrain_i, binned_spiketrain_j : elephant.conversion.BinnedSpikeTrain Binned spike trains of lengths N and M to cross-correlate. The input spike trains can have any `t_start` and `t_stop`. 
window : {'valid', 'full'} or list of int, optional ‘full’: This returns the cross-correlation at each point of overlap, with an output shape of (N+M-1,). At the end-points of the cross-correlogram, the signals do not overlap completely, and boundary effects may be seen. ‘valid’: Mode valid returns output of length max(M, N) - min(M, N) + 1. The cross-correlation product is only given for points where the signals overlap completely. Values outside the signal boundary have no effect. List of integers (min_lag, max_lag): The entries of window are two integers representing the left and right extremes (expressed as number of bins) where the cross-correlation is computed. Default: 'full'. border_correction : bool, optional whether to correct for the border effect. If True, the value of the CCH at bin :math:`b` (for :math:`b=-H,-H+1, ...,H`, where :math:`H` is the CCH half-length) is multiplied by the correction factor: .. math:: (H+1)/(H+1-|b|), which linearly corrects for loss of bins at the edges. Default: False. binary : bool, optional If True, spikes falling in the same bin are counted as a single spike; otherwise they are counted as different spikes. Default: False. kernel : np.ndarray or None, optional A one dimensional array containing a smoothing kernel applied to the resulting CCH. The length N of the kernel indicates the smoothing window. The smoothing window cannot be larger than the maximum lag of the CCH. The kernel is normalized to unit area before being applied to the resulting CCH. Popular choices for the kernel are * normalized boxcar kernel: `numpy.ones(N)` * hamming: `numpy.hamming(N)` * hanning: `numpy.hanning(N)` * bartlett: `numpy.bartlett(N)` If None, the CCH is not smoothed. Default: None. method : {'speed', 'memory'}, optional Defines the algorithm to use. "speed" uses `numpy.correlate` to calculate the correlation between two binned spike trains using a non-sparse data representation. Due to various optimizations, it is the fastest realization. In contrast, the option "memory" uses an own implementation to calculate the correlation based on sparse matrices, which is more memory efficient but slower than the "speed" option. Default: "speed". cross_correlation_coefficient : bool, optional If True, a normalization is applied to the CCH to obtain the cross-correlation coefficient function ranging from -1 to 1 according to Equation (5.10) in [1]_. See Notes. Default: False. Returns ------- cch_result : neo.AnalogSignal Containing the cross-correlation histogram between `binned_spiketrain_i` and `binned_spiketrain_j`. Offset bins correspond to correlations at delays equivalent to the differences between the spike times of `binned_spiketrain_i` and those of `binned_spiketrain_j`: an entry at positive lag corresponds to a spike in `binned_spiketrain_j` following a spike in `binned_spiketrain_i` bins to the right, and an entry at negative lag corresponds to a spike in `binned_spiketrain_i` following a spike in `binned_spiketrain_j`. To illustrate this definition, consider two spike trains with the same `t_start` and `t_stop`: `binned_spiketrain_i` ('reference neuron') : 0 0 0 0 1 0 0 0 0 0 0 `binned_spiketrain_j` ('target neuron') : 0 0 0 0 0 0 0 1 0 0 0 Here, the CCH will have an entry of `1` at `lag=+3`. Consistent with the definition of `neo.AnalogSignals`, the time axis represents the left bin borders of each histogram bin. 
For example, the time axis might be: `np.array([-2.5 -1.5 -0.5 0.5 1.5]) * ms` lags : np.ndarray Contains the IDs of the individual histogram bins, where the central bin has ID 0, bins to the left have negative IDs and bins to the right have positive IDs, e.g.,: `np.array([-3, -2, -1, 0, 1, 2, 3])` Notes ----- 1. The Eq. (5.10) in [1]_ is valid for binned spike trains with at most one spike per bin. For a general case, refer to the implementation of `_covariance_sparse()`. 2. Alias: `cch` References ---------- .. [1] "Analysis of parallel spike trains", 2010, Gruen & Rotter, Vol 7. Examples -------- Plot the cross-correlation histogram between two Poisson spike trains >>> import elephant >>> import matplotlib.pyplot as plt >>> import quantities as pq >>> binned_spiketrain_i = elephant.conversion.BinnedSpikeTrain( ... elephant.spike_train_generation.homogeneous_poisson_process( ... 10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms), ... bin_size=5. * pq.ms) >>> binned_spiketrain_j = elephant.conversion.BinnedSpikeTrain( ... elephant.spike_train_generation.homogeneous_poisson_process( ... 10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms), ... bin_size=5. * pq.ms) >>> cc_hist = \ ... elephant.spike_train_correlation.cross_correlation_histogram( ... binned_spiketrain_i, binned_spiketrain_j, window=[-30,30], ... border_correction=False, ... binary=False, kernel=None, method='memory') >>> plt.bar(left=cc_hist[0].times.magnitude, ... height=cc_hist[0][:, 0].magnitude, ... width=cc_hist[0].sampling_period.magnitude) >>> plt.xlabel('time (' + str(cc_hist[0].times.units) + ')') >>> plt.ylabel('cross-correlation histogram') >>> plt.axis('tight') >>> plt.show() """ # Check that the spike trains are binned with the same temporal # resolution if binned_spiketrain_i.shape[0] != 1 or \ binned_spiketrain_j.shape[0] != 1: raise ValueError("Spike trains must be one dimensional") # rescale to the common units # this does not change the data - only its representation binned_spiketrain_j.rescale(binned_spiketrain_i.units) if not np.isclose(binned_spiketrain_i._bin_size, binned_spiketrain_j._bin_size): raise ValueError("Bin sizes must be equal") bin_size = binned_spiketrain_i._bin_size left_edge_min = -binned_spiketrain_i.n_bins + 1 right_edge_max = binned_spiketrain_j.n_bins - 1 t_lags_shift = (binned_spiketrain_j._t_start - binned_spiketrain_i._t_start) / bin_size if not np.isclose(t_lags_shift, round(t_lags_shift)): # For example, if bin_size=1 ms, binned_spiketrain_i.t_start=0 ms, and # binned_spiketrain_j.t_start=0.5 ms then there is a global shift in # the binning of the spike trains. raise ValueError( "Binned spiketrains time shift is not multiple of bin_size") t_lags_shift = int(round(t_lags_shift)) # In the examples below we fix st2 and "move" st1. # Zero-lag is equal to `max(st1.t_start, st2.t_start)`. # Binned spiketrains (t_start and t_stop) with bin_size=1ms: # 1) st1=[3, 8] ms, st2=[1, 13] ms # t_start_shift = -2 ms # zero-lag is at 3 ms # 2) st1=[1, 7] ms, st2=[2, 9] ms # t_start_shift = 1 ms # zero-lag is at 2 ms # 3) st1=[1, 7] ms, st2=[4, 6] ms # t_start_shift = 3 ms # zero-lag is at 4 ms # Find left and right edges of unaligned (time-dropped) time signals if len(window) == 2 and np.issubdtype(type(window[0]), np.integer) \ and np.issubdtype(type(window[1]), np.integer): # ex. 1) lags range: [w[0] - 2, w[1] - 2] ms # ex. 2) lags range: [w[0] + 1, w[1] + 1] ms # ex. 
3) lags range: [w[0] + 3, w[0] + 3] ms if window[0] >= window[1]: raise ValueError( "Window's left edge ({left}) must be lower than the right " "edge ({right})".format(left=window[0], right=window[1])) left_edge, right_edge = np.subtract(window, t_lags_shift) if left_edge < left_edge_min or right_edge > right_edge_max: raise ValueError( "The window exceeds the length of the spike trains") lags = np.arange(window[0], window[1] + 1, dtype=np.int32) cch_mode = 'pad' elif window == 'full': # cch computed for all the possible entries # ex. 1) lags range: [-6, 9] ms # ex. 2) lags range: [-4, 7] ms # ex. 3) lags range: [-2, 4] ms left_edge = left_edge_min right_edge = right_edge_max lags = np.arange(left_edge + t_lags_shift, right_edge + 1 + t_lags_shift, dtype=np.int32) cch_mode = window elif window == 'valid': lags = _CrossCorrHist.get_valid_lags(binned_spiketrain_i, binned_spiketrain_j) left_edge, right_edge = lags[(0, -1), ] cch_mode = window else: raise ValueError("Invalid window parameter") if binary: binned_spiketrain_i = binned_spiketrain_i.binarize() binned_spiketrain_j = binned_spiketrain_j.binarize() cch_builder = _CrossCorrHist(binned_spiketrain_i, binned_spiketrain_j, window=(left_edge, right_edge)) if method == 'memory': cross_corr = cch_builder.correlate_memory(cch_mode=cch_mode) else: cross_corr = cch_builder.correlate_speed(cch_mode=cch_mode) if border_correction: if window == 'valid': warnings.warn( "Border correction does not have any effect in " "'valid' window mode since there are no border effects!") else: cross_corr = cch_builder.border_correction(cross_corr) if kernel is not None: cross_corr = cch_builder.kernel_smoothing(cross_corr, kernel=kernel) if cross_correlation_coefficient: cross_corr = cch_builder.cross_correlation_coefficient(cross_corr) normalization = 'normalized' if cross_correlation_coefficient else 'counts' annotations = dict(window=window, border_correction=border_correction, binary=binary, kernel=kernel is not None, normalization=normalization) annotations = dict(cch_parameters=annotations) # Transform the array count into an AnalogSignal t_start = pq.Quantity((lags[0] - 0.5) * bin_size, units=binned_spiketrain_i.units, copy=False) cch_result = neo.AnalogSignal( signal=np.expand_dims(cross_corr, axis=1), units=pq.dimensionless, t_start=t_start, sampling_period=binned_spiketrain_i.bin_size, copy=False, **annotations) return cch_result, lags # Alias for common abbreviation cch = cross_correlation_histogram @deprecated_alias(spiketrain_1='spiketrain_i', spiketrain_2='spiketrain_j') def spike_time_tiling_coefficient(spiketrain_i, spiketrain_j, dt=0.005 * pq.s): """ Calculates the Spike Time Tiling Coefficient (STTC) as described in [1]_ following their implementation in C. The STTC is a pairwise measure of correlation between spike trains. It has been proposed as a replacement for the correlation index as it presents several advantages (e.g. it's not confounded by firing rate, appropriately distinguishes lack of correlation from anti-correlation, periods of silence don't add to the correlation and it's sensitive to firing patterns). The STTC is calculated as follows: .. 
math:: STTC = 1/2((PA - TB)/(1 - PA*TB) + (PB - TA)/(1 - PB*TA)) Where `PA` is the proportion of spikes from train 1 that lie within `[-dt, +dt]` of any spike of train 2 divided by the total number of spikes in train 1, `PB` is the same proportion for the spikes in train 2; `TA` is the proportion of total recording time within `[-dt, +dt]` of any spike in train 1, TB is the same proportion for train 2. For :math:`TA = PB = 1`and for :math:`TB = PA = 1` the resulting :math:`0/0` is replaced with :math:`1`, since every spike from the train with :math:`T = 1` is within `[-dt, +dt]` of a spike of the other train. This is a Python implementation compatible with the elephant library of the original code by C. Cutts written in C and avaiable at: (https://github.com/CCutts/Detecting_pairwise_correlations_in_spike_trains/ blob/master/spike_time_tiling_coefficient.c) Parameters ---------- spiketrain_i, spiketrain_j: neo.SpikeTrain Spike trains to cross-correlate. They must have the same `t_start` and `t_stop`. dt: pq.Quantity. The synchronicity window is used for both: the quantification of the proportion of total recording time that lies `[-dt, +dt]` of each spike in each train and the proportion of spikes in `spiketrain_i` that lies `[-dt, +dt]` of any spike in `spiketrain_j`. Default : `0.005 * pq.s` Returns ------- index: float or np.nan The spike time tiling coefficient (STTC). Returns np.nan if any spike train is empty. References ---------- .. [1] Cutts, C. S., & Eglen, S. J. (2014). Detecting Pairwise Correlations in Spike Trains: An Objective Comparison of Methods and Application to the Study of Retinal Waves. Journal of Neuroscience, 34(43), 14288–14303. Notes ----- Alias: `sttc` """ def run_P(spiketrain_i, spiketrain_j): """ Check every spike in train 1 to see if there's a spike in train 2 within dt """ N2 = len(spiketrain_j) # Search spikes of spiketrain_i in spiketrain_j # ind will contain index of ind = np.searchsorted(spiketrain_j.times, spiketrain_i.times) # To prevent IndexErrors # If a spike of spiketrain_i is after the last spike of spiketrain_j, # the index is N2, however spiketrain_j[N2] raises an IndexError. # By shifting this index, the spike of spiketrain_i will be compared # to the last 2 spikes of spiketrain_j (negligible overhead). # Note: Not necessary for index 0 that will be shifted to -1, # because spiketrain_j[-1] is valid (additional negligible comparison) ind[ind == N2] = N2 - 1 # Compare to nearest spike in spiketrain_j BEFORE spike in spiketrain_i close_left = np.abs( spiketrain_j.times[ind - 1] - spiketrain_i.times) <= dt # Compare to nearest spike in spiketrain_j AFTER (or simultaneous) # spike in spiketrain_j close_right = np.abs( spiketrain_j.times[ind] - spiketrain_i.times) <= dt # spiketrain_j spikes that are in [-dt, dt] range of spiketrain_i # spikes are counted only ONCE (as per original implementation) close = close_left + close_right # Count how many spikes in spiketrain_i have a "partner" in # spiketrain_j return np.count_nonzero(close) def run_T(spiketrain): """ Calculate the proportion of the total recording time 'tiled' by spikes. 
""" N = len(spiketrain) time_A = 2 * N * dt # maximum possible time if N == 1: # for just one spike in train if spiketrain[0] - spiketrain.t_start < dt: time_A += -dt + spiketrain[0] - spiketrain.t_start if spiketrain[0] + dt > spiketrain.t_stop: time_A += -dt - spiketrain[0] + spiketrain.t_stop else: # if more than one spike in train # Vectorized loop of spike time differences diff = np.diff(spiketrain) diff_overlap = diff[diff < 2 * dt] # Subtract overlap time_A += -2 * dt * len(diff_overlap) + np.sum(diff_overlap) # check if spikes are within dt of the start and/or end # if so subtract overlap of first and/or last spike if (spiketrain[0] - spiketrain.t_start) < dt: time_A += spiketrain[0] - dt - spiketrain.t_start if (spiketrain.t_stop - spiketrain[N - 1]) < dt: time_A += -spiketrain[-1] - dt + spiketrain.t_stop T = time_A / (spiketrain.t_stop - spiketrain.t_start) return T.simplified.item() # enforce simplification, strip units N1 = len(spiketrain_i) N2 = len(spiketrain_j) if N1 == 0 or N2 == 0: index = np.nan else: TA = run_T(spiketrain_i) TB = run_T(spiketrain_j) PA = run_P(spiketrain_i, spiketrain_j) PA = PA / N1 PB = run_P(spiketrain_j, spiketrain_i) PB = PB / N2 # check if the P and T values are 1 to avoid division by zero # This only happens for TA = PB = 1 and/or TB = PA = 1, # which leads to 0/0 in the calculation of the index. # In those cases, every spike in the train with P = 1 # is within dt of a spike in the other train, # so we set the respective (partial) index to 1. if PA * TB == 1: if PB * TA == 1: index = 1. else: index = 0.5 + 0.5 * (PB - TA) / (1 - PB * TA) elif PB * TA == 1: index = 0.5 + 0.5 * (PA - TB) / (1 - PA * TB) else: index = 0.5 * (PA - TB) / (1 - PA * TB) + 0.5 * (PB - TA) / ( 1 - PB * TA) return index sttc = spike_time_tiling_coefficient @deprecated_alias(binned_st='binned_spiketrain', tau_max='max_tau') def spike_train_timescale(binned_spiketrain, max_tau): r""" Calculates the auto-correlation time of a binned spike train. Uses the definition of the auto-correlation time proposed in [[1]_, Eq. (6)]: .. math:: \tau_\mathrm{corr} = \int_{-\tau_\mathrm{max}}^{\tau_\mathrm{max}}\ \left[ \frac{\hat{C}(\tau)}{\hat{C}(0)} \right]^2 d\tau where :math:`\hat{C}(\tau) = C(\tau)-\nu\delta(\tau)` denotes the auto-correlation function excluding the Dirac delta at zero timelag. Parameters ---------- binned_spiketrain : elephant.conversion.BinnedSpikeTrain A binned spike train containing the spike train to be evaluated. max_tau : pq.Quantity Maximal integration time :math:`\tau_{max}` of the auto-correlation function. It needs to be a multiple of the `bin_size` of `binned_spiketrain`. Returns ------- timescale : pq.Quantity The auto-correlation time of the binned spiketrain with the same units as in the input. If `binned_spiketrain` has less than 2 spikes, a warning is raised and `np.nan` is returned. Notes ----- * :math:`\tau_\mathrm{max}` is a critical parameter: numerical estimates of the auto-correlation functions are inherently noisy. Due to the square in the definition above, this noise is integrated. Thus, it is necessary to introduce a cutoff for the numerical integration - this cutoff should be neither smaller than the true auto-correlation time nor much bigger. * The bin size of `binned_spiketrain` is another critical parameter as it defines the discretization of the integral :math:`d\tau`. If it is too big, the numerical approximation of the integral is inaccurate. References ---------- .. [1] Wieland, S., Bernardi, D., Schwalger, T., & Lindner, B. (2015). 
Slow fluctuations in recurrent networks of spiking neurons. Physical Review E, 92(4), 040901. """ if binned_spiketrain.get_num_of_spikes() < 2: warnings.warn("Spike train contains less than 2 spikes! " "np.nan will be returned.") return np.nan bin_size = binned_spiketrain._bin_size try: max_tau = max_tau.rescale(binned_spiketrain.units).item() except (AttributeError, ValueError): raise ValueError("max_tau needs units of time") # safe casting of max_tau/bin_size to integer max_tau_bins = int(round(max_tau / bin_size)) if not np.isclose(max_tau, max_tau_bins * bin_size): raise ValueError("max_tau has to be a multiple of the bin_size") cch_window = [-max_tau_bins, max_tau_bins] corrfct, bin_ids = cross_correlation_histogram( binned_spiketrain, binned_spiketrain, window=cch_window, cross_correlation_coefficient=True ) # Take only t > 0 values, in particular neglecting the delta peak. start_id = corrfct.time_index((bin_size / 2) * binned_spiketrain.units) corrfct = corrfct.magnitude.squeeze()[start_id:] # Calculate the timescale using trapezoidal integration integr = (corrfct / corrfct[0]) ** 2 timescale = 2 * integrate.trapz(integr, dx=bin_size) return pq.Quantity(timescale, units=binned_spiketrain.units, copy=False)
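# Usage sketch for spike_time_tiling_coefficient defined above; the spike times
# are arbitrary example values (both trains must share t_start and t_stop).
if __name__ == '__main__':
    st_a = neo.SpikeTrain([0.10, 0.55, 1.20] * pq.s, t_start=0 * pq.s, t_stop=2 * pq.s)
    st_b = neo.SpikeTrain([0.12, 0.90, 1.25] * pq.s, t_start=0 * pq.s, t_stop=2 * pq.s)
    print(spike_time_tiling_coefficient(st_a, st_b, dt=0.05 * pq.s))  # a value in [-1, 1]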
{ "content_hash": "2b6e2f8d2c67bfd5a476f93743679f89", "timestamp": "", "source": "github", "line_count": 1032, "max_line_length": 79, "avg_line_length": 40.007751937984494, "alnum_prop": 0.6117031583026545, "repo_name": "JuliaSprenger/elephant", "id": "4330f88664abea0a21ab5a9935fbb9277e612e62", "size": "41322", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "elephant/spike_train_correlation.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "1327156" } ], "symlink_target": "" }
import os import io import codecs import sys import sublime import platform import time import sublime_plugin import subprocess from subprocess import Popen, PIPE, STDOUT from os import path import socket import subprocess import errno from socket import error as socket_error from .utils import * out_panel = 'CS-Script' # plugin_dir = path.dirname(path.dirname(__file__)) # Runtime.cscs_path = path.join(plugin_dir, 'bin', 'cscs.exe') def syntaxer_print_config(): print('syntaxer_cscs: ', Runtime.cscs_path) print('syntaxer_path: ', Runtime.syntaxer_path) print('syntaxer_port: ', Runtime.syntaxer_port) def is_linux(): return os.name == 'posix' and platform.system() == 'Linux' # ================================================================================= # C# Syntax Server - service that any process can connect via socket and request # intellisense queries # ================================================================================= # ----------------- def start_syntax_server(): try: sublime.status_message('Starting syntaxer server...') args = ['dotnet'] args.append(Runtime.syntaxer_path) args.append('-listen') args.append('-port:'+str(Runtime.syntaxer_port)) args.append('-timeout:3000') args.append('-client:{0}'.format(os.getpid())) args.append('-cscs_path:{0}'.format(Runtime.cscs_path)) args = to_args(args) start = time.time() print('>'+args[0]) subprocess.Popen(args, shell=True) print('> Syntaxer server started:', time.time()-start, 'seconds') sublime.status_message('> Syntaxer server started...') except Exception as ex: print('Cannot start syntaxer server', ex) pass # Start the server as soon as possible. If the server is already running the next call will do nothing. # The server will terminate itself after the last client exits # start_syntax_server() # ----------------- def send_exit_request(): try: clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) clientsocket.connect(('localhost', Runtime.syntaxer_port)) clientsocket.send('-exit'.encode('utf-8')) except socket_error as serr: pass # ----------------- reconnect_count = 0 last_cscs_sent = None def set_engine_path(cscs_path): if cscs_path: Runtime.cscs_path = cscs_path reconnect_count = 0 # print('setting engine path') send_cscs_path(Runtime.cscs_path) # ----------------- def preload_engine(): try: args = [] args.append(Runtime.cscs_path) args.append('-preload') args = to_args(args) start = time.time() subprocess.Popen(args, shell=True) print('> Roslyn preloading done:', time.time()-start, 'seconds') except: pass # ----------------- def send_cscs_path(cscs_path): sublime.set_timeout_async(lambda: try_send_cscs_path(cscs_path), 3000) def try_send_cscs_path(cscs_path): global reconnect_count global last_cscs_sent reconnect_count = reconnect_count + 1 if last_cscs_sent == cscs_path: return try: start_time = time.time() clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) clientsocket.connect(('localhost', Runtime.syntaxer_port)) request = '-cscs_path:{0}'.format(cscs_path) clientsocket.send(request.encode('utf-8')) last_cscs_sent = cscs_path reconnect_count = 0 print('> Connected to syntaxer server:', time.time()-start_time, 'seconds') except socket_error as serr: # send_cscs_path may be issued before server is ready for the connection # so we may need to retry last_cscs_sent = None if reconnect_count < 5: print(serr) print('Cannot configure syntaxer server with cscs location. Schedule another attempt in 3 seconds.') sublime.set_timeout_async(try_send_cscs_path, 3000) else: # just give up. 
5 sec should be enough to connect. Meaning there is something # more serious than server is not being ready. print(serr) print('Cannot configure syntaxer server with cscs location.') reconnect_count = 0 # ----------------- def send_pkill_request(pid, pname=None): try: clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) clientsocket.connect(('localhost', Runtime.syntaxer_port)) request = '-pkill\n-pid:{0}'.format(pid) if pname: request = request + '\n-pname:' + pname clientsocket.send(request.encode('utf-8')) except socket_error as serr: if serr.errno == errno.ECONNREFUSED: start_syntax_server() # ----------------- def send_popen_request(command): try: clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) clientsocket.connect(('localhost', Runtime.syntaxer_port)) request = '-popen:{0}'.format(command) clientsocket.send(request.encode('utf-8')) except socket_error as serr: if serr.errno == errno.ECONNREFUSED: start_syntax_server() # ----------------- def send_syntax_request(file, location, operation): try: clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) clientsocket.connect(('localhost', Runtime.syntaxer_port)) request = '-client:{0}\n-op:{1}\n-script:{2}\n-pos:{3}'.format(os.getpid(), operation, file, location) clientsocket.send(request.encode('utf-8')) response = clientsocket.recv(1024*1024) return response.decode('utf-8') except socket_error as serr: if serr.errno == errno.ECONNREFUSED: start_syntax_server() # print(serr) # ----------------- def send_formatting_request(file, location): return send_syntax_request(file, location, 'format') # ----------------- def send_completion_request(file, location): print('send_completion_request') return send_syntax_request(file, location, 'completion') # ----------------- def send_tooltip_request(file, location, hint, short_hinted_tooltips=True): args = 'tooltip:'+hint if short_hinted_tooltips: args = args + '\n-short_hinted_tooltips:1' else: args = args + '\n-short_hinted_tooltips:0' return send_syntax_request(file, location, args) # if short_hinted_tooltips: # return send_syntax_request(file, location, 'short_hinted_tooltips:1\n-tooltip:'+hint) # else: # return send_syntax_request(file, location, 'short_hinted_tooltips:0\n-tooltip:'+hint) # return send_syntax_request(file, location, 'tooltip:'+hint) # ----------------- def send_resolve_request(file, location): return send_syntax_request(file, location, 'resolve') # ----------------- def send_resolve_references(file, location): return send_syntax_request(file, location, 'references') # ----------------- def send_resolve_using_request(file, word): return send_syntax_request(file, -1, 'suggest_usings:'+word) # ----------------- def popen_redirect(args): all_args = to_args(args) cmd = '' for arg in all_args: cmd = cmd + '"'+arg+'" ' # print('popen_redirect: ' + cmd) return subprocess.Popen(all_args, stdout=subprocess.PIPE, shell=True) # ----------------- def popen_redirect_tofile(args, file): return subprocess.Popen(to_args(args), stdout=file, shell=True) def popen_tofile(args, file): return subprocess.Popen(to_args(args), stdout=file, shell=True) # ----------------- def run_doc_in_cscs(args, view, handle_line, on_done=None, nuget_warning = True): curr_doc = view.file_name() clear_and_print_result_header(curr_doc) if not path.exists(Runtime.cscs_path): print('Error: cannot find CS-Script launcher - ', Runtime.cscs_path) elif not curr_doc: print('Error: cannot find out the document path') else: clear_and_print_result_header(curr_doc) if nuget_warning and 
'//css_nuget' in view.substr(sublime.Region(0, view.size())): output_view_write_line(out_panel, "Resolving NuGet packages may take time...") def do(): all_args = ['dotnet', Runtime.cscs_path] for a in args: all_args.append(a) all_args.append(curr_doc) proc = popen_redirect(all_args) first_result = True for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"): line = line.strip() if first_result: first_result = False clear_and_print_result_header(curr_doc) handle_line(line) if on_done: on_done() sublime.set_timeout(do, 100) # ----------------- def run_cscs(args, handle_line, on_done=None, header=None): output_view_show(out_panel) output_view_clear(out_panel) if header: output_view_write_line(out_panel, header) output_view_write_line(out_panel, "------------------------------------------------------------------------") if not path.exists(Runtime.cscs_path): print('Error: cannot find CS-Script launcher - ', Runtime.cscs_path) else: def do(): all_args = ['dotnet', Runtime.cscs_path] for a in args: all_args.append(a) proc = subprocess.Popen(to_args(all_args), stdout=subprocess.PIPE, shell=True) for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"): handle_line(line.strip()) if on_done: on_done() sublime.set_timeout(do, 100) # ------------- def clear_and_print_result_header(curr_doc): output_view_show(out_panel) output_view_clear(out_panel) simple_output_header = sublime.load_settings("cs-script.sublime-settings").get('simple_output_header', False) if not simple_output_header: output_view_write_line(out_panel, 'Script: '+ curr_doc) output_view_write_line(out_panel, "------------------------------------------------------------------------")
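# Usage sketch (inside the Sublime plugin environment) for the socket helpers
# above: request completions for a C# script at a caret offset. The file path
# and position are placeholder values and assume the syntaxer server is already
# listening on Runtime.syntaxer_port.
def example_completion_query():
    reply = send_completion_request(r'C:\scripts\hello.cs', 120)  # hypothetical script/position
    print(reply)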
{ "content_hash": "87ed34efad3c7e2f1191b6b8cd977e32", "timestamp": "", "source": "github", "line_count": 303, "max_line_length": 117, "avg_line_length": 34.59075907590759, "alnum_prop": 0.5774258181471233, "repo_name": "oleg-shilo/cs-script-sublime", "id": "eb805f3a7aa944fd62076b54425949944000783c", "size": "10481", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "imports/syntaxer.py", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "936" }, { "name": "Python", "bytes": "99915" }, { "name": "Shell", "bytes": "577" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function import os import sys import setuptools if sys.version_info[:2] < (2, 6): sys.exit("virtualenv requires Python 2.6 or higher.") base_dir = os.path.dirname(__file__) # Fetch the metadata about = {} with open(os.path.join(base_dir, "virtualenv", "__about__.py")) as f: exec(f.read(), about) # Build up the long description with open(os.path.join(base_dir, "docs", "index.rst")) as f: long_description = f.read() long_description = long_description.strip().split("split here", 1)[0] with open(os.path.join(base_dir, "docs", "changes.rst")) as f: long_description = "\n\n".join([long_description, f.read()]) setuptools.setup( name=about["__title__"], version=about["__version__"], description=about["__summary__"], long_description=long_description, license=about["__license__"], url=about["__uri__"], author=about["__author__"], author_email=about["__email__"], classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", ], packages=[ "virtualenv", "virtualenv.builders", "virtualenv.flavors", "virtualenv._scripts", "virtualenv._wheels", ], package_data={ "virtualenv._scripts": ["activate.*", "deactivate.bat"], "virtualenv._wheels": ["*.whl"], }, entry_points={ "console_scripts": [ "virtualenv=virtualenv.__main__:main", ], }, install_requires=[ "click<6.0", ], zip_safe=False, )
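# Sketch of the virtualenv/__about__.py file that this setup.py exec()s to fetch
# its metadata. The key names are the ones referenced above; every value below
# is a placeholder, not the project's real metadata.
#
#     __title__ = "virtualenv"
#     __summary__ = "..."          # short project summary
#     __uri__ = "..."              # project homepage URL
#     __version__ = "0.0.0.dev0"
#     __author__ = "..."
#     __email__ = "..."
#     __license__ = "MIT"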
{ "content_hash": "2fd7a36ffca4133141603ab1ef63a320", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 73, "avg_line_length": 25.088607594936708, "alnum_prop": 0.5822401614530777, "repo_name": "ionelmc/virtualenv", "id": "5610a46e1039a5fdb6f4cd147eb86df23043111e", "size": "1982", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "2286" }, { "name": "C", "bytes": "963" }, { "name": "PowerShell", "bytes": "8325" }, { "name": "Python", "bytes": "81322" }, { "name": "Shell", "bytes": "6048" } ], "symlink_target": "" }
from django.apps import AppConfig
from django.db.models.signals import post_save, post_delete
from django.utils.translation import ugettext_lazy as _


class ImprovetextAppConfig(AppConfig):
    name = "bettertexts"
    verbose_name = _("Better texts")

    def ready(self):
        # Import lazily so the models are only touched once the app registry
        # is ready, then recalculate ratings whenever a UserRating is saved
        # or deleted.
        from .models import UserRating
        from .signals import calculate_ratings

        post_save.connect(calculate_ratings, sender=UserRating)
        post_delete.connect(calculate_ratings, sender=UserRating)
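# Hypothetical sketch of the receiver wired up in ready() above. The real
# bettertexts.signals.calculate_ratings is not shown in this file; a combined
# post_save / post_delete receiver generally has this shape.
#
#     def calculate_ratings(sender, instance, **kwargs):
#         # 'instance' is the UserRating that was saved or deleted; recompute
#         # the stored aggregate rating for the rated object here.
#         ...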
{ "content_hash": "81c066d39d04924a8eb412f51fdcfa66", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 65, "avg_line_length": 32.8, "alnum_prop": 0.7357723577235772, "repo_name": "citizenline/citizenline", "id": "17fb6c33472141ba16850ff39126d4566231a2ed", "size": "492", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bettertexts/apps.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3470" }, { "name": "HTML", "bytes": "22853" }, { "name": "JavaScript", "bytes": "8389" }, { "name": "Python", "bytes": "86277" }, { "name": "Ruby", "bytes": "198" } ], "symlink_target": "" }
from pluginsmanager.model.input import Input from pluginsmanager.model.lv2.lv2_port_mixing import Lv2PortMixing class Lv2Input(Lv2PortMixing, Input): """ Representation of a Lv2 `input audio port`_ instance. For general input use, see :class:`.Input` class documentation. .. _input audio port: http://lv2plug.in/ns/lv2core/#InputPort :param Lv2Effect effect: Effect that contains the input :param dict data: *input audio port* json representation """ def __init__(self, effect, data): super(Lv2Input, self).__init__(effect) self._data = data @property def data(self): return self._data
{ "content_hash": "a4e7ec111725d6e33c56d943f7021912", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 67, "avg_line_length": 28.565217391304348, "alnum_prop": 0.6818873668188736, "repo_name": "PedalPi/PluginsManager", "id": "1875ff37a5ade87de66ceb2328959249383760a5", "size": "1234", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pluginsmanager/model/lv2/lv2_input.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "2251" }, { "name": "Python", "bytes": "352748" } ], "symlink_target": "" }
""" Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.8.2 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import os import sys import unittest import kubernetes.client from kubernetes.client.rest import ApiException from kubernetes.client.models.v1_secret_env_source import V1SecretEnvSource class TestV1SecretEnvSource(unittest.TestCase): """ V1SecretEnvSource unit test stubs """ def setUp(self): pass def tearDown(self): pass def testV1SecretEnvSource(self): """ Test V1SecretEnvSource """ # FIXME: construct object with mandatory attributes with example values #model = kubernetes.client.models.v1_secret_env_source.V1SecretEnvSource() pass if __name__ == '__main__': unittest.main()
{ "content_hash": "94faa798c437468966c340150ce76880", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 105, "avg_line_length": 22.714285714285715, "alnum_prop": 0.6939203354297694, "repo_name": "mbohlool/client-python", "id": "f236edf90b1727c6a02f2e5a4dda3a4e5719a251", "size": "971", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "kubernetes/test/test_v1_secret_env_source.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "8417639" }, { "name": "Shell", "bytes": "16830" } ], "symlink_target": "" }
import hashlib import redis from django.conf import settings from django.test import TestCase as DjangoTestCase from smsgateway import send from smsgateway.backends.redistore import RedistoreBackend from smsgateway.models import SMS from smsgateway.sms import SMSRequest from smsgateway.utils import check_cell_phone_number, truncate_sms req_data = { 'to': '+32000000001;+32000000002;+32000000003', 'msg': 'text of the message', 'signature': 'cropped to 11 chars' } class RedistoreBackendTestCase(DjangoTestCase): def setUp(self): self.backend = RedistoreBackend() self.conf = settings.SMSGATEWAY_ACCOUNTS['redistore'] def test_init(self): for key in ['redis_key_prefix', 'redis_pool', 'redis_conn', 'reference', 'sender', 'sms_data_iter', 'sent_smses']: self.assert_(key in self.backend.__dict__.keys()) def test_initialize_without_sms_request(self): self.assert_(self.backend._initialize(None, self.conf) == False) def test_initialize_with_sms_request(self): sms_request = SMSRequest(**req_data) self.assert_(self.backend._initialize(sms_request, self.conf) == True) def test_get_sms_list(self): sms_list = self.backend._get_sms_list(SMSRequest(**req_data)) self.assert_(len(sms_list) == 3) for to, sms in zip(req_data['to'].split(';'), sms_list): self.assert_(sms.to[0] == check_cell_phone_number(to)) self.assert_(sms.msg == truncate_sms(req_data['msg'])) self.assertEqual(sms.signature, req_data['signature'][:len(sms.signature)]) class RedistoreSendSingleSMSTestCase(DjangoTestCase): def setUp(self): self.conf = settings.SMSGATEWAY_ACCOUNTS['redistore'] self.rdb = redis.Redis(host=self.conf['host'], port=self.conf['port'], db=self.conf['dbn'], password=self.conf['pwd']) self.assert_(SMS.objects.count() == 0) send('+32000000001', 'testing message', 'the signature', using='redistore') self.assert_(SMS.objects.count() == 1) self.sms = SMS.objects.get(pk=1) def tearDown(self): for key in self.rdb.keys('%s*' % self.conf['key_prefix']): self.rdb.delete(key) self.assert_(len(self.rdb.keys('%s*' % self.conf['key_prefix'])) == 0) SMS.objects.all().delete() def test_single_sms_object_values(self): self.assert_(self.sms.content == 'testing message') self.assert_(self.sms.to == '+32000000001') self.assert_(self.sms.sender == 'the signature'[:len(self.sms.sender)]) def test_redis_keys(self): key = hashlib.md5(self.sms.gateway_ref).hexdigest() queue_key = '%ssmsreq:%s' % (self.conf['key_prefix'], key) allqueues_key = '%soutq' % self.conf['key_prefix'] sms_key = '%ssms:%s:0' % (self.conf['key_prefix'], key) self.assertTrue(self.rdb.exists(queue_key)) self.assertTrue(self.rdb.exists(allqueues_key)) self.assertTrue(self.rdb.exists(sms_key)) self.assert_(self.rdb.llen(allqueues_key) == 1) self.assert_(self.rdb.lpop(allqueues_key) == queue_key) self.assert_(self.rdb.llen(queue_key) == 1) self.assert_(self.rdb.lpop(queue_key) == sms_key) something = self.rdb.hgetall(sms_key) self.assertEqual( self.rdb.hgetall(sms_key), { 'source_addr_ton': '0', 'source_addr': '15185', 'dest_addr_ton': '1', 'destination_addr': '+32000000001', 'esme_vrfy_seqn': '-1', 'short_message': 'testing message'}) class RedistoreSendMultipleSMSTestCase(DjangoTestCase): def setUp(self): self.conf = settings.SMSGATEWAY_ACCOUNTS['redistore'] self.rdb = redis.Redis(host=self.conf['host'], port=self.conf['port'], db=self.conf['dbn'], password=self.conf['pwd']) self.assert_(SMS.objects.count() == 0) send(req_data['to'], req_data['msg'], req_data['signature'], using='redistore') self.assert_(SMS.objects.count() == 3) self.smses = SMS.objects.all() def 
tearDown(self): for key in self.rdb.keys('%s*' % self.conf['key_prefix']): self.rdb.delete(key) self.assert_(len(self.rdb.keys('%s*' % self.conf['key_prefix'])) == 0) SMS.objects.all().delete() def test_multiple_sms_object_values(self): for to, sms in zip (req_data['to'].split(';'), self.smses): self.assert_(sms.to == check_cell_phone_number(to)) self.assert_(sms.content == truncate_sms(req_data['msg'])) self.assertEqual(sms.sender, req_data['signature'][:len(sms.sender)]) self.assert_(sms.backend == 'redistore')
{ "content_hash": "a84f09509dc50f0976ddb7c5fa6ed0fd", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 79, "avg_line_length": 41.875, "alnum_prop": 0.5826865671641791, "repo_name": "peterayeni/django-smsgateway", "id": "18a574ff3a44ac32001a7a8781c1d138db7b152b", "size": "5049", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "smsgateway/tests/backends/redistore.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "1262" }, { "name": "CSS", "bytes": "5496" }, { "name": "HTML", "bytes": "82179" }, { "name": "Java", "bytes": "531577" }, { "name": "Modelica", "bytes": "5760" }, { "name": "Python", "bytes": "143480" }, { "name": "Shell", "bytes": "1564" } ], "symlink_target": "" }
DEBUG = False

#
# Observation point types
#

OP_BEFORE = 0
OP_AFTER = 1
{ "content_hash": "f20e3d87a069a2f3bf6040f507f1319e", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 25, "avg_line_length": 10.285714285714286, "alnum_prop": 0.6666666666666666, "repo_name": "schieb/angr", "id": "3a0928fc9bef5a2387338ed8884bd54a02911fe2", "size": "72", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "angr/analyses/reaching_definitions/constants.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "C", "bytes": "6375" }, { "name": "C++", "bytes": "39522" }, { "name": "Dockerfile", "bytes": "493" }, { "name": "Makefile", "bytes": "739" }, { "name": "Python", "bytes": "4987778" } ], "symlink_target": "" }
'''
File created on Aug 7, 2012

@author: jhc02
'''
# Module that provides GATK tools as functions

from subprocess import call
from subprocess import check_output
import os
import platform

gatk_location = '/usr/local/bio/gatk/GenomeAnalysisTK.jar'
java_exec = 'java'


def init_java():
    global java_exec
    this_system = platform.system()
    if 'Darwin' in this_system:
        print "Running on a Mac, setting java exec to the correct JAVA_HOME location"
        java_home = check_output(['/usr/libexec/java_home']).strip()
        java_exec = os.path.join(java_home, 'bin', 'java')
        print 'New java executable value: ' + java_exec


def set_gatk_location(location):
    global gatk_location
    gatk_location = location


def get_gatk_location():
    return gatk_location


def create_realigner_targets(ref_genome_file, bam_file, output_file = None):
    init_java()
    print '\tGenome Analysis ToolKit - Realigner Target Creator...[' + java_exec + ' -jar ' + gatk_location + ']\n'
    if output_file == None:
        output_file = bam_file.rsplit('.',1)[0] + '.intervals'
    status = call([java_exec, '-jar', gatk_location, '-T', 'RealignerTargetCreator', '-R', ref_genome_file, '-I', bam_file, '-o', output_file])
    print '\tGenome Analysis ToolKit - Realigner Target Creator...DONE...Status code: ' + str(status) + '\n\n'
    #return (status, output_file)
    return (status, output_file)


def create_realigner_targets_with_pre_processing(ref_genome_file, bam_file, output_file = None):
    init_java()
    # TODO: Check approach of this extra function
    # Check if the provided ref_genome_file has a .fa or .fasta extension.
    # If it doesn't, we create a copy that has the correct extension for gatk
    # and then run the pre-processing.
    # TODO: ???? - is this the correct approach? It results in dependencies on
    # other processes. Maybe these dependencies should be made explicit by doing
    # this through the co-ordination forms element of the code?
    print '\tGenome Analysis ToolKit - Realigner Target Creator...[' + java_exec + ' -jar ' + gatk_location + ']\n'
    if output_file == None:
        output_file = bam_file.rsplit('.',1)[0] + '.intervals'
    status = call([java_exec, '-jar', gatk_location, '-T', 'RealignerTargetCreator', '-R', ref_genome_file, '-I', bam_file, '-o', output_file])
    print '\tGenome Analysis ToolKit - Realigner Target Creator...DONE...Status code: ' + str(status) + '\n\n'
    #return (status, output_file)
    return (output_file)


def indel_realigner(ref_genome_file, bam_file, target_intervals, output_file = None):
    init_java()
    print '\tGenome Analysis ToolKit - Indel Realigner...\n'
    if output_file == None:
        output_file = bam_file.rsplit('.',1)[0] + '_REALIGNED.' + bam_file.rsplit('.',1)[1]
    status = call([java_exec, '-jar', gatk_location, '-T', 'IndelRealigner', '-R', ref_genome_file, '-I', bam_file, '-targetIntervals', target_intervals, '-o', output_file])
    print '\tGenome Analysis ToolKit - Indel Realigner...DONE...Status code: ' + str(status) + '\n\n'
    return (status, output_file)


def base_recalibrator(ref_genome_file, bam_file, known_sites_file, output_file = None):
    init_java()
    print '\tGenome Analysis ToolKit - Base Recalibrator...\n'
    if output_file == None:
        output_file = bam_file.rsplit('.',1)[0] + '_recal.table'
    status = call([java_exec, '-jar', gatk_location, '-T', 'BaseRecalibrator', '-R', ref_genome_file, '-I', bam_file, '-knownSites:VCF', known_sites_file, '-o', output_file])
    print '\tGenome Analysis ToolKit - Base Recalibrator...DONE...Status code: ' + str(status) + '\n\n'
    return (status, output_file)


def print_reads(ref_genome_file, bam_file, recalibration_table_file, output_file = None):
    init_java()
    print '\tGenome Analysis ToolKit - Print Reads...\n'
    if output_file == None:
        output_file = bam_file.rsplit('.',1)[0] + '_final.' + bam_file.rsplit('.',1)[1]
    status = call([java_exec, '-jar', gatk_location, '-T', 'PrintReads', '-BQSR', recalibration_table_file, '-R', ref_genome_file, '-I', bam_file, '-o', output_file])
    print '\tGenome Analysis ToolKit - PrintReads...DONE...Status code: ' + str(status) + '\n\n'
    return (status, output_file)
{ "content_hash": "0bc9c1d6a61fdcd8dadbe1bd159b1ec0", "timestamp": "", "source": "github", "line_count": 95, "max_line_length": 174, "avg_line_length": 45.27368421052632, "alnum_prop": 0.649616368286445, "repo_name": "london-escience/libhpc-cf", "id": "d03b599ee3d945e73ad8938a65350754589c44c0", "size": "6490", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "libhpc/wrapper/bio/gatk.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "122774" } ], "symlink_target": "" }
import argparse
import sys

from wd_curator import WikiWorker


def arg_parser(args):
    parser = argparse.ArgumentParser(
        description='Run a worker for a given wiki')
    parser.add_argument('--path', '-p', nargs='?', required=True,
                        help='Path to json file of the wiki')
    parser.add_argument('--once', '-o', action='store_true', required=False,
                        help='Break after a one-time run')
    return parser.parse_known_args(args)[0]


def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    args = arg_parser(argv)
    wiki = WikiWorker.from_config(open(args.path, 'r'))
    wiki.run(forever=not args.once)


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        sys.exit()
{ "content_hash": "622617d165eeb1c26d62af246b005488", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 76, "avg_line_length": 25.225806451612904, "alnum_prop": 0.59846547314578, "repo_name": "Ladsgroup/Wikidata-curator", "id": "353d5b21af06e73dff27dfacf6272ec4630c66af", "size": "782", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/wiki_worker.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7509" } ], "symlink_target": "" }
import unittest
import sys
sys.path.append('LeetCode/_051_100')
from _070_ClimbingStairs import Solution


class Test_070_ClimbingStairs(unittest.TestCase):
    def test_climbStairs_1(self):
        solution = Solution()
        self.assertEqual(1, solution.climbStairs(1))

    def test_climbStairs_2(self):
        solution = Solution()
        self.assertEqual(2, solution.climbStairs(2))

    def test_climbStairs_3(self):
        solution = Solution()
        self.assertEqual(3, solution.climbStairs(3))

    def test_climbStairs_4(self):
        solution = Solution()
        self.assertEqual(5, solution.climbStairs(4))

    def test_climbStairs_5(self):
        solution = Solution()
        self.assertEqual(8, solution.climbStairs(5))

    def test_climbStairs_6(self):
        solution = Solution()
        self.assertEqual(13, solution.climbStairs(6))

    def test_climbStairs_7(self):
        solution = Solution()
        self.assertEqual(21, solution.climbStairs(7))
{ "content_hash": "b8901212fa3b1fc52eba3df2b73341b5", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 53, "avg_line_length": 28.142857142857142, "alnum_prop": 0.6639593908629442, "repo_name": "BigEgg/LeetCode", "id": "55a1dac2c47aec70c117e355477f080e5c5590c9", "size": "985", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Python/LeetCode.Test/_051_100/Test_070_ClimbingStairs.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "6405" }, { "name": "C#", "bytes": "460435" }, { "name": "C++", "bytes": "49261" }, { "name": "HTML", "bytes": "1371" }, { "name": "Java", "bytes": "22777" }, { "name": "JavaScript", "bytes": "41935" }, { "name": "Python", "bytes": "167427" }, { "name": "Smalltalk", "bytes": "1174" } ], "symlink_target": "" }
'''
Created by auto_sdk on 2015.08.20
'''
from top.api.base import RestApi


class TopAuthTokenRefreshRequest(RestApi):
    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        self.refresh_token = None

    def getapiname(self):
        return 'taobao.top.auth.token.refresh'
{ "content_hash": "7c5035ecc1085cf7bb3fad16b6d78d0f", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 55, "avg_line_length": 27.727272727272727, "alnum_prop": 0.7245901639344262, "repo_name": "BillBillBillBill/WishTalk-server", "id": "fa055f8fd9a119e6bb2351a092fabe3523eb9e39", "size": "305", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "WishTalk/top/api/rest/TopAuthTokenRefreshRequest.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "760394" }, { "name": "HTML", "bytes": "74807" }, { "name": "JavaScript", "bytes": "537618" }, { "name": "Python", "bytes": "73846" } ], "symlink_target": "" }
import _plotly_utils.basevalidators


class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
    def __init__(
        self, plotly_name="minexponent", parent_name="scatter3d.line.colorbar", **kwargs
    ):
        super(MinexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
{ "content_hash": "04456bf1f4a25cff07350d65c6d7dd64", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 88, "avg_line_length": 34.07142857142857, "alnum_prop": 0.6058700209643606, "repo_name": "plotly/plotly.py", "id": "5a6df8218c995bd85e1fba25c9f0152e082d5368", "size": "477", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "packages/python/plotly/plotly/validators/scatter3d/line/colorbar/_minexponent.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "545" }, { "name": "JavaScript", "bytes": "2074" }, { "name": "PostScript", "bytes": "565328" }, { "name": "Python", "bytes": "31506317" }, { "name": "TypeScript", "bytes": "71337" } ], "symlink_target": "" }
from octo.plugin import OctoPlugin


class PluginTwo(OctoPlugin):
    pass
{ "content_hash": "2d96333af4996e94af80d961f43218eb", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 34, "avg_line_length": 17.75, "alnum_prop": 0.8169014084507042, "repo_name": "zoni/octo", "id": "93a26e40ca6a35b864578bafb3e63d75c619444d", "size": "71", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/plugins/plugin2.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "33940" } ], "symlink_target": "" }
""" Histogram with a Global Mean Overlay ------------------------------------ This example shows a histogram with a global mean overlay. """ # category: histograms import altair as alt from vega_datasets import data source = data.movies.url bar = alt.Chart(source).mark_bar().encode( alt.X('IMDB_Rating:Q', bin=True, axis=None), alt.Y('count()') ) rule = alt.Chart(source).mark_rule(color='red').encode( x='mean(IMDB_Rating):Q', size=alt.value(5) ) bar + rule
{ "content_hash": "121198bd06be1a671578368d1801f424", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 58, "avg_line_length": 21.818181818181817, "alnum_prop": 0.6333333333333333, "repo_name": "ellisonbg/altair", "id": "cbff9ef036c4ba4fd3667387bb7d937a2dde9638", "size": "480", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "altair/vegalite/v2/examples/histogram_with_a_global_mean_overlay.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Jupyter Notebook", "bytes": "136763" }, { "name": "Makefile", "bytes": "312" }, { "name": "Python", "bytes": "1150719" } ], "symlink_target": "" }
""" A very quick and dirty DHCP server This is currently missing lots of features and sort of limited with respect to subnets and so on, but it's a start. """ from pox.core import core import pox.openflow.libopenflow_01 as of import pox.lib.packet as pkt from pox.lib.addresses import IPAddr,EthAddr,parse_cidr from pox.lib.addresses import IP_BROADCAST, IP_ANY from pox.lib.revent import * from pox.lib.util import dpid_to_str log = core.getLogger() class DHCPLease (Event): """ Raised when a lease is given Call nak() to abort this lease """ def __init__ (self, host_mac, ip): super(DHCPLease, self).__init__() self.host_mac = host_mac self.ip = ip self._nak = False def nak (self): self._nak = True class AddressPool (object): """ Superclass for DHCP address pools Note that it's just a subset of a list (thus, you can always just use a list as a pool). The one exception is an optional "subnet_mask" hint. It probably makes sense to change this abstraction so that we can more easily return addresses from multiple ranges, and because some things (e.g., getitem) are potentially difficult to implement and not particularly useful (since we only need to remove a single item at a time). """ def __init__ (self): """ Initialize this pool. """ pass def __contains__ (self, item): """ Is this IPAddr in the pool? """ return False def append (self, item): """ Add this IP address back into the pool """ pass def remove (self, item): """ Remove this IPAddr from the pool """ pass def __len__ (self): """ Returns number of IP addresses in the pool """ return 0 def __getitem__ (self, index): """ Get an IPAddr from the pool. Note that this will only be called with index = 0! """ pass class SimpleAddressPool (AddressPool): """ Simple AddressPool for simple subnet based pools. """ def __init__ (self, network = "192.168.0.0/24", first = 1, last = None, count = None): """ Simple subnet-based address pool Allocates count IP addresses out of network/network_size, starting with the first'th. You may specify the end of the range with either last (to specify the last'th address to use) or count to specify the number to use. If both are None, use up to the end of all legal addresses. 
Example for all of 192.168.x.x/16: SimpleAddressPool("192.168.0.0/16", 1, 65534) """ network,network_size = parse_cidr(network) self.first = first self.network_size = network_size self.host_size = 32-network_size self.network = IPAddr(network) if last is None and count is None: self.last = (1 << self.host_size) - 2 elif last is not None: self.last = last elif count is not None: self.last = self.first + count - 1 else: raise RuntimeError("Cannot specify both last and count") self.removed = set() if self.count <= 0: raise RuntimeError("Bad first/last range") if first == 0: raise RuntimeError("Can't allocate 0th address") if self.host_size < 0 or self.host_size > 32: raise RuntimeError("Bad network") if IPAddr(self.last | self.network.toUnsigned()) not in self: raise RuntimeError("Bad first/last range") def __repr__ (self): return str(self) def __str__ (self): t = self.network.toUnsigned() t = (IPAddr(t|self.first),IPAddr(t|self.last)) return "<Addresses from %s to %s>" % t @property def subnet_mask (self): return IPAddr(((1<<self.network_size)-1) << self.host_size) @property def count (self): return self.last - self.first + 1 def __contains__ (self, item): item = IPAddr(item) if item in self.removed: return False n = item.toUnsigned() mask = (1<<self.host_size)-1 nm = (n & mask) | self.network.toUnsigned() if nm != n: return False if (n & mask) == mask: return False if (n & mask) < self.first: return False if (n & mask) > self.last: return False return True def append (self, item): item = IPAddr(item) if item not in self.removed: if item in self: raise RuntimeError("%s is already in this pool" % (item,)) else: raise RuntimeError("%s does not belong in this pool" % (item,)) self.removed.remove(item) def remove (self, item): item = IPAddr(item) if item not in self: raise RuntimeError("%s not in this pool" % (item,)) self.removed.add(item) def __len__ (self): return (self.last-self.first+1) - len(self.removed) def __getitem__ (self, index): if index < 0: raise RuntimeError("Negative indices not allowed") if index >= len(self): raise IndexError("Item does not exist") c = self.first # Use a heuristic to find the first element faster (we hope) # Note this means that removing items changes the order of # our "list". 
c += len(self.removed) while c > self.last: c -= self.count while True: addr = IPAddr(c | self.network.toUnsigned()) if addr not in self.removed: assert addr in self index -= 1 if index < 0: return addr c += 1 if c > self.last: c -= self.count class DHCPPacketContextBase (object): """ Superclass for context for handling DHCP messages Along with overriding the methods, a subclass must expose the following attributes: * .parsed - the incoming DHCP message as parsed packets * .client_eth - Ethernet address of client """ def send (self, ipp): """ Should send an IP packet object """ raise NotImplementedError() def __str__ (self): """ Format the context for logging purposes """ return "<DHCP Request>" class DHCPDBase (EventMixin): """ DHCP server base class To create a working version, you must: * Get incoming DHCP packets somehow (as parsed packet objects) * Wrap them in a subclass of DHCPPacketContext * Call _process_message(), passing it the context """ _eventMixin_events = set([DHCPLease]) def __init__ (self, ip_address = "192.168.0.254", router_address = (), dns_address = (), pool = None, subnet = None): def fix_addr (addr, backup): if addr is None: return None if addr == (): return IPAddr(backup) return IPAddr(addr) self.ip_addr = IPAddr(ip_address) self.router_addr = fix_addr(router_address, ip_address) self.dns_addr = fix_addr(dns_address, self.router_addr) if pool is None: self.pool = [IPAddr("192.168.0."+str(x)) for x in range(100,199)] self.subnet = IPAddr(subnet or "255.255.255.0") else: self.pool = pool self.subnet = subnet if hasattr(pool, 'subnet_mask'): self.subnet = pool.subnet_mask if self.subnet is None: raise RuntimeError("You must specify a subnet mask or use a " "pool with a subnet hint") self.lease_time = 60 * 60 # An hour #TODO: Actually make them expire :) self.offers = {} # Eth -> IP we offered self.leases = {} # Eth -> IP we leased if self.ip_addr in self.pool: log.debug("Removing my own IP (%s) from address pool", self.ip_addr) self.pool.remove(self.ip_addr) def _get_pool (self, ctxt): """ Get an IP pool for this event. Return None to not issue an IP. You should probably log this. """ return self.pool def _process_message (self, ctxt): """ Subclasses should call this to process incoming DHCP messages """ ipp = ctxt.parsed.find('ipv4') if not ipp or not ipp.parsed: return if ipp.dstip not in (IP_ANY,IP_BROADCAST,self.ip_addr): return # Is it full and proper DHCP? 
nwp = ipp.payload if not nwp or not nwp.parsed or not isinstance(nwp, pkt.udp): return if nwp.srcport != pkt.dhcp.CLIENT_PORT: return if nwp.dstport != pkt.dhcp.SERVER_PORT: return p = nwp.payload if not p: log.debug("%s: no packet", str(ctxt)) return if not isinstance(p, pkt.dhcp): log.debug("%s: packet is not DHCP", str(ctxt)) return if not p.parsed: log.debug("%s: DHCP packet not parsed", str(ctxt)) return if p.op != p.BOOTREQUEST: return t = p.options.get(p.MSG_TYPE_OPT) if t is None: return pool = self._get_pool(ctxt) if pool is None: return if t.type == p.DISCOVER_MSG: self.exec_discover(ctxt, p, pool) elif t.type == p.REQUEST_MSG: self.exec_request(ctxt, p, pool) elif t.type == p.RELEASE_MSG: self.exec_release(ctxt, p, pool) def reply (self, ctxt, msg): orig = ctxt.parsed.find('dhcp') broadcast = (orig.flags & orig.BROADCAST_FLAG) != 0 msg.op = msg.BOOTREPLY msg.chaddr = ctxt.client_eth msg.htype = 1 msg.hlen = 6 msg.xid = orig.xid msg.add_option(pkt.DHCP.DHCPServerIdentifierOption(self.ip_addr)) ipp = pkt.ipv4(srcip = self.ip_addr) ipp.dstip = ctxt.parsed.find('ipv4').srcip if broadcast: ipp.dstip = IP_BROADCAST ipp.protocol = ipp.UDP_PROTOCOL udpp = pkt.udp() udpp.srcport = pkt.dhcp.SERVER_PORT udpp.dstport = pkt.dhcp.CLIENT_PORT udpp.payload = msg ipp.payload = udpp ctxt.send(ipp) def nak (self, ctxt, msg = None): if msg is None: msg = pkt.dhcp() msg.add_option(pkt.DHCP.DHCPMsgTypeOption(msg.NAK_MSG)) msg.siaddr = self.ip_addr self.reply(ctxt, msg) def exec_release (self, ctxt, p, pool): src = ctxt.client_eth if src != p.chaddr: log.warn("%s tried to release %s with bad chaddr" % (src,p.ciaddr)) return if self.leases.get(p.chaddr) != p.ciaddr: log.warn("%s tried to release unleased %s" % (src,p.ciaddr)) return del self.leases[p.chaddr] pool.append(p.ciaddr) log.info("%s released %s" % (src,p.ciaddr)) def exec_request (self, ctxt, p, pool): if not p.REQUEST_IP_OPT in p.options: # Uhhh... 
return wanted_ip = p.options[p.REQUEST_IP_OPT].addr src = ctxt.client_eth got_ip = None if src in self.leases: if wanted_ip != self.leases[src]: pool.append(self.leases[src]) del self.leases[src] else: got_ip = self.leases[src] if got_ip is None: if src in self.offers: if wanted_ip != self.offers[src]: pool.append(self.offers[src]) else: got_ip = self.offers[src] del self.offers[src] if got_ip is None: if wanted_ip in pool: pool.remove(wanted_ip) got_ip = wanted_ip if got_ip is None: log.warn("%s asked for un-offered %s", src, wanted_ip) self.nak(ctxt) return assert got_ip == wanted_ip self.leases[src] = got_ip ev = DHCPLease(src, got_ip) self.raiseEvent(ev) if ev._nak: self.nak(ctxt) return log.info("Leased %s to %s" % (got_ip, src)) reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.ACK_MSG)) reply.yiaddr = wanted_ip reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(ctxt, reply) def exec_discover (self, ctxt, p, pool): reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.OFFER_MSG)) src = ctxt.client_eth if src in self.leases: offer = self.leases[src] del self.leases[src] self.offers[src] = offer else: offer = self.offers.get(src) if offer is None: if len(pool) == 0: log.error("Out of IP addresses") self.nak(ctxt) return offer = pool[0] if p.REQUEST_IP_OPT in p.options: wanted_ip = p.options[p.REQUEST_IP_OPT].addr if wanted_ip in pool: offer = wanted_ip pool.remove(offer) self.offers[src] = offer reply.yiaddr = offer reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(ctxt, reply) def fill (self, wanted_opts, msg): """ Fill out some options in msg """ if msg.SUBNET_MASK_OPT in wanted_opts: msg.add_option(pkt.DHCP.DHCPSubnetMaskOption(self.subnet)) if msg.ROUTERS_OPT in wanted_opts and self.router_addr is not None: msg.add_option(pkt.DHCP.DHCPRoutersOption(self.router_addr)) if msg.DNS_SERVER_OPT in wanted_opts and self.dns_addr is not None: msg.add_option(pkt.DHCP.DHCPDNSServersOption(self.dns_addr)) msg.add_option(pkt.DHCP.DHCPIPAddressLeaseTimeOption(self.lease_time)) class OpenFlowDHCPPacketContext (DHCPPacketContextBase): def __init__ (self, event): self.event = event self.parsed = event.parsed self.client_eth = self.parsed.src def __str__ (self): return str(self.event.connection) def send (self, ipp): # Use DPID as EthAddr of server server_addr = EthAddr(dpid_to_str(self.event.dpid,True).split("|")[0].replace("-",":")) ethp = pkt.ethernet(src=server_addr, dst=self.event.parsed.src) ethp.type = pkt.ethernet.IP_TYPE if ipp.dstip == IP_BROADCAST: ethp.dst = pkt.ETHERNET.ETHER_BROADCAST ethp.payload = ipp po = of.ofp_packet_out(data=ethp.pack()) po.actions.append(of.ofp_action_output(port=self.event.port)) self.event.connection.send(po) class DHCPD (DHCPDBase): """ Turns OpenFlow switches into DHCP servers """ _servers = [] def __init__ (self, ip_address = "192.168.0.254", router_address = (), dns_address = (), pool = None, subnet = None, install_flow = True, dpid = None, ports = None): super(DHCPD, self).__init__(ip_address, router_address, dns_address, pool, subnet) self._install_flow = install_flow if dpid is None: self.dpid = None else: try: dpid = int(dpid) except: dpid = util.str_to_dpid(dpid) self.dpid = dpid if ports is None: self.ports = None else: self.ports = set(ports) if self.ports: assert self.dpid 
is not None # Doesn't make sense self._servers.append(self) core.openflow.addListeners(self) def _handle_ConnectionUp (self, event): if self.dpid is not None and self.dpid != event.dpid: return if self._install_flow: msg = self._get_flow_mod() event.connection.send(msg) def _handle_PacketIn (self, event): # Is it to us? (Or at least not specifically NOT to us...) if self.dpid is not None and self.dpid != event.dpid: return if self.ports: for p in self.ports: if p == event.port: break if p in event.connection.ports: if event.connection.ports[p].port_no == event.port: break else: return ctxt = OpenFlowDHCPPacketContext(event) self._process_message(ctxt) @classmethod def get_server_for_port (cls, dpid, port): """ Given a dpid.port, returns DHCPD instance responsible for it or None If there is a server, but the connection to the relevant switch is down, returns None. """ for s in cls.servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: return s port_no = conn.ports.get(port) if port_no is None: continue port_no = port_no.port_no for p in s.ports: p = conn.ports.get(p) if p is None: continue if p.port_no == port_no: return s return None @classmethod def get_ports_for_dpid (cls, dpid): """ Given a dpid, returns all port,server that are configured for it If the switch is disconnected, returns None. """ r = set() for s in cls._servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: for p in conn.ports: r.add((p.port_no,s)) else: for p in s.ports: p = conn.ports.get(p) if p is None: continue r.add((p.port_no,s)) return r def _get_flow_mod (self, msg_type=of.ofp_flow_mod): """ Get flow mods that will send DHCP to the controller """ #TODO: We might over-match right now since we don't limit by port msg = msg_type() msg.match = of.ofp_match() msg.match.dl_type = pkt.ethernet.IP_TYPE msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL #msg.match.nw_dst = IP_BROADCAST msg.match.tp_src = pkt.dhcp.CLIENT_PORT msg.match.tp_dst = pkt.dhcp.SERVER_PORT msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER)) #msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) return msg def default (no_flow = False, network = "192.168.0.0/24", # Address range first = 100, last = 199, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = ()): # Auto """ Launch DHCP server defaulting to 192.168.0.100-199 """ launch(no_flow, network, first, last, count, ip, router, dns) def launch (no_flow = False, network = "192.168.0.0/24", # Address range first = 1, last = None, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = (), # Auto dpid = None, # All ports = None, # All __INSTANCE__ = None): """ Launch DHCP server Defaults to serving 192.168.0.1 to 192.168.0.253 network Subnet to allocate addresses from first First'th address in subnet to use (256 is x.x.1.0 in a /16) last Last'th address in subnet to use count Alternate way to specify last address to use ip IP to use for DHCP server router Router IP to tell clients. Defaults to 'ip'. 'None' will stop the server from telling clients anything dns DNS IP to tell clients. Defaults to 'router'. 'None' will stop the server from telling clients anything. 
""" def fixint (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None return int(i) def fix (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None if i == '()': return () return i first,last,count = map(fixint,(first,last,count)) router,dns = map(fix,(router,dns)) if ports is not None: ports = ports.split(",") ports = set(int(p) if p.isdigit() else p for p in ports) pool = SimpleAddressPool(network = network, first = first, last = last, count = count) inst = DHCPD(install_flow = not no_flow, pool = pool, ip_address = ip, router_address = router, dns_address = dns, dpid = dpid, ports = ports) if __INSTANCE__[0] == 0: # First or only instance core.register(inst) log.debug("DHCP serving a%s", str(pool)[2:-1])
{ "content_hash": "d6aa2f31269e2eb96a8a201add152b85", "timestamp": "", "source": "github", "line_count": 665, "max_line_length": 91, "avg_line_length": 29.043609022556392, "alnum_prop": 0.6094542818680749, "repo_name": "MurphyMc/pox", "id": "61ec636173c049841d7456178a7a2943fa76eae8", "size": "19894", "binary": false, "copies": "1", "ref": "refs/heads/halosaur", "path": "pox/proto/dhcpd.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "382" }, { "name": "C++", "bytes": "19091" }, { "name": "HTML", "bytes": "999" }, { "name": "JavaScript", "bytes": "9048" }, { "name": "Python", "bytes": "1580617" }, { "name": "Shell", "bytes": "22540" } ], "symlink_target": "" }
import unittest import parlai.utils.testing as testing_utils MODEL_OPTS = { 'n_layers': 4, 'embedding_size': 16, 'ffn_size': 32, 'n_heads': 2, 'num_epochs': 0.1, 'batchsize': 32, 'truncate': 8, } @testing_utils.skipUnlessGPU class TestModelParallel(unittest.TestCase): def test_polyencoder(self): testing_utils.train_model( { 'task': 'integration_tests', 'model': 'transformer/polyencoder', 'model_parallel': True, 'candidates': 'batch', 'poly_n_codes': 4, **MODEL_OPTS, } ) with self.assertRaises(RuntimeError): testing_utils.train_model( { 'task': 'integration_tests', 'model': 'transformer/polyencoder', 'data_parallel': True, 'model_parallel': True, 'candidates': 'batch', 'poly_n_codes': 4, **MODEL_OPTS, } ) def test_ranker(self): testing_utils.train_model( { 'task': 'integration_tests', 'model': 'transformer/ranker', 'candidates': 'batch', 'model_parallel': True, **MODEL_OPTS, } ) with self.assertRaises(RuntimeError): testing_utils.train_model( { 'task': 'integration_tests', 'model': 'transformer/ranker', 'data_parallel': True, 'model_parallel': True, 'candidates': 'batch', **MODEL_OPTS, } ) def test_classifier(self): testing_utils.train_model( { 'task': 'integration_tests:classifier', 'classes': ['one', 'zero'], 'model': 'transformer/classifier', 'model_parallel': True, **MODEL_OPTS, } ) with self.assertRaises(RuntimeError): testing_utils.train_model( { 'task': 'integration_tests:classifier', 'classes': ['one', 'zero'], 'model': 'transformer/classifier', 'data_parallel': True, 'model_parallel': True, **MODEL_OPTS, } ) def test_transformer_generator(self): testing_utils.train_model( { 'task': 'integration_tests', 'model': 'transformer/generator', 'model_parallel': True, **MODEL_OPTS, } ) @testing_utils.skipUnlessGPU class TestDataParallel(unittest.TestCase): def test_polyencoder(self): testing_utils.train_model( { 'task': 'integration_tests', 'model': 'transformer/polyencoder', 'candidates': 'batch', 'poly_n_codes': 4, 'data_parallel': True, **MODEL_OPTS, } ) def test_ranker(self): testing_utils.train_model( { 'task': 'integration_tests', 'model': 'transformer/ranker', 'candidates': 'batch', 'data_parallel': True, **MODEL_OPTS, } ) def test_classifier(self): testing_utils.train_model( { 'task': 'integration_tests:classifier', 'classes': ['one', 'zero'], 'data_parallel': True, 'model': 'transformer/classifier', **MODEL_OPTS, } )
{ "content_hash": "1db42ddaf2bc72c1079e93f89cb9264b", "timestamp": "", "source": "github", "line_count": 132, "max_line_length": 59, "avg_line_length": 29.265151515151516, "alnum_prop": 0.43308309603934764, "repo_name": "facebookresearch/ParlAI", "id": "ed4a8f31a4eab0e65c74ce82d4312f06430a8544", "size": "4063", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/test_multigpu.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "2000" }, { "name": "CSS", "bytes": "38474" }, { "name": "Cuda", "bytes": "4118" }, { "name": "Dockerfile", "bytes": "1218" }, { "name": "HTML", "bytes": "645771" }, { "name": "JavaScript", "bytes": "405110" }, { "name": "Makefile", "bytes": "289" }, { "name": "Python", "bytes": "6802410" }, { "name": "Shell", "bytes": "26147" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models
from django.core.management import call_command


def forwards_func(apps, schema_editor):
    call_command("loaddata", "0001_init.json", "--app=blog")


def reverse_func(apps, schema_editor):
    pass


class Migration(migrations.Migration):

    dependencies = [('blog', '0005_auto_20170314_2102'),]

    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
{ "content_hash": "4654826206bbf9fb3fca49980d6099a8", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 60, "avg_line_length": 22.285714285714285, "alnum_prop": 0.7051282051282052, "repo_name": "andersonjonathan/Navitas", "id": "0e26f22741e819866ff89aa5d9bd1fb4e26ecbb9", "size": "468", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "navitas/blog/migrations/0006_fixture_loading.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "340824" }, { "name": "HTML", "bytes": "72128" }, { "name": "JavaScript", "bytes": "2863820" }, { "name": "Python", "bytes": "131283" } ], "symlink_target": "" }
import pyblish.api


class ValidateCurrentSaveFile(pyblish.api.ContextPlugin):
    """File must be saved before publishing"""

    label = "Validate File Saved"
    order = pyblish.api.ValidatorOrder - 0.1
    hosts = ["maya", "houdini"]

    def process(self, context):
        current_file = context.data["currentFile"]
        if not current_file:
            raise RuntimeError("File not saved")
{ "content_hash": "db231099a7878181cf2e63cb2d5d9cca", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 57, "avg_line_length": 28.571428571428573, "alnum_prop": 0.665, "repo_name": "mindbender-studio/config", "id": "552b8b81e859ee72faf2406258a7160507bb06b3", "size": "400", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "polly/plugins/publish/validate_file_saved.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "130394" } ], "symlink_target": "" }
import select

from . import *


class SenserPlatformBackend(ShellPlatformBackend, FilePlatformBackend, MemoryPlatformBackend, BackupPlatformBackend):
    def __init__(self, dev):
        self.dev = dev

    def start(self):
        self.dev.readHasp()

    def interactiveShell(self, conn):
        self.dev.setTerminalEnable(False)
        self.dev.setTerminalEnable(True)
        print('Terminal activated. Press <CTRL+C> to exit.')
        self.transferSenserTerminal(conn)
        self.dev.setTerminalEnable(False)

    def writeFile(self, path, f):
        self.dev.writeFile(path, f.read())

    def readFile(self, path, f, sizeCb=None):
        self.dev.readFile(path, f)

    def readMemory(self, offset, size, f):
        self.dev.readMemory(offset, size, f)

    def readBackup(self, id):
        return self.dev.readBackup(id)

    def writeBackup(self, id, data):
        self.dev.writeBackup(id, data)

    def syncBackup(self):
        self.dev.saveBackup(0)

    def getBackupStatus(self):
        return self.dev.getBackupPresetDataStatus()

    def getBackupData(self):
        return self.dev.getBackupPresetData(True)

    def setBackupData(self, data):
        self.dev.setBackupPresetData(2, data)

    def setBackupProtection(self, enable):
        self.dev.setBackupId1(enable)

    def transferSenserTerminal(self, conn):
        if not conn:
            return
        try:
            while True:
                ready = select.select([conn], [conn], [], 0)
                if ready[1]:
                    d = self.dev.dev.readTerminal()
                    conn.sendall(d)
                if ready[0]:
                    d = conn.recv(0x40)
                    if d == b'':
                        break
                    self.dev.dev.writeTerminal(d)
        except (ConnectionError, KeyboardInterrupt):
            pass
        conn.close()
{ "content_hash": "ef79a17e649fb54d54f7970ff37c10d0", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 117, "avg_line_length": 22.52173913043478, "alnum_prop": 0.7091377091377091, "repo_name": "ma1co/Sony-PMCA-RE", "id": "521f5a2e879c0f7c6d479633d8733d0231c3e12d", "size": "1554", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pmca/platform/backend/senser.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1312" }, { "name": "C++", "bytes": "18511" }, { "name": "Makefile", "bytes": "631" }, { "name": "Python", "bytes": "212048" } ], "symlink_target": "" }
""" Serializer definition.""" import json import datetime import decimal from django.db import models class DatetimeDecimalEncoder(json.JSONEncoder): """ Encoder for datetime and decimal serialization. Usage: json.dumps(object, cls=DatetimeDecimalEncoder) NOTE: _iterencode does not work """ def default(self, o): """ Encode JSON. :return str: A JSON encoded string """ if isinstance(o, (datetime.datetime, datetime.date, datetime.time)): return o.isoformat() if isinstance(o, decimal.Decimal): return float(o) return json.JSONEncoder.default(self, o) class SerializerMeta: encoder = DatetimeDecimalEncoder fieldnames_include = [] fieldnames_exclude = [] class Serializer(object): """ Serializer class. Serializer has methods dump_document and load_document to convert model into document. Document is dictionary with following format: { "id" // The document SHOULD contain an "id" key. } * The "id" key in a document represents a unique identifier for the underlying resource, scoped to its type. It MUST be a string which SHOULD only contain alphanumeric characters, dashes and underscores. In scenarios where uniquely identifying information between client and server is unnecessary (e.g., read-only, transient entities), JSON API allows for omitting the "id" key. Serializer: 1) Check custom serializer for field in Resource 2) Try to use predefined serializers for fields 3) Try convert to string """ Meta = SerializerMeta @classmethod def dump_document(cls, instance, fields_own=None, fields_to_many=None): """ Get document for model_instance. redefine dump rule for field x: def dump_document_x :param django.db.models.Model instance: model instance :param list<Field> or None fields: model_instance field to dump :return dict: document Related documents are not included to current one. In case of to-many field serialization ensure that models_instance has been select_related so, no database calls would be executed. Method ensures that document has cls.Meta.fieldnames_include and does not have cls.Meta.fieldnames_exclude Steps: 1) fieldnames_include could be properties, but not related models. Add them to fields_own. """ if fields_own is not None: fields_own = {f.name for f in fields_own} else: fields_own = { f.name for f in instance._meta.fields if f.rel is None and f.serialize } fields_own.add('id') fields_own = (fields_own | set(cls.Meta.fieldnames_include))\ - set(cls.Meta.fieldnames_exclude) document = {} # Include own fields for fieldname in fields_own: field_serializer = getattr( cls, "dump_document_{}".format(fieldname), None) if field_serializer is not None: value = field_serializer(instance) else: value = getattr(instance, fieldname) try: field = instance._meta.get_field(fieldname) except models.fields.FieldDoesNotExist: # Field is property, value already calculated pass else: if isinstance(field, models.fields.files.FileField): # TODO: Serializer depends on API here. value = cls.Meta.api.base_url + value.url elif isinstance(field, models.CommaSeparatedIntegerField): value = [v for v in value] document[fieldname] = value # Include to-one fields. It does not require database calls for field in instance._meta.fields: fieldname = "{}_id".format(field.name) # NOTE: check field is not related to parent model to exclude # <class>_ptr fields. OneToOne relationship field.rel.multiple = # False. Here make sure relationship is to parent model. 
if field.rel and not field.rel.multiple \ and isinstance(instance, field.rel.to): continue if field.rel and fieldname not in cls.Meta.fieldnames_exclude: document["links"] = document.get("links") or {} document["links"][field.name] = getattr(instance, fieldname) # Include to-many fields. It requires database calls. At this point we # assume that model was prefetch_related with child objects, which would # be included into 'linked' attribute. Here we need to add ids of linked # objects. To avoid database calls, iterate over objects manually and # get ids. fields_to_many = fields_to_many or [] for field in fields_to_many: document["links"] = document.get("links") or {} document["links"][field.related_resource_name] = [ obj.id for obj in getattr(instance, field.name).all()] return document @classmethod def dump_documents(cls, resource, model_instances, fields_own=None, include_structure=None): model_instances = list(model_instances) model_info = resource.Meta.model_info include_structure = include_structure or [] fields_to_many = set() for include_object in include_structure: f = include_object["field_path"][0] if f.category == f.CATEGORIES.TO_MANY: fields_to_many.add(f) data = { "data": [ cls.dump_document( m, fields_own=fields_own, fields_to_many=fields_to_many ) for m in model_instances ] } # TODO: move links generation to other method. if model_info.fields_to_one or fields_to_many: data["links"] = {} for field in model_info.fields_to_one: linkname = "{}.{}".format(resource.Meta.name_plural, field.name) data["links"].update({ linkname: resource.Meta.api.api_url + "/" + field.name + "/{" + linkname + "}" }) if include_structure: data["linked"] = [] for include_object in include_structure: current_models = set(model_instances) for field in include_object["field_path"]: related_models = set() for m in current_models: if field.category == field.CATEGORIES.TO_MANY: related_models |= set(getattr(m, field.name).all()) if field.category == field.CATEGORIES.TO_ONE: related_model = getattr(m, field.name) if related_model is not None: related_models.add(related_model) current_models = related_models related_model_info = include_object["model_info"] related_resource = include_object["resource"] for rel_model in current_models: linked_obj = related_resource.dump_document( rel_model, related_model_info.fields_own ) linked_obj["type"] = include_object["type"] data["linked"].append(linked_obj) return data
{ "content_hash": "a7403e87e629ead2c138e7865f085587", "timestamp": "", "source": "github", "line_count": 210, "max_line_length": 80, "avg_line_length": 36.44285714285714, "alnum_prop": 0.579641970469097, "repo_name": "pavlov99/jsonapi", "id": "9c152041c9216f43e4b8e36b09e1d7f018396441", "size": "7653", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jsonapi/serializers.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "179" }, { "name": "HTML", "bytes": "3572" }, { "name": "Makefile", "bytes": "1780" }, { "name": "Python", "bytes": "159620" } ], "symlink_target": "" }
from unittest import TestCase

from tests.unit_tests.utils.test_handlers import HandlerClassInInit
from tests.unit_tests.utils.test_handlers.another_handler import AnotherHandlerClass
from responsebot.utils.handler_utils import discover_handler_classes
from tests.unit_tests.utils.test_handlers.handler import HandlerClass, NonRegisteredHandlerClass, HandlerClass2


class DiscoverHandlerClassesTestCase(TestCase):
    def test_discover_handler_classes(self):
        handler_classes = discover_handler_classes('tests.unit_tests.utils.test_handlers')

        # discover handler class in package init
        self.assertIn(HandlerClassInInit, handler_classes)

        # discover handler class in handler
        self.assertIn(HandlerClass, handler_classes)
        self.assertIn(HandlerClass2, handler_classes)
        self.assertNotIn(NonRegisteredHandlerClass, handler_classes)

        # discover handler class in multiple modules
        self.assertIn(AnotherHandlerClass, handler_classes)

    def test_discover_handler_classes_in_module(self):
        handler_classes = discover_handler_classes('tests.unit_tests.utils.test_handlers.handler')

        # discover handler class in handler
        self.assertIn(HandlerClass, handler_classes)
        self.assertIn(HandlerClass2, handler_classes)
        self.assertNotIn(NonRegisteredHandlerClass, handler_classes)
{ "content_hash": "24d0076d321625a8155f590e846d0aed", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 111, "avg_line_length": 44.25806451612903, "alnum_prop": 0.7682215743440233, "repo_name": "invinst/ResponseBot", "id": "d30222d2dee9440f1c0476dc0c0fafdb5bb211a4", "size": "1372", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/unit_tests/utils/test_handler_utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "101568" } ], "symlink_target": "" }
from __future__ import unicode_literals import frappe, json from frappe.oauth import OAuthWebRequestValidator, WebApplicationServer from oauthlib.oauth2 import FatalClientError, OAuth2Error from werkzeug import url_fix from six.moves.urllib.parse import quote, urlencode, urlparse from frappe.integrations.doctype.oauth_provider_settings.oauth_provider_settings import get_oauth_settings from frappe import _ def get_oauth_server(): if not getattr(frappe.local, 'oauth_server', None): oauth_validator = OAuthWebRequestValidator() frappe.local.oauth_server = WebApplicationServer(oauth_validator) return frappe.local.oauth_server def get_urlparams_from_kwargs(param_kwargs): arguments = param_kwargs if arguments.get("data"): arguments.pop("data") if arguments.get("cmd"): arguments.pop("cmd") return urlencode(arguments) @frappe.whitelist() def approve(*args, **kwargs): r = frappe.request uri = url_fix(r.url.replace("+"," ")) http_method = r.method body = r.get_data() headers = r.headers try: scopes, frappe.flags.oauth_credentials = get_oauth_server().validate_authorization_request(uri, http_method, body, headers) headers, body, status = get_oauth_server().create_authorization_response(uri=frappe.flags.oauth_credentials['redirect_uri'], \ body=body, headers=headers, scopes=scopes, credentials=frappe.flags.oauth_credentials) uri = headers.get('Location', None) frappe.local.response["type"] = "redirect" frappe.local.response["location"] = uri except FatalClientError as e: return e except OAuth2Error as e: return e @frappe.whitelist(allow_guest=True) def authorize(*args, **kwargs): #Fetch provider URL from settings oauth_settings = get_oauth_settings() params = get_urlparams_from_kwargs(kwargs) request_url = urlparse(frappe.request.url) success_url = request_url.scheme + "://" + request_url.netloc + "/api/method/frappe.integrations.oauth2.approve?" + params failure_url = frappe.form_dict["redirect_uri"] + "?error=access_denied" if frappe.session['user']=='Guest': #Force login, redirect to preauth again. frappe.local.response["type"] = "redirect" frappe.local.response["location"] = "/login?redirect-to=/api/method/frappe.integrations.oauth2.authorize?" + quote(params.replace("+"," ")) elif frappe.session['user']!='Guest': try: r = frappe.request uri = url_fix(r.url) http_method = r.method body = r.get_data() headers = r.headers scopes, frappe.flags.oauth_credentials = get_oauth_server().validate_authorization_request(uri, http_method, body, headers) skip_auth = frappe.db.get_value("OAuth Client", frappe.flags.oauth_credentials['client_id'], "skip_authorization") unrevoked_tokens = frappe.get_all("OAuth Bearer Token", filters={"status":"Active"}) if skip_auth or (oauth_settings["skip_authorization"] == "Auto" and len(unrevoked_tokens)): frappe.local.response["type"] = "redirect" frappe.local.response["location"] = success_url else: #Show Allow/Deny screen. 
response_html_params = frappe._dict({ "client_id": frappe.db.get_value("OAuth Client", kwargs['client_id'], "app_name"), "success_url": success_url, "failure_url": failure_url, "details": scopes }) resp_html = frappe.render_template("templates/includes/oauth_confirmation.html", response_html_params) frappe.respond_as_web_page("Confirm Access", resp_html) except FatalClientError as e: return e except OAuth2Error as e: return e @frappe.whitelist(allow_guest=True) def get_token(*args, **kwargs): r = frappe.request uri = url_fix(r.url) http_method = r.method body = r.form headers = r.headers #Check whether frappe server URL is set frappe_server_url = frappe.db.get_value("Social Login Key", "frappe", "base_url") or None if not frappe_server_url: frappe.throw(_("Please set Base URL in Social Login Key for Frappe")) try: headers, body, status = get_oauth_server().create_token_response(uri, http_method, body, headers, frappe.flags.oauth_credentials) out = frappe._dict(json.loads(body)) if not out.error and "openid" in out.scope: token_user = frappe.db.get_value("OAuth Bearer Token", out.access_token, "user") token_client = frappe.db.get_value("OAuth Bearer Token", out.access_token, "client") client_secret = frappe.db.get_value("OAuth Client", token_client, "client_secret") if token_user in ["Guest", "Administrator"]: frappe.throw(_("Logged in as Guest or Administrator")) import hashlib id_token_header = { "typ":"jwt", "alg":"HS256" } id_token = { "aud": token_client, "exp": int((frappe.db.get_value("OAuth Bearer Token", out.access_token, "expiration_time") - frappe.utils.datetime.datetime(1970, 1, 1)).total_seconds()), "sub": frappe.db.get_value("User Social Login", {"parent":token_user, "provider": "frappe"}, "userid"), "iss": frappe_server_url, "at_hash": frappe.oauth.calculate_at_hash(out.access_token, hashlib.sha256) } import jwt id_token_encoded = jwt.encode(id_token, client_secret, algorithm='HS256', headers=id_token_header) out.update({"id_token":str(id_token_encoded)}) frappe.local.response = out except FatalClientError as e: return e @frappe.whitelist(allow_guest=True) def revoke_token(*args, **kwargs): r = frappe.request uri = url_fix(r.url) http_method = r.method body = r.form headers = r.headers headers, body, status = get_oauth_server().create_revocation_response(uri, headers=headers, body=body, http_method=http_method) frappe.local.response['http_status_code'] = status if status == 200: return "success" else: return "bad request" @frappe.whitelist() def openid_profile(*args, **kwargs): picture = None first_name, last_name, avatar, name = frappe.db.get_value("User", frappe.session.user, ["first_name", "last_name", "user_image", "name"]) frappe_userid = frappe.db.get_value("User Social Login", {"parent":frappe.session.user, "provider": "frappe"}, "userid") request_url = urlparse(frappe.request.url) base_url = frappe.db.get_value("Social Login Key", "frappe", "base_url") or None if avatar: if validate_url(avatar): picture = avatar elif base_url: picture = base_url + '/' + avatar else: picture = request_url.scheme + "://" + request_url.netloc + avatar user_profile = frappe._dict({ "sub": frappe_userid, "name": " ".join(filter(None, [first_name, last_name])), "given_name": first_name, "family_name": last_name, "email": name, "picture": picture }) frappe.local.response = user_profile def validate_url(url_string): try: result = urlparse(url_string) if result.scheme and result.scheme in ["http", "https", "ftp", "ftps"]: return True else: return False except: return False
{ "content_hash": "c78c22c58ca864989170f384fd7bc7a1", "timestamp": "", "source": "github", "line_count": 191, "max_line_length": 158, "avg_line_length": 35.41361256544503, "alnum_prop": 0.7056475458308693, "repo_name": "vjFaLk/frappe", "id": "1e4b1fb142be0ad4e1ddc11dd38efa1308d5485f", "size": "6764", "binary": false, "copies": "1", "ref": "refs/heads/parsimony-production", "path": "frappe/integrations/oauth2.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "290337" }, { "name": "HTML", "bytes": "179507" }, { "name": "JavaScript", "bytes": "2179734" }, { "name": "Less", "bytes": "146135" }, { "name": "Makefile", "bytes": "99" }, { "name": "Python", "bytes": "2774237" }, { "name": "SCSS", "bytes": "15721" }, { "name": "Shell", "bytes": "3875" }, { "name": "Vue", "bytes": "95109" } ], "symlink_target": "" }
import time
import unittest
from selenium import webdriver
import settings


class DisableEnableTeacher(unittest.TestCase):

    def setUp(self):
        self.driver = None
        self.base_url = settings.test_parameters.get("education_base_url")

    def test_E89_disable_enable_teacher(self):
        web_types = settings.test_parameters.get("web_types")
        for web_type in web_types:
            if web_type == 'firefox':
                self.driver = webdriver.Firefox()
            elif web_type == 'chrome':
                self.driver = webdriver.Chrome()
            self.driver.implicitly_wait(30)
            driver = self.driver
            driver.get(self.base_url)
            driver.maximize_window()
            ###########################################
            # Precondition: log in to the system and create the test data
            ###########################################
            driver.find_element_by_id("input_username").clear()
            driver.find_element_by_id("input_username").send_keys(settings.test_parameters.get("admin_username"))
            driver.find_element_by_id("input_password").clear()
            driver.find_element_by_id("input_password").send_keys(settings.test_parameters.get("admin_password"))
            driver.find_element_by_id("login_btn").click()
            time.sleep(5)
            driver.find_element_by_link_text(u"用户管理").click()
            time.sleep(1)
            driver.find_element_by_link_text(u"教师").click()
            time.sleep(3)
            driver.find_element_by_id("create_user").click()
            time.sleep(1)
            driver.find_element_by_id("username").clear()
            driver.find_element_by_id("username").send_keys("teacher01")
            driver.find_element_by_id("fullname").clear()
            driver.find_element_by_id("fullname").send_keys("teacher01")
            driver.find_element_by_id("email").clear()
            driver.find_element_by_id("email").send_keys("teacher01@vinzor.com")
            driver.find_element_by_id("password").clear()
            driver.find_element_by_id("password").send_keys("123456")
            driver.find_element_by_id("confirm").clear()
            driver.find_element_by_id("confirm").send_keys("123456")
            time.sleep(3)
            driver.find_element_by_id("confirm_action").click()
            time.sleep(5)
            self.assertEqual("用户 teacher01 创建成功",
                             driver.find_element_by_class_name("gritter-without-image").
                             find_element_by_tag_name("p").text)
            ###########################################
            # Step 1: disable the new user; a disabled user cannot log in
            ###########################################
            time.sleep(8)
            driver.find_element_by_css_selector("input.form-control.input-sm").clear()
            driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("teacher01")
            time.sleep(5)
            if web_type == 'firefox':
                driver.find_element_by_id("status_btn").click()
            elif web_type == 'chrome':
                element = driver.find_element_by_id("status_btn")
                webdriver.ActionChains(driver).move_to_element(element).click().perform()
            time.sleep(5)
            self.assertEqual("用户 teacher01 禁用成功",
                             driver.find_element_by_class_name("gritter-without-image").
                             find_element_by_tag_name("p").text)
            time.sleep(8)
            driver.find_element_by_css_selector("span.user-info").click()
            time.sleep(1)
            driver.find_element_by_link_text(u"退出").click()
            time.sleep(2)
            driver.find_element_by_id("input_username").clear()
            driver.find_element_by_id("input_username").send_keys("teacher01")
            driver.find_element_by_id("input_password").clear()
            driver.find_element_by_id("input_password").send_keys("123456")
            driver.find_element_by_id("login_btn").click()
            time.sleep(2)
            login_text = driver.find_element_by_id("login_state").text
            self.assertEqual("用户处于禁用状态禁止登录", login_text)
            ###########################################
            # Step 2: activate the new user; an activated user can log in
            ###########################################
            driver.find_element_by_id("input_username").clear()
            driver.find_element_by_id("input_username").send_keys("admin")
            driver.find_element_by_id("input_password").clear()
            driver.find_element_by_id("input_password").send_keys("admin123")
            driver.find_element_by_id("login_btn").click()
            time.sleep(5)
            driver.find_element_by_link_text(u"用户管理").click()
            time.sleep(1)
            driver.find_element_by_link_text(u"教师").click()
            time.sleep(3)
            driver.find_element_by_css_selector("input.form-control.input-sm").clear()
            driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("teacher01")
            time.sleep(5)
            if web_type == 'firefox':
                driver.find_element_by_id("status_btn").click()
            elif web_type == 'chrome':
                element = driver.find_element_by_id("status_btn")
                webdriver.ActionChains(driver).move_to_element(element).click().perform()
            time.sleep(3)
            self.assertEqual("用户 teacher01 激活成功",
                             driver.find_element_by_class_name("gritter-without-image").
                             find_element_by_tag_name("p").text)
            time.sleep(8)
            driver.find_element_by_css_selector("span.user-info").click()
            time.sleep(1)
            driver.find_element_by_link_text(u"退出").click()
            time.sleep(2)
            driver.find_element_by_id("input_username").clear()
            driver.find_element_by_id("input_username").send_keys("teacher01")
            driver.find_element_by_id("input_password").clear()
            driver.find_element_by_id("input_password").send_keys("123456")
            driver.find_element_by_id("login_btn").click()
            time.sleep(5)
            driver.find_element_by_css_selector("span.user-info").click()
            time.sleep(1)
            driver.find_element_by_link_text(u"退出").click()
            time.sleep(2)
            ###########################################
            # Postcondition: remove the created user so the test can be rerun
            ###########################################
            driver.find_element_by_id("input_username").clear()
            driver.find_element_by_id("input_username").send_keys("admin")
            driver.find_element_by_id("input_password").clear()
            driver.find_element_by_id("input_password").send_keys("admin123")
            driver.find_element_by_id("login_btn").click()
            time.sleep(5)
            driver.find_element_by_link_text(u"用户管理").click()
            time.sleep(1)
            driver.find_element_by_link_text(u"教师").click()
            time.sleep(3)
            driver.find_element_by_css_selector("input.form-control.input-sm").clear()
            driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("teacher01")
            time.sleep(5)
            if web_type == 'firefox':
                driver.find_element_by_id("select_all").click()
            elif web_type == 'chrome':
                element = driver.find_element_by_id("select_all")
                webdriver.ActionChains(driver).move_to_element(element).click().perform()
            time.sleep(3)
            driver.find_element_by_id("delete_user").click()
            time.sleep(3)
            driver.find_element_by_id("confirm_delete").click()
            time.sleep(3)
            driver.quit()

    def tearDown(self):
        self.driver.quit()


if __name__ == "__main__":
    unittest.main()
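
The test above synchronises with the page through fixed time.sleep calls. A hedged alternative, not part of the original suite, is Selenium's explicit wait support; wait_for_visible and the 30-second timeout below are illustrative choices that could replace most of the sleeps.

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def wait_for_visible(driver, locator, timeout=30):
    """Block until the located element is visible instead of sleeping a fixed time."""
    return WebDriverWait(driver, timeout).until(
        EC.visibility_of_element_located(locator))


# For example, instead of time.sleep(5) before reading the notification text:
# wait_for_visible(driver, (By.CLASS_NAME, "gritter-without-image"))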
{ "content_hash": "0444c227b84d9aac279299a97e0bbd22", "timestamp": "", "source": "github", "line_count": 155, "max_line_length": 113, "avg_line_length": 51.28387096774193, "alnum_prop": 0.5360422694678576, "repo_name": "sysuwuhaibin/vatus", "id": "876cc8ff6c83c315fcb9443ae927ce43310b6e78", "size": "8218", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "vatus/testcases/test_education/test_user_management/test_E89_disable_enable_teacher.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "214309" } ], "symlink_target": "" }
""" Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix) Copyright (C) 2020 Stefano Gottardo (original implementation module) Miscellaneous utility functions for directory builder SPDX-License-Identifier: MIT See LICENSES/MIT.md for more information. """ from __future__ import absolute_import, division, unicode_literals import os import resources.lib.common as common from resources.lib.globals import G try: # Python 2 unicode except NameError: # Python 3 unicode = str # pylint: disable=redefined-builtin def _get_custom_thumb_path(thumb_file_name): return os.path.join(G.ADDON_DATA_PATH, 'resources', 'media', thumb_file_name) def add_items_previous_next_page(directory_items, pathitems, perpetual_range_selector, sub_genre_id=None): if pathitems and perpetual_range_selector: if 'previous_start' in perpetual_range_selector: params = {'perpetual_range_start': perpetual_range_selector.get('previous_start'), 'sub_genre_id': sub_genre_id if perpetual_range_selector.get('previous_start') == 0 else None} previous_page_item = { 'url': common.build_url(pathitems=pathitems, params=params, mode=G.MODE_DIRECTORY), 'label': common.get_local_string(30148), 'art': {'thumb': _get_custom_thumb_path('FolderPagePrevious.png')}, 'is_folder': True, 'properties': {'specialsort': 'top'} # Force an item to stay on top (not documented in Kodi) } directory_items.insert(0, previous_page_item) if 'next_start' in perpetual_range_selector: params = {'perpetual_range_start': perpetual_range_selector.get('next_start')} next_page_item = { 'url': common.build_url(pathitems=pathitems, params=params, mode=G.MODE_DIRECTORY), 'label': common.get_local_string(30147), 'art': {'thumb': _get_custom_thumb_path('FolderPageNext.png')}, 'is_folder': True, 'properties': {'specialsort': 'bottom'} # Force an item to stay on bottom (not documented in Kodi) } directory_items.append(next_page_item) def get_param_watched_status_by_profile(): """ Get a the current profile guid, will be used as parameter in the ListItem's (of videos), so that Kodi database can distinguish the data (like watched status) according to each Netflix profile :return: a dictionary to be add to 'build_url' params """ return {'profile_guid': G.LOCAL_DB.get_active_profile_guid()} def get_availability_message(video_data): return (video_data.get('summary', {}).get('availabilityDateMessaging') or video_data.get('availability', {}).get('availabilityDate') or common.get_local_string(10005)) # "Not available"
{ "content_hash": "eec9f9f89cd34dff31cda82448ff20a2", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 116, "avg_line_length": 45.55555555555556, "alnum_prop": 0.651219512195122, "repo_name": "hawkeyexp/plugin.video.netflix", "id": "e936a6e41f911d12de52d7bbdd73ccfd6b200292", "size": "2894", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "resources/lib/services/nfsession/directorybuilder/dir_builder_utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "3038" }, { "name": "Python", "bytes": "1172784" } ], "symlink_target": "" }
from chess.pieces.color import Color
from chess.pieces.piece import Piece
from chess.point import Point


class Bishop(Piece):

    def render(self):
        if self.color == Color.white:
            return 'B'
        else:
            return 'b'

    def valid_move(self, board, position):
        x_delta = position.x - self.position.x
        y_delta = position.y - self.position.y

        # A bishop may only move along a diagonal, so |dy| must equal |dx|
        # and both must be non-zero. Checking x_delta first also avoids a
        # ZeroDivisionError for purely vertical moves or a "move" onto the
        # current square.
        if x_delta == 0 or abs(y_delta) != abs(x_delta):
            return False

        # Walk each square strictly between the current square and the
        # destination along the diagonal, checking for blocking pieces.
        x_step = 1 if x_delta > 0 else -1
        y_step = 1 if y_delta > 0 else -1

        for offset in range(1, abs(x_delta)):
            square = Point(self.position.x + offset * x_step,
                           self.position.y + offset * y_step)
            # If there is another piece anywhere between the current
            # and destination square, this move is not allowed
            if board.piece_at(square):
                return False

        return True
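
A small usage sketch for the move validation above. It assumes, purely for illustration, that a piece can be constructed from a colour and a starting Point and that a board only has to expose piece_at; adjust both to the project's real Piece and Board signatures.

from chess.pieces.bishop import Bishop
from chess.pieces.color import Color
from chess.point import Point


class EmptyBoard(object):
    """Minimal stand-in board on which no square is occupied."""

    def piece_at(self, point):
        return None


# Assumed constructor signature (color, position); the real one may differ.
bishop = Bishop(Color.white, Point(2, 0))
board = EmptyBoard()

print(bishop.valid_move(board, Point(5, 3)))  # True: clear diagonal move
print(bishop.valid_move(board, Point(2, 4)))  # False: straight moves are rejected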
{ "content_hash": "673cd6c8a583d5dd50321cd0a1e5fff2", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 90, "avg_line_length": 35.303030303030305, "alnum_prop": 0.5939914163090129, "repo_name": "fredojones/chess", "id": "4e5be5eb26905266c2a1fa6d57217a6eda0c7d3c", "size": "1165", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "chess/pieces/bishop.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "19375" } ], "symlink_target": "" }
def azureml_main(frame1):

    ## import libraries
    import matplotlib
    matplotlib.use('agg')  # Set backend
    from pandas.tools.plotting import scatter_matrix
    import pandas.tools.rplot as rplot
    import matplotlib.pyplot as plt
    import numpy as np

    ## Create a pair-wise scatter plot
    Azure = False

    ## If in Azure, frame1 is passed to function
    if(Azure == False):
        frame1 = eeframe

    fig1 = plt.figure(1, figsize=(10, 10))
    ax = fig1.gca()
    scatter_matrix(frame1, alpha=0.3, diagonal='kde', ax = ax)
    plt.show()
    if(Azure == True): fig1.savefig('scatter1.png')

    ## Create conditioned scatter plots.
    col_list = ["Relative Compactness", "Surface Area", "Wall Area", "Roof Area", 'Glazing Area', "Glazing Area Distribution"]
    indx = 0
    for col in col_list:
        if(frame1[col].dtype in [np.int64, np.int32, np.float64]):
            indx += 1
            fig = plt.figure(figsize = (12,6))
            fig.clf()
            ax = fig.gca()
            plot = rplot.RPlot(frame1, x = col, y = 'Heating Load')
            plot.add(rplot.TrellisGrid(['Overall Height','Orientation']))
            plot.add(rplot.GeomScatter())
            plot.add(rplot.GeomPolyFit(degree=2))
            ax.set_xlabel(col)
            ax.set_ylabel('Heating Load')
            plot.render(plt.gcf())
            if(Azure == True): fig.savefig('scatter' + col + '.png')

    ## Histograms of features by Overall Height
    col_list = ["Relative Compactness", "Surface Area", "Wall Area", "Roof Area", 'Glazing Area', "Glazing Area Distribution", "Heating Load"]
    for col in col_list:
        temp7 = frame1.ix[frame1['Overall Height'] == 7, col].as_matrix()
        temp35 = frame1.ix[frame1['Overall Height'] == 3.5, col].as_matrix()
        fig = plt.figure(figsize = (12,6))
        fig.clf()
        ax7 = fig.add_subplot(1, 2, 1)
        ax35 = fig.add_subplot(1, 2, 2)
        ax7.hist(temp7, bins = 20)
        ax7.set_title('Histogram of ' + col + '\n for Overall Height of 7')
        ax35.hist(temp35, bins = 20)
        ax35.set_title('Histogram of ' + col + '\n for Overall Height of 3.5')
        if(Azure == True): fig.savefig('hists_' + col + '.png')

    ## Create boxplots.
    for col in col_list:
        if(frame1[col].dtype in [np.int64, np.int32, np.float64]):
            fig = plt.figure(figsize = (6,6))
            fig.clf()
            ax = fig.gca()
            frame1[[col, 'Overall Height']].boxplot(column = [col], ax = ax, by = ['Overall Height'])
            ax.set_xlabel('')
            if(Azure == True): fig.savefig('box_' + col + '.png')

    ## In Azure, the function returns the data frame
    return frame1
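
When Azure is left at False, the function ignores its argument and reads a module-level eeframe, which is how the script is exercised outside Azure ML Studio. A minimal local-run sketch, where energy_efficiency.csv is a made-up file name standing in for a local copy of the Energy Efficiency dataset:

import pandas as pd

# Define the global that the Azure == False branch expects, then call the
# entry point the same way Azure ML Studio would.
eeframe = pd.read_csv('energy_efficiency.csv')  # hypothetical local file
frame_out = azureml_main(eeframe)
print(frame_out.shape)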
{ "content_hash": "545d0ed2889134ebe4d39f5b5fe18430", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 101, "avg_line_length": 35.27906976744186, "alnum_prop": 0.5154911008569545, "repo_name": "applecool/DataScience", "id": "5fe5b4c2049be5858598e8e4c2f14fbdd4b126a7", "size": "3171", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Azure ML Studio Experiments/Energy Efficiencies/VisualizeEE.py", "mode": "33261", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "7514" }, { "name": "Python", "bytes": "22477" }, { "name": "R", "bytes": "15441" } ], "symlink_target": "" }
import unittest import socket import threading from nose.plugins.skip import SkipTest from dummyserver.server import ( TornadoServerThread, SocketServerThread, DEFAULT_CERTS, ProxyServerThread, ) has_ipv6 = hasattr(socket, 'has_ipv6') class SocketDummyServerTestCase(unittest.TestCase): """ A simple socket-based server is created for this class that is good for exactly one request. """ scheme = 'http' host = 'localhost' @classmethod def _start_server(cls, socket_handler): ready_event = threading.Event() cls.server_thread = SocketServerThread(socket_handler=socket_handler, ready_event=ready_event, host=cls.host) cls.server_thread.start() ready_event.wait() cls.port = cls.server_thread.port @classmethod def tearDownClass(cls): if hasattr(cls, 'server_thread'): cls.server_thread.join(0.1) class HTTPDummyServerTestCase(unittest.TestCase): scheme = 'http' host = 'localhost' host_alt = '127.0.0.1' # Some tests need two hosts certs = DEFAULT_CERTS @classmethod def _start_server(cls): ready_event = threading.Event() cls.server_thread = TornadoServerThread(host=cls.host, scheme=cls.scheme, certs=cls.certs, ready_event=ready_event) cls.server_thread.start() ready_event.wait() cls.port = cls.server_thread.port @classmethod def _stop_server(cls): cls.server_thread.stop() cls.server_thread.join() @classmethod def setUpClass(cls): cls._start_server() @classmethod def tearDownClass(cls): cls._stop_server() class HTTPSDummyServerTestCase(HTTPDummyServerTestCase): scheme = 'https' host = 'localhost' certs = DEFAULT_CERTS class HTTPDummyProxyTestCase(unittest.TestCase): http_host = 'localhost' http_host_alt = '127.0.0.1' https_host = 'localhost' https_host_alt = '127.0.0.1' https_certs = DEFAULT_CERTS proxy_host = 'localhost' proxy_host_alt = '127.0.0.1' @classmethod def setUpClass(cls): cls.http_thread = TornadoServerThread(host=cls.http_host, scheme='http') cls.http_thread._start_server() cls.http_port = cls.http_thread.port cls.https_thread = TornadoServerThread( host=cls.https_host, scheme='https', certs=cls.https_certs) cls.https_thread._start_server() cls.https_port = cls.https_thread.port ready_event = threading.Event() cls.proxy_thread = ProxyServerThread( host=cls.proxy_host, ready_event=ready_event) cls.proxy_thread.start() ready_event.wait() cls.proxy_port = cls.proxy_thread.port @classmethod def tearDownClass(cls): cls.proxy_thread.stop() cls.proxy_thread.join() class IPv6HTTPDummyServerTestCase(HTTPDummyServerTestCase): host = '::1' @classmethod def setUpClass(cls): if not has_ipv6: raise SkipTest('IPv6 not available') else: super(IPv6HTTPDummyServerTestCase, cls).setUpClass()
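
A sketch of how a one-shot test is typically written on top of SocketDummyServerTestCase: the handler receives the listening socket, serves a single canned HTTP response, and closes. The class name, handler body, and response bytes below are illustrative rather than part of this module.

class ExampleOneRequestTest(SocketDummyServerTestCase):

    def test_single_canned_response(self):
        def socket_handler(listener):
            # Accept exactly one connection, ignore the request, reply, close.
            sock = listener.accept()[0]
            sock.recv(65536)
            sock.send(b'HTTP/1.1 200 OK\r\n'
                      b'Content-Length: 0\r\n'
                      b'\r\n')
            sock.close()

        self._start_server(socket_handler)
        # A client such as urllib3 would now connect to self.host:self.port.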
{ "content_hash": "f3cb9ca76c1fabbbe66d2e487e77239f", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 77, "avg_line_length": 28.3, "alnum_prop": 0.5977620730270907, "repo_name": "katakumpo/urllib3", "id": "99a7c8cd173c603d49491cc7242cebefcbe96df1", "size": "3396", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dummyserver/testcase.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError from django.test import TestCase from django.urls import reverse from rest_framework.authtoken.models import Token from rest_framework import status from rest_framework.test import APITestCase from .models import Farmer from .views import FarmerViewSet class FarmerMoTest(TestCase): def test_string_representation(self): farmer = Farmer(first_name='John', surname='Smith', town='Harrogate') self.assertEqual(str(farmer), '%s %s' % (farmer.first_name, farmer.surname)) def test_verbose_name(self): self.assertEqual(Farmer._meta.verbose_name, 'farmer') def test_verbose_plural_name(self): self.assertEqual(Farmer._meta.verbose_name_plural, 'farmers') def test_getting_full_name(self): farmer = Farmer(first_name='Tom Doggo', surname='Pupper', town='Harrogate') self.assertEqual(farmer.get_full_name(), 'Tom Doggo Pupper') def test_getting_short_name(self): farmer = Farmer(first_name='Tom Doggo', surname='Pupper', town='Harrogate') self.assertEqual(farmer.get_short_name(), 'T. Pupper') def test_fail_if_surname_is_not_supplied(self): farmer = Farmer(first_name='Tom', town='Leeds') with self.assertRaises(ValidationError): farmer.full_clean() def test_fail_if_first_name_is_not_supplied(self): farmer = Farmer(surname='Pupper', town='Harrogate') with self.assertRaises(ValidationError): farmer.full_clean() def test_fail_if_town_is_not_supplied(self): farmer = Farmer(first_name='Test', surname='Family Name') with self.assertRaises(ValidationError): farmer.full_clean() class FarmersAPITest(APITestCase): def setUp(self): self.superuser = get_user_model().objects.create_superuser( 'john', 'john@example.com', 'somepassword') self.superuser_token = Token.objects.create(user=self.superuser) self.data = [ Farmer(first_name='John', surname='Smith', town='Harrogate'), Farmer(first_name='Tom', surname='Darcy', town='London'), ] Farmer.objects.bulk_create(self.data) def test_can_get_farmers_list(self): response = self.client.get(reverse('farmer-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_farmers_list(self): response = self.client.get(reverse('farmer-list'), format='json') [self.assertContains(response, x) for x in ['John', 'Tom', 'London', 'Harrogate']] def test_can_get_farmer_detail(self): random_farmer = Farmer.objects.order_by('?').first() url = reverse('farmer-detail', kwargs=dict(pk=random_farmer.pk)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_farmer_detail(self): random_farmer = Farmer.objects.order_by('?')[0] url = reverse('farmer-detail', kwargs=dict(pk=random_farmer.pk)) response = self.client.get(url, format='json') [self.assertContains(response, x) for x in [ random_farmer.first_name, random_farmer.surname, random_farmer.town, random_farmer.pk]] def test_guest_cannot_delete_farmer(self): random_farmer = Farmer.objects.order_by('?')[0] url = reverse('farmer-detail', kwargs=dict(pk=random_farmer.pk)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_admin_can_delete_farmer(self): random_farmer = Farmer.objects.order_by('?')[0] url = reverse('farmer-detail', kwargs=dict(pk=random_farmer.pk)) self._authenticate_superuser() response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) def test_guest_cannot_update_farmer(self): random_farmer = Farmer.objects.order_by('?')[0] url = 
reverse('farmer-detail', kwargs=dict(pk=random_farmer.pk)) response = self.client.patch(url, data={'first_name': 'Updated name'}) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_admin_can_update_farmer(self): random_farmer = Farmer.objects.order_by('?')[0] url = reverse('farmer-detail', kwargs=dict(pk=random_farmer.pk)) self._authenticate_superuser() response = self.client.patch(url, data={'first_name': 'Updated name'}) self.assertEqual(response.status_code, status.HTTP_200_OK) # Check if data has been updated in database random_farmer.refresh_from_db() self.assertEqual(random_farmer.first_name, 'Updated name') def test_guest_cannot_create_farmer(self): response = self.client.post( reverse('farmer-list'), data={'first_name': 'Test', 'surname': 'Test2', 'town': 'TestTown'} ) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_admin_can_create_farmer(self): data = {'first_name': 'Test', 'surname': 'Test2', 'town': 'TestTown'} self._authenticate_superuser() response = self.client.post(reverse('farmer-list'), data=data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) # Check if data has been added to database self.assertTrue(Farmer.objects.filter(**data).exists()) def _authenticate_superuser(self): self.client.force_authenticate( user=self.superuser, token=self.superuser_token)
{ "content_hash": "ab0abb17168345e4921528de66622424", "timestamp": "", "source": "github", "line_count": 162, "max_line_length": 79, "avg_line_length": 35.54320987654321, "alnum_prop": 0.6490100729419938, "repo_name": "tm-kn/farmers-api", "id": "187d84c16c2d44747e96dac45d9216ba1539ea49", "size": "5758", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "farmers_api/farmers/tests.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "12715" } ], "symlink_target": "" }
import glob import io import os import platform import sys import pytest import numpy as np from ..verify import VerifyError from ....extern.six.moves import range from ....extern import six from ....io import fits from ....tests.helper import raises, catch_warnings, ignore_warnings from ....utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning from ....utils.compat import NUMPY_LT_1_12 from . import FitsTestCase class TestHDUListFunctions(FitsTestCase): def test_update_name(self): hdul = fits.open(self.data('o4sp040b0_raw.fits')) hdul[4].name = 'Jim' hdul[4].ver = 9 assert hdul[('JIM', 9)].header['extname'] == 'JIM' def test_hdu_file_bytes(self): hdul = fits.open(self.data('checksum.fits')) res = hdul[0].filebytes() assert res == 11520 res = hdul[1].filebytes() assert res == 8640 def test_hdulist_file_info(self): hdul = fits.open(self.data('checksum.fits')) res = hdul.fileinfo(0) def test_fileinfo(**kwargs): assert res['datSpan'] == kwargs.get('datSpan', 2880) assert res['resized'] == kwargs.get('resized', False) assert res['filename'] == self.data('checksum.fits') assert res['datLoc'] == kwargs.get('datLoc', 8640) assert res['hdrLoc'] == kwargs.get('hdrLoc', 0) assert res['filemode'] == 'readonly' res = hdul.fileinfo(1) test_fileinfo(datLoc=17280, hdrLoc=11520) hdu = fits.ImageHDU(data=hdul[0].data) hdul.insert(1, hdu) res = hdul.fileinfo(0) test_fileinfo(resized=True) res = hdul.fileinfo(1) test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None) res = hdul.fileinfo(2) test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520) def test_create_from_multiple_primary(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145 Ensure that a validation error occurs when saving an HDUList containing multiple PrimaryHDUs. """ hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()]) pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'), output_verify='exception') def test_append_primary_to_empty_list(self): # Tests appending a Simple PrimaryHDU to an empty HDUList. 
hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_extension_to_empty_list(self): """Tests appending a Simple ImageHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_table_extension_to_empty_list(self): """Tests appending a Simple Table ExtensionHDU to a empty HDUList.""" hdul = fits.HDUList() hdul1 = fits.open(self.data('tb.fits')) hdul.append(hdul1[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_groupshdu_to_empty_list(self): """Tests appending a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.append(hdu) info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '', '1 Groups 0 Parameters')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_primary_to_non_empty_list(self): """Tests appending a Simple PrimaryHDU to a non-empty HDUList.""" hdul = fits.open(self.data('arange.fits')) hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''), (1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_extension_to_non_empty_list(self): """Tests appending a Simple ExtensionHDU to a non-empty HDUList.""" hdul = fits.open(self.data('tb.fits')) hdul.append(hdul[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''), (2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info @raises(ValueError) def test_append_groupshdu_to_non_empty_list(self): """Tests appending a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) hdu = fits.GroupsHDU() hdul.append(hdu) def test_insert_primary_to_empty_list(self): """Tests inserting a Simple PrimaryHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_extension_to_empty_list(self): """Tests inserting a Simple ImageHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, 'PRIMARY', 1, 
'PrimaryHDU', 4, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_table_extension_to_empty_list(self): """Tests inserting a Simple Table ExtensionHDU to a empty HDUList.""" hdul = fits.HDUList() hdul1 = fits.open(self.data('tb.fits')) hdul.insert(0, hdul1[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_groupshdu_to_empty_list(self): """Tests inserting a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.insert(0, hdu) info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '', '1 Groups 0 Parameters')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_primary_to_non_empty_list(self): """Tests inserting a Simple PrimaryHDU to a non-empty HDUList.""" hdul = fits.open(self.data('arange.fits')) hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(1, hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''), (1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_extension_to_non_empty_list(self): """Tests inserting a Simple ExtensionHDU to a non-empty HDUList.""" hdul = fits.open(self.data('tb.fits')) hdul.insert(1, hdul[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''), (2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_groupshdu_to_non_empty_list(self): """Tests inserting a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) hdu = fits.GroupsHDU() with pytest.raises(ValueError): hdul.insert(1, hdu) info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '', '1 Groups 0 Parameters'), (1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')] hdul.insert(0, hdu) assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info @raises(ValueError) def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self): """ Tests inserting a Simple GroupsHDU to the beginning of an HDUList that that already contains a GroupsHDU. """ hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.insert(0, hdu) hdul.insert(0, hdu) def test_insert_extension_to_primary_in_non_empty_list(self): # Tests inserting a Simple ExtensionHDU to a non-empty HDUList. 
hdul = fits.open(self.data('tb.fits')) hdul.insert(0, hdul[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''), (2, '', 1, 'ImageHDU', 12, (), '', ''), (3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_image_extension_to_primary_in_non_empty_list(self): """ Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList as the primary HDU. """ hdul = fits.open(self.data('tb.fits')) hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''), (1, '', 1, 'ImageHDU', 12, (), '', ''), (2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_filename(self): """Tests the HDUList filename method.""" hdul = fits.open(self.data('tb.fits')) name = hdul.filename() assert name == self.data('tb.fits') def test_file_like(self): """ Tests the use of a file like object with no tell or seek methods in HDUList.writeto(), HDULIST.flush() or astropy.io.fits.writeto() """ hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul = fits.HDUList() hdul.append(hdu) tmpfile = open(self.temp('tmpfile.fits'), 'wb') hdul.writeto(tmpfile) tmpfile.close() info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert fits.info(self.temp('tmpfile.fits'), output=False) == info def test_file_like_2(self): hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) tmpfile = open(self.temp('tmpfile.fits'), 'wb') hdul = fits.open(tmpfile, mode='ostream') hdul.append(hdu) hdul.flush() tmpfile.close() hdul.close() info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert fits.info(self.temp('tmpfile.fits'), output=False) == info def test_file_like_3(self): tmpfile = open(self.temp('tmpfile.fits'), 'wb') fits.writeto(tmpfile, np.arange(100, dtype=np.int32)) tmpfile.close() info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert fits.info(self.temp('tmpfile.fits'), output=False) == info def test_new_hdu_extname(self): """ Tests that new extension HDUs that are added to an HDUList can be properly indexed by their EXTNAME/EXTVER (regression test for ticket:48). """ f = fits.open(self.data('test0.fits')) hdul = fits.HDUList() hdul.append(f[0].copy()) hdul.append(fits.ImageHDU(header=f[1].header)) assert hdul[1].header['EXTNAME'] == 'SCI' assert hdul[1].header['EXTVER'] == 1 assert hdul.index_of(('SCI', 1)) == 1 def test_update_filelike(self): """Test opening a file-like object in update mode and resizing the HDU. 
""" sf = io.BytesIO() arr = np.zeros((100, 100)) hdu = fits.PrimaryHDU(data=arr) hdu.writeto(sf) sf.seek(0) arr = np.zeros((200, 200)) hdul = fits.open(sf, mode='update') hdul[0].data = arr hdul.flush() sf.seek(0) hdul = fits.open(sf) assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_flush_readonly(self): """Test flushing changes to a file opened in a read only mode.""" oldmtime = os.stat(self.data('test0.fits')).st_mtime hdul = fits.open(self.data('test0.fits')) hdul[0].header['FOO'] = 'BAR' with catch_warnings(AstropyUserWarning) as w: hdul.flush() assert len(w) == 1 assert 'mode is not supported' in str(w[0].message) assert oldmtime == os.stat(self.data('test0.fits')).st_mtime def test_fix_extend_keyword(self): hdul = fits.HDUList() hdul.append(fits.PrimaryHDU()) hdul.append(fits.ImageHDU()) del hdul[0].header['EXTEND'] hdul.verify('silentfix') assert 'EXTEND' in hdul[0].header assert hdul[0].header['EXTEND'] is True def test_fix_malformed_naxisj(self): """ Tests that malformed NAXISj values are fixed sensibly. """ hdu = fits.open(self.data('arange.fits')) # Malform NAXISj header data hdu[0].header['NAXIS1'] = 11.0 hdu[0].header['NAXIS2'] = '10.0' hdu[0].header['NAXIS3'] = '7' # Axes cache needs to be malformed as well hdu[0]._axes = [11.0, '10.0', '7'] # Perform verification including the fix hdu.verify('silentfix') # Check that malformed data was converted assert hdu[0].header['NAXIS1'] == 11 assert hdu[0].header['NAXIS2'] == 10 assert hdu[0].header['NAXIS3'] == 7 def test_fix_wellformed_naxisj(self): """ Tests that wellformed NAXISj values are not modified. """ hdu = fits.open(self.data('arange.fits')) # Fake new NAXISj header data hdu[0].header['NAXIS1'] = 768 hdu[0].header['NAXIS2'] = 64 hdu[0].header['NAXIS3'] = 8 # Axes cache needs to be faked as well hdu[0]._axes = [768, 64, 8] # Perform verification including the fix hdu.verify('silentfix') # Check that malformed data was converted assert hdu[0].header['NAXIS1'] == 768 assert hdu[0].header['NAXIS2'] == 64 assert hdu[0].header['NAXIS3'] == 8 def test_new_hdulist_extend_keyword(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114 Tests that adding a PrimaryHDU to a new HDUList object updates the EXTEND keyword on that HDU. """ h0 = fits.Header() hdu = fits.PrimaryHDU(header=h0) sci = fits.ImageHDU(data=np.array(10)) image = fits.HDUList([hdu, sci]) image.writeto(self.temp('temp.fits')) assert 'EXTEND' in hdu.header assert hdu.header['EXTEND'] is True def test_replace_memmaped_array(self): # Copy the original before we modify it hdul = fits.open(self.data('test0.fits')) hdul.writeto(self.temp('temp.fits')) hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True) old_data = hdul[1].data.copy() hdul[1].data = hdul[1].data + 1 hdul.close() hdul = fits.open(self.temp('temp.fits'), memmap=True) assert ((old_data + 1) == hdul[1].data).all() def test_open_file_with_end_padding(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106 Open files with end padding bytes. 
""" hdul = fits.open(self.data('test0.fits'), do_not_scale_image_data=True) info = hdul.info(output=False) hdul.writeto(self.temp('temp.fits')) with open(self.temp('temp.fits'), 'ab') as f: f.seek(0, os.SEEK_END) f.write(b'\0' * 2880) with ignore_warnings(): assert info == fits.info(self.temp('temp.fits'), output=False, do_not_scale_image_data=True) def test_open_file_with_bad_header_padding(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136 Open files with nulls for header block padding instead of spaces. """ a = np.arange(100).reshape((10, 10)) hdu = fits.PrimaryHDU(data=a) hdu.writeto(self.temp('temp.fits')) # Figure out where the header padding begins and fill it with nulls end_card_pos = str(hdu.header).index('END' + ' ' * 77) padding_start = end_card_pos + 80 padding_len = 2880 - padding_start with open(self.temp('temp.fits'), 'r+b') as f: f.seek(padding_start) f.write('\0'.encode('ascii') * padding_len) with catch_warnings(AstropyUserWarning) as w: with fits.open(self.temp('temp.fits')) as hdul: assert (hdul[0].data == a).all() assert ('contains null bytes instead of spaces' in str(w[0].message)) assert len(w) == 1 assert len(hdul) == 1 assert str(hdul[0].header) == str(hdu.header) def test_update_with_truncated_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148 Test that saving an update where the header is shorter than the original header doesn't leave a stump from the old header in the file. """ data = np.arange(100) hdu = fits.PrimaryHDU(data=data) idx = 1 while len(hdu.header) < 34: hdu.header['TEST{}'.format(idx)] = idx idx += 1 hdu.writeto(self.temp('temp.fits'), checksum=True) with fits.open(self.temp('temp.fits'), mode='update') as hdul: # Modify the header, forcing it to be rewritten hdul[0].header['TEST1'] = 2 with fits.open(self.temp('temp.fits')) as hdul: assert (hdul[0].data == data).all() @pytest.mark.xfail(platform.system() == 'Windows' and not NUMPY_LT_1_12, reason='https://github.com/astropy/astropy/issues/5797') def test_update_resized_header(self): """ Test saving updates to a file where the header is one block smaller than before, and in the case where the heade ris one block larger than before. """ data = np.arange(100) hdu = fits.PrimaryHDU(data=data) idx = 1 while len(str(hdu.header)) <= 2880: hdu.header['TEST{}'.format(idx)] = idx idx += 1 orig_header = hdu.header.copy() hdu.writeto(self.temp('temp.fits')) with fits.open(self.temp('temp.fits'), mode='update') as hdul: while len(str(hdul[0].header)) > 2880: del hdul[0].header[-1] with fits.open(self.temp('temp.fits')) as hdul: assert hdul[0].header == orig_header[:-1] assert (hdul[0].data == data).all() with fits.open(self.temp('temp.fits'), mode='update') as hdul: idx = 101 while len(str(hdul[0].header)) <= 2880 * 2: hdul[0].header['TEST{}'.format(idx)] = idx idx += 1 # Touch something in the data too so that it has to be rewritten hdul[0].data[0] = 27 with fits.open(self.temp('temp.fits')) as hdul: assert hdul[0].header[:-37] == orig_header[:-1] assert hdul[0].data[0] == 27 assert (hdul[0].data[1:] == data[1:]).all() def test_update_resized_header2(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150 This is similar to test_update_resized_header, but specifically tests a case of multiple consecutive flush() calls on the same HDUList object, where each flush() requires a resize. 
""" data1 = np.arange(100) data2 = np.arange(100) + 100 phdu = fits.PrimaryHDU(data=data1) hdu = fits.ImageHDU(data=data2) phdu.writeto(self.temp('temp.fits')) with fits.open(self.temp('temp.fits'), mode='append') as hdul: hdul.append(hdu) with fits.open(self.temp('temp.fits'), mode='update') as hdul: idx = 1 while len(str(hdul[0].header)) <= 2880 * 2: hdul[0].header['TEST{}'.format(idx)] = idx idx += 1 hdul.flush() hdul.append(hdu) with fits.open(self.temp('temp.fits')) as hdul: assert (hdul[0].data == data1).all() assert hdul[1].header == hdu.header assert (hdul[1].data == data2).all() assert (hdul[2].data == data2).all() @ignore_warnings() def test_hdul_fromstring(self): """ Test creating the HDUList structure in memory from a string containing an entire FITS file. This is similar to test_hdu_fromstring but for an entire multi-extension FITS file at once. """ # Tests HDUList.fromstring for all of PyFITS' built in test files def test_fromstring(filename): with fits.open(filename) as hdul: orig_info = hdul.info(output=False) with open(filename, 'rb') as f: dat = f.read() hdul2 = fits.HDUList.fromstring(dat) assert orig_info == hdul2.info(output=False) for idx in range(len(hdul)): assert hdul[idx].header == hdul2[idx].header if hdul[idx].data is None or hdul2[idx].data is None: assert hdul[idx].data == hdul2[idx].data elif (hdul[idx].data.dtype.fields and hdul2[idx].data.dtype.fields): # Compare tables for n in hdul[idx].data.names: c1 = hdul[idx].data[n] c2 = hdul2[idx].data[n] assert (c1 == c2).all() elif (any(dim == 0 for dim in hdul[idx].data.shape) or any(dim == 0 for dim in hdul2[idx].data.shape)): # For some reason some combinations of Python and Numpy # on Windows result in MemoryErrors when trying to work # on memmap arrays with more than one dimension but # some dimensions of size zero, so include a special # case for that return hdul[idx].data.shape == hdul2[idx].data.shape else: np.testing.assert_array_equal(hdul[idx].data, hdul2[idx].data) for filename in glob.glob(os.path.join(self.data_dir, '*.fits')): if sys.platform == 'win32' and filename == 'zerowidth.fits': # Running this test on this file causes a crash in some # versions of Numpy on Windows. See PyFITS ticket # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174 continue test_fromstring(filename) # Test that creating an HDUList from something silly raises a TypeError pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c']) def test_save_backup(self): """Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121 Save backup of file before flushing changes. 
""" self.copy_file('scale.fits') with ignore_warnings(): with fits.open(self.temp('scale.fits'), mode='update', save_backup=True) as hdul: # Make some changes to the original file to force its header # and data to be rewritten hdul[0].header['TEST'] = 'TEST' hdul[0].data[0] = 0 assert os.path.exists(self.temp('scale.fits.bak')) with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul1: with fits.open(self.temp('scale.fits.bak'), do_not_scale_image_data=True) as hdul2: assert hdul1[0].header == hdul2[0].header assert (hdul1[0].data == hdul2[0].data).all() with ignore_warnings(): with fits.open(self.temp('scale.fits'), mode='update', save_backup=True) as hdul: # One more time to see if multiple backups are made hdul[0].header['TEST2'] = 'TEST' hdul[0].data[0] = 1 assert os.path.exists(self.temp('scale.fits.bak')) assert os.path.exists(self.temp('scale.fits.bak.1')) def test_replace_mmap_data(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/25 Replacing the mmap'd data of one file with mmap'd data from a different file should work. """ arr_a = np.arange(10) arr_b = arr_a * 2 def test(mmap_a, mmap_b): hdu_a = fits.PrimaryHDU(data=arr_a) hdu_a.writeto(self.temp('test_a.fits'), overwrite=True) hdu_b = fits.PrimaryHDU(data=arr_b) hdu_b.writeto(self.temp('test_b.fits'), overwrite=True) hdul_a = fits.open(self.temp('test_a.fits'), mode='update', memmap=mmap_a) hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b) hdul_a[0].data = hdul_b[0].data hdul_a.close() hdul_b.close() hdul_a = fits.open(self.temp('test_a.fits')) assert np.all(hdul_a[0].data == arr_b) with ignore_warnings(): test(True, True) # Repeat the same test but this time don't mmap A test(False, True) # Finally, without mmaping B test(True, False) def test_replace_mmap_data_2(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/25 Replacing the mmap'd data of one file with mmap'd data from a different file should work. Like test_replace_mmap_data but with table data instead of image data. """ arr_a = np.arange(10) arr_b = arr_a * 2 def test(mmap_a, mmap_b): col_a = fits.Column(name='a', format='J', array=arr_a) col_b = fits.Column(name='b', format='J', array=arr_b) hdu_a = fits.BinTableHDU.from_columns([col_a]) hdu_a.writeto(self.temp('test_a.fits'), overwrite=True) hdu_b = fits.BinTableHDU.from_columns([col_b]) hdu_b.writeto(self.temp('test_b.fits'), overwrite=True) hdul_a = fits.open(self.temp('test_a.fits'), mode='update', memmap=mmap_a) hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b) hdul_a[1].data = hdul_b[1].data hdul_a.close() hdul_b.close() hdul_a = fits.open(self.temp('test_a.fits')) assert 'b' in hdul_a[1].columns.names assert 'a' not in hdul_a[1].columns.names assert np.all(hdul_a[1].data['b'] == arr_b) with ignore_warnings(): test(True, True) # Repeat the same test but this time don't mmap A test(False, True) # Finally, without mmaping B test(True, False) def test_extname_in_hdulist(self): """ Tests to make sure that the 'in' operator works. 
Regression test for https://github.com/astropy/astropy/issues/3060 """ hdulist = fits.HDUList() hdulist.append(fits.ImageHDU(name='a')) assert 'a' in hdulist assert 'A' in hdulist assert ('a', 1) in hdulist assert ('A', 1) in hdulist assert 'b' not in hdulist assert ('a', 2) not in hdulist assert ('b', 1) not in hdulist assert ('b', 2) not in hdulist def test_overwrite_vs_clobber(self): hdulist = fits.HDUList([fits.PrimaryHDU()]) hdulist.writeto(self.temp('test_overwrite.fits')) hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True) with catch_warnings(AstropyDeprecationWarning) as warning_lines: hdulist.writeto(self.temp('test_overwrite.fits'), clobber=True) assert warning_lines[0].category == AstropyDeprecationWarning assert (str(warning_lines[0].message) == '"clobber" was ' 'deprecated in version 2.0 and will be removed in a ' 'future version. Use argument "overwrite" instead.') def test_invalid_hdu_key_in_contains(self): """ Make sure invalid keys in the 'in' operator return False. Regression test for https://github.com/astropy/astropy/issues/5583 """ hdulist = fits.HDUList(fits.PrimaryHDU()) hdulist.append(fits.ImageHDU()) hdulist.append(fits.ImageHDU()) # A more or less random assortment of things which are not valid keys. bad_keys = [None, 3.5, {}] for key in bad_keys: assert not (key in hdulist) def test_iteration_of_lazy_loaded_hdulist(self): """ Regression test for https://github.com/astropy/astropy/issues/5585 """ hdulist = fits.HDUList(fits.PrimaryHDU()) hdulist.append(fits.ImageHDU(name='SCI')) hdulist.append(fits.ImageHDU(name='SCI')) hdulist.append(fits.ImageHDU(name='nada')) hdulist.append(fits.ImageHDU(name='SCI')) filename = self.temp('many_extension.fits') hdulist.writeto(filename) f = fits.open(filename) # Check that all extensions are read if f is not sliced all_exts = [ext for ext in f] assert len(all_exts) == 5 # Reload the file to ensure we are still lazy loading f.close() f = fits.open(filename) # Try a simple slice with no conditional on the ext. This is essentially # the reported failure. all_exts_but_zero = [ext for ext in f[1:]] assert len(all_exts_but_zero) == 4 # Reload the file to ensure we are still lazy loading f.close() f = fits.open(filename) # Check whether behavior is proper if the upper end of the slice is not # omitted. read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI'] assert len(read_exts) == 2 def test_proper_error_raised_on_non_fits_file_with_unicode(self): """ Regression test for https://github.com/astropy/astropy/issues/5594 The failure shows up when (in python 3+) you try to open a file with unicode content that is not actually a FITS file. See: https://github.com/astropy/astropy/issues/5594#issuecomment-266583218 """ import codecs filename = self.temp('not-fits-with-unicode.fits') with codecs.open(filename, mode='w', encoding='utf=8') as f: f.write(u'Ce\xe7i ne marche pas') # This should raise an IOError because there is no end card. with pytest.raises(IOError): fits.open(filename) @pytest.mark.skipif(six.PY2, reason='ResourceWarning is not created in Python 2') def test_no_resource_warning_raised_on_non_fits_file(self): """ Regression test for https://github.com/astropy/astropy/issues/6168 The ResourceWarning shows up when (in python 3+) you try to open a non-FITS file when using a filename. """ # To avoid creating the file multiple times the tests are # all included in one test file. 
See the discussion to the # PR at https://github.com/astropy/astropy/issues/6168 # filename = self.temp('not-fits.fits') with open(filename, mode='w') as f: f.write('# header line\n') f.write('0.1 0.2\n') # Opening the file should raise an OSError however the file # is opened (there are two distinct code paths, depending on # whether ignore_missing_end is True or False). # # Explicit tests are added to make sure the file handle is not # closed when passed in to fits.open. In this case the ResourceWarning # was not raised, but a check is still included. # with catch_warnings(ResourceWarning) as ws: # Make sure that files opened by the user are not closed with open(filename, mode='rb') as f: with pytest.raises(OSError): fits.open(f, ignore_missing_end=False) assert not f.closed with open(filename, mode='rb') as f: with pytest.raises(OSError): fits.open(f, ignore_missing_end=True) assert not f.closed with pytest.raises(OSError): fits.open(filename, ignore_missing_end=False) with pytest.raises(OSError): fits.open(filename, ignore_missing_end=True) assert len(ws) == 0
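
The tests above exercise HDUList append/insert/writeto in many variations; as a compact reference, a minimal round trip with the same public API looks like the sketch below (the file name is arbitrary).

import numpy as np
from astropy.io import fits

hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU(np.arange(100, dtype=np.int32)))
hdul.append(fits.ImageHDU(np.zeros((10, 10)), name='SCI'))
hdul.writeto('example.fits', overwrite=True)

with fits.open('example.fits') as reopened:
    reopened.info()
    assert reopened['SCI'].data.shape == (10, 10)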
{ "content_hash": "cd85c990c33cef0bd623ee918704d3d3", "timestamp": "", "source": "github", "line_count": 962, "max_line_length": 81, "avg_line_length": 36.71101871101871, "alnum_prop": 0.5650979725903273, "repo_name": "AustereCuriosity/astropy", "id": "04a0e07bbf3f0892955817d65ba5daa16cd1ebbf", "size": "35380", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "astropy/io/fits/tests/test_hdulist.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "366877" }, { "name": "C++", "bytes": "1825" }, { "name": "HTML", "bytes": "1172" }, { "name": "Jupyter Notebook", "bytes": "62553" }, { "name": "Python", "bytes": "8239657" }, { "name": "Shell", "bytes": "593" }, { "name": "TeX", "bytes": "778" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations import django.core.validators import django.utils.timezone import django.contrib.auth.models class Migration(migrations.Migration): dependencies = [ ('auth', '0006_require_contenttypes_0002'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(verbose_name='last login', blank=True, null=True)), ('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status', default=False)), ('username', models.CharField(validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], unique=True, max_length=30, verbose_name='username', error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.')), ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)), ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)), ('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)), ('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', verbose_name='staff status', default=False)), ('is_active', models.BooleanField(help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active', default=True)), ('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)), ('groups', models.ManyToManyField(verbose_name='groups', to='auth.Group', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', blank=True, related_query_name='user')), ('user_permissions', models.ManyToManyField(verbose_name='user permissions', to='auth.Permission', help_text='Specific permissions for this user.', related_name='user_set', blank=True, related_query_name='user')), ], options={ 'abstract': False, 'verbose_name': 'user', 'verbose_name_plural': 'users', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
{ "content_hash": "2209aa662017366f22fc56cf17a699da", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 432, "avg_line_length": 69.71428571428571, "alnum_prop": 0.64275956284153, "repo_name": "LABETE/TestYourProject", "id": "56f5e97cab50b52fb9621b4ea789e370667996b3", "size": "2952", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "TestYourProject/users/migrations/0001_initial.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "70188" }, { "name": "HTML", "bytes": "99145" }, { "name": "JavaScript", "bytes": "917905" }, { "name": "Python", "bytes": "62518" }, { "name": "Shell", "bytes": "3620" } ], "symlink_target": "" }
""" Support for EBox. Get data from 'My Usage Page' page: https://client.ebox.ca/myusage """ from __future__ import annotations from datetime import timedelta import logging from pyebox import EboxClient from pyebox.client import PyEboxError import voluptuous as vol from homeassistant.components.sensor import ( PLATFORM_SCHEMA, SensorEntity, SensorEntityDescription, ) from homeassistant.const import ( CONF_MONITORED_VARIABLES, CONF_NAME, CONF_PASSWORD, CONF_USERNAME, DATA_GIGABITS, PERCENTAGE, TIME_DAYS, ) from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) PRICE = "CAD" DEFAULT_NAME = "EBox" REQUESTS_TIMEOUT = 15 SCAN_INTERVAL = timedelta(minutes=15) MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15) SENSOR_TYPES: tuple[SensorEntityDescription, ...] = ( SensorEntityDescription( key="usage", name="Usage", native_unit_of_measurement=PERCENTAGE, icon="mdi:percent", ), SensorEntityDescription( key="balance", name="Balance", native_unit_of_measurement=PRICE, icon="mdi:cash-usd", ), SensorEntityDescription( key="limit", name="Data limit", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:download", ), SensorEntityDescription( key="days_left", name="Days left", native_unit_of_measurement=TIME_DAYS, icon="mdi:calendar-today", ), SensorEntityDescription( key="before_offpeak_download", name="Download before offpeak", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:download", ), SensorEntityDescription( key="before_offpeak_upload", name="Upload before offpeak", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:upload", ), SensorEntityDescription( key="before_offpeak_total", name="Total before offpeak", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:download", ), SensorEntityDescription( key="offpeak_download", name="Offpeak download", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:download", ), SensorEntityDescription( key="offpeak_upload", name="Offpeak Upload", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:upload", ), SensorEntityDescription( key="offpeak_total", name="Offpeak Total", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:download", ), SensorEntityDescription( key="download", name="Download", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:download", ), SensorEntityDescription( key="upload", name="Upload", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:upload", ), SensorEntityDescription( key="total", name="Total", native_unit_of_measurement=DATA_GIGABITS, icon="mdi:download", ), ) SENSOR_TYPE_KEYS: list[str] = [desc.key for desc in SENSOR_TYPES] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_MONITORED_VARIABLES): vol.All( cv.ensure_list, [vol.In(SENSOR_TYPE_KEYS)] ), vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the EBox sensor.""" username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) httpsession = hass.helpers.aiohttp_client.async_get_clientsession() ebox_data = EBoxData(username, password, httpsession) name = config.get(CONF_NAME) try: await ebox_data.async_update() except PyEboxError as exp: _LOGGER.error("Failed login: %s", exp) raise PlatformNotReady from exp sensors = [ EBoxSensor(ebox_data, description, name) for description in SENSOR_TYPES if description.key in 
config[CONF_MONITORED_VARIABLES] ] async_add_entities(sensors, True) class EBoxSensor(SensorEntity): """Implementation of a EBox sensor.""" def __init__( self, ebox_data, description: SensorEntityDescription, name, ): """Initialize the sensor.""" self.entity_description = description self._attr_name = f"{name} {description.name}" self.ebox_data = ebox_data async def async_update(self): """Get the latest data from EBox and update the state.""" await self.ebox_data.async_update() if self.entity_description.key in self.ebox_data.data: self._attr_native_value = round( self.ebox_data.data[self.entity_description.key], 2 ) class EBoxData: """Get data from Ebox.""" def __init__(self, username, password, httpsession): """Initialize the data object.""" self.client = EboxClient(username, password, REQUESTS_TIMEOUT, httpsession) self.data = {} @Throttle(MIN_TIME_BETWEEN_UPDATES) async def async_update(self): """Get the latest data from Ebox.""" try: await self.client.fetch_data() except PyEboxError as exp: _LOGGER.error("Error on receive last EBox data: %s", exp) return # Update data self.data = self.client.get_data()
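
Outside Home Assistant, the same pyebox client wrapped by EBoxData can be driven directly. A minimal sketch, assuming valid EBox credentials and reusing the 15-second timeout from REQUESTS_TIMEOUT above:

import asyncio

import aiohttp
from pyebox import EboxClient


async def fetch_usage(username, password):
    async with aiohttp.ClientSession() as session:
        client = EboxClient(username, password, 15, session)
        await client.fetch_data()
        # Returns a dict with keys such as "usage", "balance" and "limit".
        return client.get_data()


# asyncio.run(fetch_usage("user", "password"))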
{ "content_hash": "7d1237a6432164f9b95ca4c9a835ddfe", "timestamp": "", "source": "github", "line_count": 204, "max_line_length": 86, "avg_line_length": 27.691176470588236, "alnum_prop": 0.6328553726323243, "repo_name": "FreekingDean/home-assistant", "id": "3c43dd3613098ce91e2154fe22c38c4d5394a843", "size": "5649", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "homeassistant/components/ebox/sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2335" }, { "name": "Python", "bytes": "36746639" }, { "name": "Shell", "bytes": "4910" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations import datetime from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('account', '0020_auto_20150610_0943'), ] operations = [ migrations.AlterField( model_name='user', name='key_expires', field=models.DateTimeField(default=datetime.datetime(2015, 6, 10, 12, 17, 25, 354900, tzinfo=utc)), ), ]
{ "content_hash": "8d5738aca6b140d05c85f74a872f1ffa", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 111, "avg_line_length": 24.8, "alnum_prop": 0.6350806451612904, "repo_name": "NTsystems/NoTes-API", "id": "7614c4c2f1213d3c1e8ef29cf51a68e84875c01e", "size": "520", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "notes/apps/account/migrations/0021_auto_20150610_1217.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "249" }, { "name": "Nginx", "bytes": "528" }, { "name": "Python", "bytes": "58225" }, { "name": "Shell", "bytes": "143" } ], "symlink_target": "" }
import setuptools

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True,
)
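# --- Editor's note (illustrative) -----------------------------------------------
# pbr moves the package metadata out of setup.py and into setup.cfg, which is
# why the setup() call above is so bare. A minimal setup.cfg for a project like
# this might look roughly as follows; every value is a placeholder, not taken
# from this repository.
_EXAMPLE_SETUP_CFG = """
[metadata]
name = my-package
summary = One-line description of the package
description-file = README.rst
author = Your Name
author-email = you@example.com

[files]
packages =
    my_package
"""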
{ "content_hash": "bb7ad162ca0b0e3afc556a99ddde2b04", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 34, "avg_line_length": 14.285714285714286, "alnum_prop": 0.54, "repo_name": "chrigl/bobtemplates.pypbr", "id": "5939355fc5b5845502b32f7471101b1a6e31b541", "size": "703", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "5375" } ], "symlink_target": "" }
# Permission is hereby granted, free of charge, to any person obtaining a # copy of this data, including any software or models in source or binary # form, as well as any drawings, specifications, and documentation # (collectively "the Data"), to deal in the Data without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Data, and to # permit persons to whom the Data is furnished to do so, subject to the # following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Data. # THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA. # ======================= # This version of the META tools is a fork of an original version produced # by Vanderbilt University's Institute for Software Integrated Systems (ISIS). # Their license statement: # Copyright (C) 2011-2014 Vanderbilt University # Developed with the sponsorship of the Defense Advanced Research Projects # Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights # as defined in DFARS 252.227-7013. # Permission is hereby granted, free of charge, to any person obtaining a # copy of this data, including any software or models in source or binary # form, as well as any drawings, specifications, and documentation # (collectively "the Data"), to deal in the Data without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Data, and to # permit persons to whom the Data is furnished to do so, subject to the # following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Data. # THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA. 
#perform Latin Hypercube Sampling from __future__ import division # tells python to use "true" division throughout import random from numpy import * #Normal LHS sampling def LHS(dim, iterations): points = GetLHSpoints(dim, iterations) unused = [] for d in range(dim): unused.append(range(iterations)) samples = [] for i in range(iterations): sample = [] for d in range(dim): rpoint = int(random.random()*len(unused[d])) segpoint = unused[d][rpoint] sample.append(points[d, segpoint]) del unused[d][rpoint] # unused[d].remove(segpoint) samples.append(sample) return samples #use Orthogonal LHS sampling for even better sampling performance def OrthLHS(dim, iterations): points = GetLHSpoints(dim, iterations) secsizes = GetSectionSizes(dim, iterations) print secsizes unused = [] for d in range(dim): sections = [] start = 0 for s in range(secsizes[d]): secsize = int(ceil((iterations-start)/(secsizes[d]-s))) sections.append(range(start,start+secsize)) start += secsize unused.append(sections) samples = [] sampleidx = [] sampleavail = [] #GetOrthLHSsamples(0,dim,samples,unused,points,secsizes,[0]*dim) indices = GetOrthLHSsampleIndices(dim,secsizes) print unused for s in range(iterations): maxv = 0 for i in indices: val = 0 for d in range(dim): val += len(unused[d][i[d]]) if val==maxv: available.append(i) if val>maxv: maxv = val available = [i] print 'Available indices:',available rpoint = int(random.random()*len(available)) index = available[rpoint] if maxv<dim: for d in range(dim): for j,u in enumerate(unused[d]): if len(u)>0: index[d]=j break i = sampleidx.index(index) last = list(index) print 'Choosing',index,'instead' sample = GetSample(index,dim,unused) print 'giving sample',sample print unused samples.append(sample) sampleidx.append(index) sampleavail.append(available) while True: print 'Redoing step',i if(len(sampleavail[i])<2): print 'Could not complete' while last==sampleidx[i]: last = sampleavail[i][int(random.random()*len(sampleavail[i]))] print 'For step',i,'choosing',last,'instead' for d in range(dim): unused[d][sampleidx[i][d]].append(samples[i][d]) print unused samples[i]=GetSample(last,dim,unused) print samples[i] print unused sampleidx[i]=last try: i = sampleidx.index(last) except ValueError: break else: print 'Choosing index',index,'with value',maxv, sample = GetSample(index,dim,unused) print 'giving sample',sample print unused del indices[rpoint] samples.append(sample) sampleidx.append(index) sampleavail.append(available) return samples # recursive function to obtain LHS samples def GetOrthLHSsamples(depth,dim,samples,unused,points,secsizes,index): if depth==dim: print 'index=',index for d in range(dim): #check if there are sections available for this sample. If not, we can't take the sample if unused[d][index[d]] == []: return sample = GetSample(index,dim,unused) print sample samples.append(sample) print unused else: for s in range(secsizes[depth]): index[depth]=s GetOrthLHSsamples(depth+1,dim,samples,unused,points,secsizes,index) def GetSample(index,dim,unused): sample = [] for d in range(dim): rpoint = int(random.random()*len(unused[d][index[d]])) segpoint = unused[d][index[d]][rpoint] # sample.append(points[d, segpoint]) sample.append(segpoint) # for testing. Comment out previous line and uncomment this line to see how the samples are distributed. 
del unused[d][index[d]][rpoint] return sample def GetOrthLHSsampleIndices(dim,secsizes,depth=0,index=None): if depth==dim: return [list(index)] #must return a copy of the index, not a reference to it else: if depth==0: index = [0]*dim Indices = [] for s in range(secsizes[depth]): index[depth]=s Indices += GetOrthLHSsampleIndices(dim,secsizes,depth+1,index) return Indices #gets how many sections each dimension should be split into def GetSectionSizes(dim, iterations): sdims = [] iters = iterations for d in range(dim): sdims.append(int(ceil(iters ** (1/(dim-d))))) iters = ceil(iters/sdims[d]) return sdims #get a set of sample points (on 0-1) def GetLHSpoints(dim, iterations): points = zeros((dim,iterations)) segmentSize = 1 / iterations for d in range(dim): for i in range(iterations): segmentMin = i * segmentSize points[d,i] = segmentMin + (random.random() * segmentSize) return points #print LHS(2,4) #print len(LHS(4,1000)) #print len(LHS(4,10000)) #print len(LHS(4,50000)) #print OrthLHS(2,5) #for i in range(4,20): # print i,len(OrthLHS(2,i))
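# --- Editor's usage sketch ------------------------------------------------------
# A minimal check of the plain LHS() sampler above (note the module is written
# for Python 2: print statements, list-returning range). With 4 samples in 2
# dimensions, each dimension should use each of the 4 equal-width segments of
# [0, 1) exactly once -- the defining property of Latin Hypercube Sampling.
def _demo_lhs():
    samples = LHS(2, 4)                      # 4 points in 2 dimensions
    for d in range(2):
        segments = sorted(int(s[d] * 4) for s in samples)
        assert segments == [0, 1, 2, 3]      # one sample per segment per dimension
    print(samples)

#_demo_lhs()  # uncomment to run; uses only functions defined above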
{ "content_hash": "ee1567554577530b9e94a0883b17bb1b", "timestamp": "", "source": "github", "line_count": 230, "max_line_length": 139, "avg_line_length": 38.06956521739131, "alnum_prop": 0.6156920968478757, "repo_name": "pombredanne/metamorphosys-desktop", "id": "1eac4b54c73c51f05a1013b86337ca6ca38147bd", "size": "8807", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "metamorphosys/META/src/Python27Packages/PCC/PCC/LHS.py", "mode": "33261", "license": "mit", "language": [ { "name": "Arduino", "bytes": "10683" }, { "name": "Assembly", "bytes": "117345" }, { "name": "Awk", "bytes": "3591" }, { "name": "Batchfile", "bytes": "228118" }, { "name": "BitBake", "bytes": "4526" }, { "name": "C", "bytes": "3613212" }, { "name": "C#", "bytes": "11617773" }, { "name": "C++", "bytes": "51448188" }, { "name": "CMake", "bytes": "3055" }, { "name": "CSS", "bytes": "109563" }, { "name": "Clojure", "bytes": "37831" }, { "name": "Eagle", "bytes": "3782687" }, { "name": "Emacs Lisp", "bytes": "8514" }, { "name": "GAP", "bytes": "49124" }, { "name": "Groff", "bytes": "2178" }, { "name": "Groovy", "bytes": "7686" }, { "name": "HTML", "bytes": "4025250" }, { "name": "Inno Setup", "bytes": "35715" }, { "name": "Java", "bytes": "489537" }, { "name": "JavaScript", "bytes": "167454" }, { "name": "Lua", "bytes": "1660" }, { "name": "Makefile", "bytes": "97209" }, { "name": "Mathematica", "bytes": "26" }, { "name": "Matlab", "bytes": "80874" }, { "name": "Max", "bytes": "78198" }, { "name": "Modelica", "bytes": "44541139" }, { "name": "Objective-C", "bytes": "34004" }, { "name": "Perl", "bytes": "19285" }, { "name": "PostScript", "bytes": "400254" }, { "name": "PowerShell", "bytes": "19749" }, { "name": "Processing", "bytes": "1477" }, { "name": "Prolog", "bytes": "3121" }, { "name": "Protocol Buffer", "bytes": "58995" }, { "name": "Python", "bytes": "5517835" }, { "name": "Ruby", "bytes": "4483" }, { "name": "Shell", "bytes": "956773" }, { "name": "Smarty", "bytes": "37892" }, { "name": "TeX", "bytes": "4183594" }, { "name": "Visual Basic", "bytes": "22546" }, { "name": "XSLT", "bytes": "332312" } ], "symlink_target": "" }
""" author: sljeff email: kindjeff.com@gmail.com """ from . import db from ..pocket import PocketSession from ..qqbot import qqpush live_subscription = {} daily_subscription = {} daily_sessions = {} live_members = {} def update_subscription(): """ 从数据库里更新订阅信息。应该在每次数据库更改时调用。 """ global live_subscription, daily_subscription, daily_sessions live_subscription = db.get_live_subscription() daily_subscription = db.get_daily_subscription() for phonenum in daily_sessions.keys(): if phonenum not in daily_subscription.keys(): daily_sessions.pop(phonenum) for phonenum, password in daily_subscription.items(): if daily_sessions.get(phonenum) is None: daily_sessions[phonenum] = PocketSession(phonenum, password) update_subscription() def livecallback(live_list): """ 直播的回调函数。 每次会和上一次比较,检查新的直播成员是否被订阅,并进行推送。 """ global live_members _live_members = {l['memberId']: l for l in live_list} new_member_ids = set(_live_member_ids.keys()) - set(live_member_ids.keys()) live_members = _live_members for mid in new_member_ids: subscriptors = live_subscription.get(mid) if subscriptors is not None: live_data = live_members.get(mid) qqpush(subscriptors, live_data) def dailycallback(): """ 日常任务的回调(签到/扭蛋)。 只会在签到失败时进行一次重新登录。 """ for session in daily_sessions.values(): sign_result = session.sign() if sign_result is None: session.login() sign_result = session.sign() session.get_niudan()
{ "content_hash": "52463b0e38ee52e652739622b32ec340", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 79, "avg_line_length": 25.38095238095238, "alnum_prop": 0.6466541588492808, "repo_name": "zyf-website/pocket48-tools", "id": "d341a778e8a928e2ad53c426f44dd959b0571489", "size": "1789", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pocket48_tools/subscribe/callback.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "24700" } ], "symlink_target": "" }
from django.conf.urls.defaults import patterns, url, include from django.contrib import admin from techism2.rss.feeds import UpcommingEventsRssFeed, UpcommingEventsAtomFeed admin.autodiscover() urlpatterns = patterns('', # web (r'^$', 'techism2.web.views.index'), (r'^events/$', 'techism2.web.views.index'), url(r'^events/(?P<event_id>\d+)/$', 'techism2.web.views.show', name='event-show'), (r'^events/edit/(?P<event_id>\d+)/$', 'techism2.web.views.edit'), (r'^events/create/$', 'techism2.web.views.create'), (r'^events/archive/$', 'techism2.web.views.archive'), (r'^events/tags/(?P<tag_name>.+)/$', 'techism2.web.views.tag'), # static pages (r'^impressum/$', 'techism2.web.views.static_impressum'), (r'^about/$', 'techism2.web.views.static_about'), # iCal (r'^feed.ics$', 'techism2.ical.views.ical'), # Atom (r'^feeds/atom/upcomming_events$', UpcommingEventsAtomFeed()), #RSS (r'^feeds/rss/upcomming_events$', UpcommingEventsRssFeed()), # admin (r'^admin/', include(admin.site.urls)), # login/logout (r'^accounts/', include('django_openid_auth.urls')), (r'^accounts/logout/$', 'techism2.web.views.logout'), url(r'^accounts/google_login/$', 'gaeauth.views.login', name='google_login'), url(r'^accounts/google_logout/$', 'gaeauth.views.logout', name='google_logout'), url(r'^accounts/google_authenticate/$', 'gaeauth.views.authenticate', name='google_authenticate'), # cron jobs (r'^cron/update_archived_flag', 'techism2.cron.views.update_archived_flag'), (r'^cron/update_tags_cache', 'techism2.cron.views.update_tags_cache'), )
{ "content_hash": "f63e68ecffd87271810fee6f2c7489aa", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 102, "avg_line_length": 37.4, "alnum_prop": 0.6423054070112894, "repo_name": "gimler/techism2", "id": "37a46dcd8fdbf289dd762dc16591d558790573af", "size": "1683", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "urls.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "413557" }, { "name": "Python", "bytes": "361299" } ], "symlink_target": "" }
from typing import Any, Callable def func1(callback: Callable[[Any, Any], None], *args: Any, **kwargs: Any): callback(*args, **kwargs) # mypy complains with too many arguments error def func2(callback: Callable[..., None], *args: Any, **kwargs: Any): callback(*args, **kwargs) def cb(*args: Any, **kwargs: Any) -> None: print(args) print(kwargs) func1(cb, 1, 2, three=3, four=4) func2(cb, 1, 2, three=3, four=4)
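# --- Editor's sketch ------------------------------------------------------------
# Why mypy rejects func1 but not func2: Callable[[Any, Any], None] declares a
# callable taking exactly two positional arguments, so forwarding arbitrary
# *args/**kwargs through it does not type-check, while Callable[..., None]
# leaves the signature unconstrained. A callback Protocol with a variadic
# __call__ keeps a named type without constraining the arguments
# (requires Python 3.8+, or typing_extensions on older versions).
from typing import Protocol


class VariadicCallback(Protocol):
    def __call__(self, *args: Any, **kwargs: Any) -> None: ...


def func3(callback: VariadicCallback, *args: Any, **kwargs: Any) -> None:
    callback(*args, **kwargs)


func3(cb, 1, 2, three=3, four=4)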
{ "content_hash": "a237af97c781d0c53b07d8a856daac8b", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 77, "avg_line_length": 24.27777777777778, "alnum_prop": 0.6407322654462243, "repo_name": "viblo/pymunk", "id": "183aba921df72b6a0d8a2aa48ee0eecba388c4f5", "size": "437", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dump/typingissues/too-many-arguments.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1349" }, { "name": "Jupyter Notebook", "bytes": "297305" }, { "name": "Objective-C", "bytes": "2968" }, { "name": "Python", "bytes": "627317" } ], "symlink_target": "" }
""" Copyright 2016 ElasticBox All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from bson.objectid import ObjectId import cgi import json import logging import random import re import string import urllib import urlparse from datetime import datetime, timedelta import jwt from lxml import etree from onelogin.saml2.auth import OneLogin_Saml2_Auth from onelogin.saml2.constants import OneLogin_Saml2_Constants from onelogin.saml2.metadata import OneLogin_Saml2_Metadata from passlib.hash import sha512_crypt from tornado.auth import GoogleOAuth2Mixin from tornado.gen import coroutine, Return from tornado.web import RequestHandler, HTTPError from api.v1 import ELASTICKUBE_TOKEN_HEADER, ELASTICKUBE_VALIDATION_TOKEN_HEADER from api.v1.actions import emails from data.query import Query ROUNDS = 40000 def _generate_hashed_password(password): salt = "".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(64)) hash = sha512_crypt.encrypt((password + salt).encode("utf-8"), rounds=ROUNDS) hash_parts = hash.split("$rounds={0}$".format(ROUNDS)) return {"hash": hash_parts[1], "rounds": "{0}$rounds={1}$".format(hash_parts[0], ROUNDS), "salt": salt} def _fill_signup_invitation_request(document, firstname, lastname, password=None): document["firstname"] = firstname document["lastname"] = lastname document["email_validated_at"] = datetime.utcnow() if password is not None: document["password"] = _generate_hashed_password(password) class AuthHandler(RequestHandler): @coroutine def authenticate_user(self, user): logging.info("Authenticating user '%(username)s'", user) token = dict( id=str(user["_id"]), username=user["username"], firstname=user["firstname"], lastname=user["lastname"], email=user["email"], role=user["role"], created=datetime.utcnow().isoformat(), exp=datetime.utcnow() + timedelta(30) ) user["last_login"] = datetime.utcnow() yield self.settings["database"].Users.update({"_id": user["_id"]}, user) token = jwt.encode(token, self.settings["secret"], algorithm="HS256") self.set_cookie(ELASTICKUBE_TOKEN_HEADER, token) logging.info("User '%(username)s' authenticated.", user) raise Return(token) class AuthProvidersHandler(RequestHandler): @coroutine def get(self): providers = dict() # If there are no users created then we need to return an empty list of providers to enable the signup flow if (yield Query(self.settings["database"], "Users").find_one()) is None: self.write({}) else: settings = yield Query(self.settings["database"], "Settings").find_one() if "google_oauth" in settings["authentication"]: providers['google'] = dict(auth_url="/api/v1/auth/google") if "saml" in settings["authentication"]: providers['saml'] = dict(auth_url="/api/v1/auth/saml") if "password" in settings["authentication"]: providers['password'] = dict(regex=settings["authentication"]["password"]["regex"]) validation_token = self.request.headers.get(ELASTICKUBE_VALIDATION_TOKEN_HEADER) if validation_token is not None: user = yield Query(self.settings["database"], "Users").find_one({"invite_token": validation_token}) 
if user is not None and 'email_validated_at' not in user: providers['email'] = user[u'email'] self.write(providers) class SignupHandler(AuthHandler): @staticmethod def _validate_signup_data(data): if "email" not in data: raise HTTPError(400, reason="Email is required.") if "password" not in data: raise HTTPError(400, reason="Password is required.") if "firstname" not in data: raise HTTPError(400, reason="First name is required.") if "lastname" not in data: raise HTTPError(400, reason="Last name is required.") return True @coroutine def _update_invited_user(self, validation_token, data): user = yield Query(self.settings["database"], "Users").find_one( {"invite_token": validation_token, "email": data["email"]}) if user is not None and "email_validated_at" not in user: for namespace_name in user["namespaces"]: namespace = yield Query(self.settings["database"], "Namespaces").find_one({"name": namespace_name}) if namespace is None: logging.warn("Cannot find namespace %s", namespace_name) else: if "members" in namespace: namespace["members"].append(user["username"]) else: namespace["members"] = [user["username"]] yield Query(self.settings["database"], "Namespaces").update(namespace) del user["namespaces"] _fill_signup_invitation_request( user, firstname=data["firstname"], lastname=data["lastname"], password=data["password"]) raise Return(user) else: raise HTTPError(403, message="Invitation not found.") @coroutine def post(self): try: data = json.loads(self.request.body) except Exception: raise HTTPError(400, message='Invalid JSON') validation_token = self.request.headers.get(ELASTICKUBE_VALIDATION_TOKEN_HEADER) if validation_token is not None: self._validate_signup_data(data) user = yield self._update_invited_user(validation_token, data) token = yield self.authenticate_user(user) self.write(token) self.flush() # Signup can be used only the first time elif (yield Query(self.settings["database"], "Users").find_one()) is not None: raise HTTPError(403, message="Onboarding already completed.") else: self._validate_signup_data(data) user = dict( email=data["email"], username=data["email"], password=_generate_hashed_password(data["password"]), firstname=data["firstname"], lastname=data["lastname"], role="administrator", schema="http://elasticbox.net/schemas/user", email_validated_at=datetime.utcnow().isoformat() ) signup_user = yield Query(self.settings["database"], "Users").insert(user) token = yield self.authenticate_user(signup_user) self.write(token) self.flush() class RequestInviteHandler(AuthHandler): @coroutine def post(self): logging.info("Initiating RequestInviteHandler post") data = json.loads(self.request.body) if "email" not in data: raise HTTPError(400, reason="Missing email in body request.") settings = yield self.settings["database"].Settings.find_one() if "mail" in settings: mail_settings = settings["mail"] origin_user = { 'name': data.get('name', ''), 'email': data['email'] } invite_address = "{0}/admin/users?invite={1}".format( settings["hostname"], cgi.escape(origin_user['email'], quote=True)) try: admin = yield Query(self.settings["database"], "Users").find_one({"role": "administrator"}) yield emails.send_request_invite_link( mail_settings, admin["email"], origin_user, invite_address, settings) except Exception: logging.exception("Error sending request invite.") raise HTTPError(500, reason='Error sending request invite.') raise HTTPError(200) else: logging.warning("Mail settings not added") raise HTTPError(412, reason="Request invite not available.") class 
ResetPasswordHandler(AuthHandler): @coroutine def post(self): logging.info("Initiating ResetPasswordHandler post") data = json.loads(self.request.body) if "email" not in data: raise HTTPError(400, reason="Missing email in body request.") email = data["email"] user = yield self.settings["database"].Users.find_one({"email": email}) if not user: logging.debug("User with email '%s' not found.", email) raise HTTPError(200) settings = yield self.settings["database"].Settings.find_one() if "mail" in settings: mail_settings = settings["mail"] token = dict( id=str(user["_id"]), hash=user['password']['hash'][:-16], exp=datetime.utcnow() + timedelta(minutes=10) ) token = jwt.encode(token, self.settings["secret"], algorithm="HS256") user_data = { 'name': user.get('firstname'), 'email': user['email'], 'token': token } try: yield emails.send_reset_password_link(mail_settings, user_data, settings) except Exception: raise HTTPError(500, reason='Error sending reset password email.') raise HTTPError(200) else: logging.warning("Mail settings not added") raise HTTPError(412, reason="Mail settings not added.") class ChangePasswordHandler(AuthHandler): @coroutine def post(self): logging.info("Initiating ChangePasswordHandler post") data = json.loads(self.request.body) if "password" not in data: raise HTTPError(400, reason="Missing arguments in change password request.") if "token" not in data: raise HTTPError(400, reason="Missing arguments in change password request.") password = data["password"] try: token = jwt.decode(data["token"], self.settings['secret'], algorithm='HS256') except Exception: raise HTTPError(400, reason="Invalid token or token has expired") user = yield self.settings["database"].Users.find_one({"_id": ObjectId(token["id"])}) if not user: logging.error("Error trying to change user password for token: '%s'.", token) raise HTTPError(200) if not user["password"]["hash"][:-16] == token["hash"]: raise HTTPError(400, reason="Invalid token or token has expired") user["password"] = _generate_hashed_password(password) yield Query(self.settings["database"], "Users").update_fields({"_id": user["_id"]}, { "password": user["password"] }) raise HTTPError(200) class PasswordHandler(AuthHandler): @coroutine def post(self): logging.info("Initiating PasswordHandler post") data = json.loads(self.request.body) if "username" not in data: raise HTTPError(400, reason="Missing username in body request.") if "password" not in data: raise HTTPError(400, reason="Missing password in body request.") username = data["username"] password = data["password"] user = yield self.settings["database"].Users.find_one({"username": username}) if not user: logging.info("Username '%s' not found.", username) raise HTTPError(302, reason='/request-invite') if 'email_validated_at' not in user: logging.info("Username '%s' not validated.", username) raise HTTPError(302, reason='/request-invite') if 'password' not in user: logging.info("User '%s' has not password.", username) raise HTTPError(401, reason="Invalid username or password.") encoded_user_password = '{0}{1}'.format(user["password"]["rounds"], user["password"]["hash"]) if sha512_crypt.verify((password + user["password"]["salt"]).encode("utf-8"), encoded_user_password): token = yield self.authenticate_user(user) self.write(token) self.flush() else: logging.info("Invalid password for user '%s'.", username) raise HTTPError(401, reason="Invalid username or password.") class GoogleOAuth2LoginHandler(AuthHandler, GoogleOAuth2Mixin): @coroutine def get(self): logging.info("Initiating 
Google OAuth.") settings = yield Query(self.settings["database"], "Settings").find_one() google_oauth = settings[u'authentication'].get('google_oauth', None) if google_oauth is None: raise HTTPError(403, 'Forbidden request') # Add OAuth settings for GoogleOAuth2Mixin self.settings['google_oauth'] = { 'key': google_oauth['key'], 'secret': google_oauth['secret'] } code = self.get_argument('code', False) redirect_uri = "{0}/api/v1/auth/google".format(settings["hostname"]) if code: logging.debug("Google redirect received.") auth_data = yield self.get_authenticated_user( redirect_uri=redirect_uri, code=code) auth_user = yield self.oauth2_request( "https://www.googleapis.com/oauth2/v1/userinfo", access_token=auth_data['access_token']) if auth_user["verified_email"]: user = yield self.settings["database"].Users.find_one({"email": auth_user["email"]}) firstname = auth_data.get('given_name', auth_data.get('name', "")) lastname = auth_data.get('family_name', "") # Validate user if it signup by OAuth2 if user and 'email_validated_at' not in user: logging.debug('User validated via OAuth2 %s', auth_user["email"]) _fill_signup_invitation_request(user, firstname=firstname, lastname=lastname, password=None) user = yield Query(self.settings["database"], 'Users').update(user) if user: yield self.authenticate_user(user) self.redirect('/') else: logging.debug("User '%s' not found", auth_user["email"]) self.redirect('/request-invite?account={0}&name={1}'.format( cgi.escape(auth_user["email"], quote=True), cgi.escape("{0} {1}".format(firstname, lastname), quote=True))) else: logging.info("User email '%s' not verified.", auth_user["email"]) raise HTTPError(400, "Email is not verified.") else: logging.debug("Redirecting to google for authentication.") yield self.authorize_redirect( redirect_uri=redirect_uri, client_id=google_oauth['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) class Saml2MetadataHandler(RequestHandler): @coroutine def get(self): logging.info("Initiating SAML 2.0 Metadata get.") settings = yield Query(self.settings["database"], "Settings").find_one() saml_settings = Saml2MetadataHandler.get_saml_settings(settings) self.set_header('Content-Type', 'text/xml') self.write(OneLogin_Saml2_Metadata.builder( sp=saml_settings['sp'], authnsign=saml_settings['security']['authnRequestsSigned'], wsign=saml_settings['security']['wantAssertionsSigned']) ) self.flush() @staticmethod def get_saml_settings(settings, saml_config=None): saml_settings = dict( sp=dict( entityId=urlparse.urlparse(settings["hostname"]).netloc, assertionConsumerService=dict( url="{0}/api/v1/auth/saml".format(settings["hostname"]), binding=OneLogin_Saml2_Constants.BINDING_HTTP_POST), NameIDFormat=OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED, attributeConsumingService=dict( serviceName="ElasticKube SAML", serviceDescription="ElasticKube SAML Service Provider", requestedAttributes=[] ) ), security=dict( authnRequestsSigned=False, wantAssertionsSigned=True, wantNameId=True ) ) if saml_config is not None: saml_settings['idp'] = dict( entityId=saml_config['idp_entity_id'], singleSignOnService=dict( url=saml_config['idp_sso'], binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT), x509cert=saml_config['idp_cert'] ) return saml_settings class Saml2LoginHandler(AuthHandler): NS_IDENTITY_CLAIMS = 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/' EMAIL_ATTRIBUTES = ('email', 'Email', 'User.Email', NS_IDENTITY_CLAIMS + 'email') FIRST_NAME_ATTRIBUTES = ('firstname', 'FirstName', 
'User.FirstName', NS_IDENTITY_CLAIMS + 'givenname') LAST_NAME_ATTRIBUTES = ('lastname', 'LastName', 'User.LastName', NS_IDENTITY_CLAIMS + 'surname') IDP_CERT_PATH = "md:IDPSSODescriptor/md:KeyDescriptor[@use='signing']/ds:KeyInfo/ds:X509Data/ds:X509Certificate" IDP_SSO_PATH = "md:IDPSSODescriptor/md:SingleSignOnService[@Binding='{0}']".format( OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT) @staticmethod def get_metadata_info(metadata): metadata_xml = etree.fromstring(str(metadata)) if metadata_xml.tag.endswith('EntitiesDescriptor'): metadata_xml = metadata_xml.find("md:EntityDescriptor", namespaces=OneLogin_Saml2_Constants.NSMAP) idp_entity_id = metadata_xml.attrib['entityID'] idp_domain = urlparse.urlparse(idp_entity_id).netloc idp_cert = metadata_xml.find(Saml2LoginHandler.IDP_CERT_PATH, namespaces=OneLogin_Saml2_Constants.NSMAP).text idp_sso = metadata_xml.find( Saml2LoginHandler.IDP_SSO_PATH, namespaces=OneLogin_Saml2_Constants.NSMAP).attrib['Location'] return (idp_entity_id, idp_domain, idp_cert, idp_sso) @coroutine def _get_saml_auth(self, request): settings = yield Query(self.settings["database"], "Settings").find_one() saml_config = settings[u'authentication'].get('saml', None) if saml_config is None: raise HTTPError(403, 'Forbidden request') netloc = urlparse.urlparse(settings["hostname"]).netloc host, _, port = netloc.partition(':') saml_request = dict( http_host=host, script_name=request.path, get_data={k: v[0] if len(v) == 1 else v for k, v in request.query_arguments.items()}, post_data={k: v[0] if len(v) == 1 else v for k, v in request.body_arguments.items()} ) if port: saml_request['server_port'] = port saml_settings = Saml2MetadataHandler.get_saml_settings(settings, saml_config) raise Return( (OneLogin_Saml2_Auth(saml_request, saml_settings), "{0}/api/v1/auth/saml".format(settings["hostname"])) ) def _get_attribute(self, attributes, mappings): for mapping in mappings: values = attributes.get(mapping, []) if len(values) > 0: return values[0].encode('utf8') return "" @coroutine def get(self): logging.info("Initiating SAML 2.0 Auth.") auth, return_to = yield self._get_saml_auth(self.request) logging.info("Redirecting to SAML for authentication.") self.redirect(auth.login(return_to=return_to)) @coroutine def post(self): logging.info("SAML redirect received.") auth, _ = yield self._get_saml_auth(self.request) auth.process_response() errors = auth.get_errors() if len(errors) > 0: logging.info("SAML authentication error: '%s'.", auth.get_last_error_reason()) raise HTTPError(401, reason=auth.get_last_error_reason()) if not auth.is_authenticated(): logging.info("SAML user not authenticated.") raise HTTPError(401, reason="SAML user not authenticated.") attributes = auth.get_attributes() logging.debug('SAML Attributes received: {0}'.format(attributes)) first_name = self._get_attribute(attributes, self.FIRST_NAME_ATTRIBUTES) last_name = self._get_attribute(attributes, self.LAST_NAME_ATTRIBUTES) settings = yield Query(self.settings["database"], "Settings").find_one() saml = settings[u'authentication'].get('saml', None) name_id = auth.get_nameid() user_email = self._get_attribute(attributes, self.EMAIL_ATTRIBUTES).lower() if not user_email: raise HTTPError(401, reason="SAML email attribute is missing.") user = yield self.settings["database"].Users.find_one({"saml_id": name_id}) user_updated = False if user and user["email"] != user_email: logging.info("User email changed!") user["email"] = user_email user_updated = True elif not user: user = yield 
self.settings["database"].Users.find_one({"email": re.compile(user_email, re.IGNORECASE)}) if user: user["saml_id"] = name_id user_updated = True # Validate user if it signup by SAML if user and 'email_validated_at' not in user: logging.debug('User %s (%s) validated via SAML', user_email, name_id) user = yield self._update_invited_user(user, attributes) user_updated = True if user: if user_updated: user = yield Query(self.settings["database"], 'Users').update(user) yield self.authenticate_user(user) self.redirect('/') else: logging.debug("User '%s' (%s) not found", user_email, name_id) escaped_name = cgi.escape("{0} {1}".format(first_name, last_name), quote=True) if not escaped_name: escaped_name = cgi.escape(name_id, quote=True) self.redirect('/request-invite?account={0}&&name={1}'.format( cgi.escape(user_email, quote=True), escaped_name)) @coroutine def _update_invited_user(self, user, attributes): for namespace_name in user["namespaces"]: namespace = yield Query(self.settings["database"], "Namespaces").find_one({"name": namespace_name}) if namespace is None: logging.warn("Cannot find namespace %s", namespace_name) else: if "members" in namespace: namespace["members"].append(user["username"]) else: namespace["members"] = [user["username"]] yield Query(self.settings["database"], "Namespaces").update(namespace) del user["namespaces"] first_name = self._get_attribute(attributes, self.FIRST_NAME_ATTRIBUTES) last_name = self._get_attribute(attributes, self.LAST_NAME_ATTRIBUTES) _fill_signup_invitation_request(user, firstname=first_name, lastname=last_name, password=None) raise Return(user)
{ "content_hash": "52127eddae7e2f1d3e5b2d43b939baba", "timestamp": "", "source": "github", "line_count": 623, "max_line_length": 117, "avg_line_length": 38.831460674157306, "alnum_prop": 0.6004050925925926, "repo_name": "ElasticBox/elastickube", "id": "c9c55d9d2facfc38c93725b023d1ac7fafea97bb", "size": "24192", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/api/v1/auth.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "523708" }, { "name": "HTML", "bytes": "105664" }, { "name": "JavaScript", "bytes": "403206" }, { "name": "Nginx", "bytes": "2992" }, { "name": "Python", "bytes": "266887" }, { "name": "Ruby", "bytes": "1485" }, { "name": "Shell", "bytes": "39391" } ], "symlink_target": "" }
from django.conf import settings from django.core.paginator import Paginator, EmptyPage def paginate(request, items, per_page=settings.DEFAULT_PER_PAGE, page_key='page'): paginator = Paginator(items, per_page) try: page_number = int(request.GET[page_key]) page = paginator.page(page_number) except (ValueError, KeyError, EmptyPage): page = paginator.page(1) return paginator, page
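# --- Editor's usage sketch ------------------------------------------------------
# How paginate() is typically called from a Django view. "Article" and the
# template path are hypothetical stand-ins; the helper only needs the request,
# an iterable/queryset, and optionally a per-page count and query-string key.
def article_list(request):
    from django.shortcuts import render
    from .models import Article  # hypothetical model, not part of this package

    paginator, page = paginate(request, Article.objects.all(), per_page=20)
    return render(request, "articles/list.html", {
        "page": page,            # current Page; falls back to page 1 on bad input
        "paginator": paginator,  # e.g. paginator.num_pages for the pager links
    })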
{ "content_hash": "e76df5809564c5bf1ef2578b663cbfc7", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 64, "avg_line_length": 29.133333333333333, "alnum_prop": 0.6819221967963387, "repo_name": "SableWalnut/wagtailinvoices", "id": "0df884a54123a54ca9ba3f54427eaa1fac4aebcb", "size": "437", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wagtailinvoices/pagination.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "13747" }, { "name": "Python", "bytes": "30759" } ], "symlink_target": "" }
""" Support for Tellstick covers using Tellstick Net. This platform uses the Telldus Live online service. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/cover.tellduslive/ """ import logging from homeassistant.components.cover import CoverDevice from homeassistant.components.tellduslive import TelldusLiveEntity _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Telldus Live covers.""" if discovery_info is None: return add_entities(TelldusLiveCover(hass, cover) for cover in discovery_info) class TelldusLiveCover(TelldusLiveEntity, CoverDevice): """Representation of a cover.""" @property def is_closed(self): """Return the current position of the cover.""" return self.device.is_down def close_cover(self, **kwargs): """Close the cover.""" self.device.down() self.changed() def open_cover(self, **kwargs): """Open the cover.""" self.device.up() self.changed() def stop_cover(self, **kwargs): """Stop the cover.""" self.device.stop() self.changed()
{ "content_hash": "92da46e966f9e5aa9dd3e3ca1cb2e425", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 75, "avg_line_length": 26.804347826086957, "alnum_prop": 0.6763990267639902, "repo_name": "persandstrom/home-assistant", "id": "9d292d9e8b5bb340ad49fc1cf5cda421d8462ea5", "size": "1233", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "homeassistant/components/cover/tellduslive.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1067" }, { "name": "Python", "bytes": "11745210" }, { "name": "Ruby", "bytes": "518" }, { "name": "Shell", "bytes": "16652" } ], "symlink_target": "" }
from __future__ import unicode_literals from flask.ext import restful class Resource(restful.Resource): pass
{ "content_hash": "ea4c65e65a40bce20f975afd8d12ba84", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 39, "avg_line_length": 16.571428571428573, "alnum_prop": 0.7586206896551724, "repo_name": "patrikpettersson/rest-engine", "id": "7be55471cf66011ef60a7a00e2bcd49ad259c04c", "size": "140", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/flask_restplus/resource.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6111" }, { "name": "JavaScript", "bytes": "6338" }, { "name": "Python", "bytes": "2834855" } ], "symlink_target": "" }
""" Basic request class """ ## MIT License ## ## Copyright (c) 2017, krishna bhogaonker ## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: ## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. __author__ = 'krishna bhogaonker' __copyright__ = 'copyright 2017' __credits__ = ['krishna bhogaonker'] __license__ = "MIT" __version__ = '0.1.0' __maintainer__ = 'krishna bhogaonker' __email__ = 'cyclotomiq@gmail.com' __status__ = 'pre-alpha' from abcHandler import Handler import uuid from enum import Enum class Request(): class State(Enum): open = 'open' close = 'closed' rejected = 'rejected' class Composites(Enum): none = 'none' monthly = 'monthly' quarterly = 'quarterly' yearly = 'yearly' def __init__(reqCollection, bands, startdate, enddate, compositeFlag): self.id = uuid.uuid5() # unique id for request self.status = Request.State.open # request status self.requestCollection = reqCollection # satellite imagery collection self.requestBands = [] # bands from imagery collection self.startdate = startdate # state date for imagery request self.enddate = enddate # end date for imagery request self.compositeFlag = compositeFlag # composite images or raw daily images self.status = Request.State.open # self.urllist = [] class ValidationLogic: @classmethod def isnotinteger(cls, value): try: return int(value) except ValueError as e: raise IsNotInteger(e) class Error(Exception): """Base class for exceptions in this module.""" pass class Error1(Error): def __init__(self, evalue): print('The value entered is invalid: ' + str(evalue)) class Tests(): def test_t1(self): pass def main() if __name__ == "__main__": main()
{ "content_hash": "f57b3455946ca03eee98970deea38fbf", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 463, "avg_line_length": 31.886363636363637, "alnum_prop": 0.6856735566642908, "repo_name": "krishnab-datakind/mining-data-acquisition", "id": "c784da7e9f9a7d856ffbcd7e7f78e9a189b6ac3f", "size": "2826", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "data_gather/request.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "2333" }, { "name": "Python", "bytes": "107734" } ], "symlink_target": "" }
formatter = "%r %r %r %r" print formatter % (1, 2, 3, 4) print formatter % ("one", "two", "three", "four") print formatter % (True, False ,True ,False) print formatter % (formatter, formatter, formatter, formatter) print formatter % ( "I had this." , "You could type up right.", "But it didn't sing.", "So i said bye." )
{ "content_hash": "e3809aab5ee4595839686754b4298878", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 62, "avg_line_length": 22.75, "alnum_prop": 0.5686813186813187, "repo_name": "Vayne-Lover/Effective", "id": "0f6fd0078cf404bd14d2eeab05e3b8b330ebbca0", "size": "389", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Python/Learn Python The Hard Way/ex8.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "37043" } ], "symlink_target": "" }
import sys import threading import logging from logging.handlers import ( TimedRotatingFileHandler, SocketHandler, DatagramHandler, SysLogHandler, SMTPHandler, HTTPHandler, ) from logging import LoggerAdapter try: from six import with_metaclass except: def with_metaclass(meta, *bases): """Create a base class with a metaclass. copy from six """ class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) from .util import ( read_from_yaml, read_from_etcd, parse_config, ) PY2 = sys.version_info[0] == 2 class IntField(int): """ the instance have both int type and other attributes """ def __new__(cls, value=0, name=None, **kwargs): obj = int.__new__(cls, value) kwargs['name'] = name obj.__dict__.update(**kwargs) return obj class StrField(str): """ the instance have both str type and other attributes """ def __new__(cls, value=0, name="", **kwargs): obj = str.__new__(cls, value) kwargs['name'] = name obj.__dict__.update(**kwargs) return obj class ConstMetaClass(type): def __new__(mcs, name, bases, namespace): field_dict = {} for k, v in namespace.items(): if k.isupper() and isinstance(v, (int, str)): if isinstance(v, int) and not isinstance(v, IntField): # default name is k namespace[k] = IntField(v, name=k) elif isinstance(v, str) and not isinstance(v, StrField): namespace[k] = StrField(v, name=k) field_dict[k] = namespace[k] namespace["FIELD_DICT"] = field_dict return type.__new__(mcs, name, bases, namespace) class _Const(with_metaclass(ConstMetaClass)): FIELD_DICT = NotImplemented class Logger(LoggerAdapter): """ inspired from log4j2 Technical Terms: Java Python Appender -> Handler Layout -> Formatter Logger -> Logger Layout -> format """ # [ 16.6.7. LogRecord attributes -Python docs ](https://docs.python.org/3/library/logging.html#logrecord-attributes) DEFAULT_FORMAT = '[%(levelname)s %(process)d:%(asctime)s:%(funcName)s:%(lineno)d] %(message)s' DEFAULT_DATE_FORMAT = '%Y%m%d %H:%M:%S' # display as local zone class RotateMode(_Const): """ https://docs.python.org/3/library/logging.handlers.html need learn about TimedRotatingFileHandler rotate type not support Weekday 'W0' - 'W6' """ SECONDS = 'S' MINUTES = 'M' HOURS = 'H' DAYS = 'D' MIDNIGHT = 'midnight' def __init__(self, logger, formatter, extra=None): """ :param logger: :param formatter: :param extra: for logging.LoggerAdapter specify contextual :return: """ self._formatter = formatter super(Logger, self).__init__(logger, extra=extra) def set_extra(self, extra): if not isinstance(extra, dict): raise TypeError("extra not dict") self.extra = extra def update_extra(self, update_extra): if not isinstance(update_extra, dict): raise TypeError("update_extra not dict") self.extra.update(update_extra) def config_file_handler(self, filename, level=None, rotate_mode=RotateMode.DAYS): self.add_handler(TimedRotatingFileHandler(filename, when=rotate_mode, utc=True), level=level) def config_tcp_handler(self, host, port, level=None): self.add_handler(SocketHandler(host, port), level=level) def config_udp_handler(self, host, port, level=None): self.add_handler(DatagramHandler(host, port), level=level) def config_syslog_handler(self, *args, **kwargs): # should known SysLogHandler params level = kwargs.pop('level', None) self.add_handler(SysLogHandler(*args, **kwargs), level=level) def config_smtp_handler(self, *args, **kwargs): # should known SMTPHandler params level = kwargs.pop('level', None) self.add_handler(SMTPHandler(*args, **kwargs), level=level) def 
config_http_handler(self, *args, **kwargs): # should known HTTPHandler params level = kwargs.pop('level', None) self.add_handler(HTTPHandler(*args, **kwargs), level=level) def add_handler(self, handler, **kwargs): level = kwargs.get('level') handler.setFormatter(self._formatter) if level: handler.setLevel(level) self.logger.addHandler(handler) class LogManager(object): _lock = threading.RLock() _REGISTERED_LOGGER_DICT = {} # config options: CONFIG_YAML = 1 CONFIG_ETCD = 2 CONFIG_READ_HANDLER_DICT = { CONFIG_YAML: read_from_yaml, CONFIG_ETCD: read_from_etcd, } _META_CONFIG = NotImplemented @classmethod def register_meta_config(cls, config_type, **kwargs): """ register your meta config: 1. tell LogManager way do want to read from 2. tell the specified config type with parameters that you can correctly read the config data :param config_type: :param kwargs: config read_handler with read the parameters """ if config_type not in cls.CONFIG_READ_HANDLER_DICT.keys(): raise ValueError("no support config_type= {0} it should be defined in LogManager.ConfigType".format( config_type)) with cls._lock: cls._META_CONFIG = {'type': config_type, 'kwargs': kwargs} @classmethod def load(cls): """ Recommendation: just load once at the startup of the process/app eg: LogManager.register_meta_config(LogManager.ConfigType.YAML, host="127.0.0.1", port=2379) LogManager.load() # your app start running app.run() """ with cls._lock: config_type = cls._META_CONFIG['type'] read_handler = cls.CONFIG_READ_HANDLER_DICT[config_type] config_data = read_handler(**cls._META_CONFIG['kwargs']) parse_config(cls, config_data) @staticmethod def get_root_logger(): return logging.getLogger() @staticmethod def create_logger( name=None, level=logging.INFO, propagate=True, date_fmt=Logger.DEFAULT_DATE_FORMAT, fmt=Logger.DEFAULT_FORMAT ): """ :param name: default None :param level: default logging.INFO :param propagate: default True :param date_fmt: :param fmt: :return: Logger instance """ logger = logging.getLogger(name) formatter = logging.Formatter(datefmt=date_fmt, fmt=fmt) logger.setLevel(level) logger.propagate = propagate return Logger(logger, formatter) @classmethod def get_logger(cls, name): registered_logger = cls._REGISTERED_LOGGER_DICT.get(name) if registered_logger: return registered_logger else: root_logger = cls.get_root_logger() root_logger.warning("not found logger by name= {0}".format(name)) return root_logger
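# --- Editor's usage sketch ------------------------------------------------------
# Creating a logger directly (bypassing the yaml/etcd-driven LogManager.load()
# path) and attaching a daily-rotating file handler. The log file name and the
# extra field are placeholders.
def _demo_logger():
    logger = LogManager.create_logger(name="demo", level=logging.DEBUG)
    logger.config_file_handler("/tmp/demo.log",
                               rotate_mode=Logger.RotateMode.DAYS)
    logger.set_extra({"request_id": "n/a"})   # contextual fields for LoggerAdapter
    logger.info("hello from kklogger")
    return logger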
{ "content_hash": "243aab05d26265f5aa88003738f415f3", "timestamp": "", "source": "github", "line_count": 232, "max_line_length": 120, "avg_line_length": 31.75862068965517, "alnum_prop": 0.6000271444082519, "repo_name": "kaka19ace/kklogger", "id": "066594d6c39017fa2cfd933109858a543c5fd4d9", "size": "7452", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "kklogger/logger.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "11787" } ], "symlink_target": "" }
from .exceptions import EncodingError def to_long(base, lookup_f, s): """ Convert an array to a (possibly bignum) integer, along with a prefix value of how many prefixed zeros there are. base: the source base lookup_f: a function to convert an element of s to a value between 0 and base-1. s: the value to convert """ prefix = 0 v = 0 for c in s: v *= base try: v += lookup_f(c) except Exception: raise EncodingError("bad character %s in string %s" % (c, s)) if v == 0: prefix += 1 return v, prefix def from_long(v, prefix, base, charset): """The inverse of to_long. Convert an integer to an arbitrary base. v: the integer value to convert prefix: the number of prefixed 0s to include base: the new base charset: an array indicating what printable character to use for each value. """ ba = bytearray() while v > 0: try: v, mod = divmod(v, base) ba.append(charset(mod)) except Exception: raise EncodingError("can't convert to character corresponding to %d" % mod) ba.extend([charset(0)] * prefix) ba.reverse() return bytes(ba) """ The MIT License (MIT) Copyright (c) 2013 by Richard Kiss Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """
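# --- Editor's usage sketch (assumes Python 3 byte iteration) ---------------------
# Round-tripping a byte string through an arbitrary base with the two helpers
# above. Base 16 is used only because the result is easy to check by eye;
# pycoin uses the same helpers for alphabets such as base58.
def _demo_base_conversion():
    alphabet = b"0123456789abcdef"
    data = b"\x00\x00hello"                      # two leading zero bytes
    # bytes -> integer (base 256), remembering the number of leading zero bytes
    v, prefix = to_long(256, lambda b: b, data)
    # integer -> base-16 text, restoring one prefix character per leading zero
    encoded = from_long(v, prefix, 16, lambda d: alphabet[d])
    assert encoded == b"0068656c6c6f"
    # and back again
    v2, prefix2 = to_long(16, alphabet.index, encoded)
    assert from_long(v2, prefix2, 256, lambda d: d) == data
    return encoded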
{ "content_hash": "91e5aa2fe809d951a0f7cd51bb02dee9", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 87, "avg_line_length": 33.183098591549296, "alnum_prop": 0.6842105263157895, "repo_name": "richardkiss/pycoin", "id": "d388c092c05e0224b8227629222d3c31617cd740", "size": "2356", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "pycoin/encoding/base_conversion.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "115" }, { "name": "Python", "bytes": "752865" }, { "name": "Shell", "bytes": "198" } ], "symlink_target": "" }
""" Randomly sample and print out instructions from a level. """ import argparse import babyai import gym parser = argparse.ArgumentParser("Show level instructions") parser.add_argument("--n-episodes", type=int, default=10000, help="Collect instructions from this many episodes") parser.add_argument("level", help="The level of interest") args = parser.parse_args() env = gym.make(args.level) instructions = set(env.reset()['mission'] for i in range(args.n_episodes)) for instr in sorted(instructions): print(instr)
{ "content_hash": "a62494b441d1c061f0df80757b645daf", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 74, "avg_line_length": 26.904761904761905, "alnum_prop": 0.695575221238938, "repo_name": "mila-iqia/babyai", "id": "1ed0ee50c8c2fc36de0f82ab330b10dffc04625a", "size": "565", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/show_level_instructions.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Dockerfile", "bytes": "656" }, { "name": "Python", "bytes": "288971" } ], "symlink_target": "" }
from __future__ import unicode_literals from pybooru import Moebooru client = Moebooru('Konachan') posts = client.post_list(tags='blue_eyes', limit=10) for post in posts: print("URL image: {0}".format(post['file_url']))
{ "content_hash": "329ae8fb008f5b84b5660644ebe23781", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 52, "avg_line_length": 28.25, "alnum_prop": 0.7123893805309734, "repo_name": "LuqueDaniel/pybooru", "id": "d708c35e6cb6bd5cd0b3c483f2fa3a0e10d9d989", "size": "250", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/moebooru/posts_img_url.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "144" }, { "name": "Python", "bytes": "91357" }, { "name": "Shell", "bytes": "149" } ], "symlink_target": "" }
print('Welcome to Moustacheminer Server Services Music Bot') import json import traceback import discord from os import path from discord.ext import commands from discord.ext.commands import errors as commands_errors with open(path.abspath(path.join(path.dirname(__file__), '..', 'config', 'default.json'))) as f: config = json.load(f) token = config.get('api')['discord']['token'] prefix = config.get('discord')['prefix'][0] class Bot(commands.AutoShardedBot): def __init__(self, **options): super().__init__(**options) async def on_ready(self): app_info = await self.application_info() self.invite_url = discord.utils.oauth_url(app_info.id) print(f'Logged in as {self.user.name}\nBot invite link: {self.invite_url}') self.load_extension('extensions.core') async def on_command_error(self, ctx, exception): if isinstance(exception, commands_errors.MissingRequiredArgument): command = ctx.invoked_subcommand or ctx.command _help = await ctx.bot.formatter.format_help_for(ctx, command) for page in _help: await ctx.send(page) elif isinstance(exception, commands_errors.CommandInvokeError): exception = exception.original _traceback = traceback.format_tb(exception.__traceback__) _traceback = ''.join(_traceback) error = ('`{0}` in command `{1}`: ```py\nTraceback (most recent call last):\n{2}{0}: {3}\n```')\ .format(type(exception).__name__, ctx.command.qualified_name, _traceback, exception) await ctx.send(error) elif isinstance(exception, commands_errors.CommandOnCooldown): await ctx.send('You can use this command in {0:.0f} seconds.'.format(exception.retry_after)) elif isinstance(exception, commands_errors.CommandNotFound): pass else: await ctx.send(exception) async def on_message(self, message): if message.author.bot: return if str(message.author.id) in config.get('discord')['ban']: return await self.process_commands(message) bot = Bot(command_prefix=prefix) bot.remove_command('help') bot.run(token)
{ "content_hash": "ee0d7d0984d8d9bc87c376738e21dd0b", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 108, "avg_line_length": 34.646153846153844, "alnum_prop": 0.63898756660746, "repo_name": "moustacheminer/MSS-Discord", "id": "52b59e80ebbc88dc7d9f7081b2c29fd7432dcf12", "size": "2252", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "music/bot.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "259219" }, { "name": "JavaScript", "bytes": "58682" }, { "name": "Python", "bytes": "19058" } ], "symlink_target": "" }
from tests.src.event.TestEvent import TestEvent from tests.src.TestSynergyObject import TestSynergyObject class MakeBeansProfitEvent(TestEvent): def _prepare(self, object_id, context, parameters={}): return parameters
{ "content_hash": "b9bd3752fff5d06e3a5ef6f2dab0b2ea", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 58, "avg_line_length": 29, "alnum_prop": 0.7844827586206896, "repo_name": "buxx/synergine", "id": "06350a8843655b3b21df11747f98205409c96e13", "size": "232", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/src/event/MakeBeansProfitEvent.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "85069" } ], "symlink_target": "" }
import os import sys import errno import subprocess as sp # Define some constants AUDIO, VIDEO, SCREEN = 0, 1, 2 LINUX, MAC, WINDOWS = 0, 1, 2 # src: http://stackoverflow.com/a/377028/1044366 def which(program): def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def detectPlatform(): if sys.platform.startswith('linux'): return LINUX elif sys.platform.startswith('win') or sys.platform.startswith('cyg'): return WINDOWS elif sys.platform.startswith('darwin'): return MAC else: return None def get_git_root(cwd): git_root = os.path.abspath(cwd) # TODO: pep8 # TODO: while generating the commit-msg hook, # don't forget to write the python path # FIXME: doesn't work on windows while git_root != '/': if os.path.isdir(os.path.join(git_root, '.git')): # this is the git's root return git_root git_root = os.path.dirname(git_root) # at this point, the root (/) is the git root or there is not git root if os.path.isdir(os.path.join(git_root, '.git')): return git_root raise Exception('This is not a Git repository!') def make_sure_path_exists(path): try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise
{ "content_hash": "0279acf377a6cabd4c1d5719d5a15828", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 74, "avg_line_length": 27.158730158730158, "alnum_prop": 0.6066627703097603, "repo_name": "sandeepraju/git-talk", "id": "bf41bfc2e771147bddb9f0620da1a92219dfdeb0", "size": "1711", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gittalk/utils.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "417" }, { "name": "Python", "bytes": "58386" }, { "name": "Shell", "bytes": "1589" } ], "symlink_target": "" }
import os from unittest import mock from click.testing import CliRunner from great_expectations.cli import cli from tests.cli.utils import assert_no_logging_messages_or_tracebacks @mock.patch( "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit" ) def test_store_list_stores( mock_emit, caplog, empty_data_context_stats_enabled, monkeypatch ): project_dir = empty_data_context_stats_enabled.root_directory runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(project_dir)) result = runner.invoke( cli, "--v3-api store list", catch_exceptions=False, ) assert result.exit_code == 0 for expected_output in [ "5 active Stores found", "expectations_store", "validations_store", "evaluation_parameter_store", "checkpoint_store", "profiler_store", ]: assert expected_output in result.output assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.store.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.store.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert mock_emit.call_count == 3 assert_no_logging_messages_or_tracebacks(caplog, result)
{ "content_hash": "842b7027df44f990288617b6cced42a0", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 91, "avg_line_length": 28.803571428571427, "alnum_prop": 0.5827650340979541, "repo_name": "great-expectations/great_expectations", "id": "0b70773295d34006707257d3333d0804b0446d9d", "size": "1613", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "tests/cli/test_store.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "23771" }, { "name": "Dockerfile", "bytes": "2388" }, { "name": "HTML", "bytes": "27311" }, { "name": "JavaScript", "bytes": "45960" }, { "name": "Jinja", "bytes": "66650" }, { "name": "Jupyter Notebook", "bytes": "816323" }, { "name": "Lua", "bytes": "3489" }, { "name": "Makefile", "bytes": "657" }, { "name": "Python", "bytes": "15728777" }, { "name": "Shell", "bytes": "2930" } ], "symlink_target": "" }
""" byceps.services.shop.order.transfer.action ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2014-2022 Jochen Kupperschmidt :License: Revised BSD (see `LICENSE` file for details) """ from dataclasses import dataclass from typing import Any, Dict from uuid import UUID from ...article.transfer.models import ArticleNumber from .order import PaymentState ActionParameters = Dict[str, Any] @dataclass(frozen=True) class Action: id: UUID article_number: ArticleNumber payment_state: PaymentState procedure_name: str parameters: ActionParameters
{ "content_hash": "2d4f50061241f2dfb8f026497650c118", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 54, "avg_line_length": 21.40740740740741, "alnum_prop": 0.7093425605536332, "repo_name": "homeworkprod/byceps", "id": "68aa751997611e87699dc74ce938343b6042a16e", "size": "578", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "byceps/services/shop/order/transfer/action.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "38198" }, { "name": "HTML", "bytes": "318830" }, { "name": "JavaScript", "bytes": "8541" }, { "name": "Python", "bytes": "935249" } ], "symlink_target": "" }
import tweepy
import networkx as nx
import json
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt


total = []

'''
with open('Relation.txt') as r:
    graph = nx.Graph()
    a = r.readline()
    e = json.loads(a)
    retweeters = [w[0] for w in e]
    friends = [x for x in [w[1] for w in e]]
    followers = [x for x in [w[2] for w in e]]
    for ret in retweeters:
        if ret in friends or ret in followers:
            print(1)

    for w in e:
        rt = w[0]
        fr = w[1]
        fo = w[2]
        graph.add_edges_from([(rt, u) for u in fr + fo])
    print(nx.average_clustering(graph))
'''
# Files Relation_8.5 and Relation are both promoted.
# File Relation_Real, and future files with numbers appended, are all genuine.
with open('Relation_Total.txt') as r:
    for each_list in r:
        retweeter = []
        friends = []
        the_person_being_followed = []
        graph = nx.Graph()
        extract = json.loads(each_list)
        #print(extract)
        #print('Look upper')
        #print(type(extract))
        #print(extract[1])
        retweeters_ = [w[0] for w in extract]
        friends_ = list(np.concatenate([w[1] for w in extract]))
        followers_ = list(np.concatenate([x for x in [w[2] for w in extract]]))
        n = 0
        for ret_ in retweeters_:
            if ret_ in friends_ or ret_ in followers_:
                n += 1
        print(n)

        for e in extract:
            retweeter = str(e[0])
            friends = [str(r) for r in e[1]]
            the_person_being_followed = [str(r) for r in e[2]]
            graph.add_edges_from([(retweeter, u) for u in friends + the_person_being_followed])
            '''
            related_rts = [r for r in retweeters_ if r in friends+the_person_being_followed]
            graph.add_edges_from([(retweeter, u) for u in related_rts])
            graph.add_edges_from([(retweeter, 0)])
            '''
        clustering_coefficient = nx.average_clustering(graph)
        total.append(clustering_coefficient)
        print(clustering_coefficient)
print(total)
s_total = sorted(total)
plt.plot(range(len(s_total)), s_total, 'bo-')
plt.show()


'''
# This block is from the old version and is not needed for now.
with open('Relation.txt') as r:
    for each_list in r:
        extract = json.loads(each_list)
        #print(extract)
        #print(type(extract))
        #print(extract[1])
        retweeter.append(str(extract[0]))
        friends.append([str(r) for r in extract[1]])

# From here on, open the second file and load the other set of lists.
with open('Relation_Followers_ids.txt') as r:
    for each_list in r:
        extract = json.loads(each_list)
        #print(extract)
        #print(type(extract))
        #print(extract[1])
        #retweeter.append(extract[0])
        the_person_being_followed.append([str(r) for r in extract[1]])
'''
'''
graph = nx.Graph()
n = 0
end = len(retweeter)
'''
#print(set([r for r in retweeter if r in np.concatenate(friends)]))
#print(set([r for r in retweeter if r in np.concatenate(the_person_being_followed)]))
#set_friends = list(set([r for r in retweeter if r in np.concatenate(friends)]))
#set_the_person_being_followed = list(set([r for r in retweeter if r in np.concatenate(the_person_being_followed)]))

'''
# This is the networkx approach; the histogram is used now, so this part is unused.
graph = nx.Graph()
i = 0
while i < len(retweeter):
    for id in friends[i]:
        graph.add_edges_from([(retweeter[i], id)])
    for id in the_person_being_followed[i]:
        graph.add_edges_from([(retweeter[i], id)])
    i += 1
'''
#print(nx.average_clustering(graph))

# Don't draw the graph; it freezes every time...
#nx.draw(graph, with_labels=False)

# This figure is for #Blizzcon.
# Clustering coefficient
plt.hist(total, bins=10, range=None, normed=False)
plt.show()
{ "content_hash": "523a29dc47c5f19f54697bd0afe62e21", "timestamp": "", "source": "github", "line_count": 144, "max_line_length": 116, "avg_line_length": 25.493055555555557, "alnum_prop": 0.5979297194225007, "repo_name": "tapilab/is-prefixlt", "id": "263620cdab06a48d65ebe4b21ad9e532ae28c744", "size": "3833", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/3.2 Computing_Coefficient.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "36906" }, { "name": "Python", "bytes": "28637" } ], "symlink_target": "" }
""" A webdriver/selenium based scraper for San Francisco Board of Supervisors voting data. - Troy Deck (troy.deque@gmail.com) """ from selenium import webdriver from datetime import date import argparse import time import db ############# # Constants # ############# PATIENCE = 2 # Seconds to wait after executing a JS action VOTING_GRID_ID = 'ctl00_ContentPlaceHolder1_gridVoting_ctl00' # # Main scraping functions # def scrape_proposal_page(browser, proposal_url): """ Navigates to the page giving details about a piece of legislation, scrapes that data, and adds a model to the database session. Returns the new DB model. """ browser.get(proposal_url) file_number = int(extract_text(browser.find_element_by_css_selector( '#ctl00_ContentPlaceHolder1_lblFile2' ))) proposal_title = extract_text(browser.find_element_by_css_selector( '#ctl00_ContentPlaceHolder1_lblTitle2' )) proposal_type = extract_text(browser.find_element_by_css_selector( '#ctl00_ContentPlaceHolder1_lblIntroduced2' )) proposal_status = extract_text(browser.find_element_by_css_selector( '#ctl00_ContentPlaceHolder1_lblStatus2' )) introduction_date = parse_date(extract_text( browser.find_element_by_css_selector( '#ctl00_ContentPlaceHolder1_lblIntroduced2' ) )) db_proposal = db.Proposal(file_number, proposal_title) db_proposal.status = proposal_status db_proposal.proposal_type = proposal_type db_proposal.introduction_date = introduction_date db.session.add(db_proposal) db.session.flush() # TODO probably should refactor this out a t least return db_proposal def scrape_vote_page(browser): """ Assuming the browser is on a page containing a grid of votes, scrapes the vote data to populate the database. """ # Get the contents of the table headers, rows = extract_grid_cells(browser, VOTING_GRID_ID) # Do a quick check to ensure our assumption about the headers is correct assert headers[:6] == [ u'File #', u'Action Date', u'Title', u'Action Details', u'Meeting Details', u'Tally', ] # Go through the supervisors and add them to the DB if they are missing supervisors = headers[6:] legislator_objects = {} db.session.flush() # Pull values from each row and use them to populate the database second_browser = webdriver.Firefox() try: for row in rows: file_number = int(extract_text(row['File #'])) action_date = parse_date(extract_text(row['Action Date'])) # Find the proposal in the DB, or, if it isn't there, # create a record for it by scraping the info page about that # proposal. db_proposal = find_proposal(file_number) or scrape_proposal_page( second_browser, extract_href(row['File #']) ) db_vote_event = db.VoteEvent(db_proposal, action_date) db.session.add(db_vote_event) db.session.flush() for name in supervisors: vote_cast = extract_text(row[name]) if vote_cast in ('Aye', 'No'): db.session.add(db.Vote( record_supervisor(name), db_proposal, vote_cast == 'Aye' )) finally: second_browser.close() def scrape_vote_listing(browser): """ Starting from the first page and working to the last page, scrapes all votes from a multi-page grid and populates the database. """ page_number = 1 while select_grid_page(browser, VOTING_GRID_ID, page_number): scrape_vote_page(browser) db.session.flush() page_number += 1 def scrape_vote_years(year_range): """ Opens the votes page and scrapes the votes for all years in the given range. 
Populates the database and commits the transaction """ browser = webdriver.Firefox() try: # Navigate to the Board of Supervisors page browser.get('https://sfgov.legistar.com/MainBody.aspx') # Click the votes tab people_tab = browser.find_element_by_partial_link_text('Votes') people_tab.click() # Scrape each year of votes for year in year_range: if not select_dropdown_option( browser, 'ctl00_ContentPlaceHolder1_lstTimePeriodVoting_Input', str(year) ): raise Exception("Year not found in options.") scrape_vote_listing(browser) db.session.commit() except: db.session.rollback() raise finally: browser.close() # # Browser/DOM helpers # def select_dropdown_option(browser, selectbox_id, option_text): """ Interacts with a Telerik select-style control to select the option identified by the option_text. """ # Click the select box so Telerik will dynamically populate it selectbox = browser.find_element_by_id( selectbox_id ) selectbox.click() # Wait for the dropdown to appear time.sleep(PATIENCE) # Get the option items dropdown_id = selectbox_id.replace('Input', 'DropDown') #TODO hacky! dropdown = browser.find_element_by_id(dropdown_id) option_items = dropdown.find_elements_by_css_selector( 'div:nth-child(1) > ul:nth-child(1) > li' ) # Find the requested option for li in option_items: if option_text == extract_text(li): li.click() time.sleep(PATIENCE) return True return False def select_grid_page(browser, grid_id, page_number): """ Selects the specified page number for a grid view in the browser, if that page number is visible as an option. Returns True on success, false on failure. """ table = browser.find_element_by_id(grid_id) page_spans = table.find_elements_by_css_selector( 'thead > tr.rgPager > td > table > tbody > tr > td a > span' ) number_string = str(page_number) for index, span in enumerate(page_spans): span_text = extract_text(span) if number_string == span_text: span.click() time.sleep(PATIENCE) # TODO is this needed? return True elif span_text == '...' and index == len(page_spans) - 1: # We're on the last option and still haven't found ours, # so it could be on the next "page" of pages # (which we have to explicitly request with another page load) span.click() time.sleep(PATIENCE) return select_grid_page(browser, grid_id, page_number) return False def extract_grid_cells(browser, grid_id): """ Given the ID of a legistar table, returns a list of dictionaries for each row mapping column headers to td elements. """ table = browser.find_element_by_id(grid_id) header_cells = table.find_elements_by_css_selector( 'thead:nth-child(2) > tr:nth-child(2) > th' ) headers = [extract_text(cell) for cell in header_cells] tbody = table.find_element_by_css_selector('tbody:nth-child(4)') rows = tbody.find_elements_by_tag_name('tr') result_rows = [] for row in rows: cells = {} td_elements = row.find_elements_by_tag_name('td') for header, cell in zip(headers, td_elements): cells[header] = cell result_rows.append(cells) return (headers, result_rows) def extract_text(element): """ Returns the text from an element in a nice, readable form with whitespace trimmed and non-breaking spaces turned into regular spaces. """ return element.get_attribute('textContent').replace(u'\xa0', ' ').strip() def extract_href(element): """ Returns the href property of the first link found in the element's tree. """ return element.find_element_by_tag_name('a').get_attribute('href') def parse_date(date_text): """ Converts a date string in the American mm/dd/yyyy format to a Python date object. 
""" month, day, year = [int(field) for field in date_text.split('/')] return date(year, month, day) # # DB helpers # def record_supervisor(name): """ Queries for the given supervisor, creates a record for them in the database if they aren't there already, and returns a Legislator object. """ legislator = db.session.query(db.Legislator).filter_by(name=name).first() if not legislator: legislator = db.Legislator(name) db.session.add(legislator) db.session.flush() return legislator def find_proposal(file_number): """ Queries the database for a proposal based on its file number. Returns either the proposal model or None if it is not recorded. """ return (db.session.query(db.Proposal) .filter_by(file_number=file_number) .first() ) # # Main script # parser = argparse.ArgumentParser(description= ''' Populate a database with several years of voting records from the San Francisco board of supervisors. ''' ) parser.add_argument('first_year', metavar='first year', type=int) parser.add_argument('last_year', metavar='last year', type=int) args = parser.parse_args() scrape_vote_years(range(args.first_year, args.last_year + 1))
{ "content_hash": "fec6b94565abdba6f9c960f94839b6f7", "timestamp": "", "source": "github", "line_count": 304, "max_line_length": 80, "avg_line_length": 31.30921052631579, "alnum_prop": 0.6269174196259718, "repo_name": "tdeck/grab-sf-votes", "id": "fe433cb9fbba567df110e5bffd46a0b4237b01e0", "size": "9518", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "collect.py", "mode": "33188", "license": "mit", "language": [ { "name": "PLSQL", "bytes": "1057" }, { "name": "Python", "bytes": "11876" }, { "name": "Shell", "bytes": "182" } ], "symlink_target": "" }
""" INFINIDAT InfiniBox Volume Driver """ from contextlib import contextmanager import mock from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import requests import six from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import utils as vol_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) VENDOR_NAME = 'INFINIDAT' DELETE_URI = 'volumes/%s?approved=true' infinidat_opts = [ cfg.StrOpt('infinidat_pool_name', help='Name of the pool from which volumes are allocated'), ] CONF = cfg.CONF CONF.register_opts(infinidat_opts) @interface.volumedriver class InfiniboxVolumeDriver(san.SanDriver): VERSION = '1.0' # ThirdPartySystems wiki page CI_WIKI_NAME = "INFINIDAT_Cinder_CI" def __init__(self, *args, **kwargs): super(InfiniboxVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(infinidat_opts) self._lookup_service = fczm_utils.create_lookup_service() def do_setup(self, context): """Driver initialization""" self._session = requests.Session() self._session.auth = (self.configuration.san_login, self.configuration.san_password) management_address = self.configuration.san_ip self._base_url = 'http://%s/api/rest/' % management_address backend_name = self.configuration.safe_get('volume_backend_name') self._backend_name = backend_name or self.__class__.__name__ self._volume_stats = None LOG.debug('setup complete. base url: %s', self._base_url) def _request(self, action, uri, data=None): LOG.debug('--> %(action)s %(uri)s %(data)r', {'action': action, 'uri': uri, 'data': data}) response = self._session.request(action, self._base_url + uri, json=data) LOG.debug('<-- %(status_code)s %(response_json)r', {'status_code': response.status_code, 'response_json': response.json()}) try: response.raise_for_status() except requests.HTTPError as ex: # text_type(ex) includes http code and url msg = _('InfiniBox storage array returned %(exception)s\n' 'Data: %(data)s\n' 'Response: %(response_json)s') % { 'exception': six.text_type(ex), 'data': repr(data), 'response_json': repr(response.json())} LOG.exception(msg) if response.status_code == 404: raise exception.NotFound() else: raise exception.VolumeBackendAPIException(data=msg) return response.json()['result'] def _get(self, uri): return self._request('GET', uri) def _post(self, uri, data): return self._request('POST', uri, data) def _delete(self, uri): return self._request('DELETE', uri) def _put(self, uri, data): return self._request('PUT', uri, data) def _cleanup_wwpn(self, wwpn): return wwpn.replace(':', '') def _make_volume_name(self, cinder_volume): return 'openstack-vol-%s' % cinder_volume.id def _make_snapshot_name(self, cinder_snapshot): return 'openstack-snap-%s' % cinder_snapshot.id def _make_host_name(self, wwpn): wwn_for_name = self._cleanup_wwpn(wwpn) return 'openstack-host-%s' % wwn_for_name def _get_infinidat_volume_by_name(self, name): volumes = self._get('volumes?name=%s' % name) if len(volumes) != 1: msg = _('Volume "%s" not found') % name LOG.error(msg) raise exception.InvalidVolume(reason=msg) return volumes[0] def _get_infinidat_snapshot_by_name(self, name): snapshots = self._get('volumes?name=%s' % name) if len(snapshots) != 1: msg = _('Snapshot "%s" not found') % name LOG.error(msg) raise exception.InvalidSnapshot(reason=msg) return snapshots[0] def _get_infinidat_volume_id(self, cinder_volume): volume_name = 
self._make_volume_name(cinder_volume) return self._get_infinidat_volume_by_name(volume_name)['id'] def _get_infinidat_snapshot_id(self, cinder_snapshot): snap_name = self._make_snapshot_name(cinder_snapshot) return self._get_infinidat_snapshot_by_name(snap_name)['id'] def _get_infinidat_pool(self): pool_name = self.configuration.infinidat_pool_name pools = self._get('pools?name=%s' % pool_name) if len(pools) != 1: msg = _('Pool "%s" not found') % pool_name LOG.error(msg) raise exception.VolumeDriverException(message=msg) return pools[0] def _get_host(self, wwpn): host_name = self._make_host_name(wwpn) infinidat_hosts = self._get('hosts?name=%s' % host_name) if len(infinidat_hosts) == 1: return infinidat_hosts[0] def _get_or_create_host(self, wwpn): host_name = self._make_host_name(wwpn) infinidat_host = self._get_host(wwpn) if infinidat_host is None: # create host infinidat_host = self._post('hosts', dict(name=host_name)) # add port to host self._post('hosts/%s/ports' % infinidat_host['id'], dict(type='FC', address=self._cleanup_wwpn(wwpn))) return infinidat_host def _get_mapping(self, host_id, volume_id): existing_mapping = self._get("hosts/%s/luns" % host_id) for mapping in existing_mapping: if mapping['volume_id'] == volume_id: return mapping def _get_or_create_mapping(self, host_id, volume_id): mapping = self._get_mapping(host_id, volume_id) if mapping: return mapping # volume not mapped. map it uri = 'hosts/%s/luns?approved=true' % host_id return self._post(uri, dict(volume_id=volume_id)) def _get_online_fc_ports(self): nodes = self._get('components/nodes?fields=fc_ports') for node in nodes: for port in node['fc_ports']: if (port['link_state'].lower() == 'up' and port['state'] == 'OK'): yield self._cleanup_wwpn(port['wwpn']) @fczm_utils.add_fc_zone def initialize_connection(self, volume, connector): """Map an InfiniBox volume to the host""" volume_name = self._make_volume_name(volume) infinidat_volume = self._get_infinidat_volume_by_name(volume_name) for wwpn in connector['wwpns']: infinidat_host = self._get_or_create_host(wwpn) mapping = self._get_or_create_mapping(infinidat_host['id'], infinidat_volume['id']) lun = mapping['lun'] # Create initiator-target mapping. target_wwpns = list(self._get_online_fc_ports()) target_wwpns, init_target_map = self._build_initiator_target_map( connector, target_wwpns) return dict(driver_volume_type='fibre_channel', data=dict(target_discovered=False, target_wwn=target_wwpns, target_lun=lun, initiator_target_map=init_target_map)) @fczm_utils.remove_fc_zone def terminate_connection(self, volume, connector, **kwargs): """Unmap an InfiniBox volume from the host""" volume_id = self._get_infinidat_volume_id(volume) result_data = dict() for wwpn in connector['wwpns']: host_name = self._make_host_name(wwpn) infinidat_hosts = self._get('hosts?name=%s' % host_name) if len(infinidat_hosts) != 1: # not found. ignore. continue host_id = infinidat_hosts[0]['id'] # unmap uri = ('hosts/%s/luns/volume_id/%s' % (host_id, volume_id) + '?approved=true') try: self._delete(uri) except (exception.NotFound): continue # volume mapping not found # check if the host now doesn't have mappings, to delete host_entry # if needed infinidat_hosts = self._get('hosts?name=%s' % host_name) if len(infinidat_hosts) == 1 and len(infinidat_hosts[0]['luns']) == 0: # Create initiator-target mapping. 
target_wwpns = list(self._get_online_fc_ports()) target_wwpns, init_target_map = self._build_initiator_target_map( connector, target_wwpns) result_data = dict(target_wwn=target_wwpns, initiator_target_map=init_target_map) return dict(driver_volume_type='fibre_channel', data=result_data) def get_volume_stats(self, refresh=False): if self._volume_stats is None or refresh: pool = self._get_infinidat_pool() free_capacity_gb = float(pool['free_physical_space']) / units.Gi total_capacity_gb = float(pool['physical_capacity']) / units.Gi self._volume_stats = dict(volume_backend_name=self._backend_name, vendor_name=VENDOR_NAME, driver_version=self.VERSION, storage_protocol='FC', consistencygroup_support='False', total_capacity_gb=total_capacity_gb, free_capacity_gb=free_capacity_gb) return self._volume_stats def _create_volume(self, volume): # get pool id from name pool = self._get_infinidat_pool() # create volume volume_name = self._make_volume_name(volume) provtype = "THIN" if self.configuration.san_thin_provision else "THICK" data = dict(pool_id=pool['id'], provtype=provtype, name=volume_name, size=volume.size * units.Gi) return self._post('volumes', data) def create_volume(self, volume): """Create a new volume on the backend.""" # this is the same as _create_volume but without the return statement self._create_volume(volume) def delete_volume(self, volume): """Delete a volume from the backend.""" try: volume_name = self._make_volume_name(volume) volume = self._get_infinidat_volume_by_name(volume_name) if volume['has_children']: # can't delete a volume that has a live snapshot raise exception.VolumeIsBusy(volume_name=volume_name) self._delete(DELETE_URI % volume['id']) except (exception.InvalidVolume, exception.NotFound): return # volume not found def extend_volume(self, volume, new_size): """Extend the size of a volume.""" volume_id = self._get_infinidat_volume_id(volume) self._put('volumes/%s?approved=true' % volume_id, dict(size=new_size * units.Gi)) def create_snapshot(self, snapshot): """Creates a snapshot.""" volume_id = self._get_infinidat_volume_id(snapshot.volume) name = self._make_snapshot_name(snapshot) self._post('volumes', dict(parent_id=volume_id, name=name)) @contextmanager def _device_connect_context(self, volume): connector = utils.brick_get_connector_properties() connection = self.initialize_connection(volume, connector) try: yield self._connect_device(connection) finally: self.terminate_connection(volume, connector) def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot. InfiniBox does not yet support detached clone so use dd to copy data. This could be a lengthy operation. - create a clone from snapshot and map it - create a volume and map it - copy data from clone to volume - unmap volume and clone and delete the clone """ snapshot_id = self._get_infinidat_snapshot_id(snapshot) clone_name = self._make_volume_name(volume) + '-internal' infinidat_clone = self._post('volumes', dict(parent_id=snapshot_id, name=clone_name)) # we need a cinder-volume-like object to map the clone by name # (which is derived from the cinder id) but the clone is internal # so there is no such object. 
mock one clone = mock.Mock(id=str(volume.id) + '-internal') try: infinidat_volume = self._create_volume(volume) try: src_ctx = self._device_connect_context(clone) dst_ctx = self._device_connect_context(volume) with src_ctx as src_dev, dst_ctx as dst_dev: dd_block_size = self.configuration.volume_dd_blocksize vol_utils.copy_volume(src_dev['device']['path'], dst_dev['device']['path'], snapshot.volume.size * units.Ki, dd_block_size, sparse=True) except Exception: self._delete(DELETE_URI % infinidat_volume['id']) raise finally: self._delete(DELETE_URI % infinidat_clone['id']) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" try: snapshot_name = self._make_snapshot_name(snapshot) snapshot = self._get_infinidat_snapshot_by_name(snapshot_name) self._delete(DELETE_URI % snapshot['id']) except (exception.InvalidSnapshot, exception.NotFound): return # snapshot not found def _asssert_volume_not_mapped(self, volume): # copy is not atomic so we can't clone while the volume is mapped volume_name = self._make_volume_name(volume) infinidat_volume = self._get_infinidat_volume_by_name(volume_name) mappings = self._get("volumes/%s/luns" % infinidat_volume['id']) if len(mappings) == 0: return # volume has mappings msg = _("INFINIDAT Cinder driver does not support clone of an " "attached volume. " "To get this done, create a snapshot from the attached " "volume and then create a volume from the snapshot.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_cloned_volume(self, volume, src_vref): """Create a clone from source volume. InfiniBox does not yet support detached clone so use dd to copy data. This could be a lengthy operation. * map source volume * create and map new volume * copy data from source to new volume * unmap both volumes """ self._asssert_volume_not_mapped(src_vref) infinidat_volume = self._create_volume(volume) try: src_ctx = self._device_connect_context(src_vref) dst_ctx = self._device_connect_context(volume) with src_ctx as src_dev, dst_ctx as dst_dev: dd_block_size = self.configuration.volume_dd_blocksize vol_utils.copy_volume(src_dev['device']['path'], dst_dev['device']['path'], src_vref.size * units.Ki, dd_block_size, sparse=True) except Exception: self._delete(DELETE_URI % infinidat_volume['id']) raise def _build_initiator_target_map(self, connector, all_target_wwns): """Build the target_wwns and the initiator target map.""" target_wwns = [] init_targ_map = {} if self._lookup_service is not None: # use FC san lookup. dev_map = self._lookup_service.get_device_mapping_from_network( connector.get('wwpns'), all_target_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) target_wwns = list(set(target_wwns)) else: initiator_wwns = connector.get('wwpns', []) target_wwns = all_target_wwns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map
{ "content_hash": "308457792c89023852d85c66ed0a5118", "timestamp": "", "source": "github", "line_count": 418, "max_line_length": 79, "avg_line_length": 41.5622009569378, "alnum_prop": 0.5658780866862373, "repo_name": "ge0rgi/cinder", "id": "067030f05fbfb40b1728822ae4520e04c033a783", "size": "18002", "binary": false, "copies": "1", "ref": "refs/heads/stable/ocata", "path": "cinder/volume/drivers/infinidat.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "19963591" } ], "symlink_target": "" }
import pyparsing import unittest from monasca_api.expression_parser import alarm_expr_parser class TestAlarmExpression(unittest.TestCase): good_simple_expression = "max(cpu.idle_perc{hostname=fred}, 60) > 10 times 4" def test_good_expression(self): expression = self.good_simple_expression sub_exprs = alarm_expr_parser.AlarmExprParser(expression).sub_expr_list self.assertEqual(1, len(sub_exprs)) def _ensure_parse_fails(self, expression): parser = alarm_expr_parser.AlarmExprParser(expression) self.assertRaises( (pyparsing.ParseException, pyparsing.ParseFatalException), getattr, parser, "sub_expr_list") def test_no_dimension_name(self): expression = self.good_simple_expression.replace('hostname', '') self._ensure_parse_fails(expression) def test_no_metric_name(self): expression = self.good_simple_expression.replace('cpu.idle_perc', '') self._ensure_parse_fails(expression) def test_invalid_period(self): expression = self.good_simple_expression.replace('60', '42') self._ensure_parse_fails(expression) def test_zero_period(self): expression = self.good_simple_expression.replace('60', '0') self._ensure_parse_fails(expression) def test_negative_period(self): expression = self.good_simple_expression.replace('60', '-60') self._ensure_parse_fails(expression) def test_zero_periods(self): expression = self.good_simple_expression.replace('times 4', 'times 0') self._ensure_parse_fails(expression)
{ "content_hash": "aedbe4aa10107e07a33e6f2b416297c7", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 81, "avg_line_length": 36.111111111111114, "alnum_prop": 0.676923076923077, "repo_name": "sapcc/monasca-api", "id": "0a8848c18bd1d87db2abed2439f8379638e643f4", "size": "2241", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "monasca_api/tests/test_alarm_expression.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "PLpgSQL", "bytes": "4305" }, { "name": "Python", "bytes": "865299" }, { "name": "Ruby", "bytes": "4534" }, { "name": "Shell", "bytes": "72248" } ], "symlink_target": "" }
from distutils.core import setup setup( name = 'emitter.py', packages = ['emitter'], version = '7.0.2', description = 'A minimalist event emitter for Python 3.', author = 'titouandk', author_email = 'dk.titouan@gmail.com', url = 'https://github.com/titouandk/emitter.py', download_url = 'https://github.com/titouandk/emitter.py/tarball/7.0.2', keywords = ['emitter.py', 'event emitter', 'event', 'emitter', 'python 3', 'python3'], classifiers = ['Programming Language :: Python :: 3'], )
{ "content_hash": "f4a53bdf63046f85824aba960a7c31f5", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 88, "avg_line_length": 39.07692307692308, "alnum_prop": 0.6653543307086615, "repo_name": "titouandk/emitter.py", "id": "db4a08d726ec9280f0f82088bd66ec2550032e37", "size": "508", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "18540" } ], "symlink_target": "" }
from django.db import models from django.core.cache import cache from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes import generic from django.contrib.comments.models import Comment from courant.core.gettag import gettag class CommentOptions(models.Model): """ A collection of settings that governs how comments are moderated or treated. """ name = models.CharField(max_length=50) allow_anonymous = models.BooleanField(help_text="Allow anonymous users to post comments") moderate_anonymous_only = models.BooleanField(help_text="If set, only anonymous users' comments will be moderated. Authenticated users comments will immediately become publicly visible") moderate_after = models.PositiveSmallIntegerField(default=30, help_text="# of days after which posts must be moderated. Set to zero (0) for all posts to be moderated") close_after = models.PositiveSmallIntegerField(default=365, help_text="# of days after which commenting will be closed. Set to zero (0) to disable commenting") class Meta: verbose_name = "Comment Options" verbose_name_plural = "Comments Options" def __unicode__(self): return self.name class DefaultCommentOption(models.Model): """ Assigns a default CommentOptions instance for a given ContentType. """ content_type = models.ForeignKey(ContentType, unique=True) options = models.ForeignKey(CommentOptions) def __unicode__(self): return self.content_type.name def save(self, **kwargs): super(DefaultCommentOption, self).save(**kwargs) cache.delete('default_%s_comment_options' % self.content_type.model.lower()) gettag.register(Comment, name_field='body')
{ "content_hash": "f8636b6f785645998f53b718e8c94066", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 190, "avg_line_length": 42.06818181818182, "alnum_prop": 0.6985413290113452, "repo_name": "maxcutler/Courant-News", "id": "d2b30676458a73662a360c0953f70362192f1f63", "size": "1851", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "courant/core/discussions/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "47452" }, { "name": "Python", "bytes": "487441" } ], "symlink_target": "" }
from collections import OrderedDict, Counter import csv from datetime import datetime, timedelta import hashlib import json import os import platform import plistlib import re import shutil import sqlite3 from subprocess import call import sys import time import urllib import uuid import webbrowser from flask import Flask, render_template, request, flash, url_for, redirect, Response, send_from_directory import pyesedb import vss from werkzeug.utils import secure_filename class BrowsingHistory(object): """A class to load, modify and export a *copy* of the web browsing history. Supported browsers: - Google Chrome - Firefox - Safari - Internet Explorer (>= 10) / Edge """ def __init__(self): self.os = platform.system() self.os_release = platform.release() self.os_full = ' '.join([self.os, self.os_release]) self._upload_dir = self._root_path('uploads') self._file_path = os.path.split(os.path.realpath(__file__))[0] self._browser_name = None self._browser_specs = None self._db = None self._min_date = self._min_date(num_days=60) self.date_range = None self.num_domains = None self.ready = self._state() self._domains = None self._keywords = None self._entries = None def _handle_platform(self): """Helper function to handle platform name. :return str: Platform name """ if self.os in ['Linux', 'Darwin']: return self.os elif self.os == 'Windows': pat = re.compile(r'Windows\s\d{1,2}') match = re.findall(pat, self.os_full) if match: return match[0] def _build_path(self, _os): """Helper function to build and check the path to the browsing history. :param _os: Operation system :return str or boolean: path or False """ if (self._browser_name == 'IE11' and self.os in ['Linux', 'Darwin']) or (self._browser_name == 'Safari' and self.os in ['Linux', 'Windows']): return False else: user = os.getlogin() for p in self._browser_specs['path'][_os]: if self._browser_name == 'Firefox': # checking for profile name pat = re.compile(r'\w+.default([\w\-\_\.]+)?') if os.path.isdir(p.format(user)): for item in os.listdir(p.format(user)): if re.findall(pat, item): profile_name = item path = os.path.join(p.format(user), profile_name, self._browser_specs['file_name'][0]) if os.path.isfile(path): return path else: continue else: if os.path.isdir(p.format(user)): for f in self._browser_specs['file_name']: path = os.path.join(p.format(user), f) if os.path.isfile(path): return path else: continue return False def _create_db(self): """Creates an empty temporary sqlite database in the 'tmp' directory. :return: Database path or None """ try: conn = sqlite3.connect('tmp/browsing_history.db') cur = conn.cursor() with open(self._root_path('data/schema.sql'), 'r') as sql_script: queries = sql_script.read() cur.executescript(queries) conn.commit() except BaseException: return None finally: cur.close() conn.close() return 'tmp/browsing_history.db' def _unique_id_generator(self): """Helper function to generate unique identifiers. :return: integer value """ unique_id = 1 while True: yield unique_id unique_id += 1 def _extract_history_plist(self, pl): """Extracts Safari browsing history (History.plist) file. 
:param pl: File path (string) :return: Two lists of tuples """ visits = [] urls = [] g = self._unique_id_generator() with open(pl, 'rb') as f: d = plistlib.load(f) for item in d['WebHistoryDates']: date = self._convert_timestamp(float(item['lastVisitedDate']), browser_name=self._browser_name, tz='utc') # Filter by url and minimum date if self._is_url(item['']) and date >= self._min_date: last_visit_date = date visit_date = last_visit_date url = item[''] title = item['title'] visit_count = item['visitCount'] if 'redirectURLs' in item.keys(): redirect_urls = ' '.join(item['redirectURLs']) else: redirect_urls = None url_id = next(g) _id = url_id urls.append((_id, last_visit_date, redirect_urls, title, url, visit_count)) visits.append((url_id, visit_date)) else: continue return urls, visits def _copy_webcachev01_dat(self, file_path): ''' Creates a shadow copy of WebCacheVxx.dat and copies it into the upload folder. :param file_path: The file path of WebCacheVxx.dat :return: Boolean value ''' # Adapted from sblosser's example code: # https://github.com/sblosser/pyshadowcopy # on 2017-07-31 # Create a set that contains the LOCAL disks you want to shadow drv = file_path[0] local_drives = set() local_drives.add(drv) # Initialize the Shadow Copies try: sc = vss.ShadowCopy(local_drives) # An open and locked file we want to read locked_file = file_path shadow_path = sc.shadow_path(locked_file) try: shutil.copy(shadow_path, self._root_path('uploads')) except BaseException as e: print(e) sc.delete() return False finally: sc.delete() return True except BaseException: return False def _extract_webcachev01_dat(self, file_path): """Extracts data from WebCacheVxx.dat. :param str file_path: The file path of WebCacheVxx.dat :return lists urls, visits: Two lists of tuples """ # Adapted from Jon Glass' blog: # http://jon.glass/blog/attempts-to-parse-webcachev01-dat/ # on 2017-07-31 if self._copy_webcachev01_dat(file_path): file_name = os.path.split(file_path)[1] elif 'WebCacheV01.dat' in os.listdir(self._upload_dir): file_name = 'WebCacheV01.dat' elif 'WebCacheV24.dat' in os.listdir(self._upload_dir): file_path = 'WebCacheV24.dat' else: return False visits = [] urls = {} pat = re.compile(r'(?<=@)http[\w:\_\-/.]+') esedb_file = pyesedb.file() try: with open(os.path.join(self._root_path('uploads'), file_name), "rb") as f: esedb_file.open_file_object(f) containers_table = esedb_file.get_table_by_name("Containers") g = self._unique_id_generator() for i in range(0, containers_table.get_number_of_records()): if containers_table.get_record(i).get_value_data_as_string(8) == 'History': container_id = containers_table.get_record(i).get_value_data_as_integer(0) history_table = esedb_file.get_table_by_name("Container_" + str(container_id)) for j in range(0, history_table.get_number_of_records()): if history_table.get_record(j).is_long_value(17): url = history_table.get_record(j).get_value_data_as_long_value(17).get_data_as_string() else: url = history_table.get_record(j).get_value_data_as_string(17) date = self._convert_timestamp(history_table.get_record(j).get_value_data_as_integer(13), browser_name=self._browser_name, tz='utc') # Filter by url and minimum date if re.findall(pat, url) and date >= self._min_date: url = re.findall(pat, url)[0] if url not in urls.keys(): unique_id = next(g) urls[url] = {} urls[url]['unique_id'] = unique_id urls[url]['access_count'] = history_table.get_record(j).get_value_data_as_integer(8) urls[url]['redirect_urls'] = history_table.get_record(j).get_value_data_as_string(22) entry_id = 
history_table.get_record(j).get_value_data_as_integer(0) accessed_time = date unique_entry_id = int(str(container_id) + str(unique_id)) visits.append((accessed_time, unique_entry_id, urls[url]['unique_id'])) else: access_count = history_table.get_record(j).get_value_data_as_integer(8) if access_count > 0: urls[url]['access_count'] += access_count else: continue esedb_file.close() urls = [(value['access_count'], value['redirect_urls'], value['unique_id'], key) for key, value in urls.items()] return urls, visits except PermissionError: return False def _import_data(self, file_path=None): """Imports data from a file into the database. :param file_path: The file path of the browsing history database file (e.g., sqlite database file or a plist property list file). :return: boolean value """ if file_path: file_path = file_path else: file_path = self._build_path(self._handle_platform()) if file_path: db_tables = tuple(self._browser_specs['tables'].keys()) translate = self._load_json(self._root_path('data'), 'table_names') conn = False if self._browser_name == 'Safari': file_name = os.path.split(file_path)[1] if file_name == 'History.db': safari8_tables = self._browser_specs['tables_s8'] db_tables = tuple(safari8_tables.keys()) if os.path.split(file_path)[0] != self._upload_dir: try: shutil.copy(file_path, self._root_path('uploads')) except shutil.Error as e: print(e) return False file_path = os.path.join(self._root_path('uploads'), file_name) try: conn = sqlite3.connect(file_path) except sqlite3.OperationalError as e: print(e) return False else: urls, visits = self._extract_history_plist(file_path) elif self._browser_name == 'IE11': try: urls, visits = self._extract_webcachev01_dat(file_path) except TypeError: return False elif self._browser_name == 'Chrome': if os.path.split(file_path)[0] != self._upload_dir: try: shutil.copy(file_path, self._root_path('uploads')) except shutil.Error as e: print(e) return False file_path = os.path.join(self._root_path('uploads'), self._browser_specs['file_name'][0]) try: conn = sqlite3.connect(file_path) except sqlite3.OperationalError as e: print(e) return False elif self._browser_name == 'Firefox': try: conn = sqlite3.connect(file_path) except sqlite3.OperationalError as e: print(e) return False new_db = sqlite3.connect(self._db) new_db_cur = new_db.cursor() for table in db_tables: if conn and self._browser_name == 'Safari': od = OrderedDict(sorted(safari8_tables[table].items(), key=lambda t: t[0])) else: od = OrderedDict(sorted(self._browser_specs['tables'][table].items(), key=lambda t: t[0])) if conn: conn.create_function('REGEXP', 2, self._regexp) c = conn.cursor() if translate[table] == 'visits': if self._browser_name == 'Chrome': q = "SELECT {0} FROM visits WHERE ((visits.visit_time/1000000)-11644473600) >= {1};".format(', '.join(od.keys()), self._min_date) elif self._browser_name == 'Firefox': q = "SELECT {0} FROM moz_historyvisits WHERE (visit_date/1000000) >= {1};".format(', '.join(od.keys()), self._min_date) elif self._browser_name == 'Safari': q = "SELECT {0} FROM history_visits WHERE (history_visits.visit_time + 978307200) >= {1};".format(', '.join(od.keys()), self._min_date) else: raise ValueError("Browser name {0} doesn't match.".format(self._browser_name)) else: if self._browser_name == 'Chrome': q = "SELECT {0} FROM urls, visits WHERE urls.id = visits.url AND ((visits.visit_time/1000000)-11644473600) >= {1} AND NOT REGEXP('^file:', urls.url);".format(', '.join(od.keys()), self._min_date) elif self._browser_name == 'Firefox': q = "SELECT {0} FROM 
moz_places, moz_historyvisits WHERE moz_places.id = moz_historyvisits.place_id AND (moz_historyvisits.visit_date/1000000) >= {1} AND NOT REGEXP('^file:///', moz_places.url);".format(', '.join(od.keys()), self._min_date) elif self._browser_name == 'Safari': q = "SELECT {0} FROM history_items, history_visits WHERE history_items.id = history_visits.history_item AND (history_visits.visit_time + 978307200) >= {1} AND NOT REGEXP('^file:', history_items.url);".format(', '.join(od.keys()), self._min_date) else: raise ValueError("Browser name {0} doesn't match.".format(self._browser_name)) rq = c.execute(q) r = rq.fetchall() else: if translate[table] == 'visits': r = visits else: r = urls # Insert data into new database try: if conn and self._browser_name == 'Safari': placeholders = ', '.join(['?' for x in range(len(safari8_tables[table].values()))]) else: placeholders = ', '.join(['?' for x in range(len(self._browser_specs['tables'][table].values()))]) query = 'INSERT OR IGNORE INTO {0} ({1}) VALUES ({2});'.format(translate[table], ', '.join(od.values()), placeholders) new_db_cur.executemany(query, r) new_db.commit() except sqlite3.OperationalError as e: print('sqlite3.OperationalError: ', e) return False if conn: c.close() conn.close() new_db_cur.close() new_db.close() return True else: return False def _regexp(self, p, s): pat = re.compile(p) if re.match(pat, s): return True else: return False def _load_json(self, path, name): """Helper function to load the json browser spec files. :param path: Path name :param name: File name (without file extension) :return: json object """ with open('{0}.json'.format(os.path.join(path, name)), 'r') as file: return json.load(file) def _save_json(self, data, path, name): """Helper function to write json object to a json file. :param data: json object :param path: path name :param name: file name (without file extension) :return: nothing """ with open('{0}.json'.format(os.path.join(path, name)), 'w') as file: json.dump(data, fp=file) def load(self, file_path=None, browser_name=None, min_date=None): """Loads the browsing history. :param str file_path: The file path of the browsing history :param str browser_name: The browser name :param min_date: The start date of the import :return: boolean value """ self._db = self._create_db() if browser_name == None: self._browser_name = self._load_json('tmp', 'browser_name')['browser_name'] else: self._browser_name = browser_name self._save_json({'browser_name': self._browser_name}, 'tmp', 'browser_name') self._browser_specs = self._load_json(self._root_path('data'), self._browser_name) if min_date: self._min_date = min_date if self._db: if file_path: status = self._import_data(file_path=file_path) else: status = self._import_data() if status: self.date_range = self._date_range() self.num_domains = len(self.visits(date=False, n=None, ascending=False, plot=False)) self.ready = True return True else: return False def _state(self): """Helper function to keep track of the current state of the temporary database. :return: boolean value """ db = 'tmp/browsing_history.db' if os.path.isfile(db): try: self._db = db self._browser_name = self._load_json('tmp', 'browser_name')['browser_name'] self._browser_specs = self._load_json(self._root_path('data'), self._browser_name) self.date_range = self._date_range() self.num_domains = len(self.visits(date=False, n=None, ascending=False, plot=False)) return True except TypeError: return False else: return False def _query(self, q): """Helper function to query the sqlite database. 
:param str q: Sqlite query :return: List of tuples """ if self._db: with sqlite3.connect(self._db) as conn: # connection to db c = conn.cursor() c.execute(q) return c.fetchall() else: return [] def _update_db(self, x, kind='domains'): """Update function for the sqlite database. :param list x: URL ids :param str kind: What kind of data should be updated. domains (default), keywords, urls :return: nothing """ #self.ready = False try: conn = sqlite3.connect(self._db) c = conn.cursor() if isinstance(x, str): if kind == 'keywords': pat = re.compile(r'(?:\?q=|\?p=|\?query=|search?q=|\?q\d=|\&q\d=|\?k=|\?text=|\&q=|key=|\?search=|\&search=|\&searchTerm=|\?searchTerm=)([a-zA-Z0-9äöüïéàèáÜÄÖ\%\+\-\*\s]+)', re.IGNORECASE) _ids = self._keywords[x]['ids'] else: _ids = self._domains[x]['ids'] elif isinstance(x, list) and kind == 'urls': _ids = x else: raise ValueError('Input type unsupported: expects string or list') for i in _ids: entry = c.execute("SELECT url, rev_host FROM urls WHERE id = ?;", (i,)).fetchall() url = self._is_url(entry[0][0], r=True) if url: hashed_url = self._hash_domain(url) unique_id = '{0}-{1}-{2}'.format('anonymisiert', hashed_url, i) if kind == 'keywords': new_entry = re.sub(pat, unique_id, entry[0][0]) c.execute('UPDATE urls SET url = ?, title = ? WHERE id = ?;', (new_entry, '***', i)) conn.commit() elif kind == 'urls': domain = '{0}/{1}'.format(self._stem_url(entry[0][0]), '***') c.execute('UPDATE urls SET url = ?, title = ?, redirect_urls = ? WHERE id = ?;', (domain, '***', '***', i)) conn.commit() elif kind == 'domains': c.execute('UPDATE urls SET url = ?, title = ?, rev_host = ?, redirect_urls = ? WHERE id = ?;', (unique_id, '***', '***', '***', i)) conn.commit() else: raise ValueError('{0} is not a valid kind.'.format(kind)) else: continue except sqlite3.OperationalError as e: print(e) finally: c.close() conn.close() def _date_conv(self, date): """Helper function to convert the date(s). :param date: string or list in %Y-%m-%d format :return: start (int), end (int), date (string) """ if isinstance(date, list): date_str = 'between {0} and {1}'.format(date[0], date[1]) t = int(time.mktime(datetime.strptime(date[0], "%Y-%m-%d").timetuple()) * 1000000) tk = int(time.mktime(datetime.strptime(date[1], "%Y-%m-%d").timetuple()) * 1000000) elif isinstance(date, str): date_str = 'on {0}'.format(date) t = int(time.mktime(datetime.strptime(date, "%Y-%m-%d").timetuple())) tk = datetime.strptime(date, "%Y-%m-%d") + timedelta(days=1) tk = int(time.mktime(tk.timetuple()) * 1000000) return t, tk, date_str def visits(self, date=False, n=25, ascending=False, plot=False): """Function to load all URLs from the database for a certain date or date range. 
:param str date: A date (e.g., '2016-10-15') or a date range as list (e.g., ['2016-10-15','2016-10-25']) :param int n: the number of websites that should be plotted, default = top 25; for all websites set n = None :param boolean ascending: order :param plot: boolean value :return: OrderedDict """ if date: t, tk, date_str = self._date_conv(date) else: date_str = 'between {0} and {1}'.format(self.date_range[0], self.date_range[1]) if date: visits = self._query( "SELECT url, visit_count, urls.id FROM urls, visits WHERE urls.id = visits.url_id AND visit_date >= {0} AND visit_date < {1};".format( t, tk)) else: visits = self._query("SELECT url, visit_count, urls.id FROM urls, visits WHERE urls.id = visits.url_id;") d = {} unique_id = set() for visit in visits: domain = self._stem_url(visit[0]) count = visit[1] if domain not in d.keys(): d[domain] = 0 if visit[2] not in unique_id: unique_id.add(visit[2]) d[domain] += count total_n = sum(d.values()) if n == None: n = total_n if ascending == False: title = 'Top {0} visited websites {1} (n={2})'.format(n, date_str, total_n) od = OrderedDict(sorted(d.items(), key=lambda t: t[1])[-n:]) else: title = 'Least {0} visited websites {1} (n={2})'.format(n, date_str, total_n) od = OrderedDict(sorted(d.items(), key=lambda t: t[1])[:n]) source = {'x': list(od.keys()), 'y': list(od.values()), 'perc': [round((v / total_n) * 100, 2) for v in list(od.values())]} if plot == True: self._vbarplot(source, title) else: return od def entries(self, sort_by='date', q=None): """Function to load all entries from the database. :param str sort_by: Order. Domains or frequency :param str q: Search term :param stem_urls: Boolean value. Whether to return domains or urls :return: OrderedDict """ d = {} if q == None: visits = self._query("SELECT urls.id, visit_date, url, visit_count FROM visits, urls WHERE visits.url_id = urls.id;") else: visits = self._query("SELECT urls.id, visit_date, url, visit_count FROM visits, urls WHERE visits.url_id = urls.id AND url LIKE '%{0}%';".format(q)) # Filtering URLs only visits = [(e[0], self._get_date(e[1]), e[2], e[3], e[1]) for e in visits] # Sorting if sort_by == 'domains' or sort_by == None: visits = sorted(visits, key=lambda t: t[2]) elif sort_by == 'frequency': visits = sorted(visits, key=lambda t: t[3], reverse=True) elif sort_by == 'date' or sort_by == None: visits = sorted(visits, key=lambda t: t[4], reverse=True) self._entries = visits return visits def select_domains(self, sort_by='domains', q=None, stem_urls=True): """Function to load all URLs from the database. :param str sort_by: Order. Domains or frequency :param str q: Search term :param boolean stem_urls: Whether to return domains or urls :return: OrderedDict """ d = {} if q == None: visits = self._query("SELECT id, url, visit_count FROM urls;") else: visits = self._query("SELECT id, url, visit_count FROM urls WHERE url LIKE '%{0}%';".format(q)) for visit in visits: if stem_urls: domain = self._stem_url(visit[1]) else: domain = visit[1] count = visit[2] if domain in d.keys(): d[domain]['ids'].append(visit[0]) else: d[domain] = {'ids': [], 'count': 0} d[domain]['ids'].append(visit[0]) d[domain]['count'] += count if sort_by == 'domains' or sort_by == None: od = OrderedDict(sorted(d.items(), key=lambda t: t[0])) elif sort_by == 'frequency': od = OrderedDict(sorted(d.items(), key=lambda t: t[1]['count'], reverse=True)) self._domains = od return od def search_terms(self, sort_by='keywords', q=None): """Extracts search terms from urls in the database. 
:param str sort_by: specifies how the OrderedDict should be sorted. Default is keywords. :param str q: optional argument for a specific search term :return: OrderedDict """ d = {} pat = re.compile(r'(?:\?q=|\?p=|\?query=|search?q=|\?q\d=|\&q\d=|\?k=|\?text=|\&q=|key=|\?search=|\&search=|\&searchTerm=|\?searchTerm=)([a-zA-Z0-9äöüïéàèáÜÄÖ\%\+\-\*\s\.\,]+)', re.IGNORECASE) if q: entries = self._query("SELECT id, url FROM urls WHERE url LIKE '%{0}%';".format(q)) else: entries = self._query('SELECT id, url FROM urls;') for entry in entries: domain = self._stem_url(entry[1]) matches = re.findall(pat, entry[1]) if matches: for match in matches: term = urllib.parse.unquote_plus(match) if term not in d.keys(): d[term] = {'ids': [], 'count': 1, 'urls': [domain], 'match': match} d[term]['ids'].append(entry[0]) else: d[term]['ids'].append(entry[0]) d[term]['count'] += 1 if domain not in d[term]['urls']: d[term]['urls'].append(domain) if sort_by == 'keywords' or sort_by == None: od = OrderedDict(sorted(d.items(), key=lambda t: t[0])) elif sort_by == 'frequency': od = OrderedDict(sorted(d.items(), key=lambda t: t[1]['count'], reverse=True)) self._keywords = od return od def export(self): """Writes the browsing history to a CSV file. :return: Boolean value """ data = self._query( "SELECT url_id, visits.id, url, title, rev_host, visit_count, typed, last_visit_date, redirect_urls, referrer, visit_date, visit_type FROM visits, urls WHERE visits.url_id = urls.id;") if data: data = [t + (self._browser_name, self.os_full) for t in data] header = ['url_id', 'visits_id', 'url', 'title', 'rev_host', 'visit_count', 'typed', 'last_visit_date', 'redirect_urls', 'referrer', 'visit_date', 'visit_type', 'browser', 'operation system'] with open(os.path.join(self._file_path,'tmp', 'Export_Browserverlauf.csv'), 'w', encoding='utf-8') as f: writer = csv.writer(f, delimiter=';', lineterminator='\n') writer.writerow(header) writer.writerows(data) return True else: return False def _date_range(self): """Helper function. :return: Minimum and maximum date (timestamps) """ min_date, max_date = self._query("SELECT min(visit_date), max(visit_date) FROM visits;")[0] if min_date and max_date: min_date = self._get_date(min_date) max_date = self._get_date(max_date) return (min_date, max_date) else: return (' ', ' ') def _hash_domain(self, domain): """Helper function to hash the domain. :param domain: Domain (string) :return: Hashed domain """ salt = uuid.uuid4().hex return hashlib.sha256(salt.encode() + domain.encode()).hexdigest() + '-' + salt def _get_date(self, timestamp): """Helper function to convert timestamps into date strings. :param timestamp: Timestamp :return: Date string (e.g., '13.05.2014 08:34:45') """ date = datetime.fromtimestamp(timestamp) return date.strftime('%d.%m.%Y %H:%M:%S') def _convert_timestamp(self, timestamp, browser_name=None, tz='utc'): """Helper function to convert different timestamps formats into date strings or POSIX timestamp. 
:param timestamp: Timestamp :return: POSIX timestamp (UTC) """ if browser_name == 'Chrome': date = datetime(1601, 1, 1) + timedelta(microseconds=timestamp) elif browser_name == 'IE11': date = datetime(1601, 1, 1) + timedelta(microseconds=timestamp * 0.1) elif browser_name == 'Safari': date = datetime(2001, 1, 1) + timedelta(seconds=timestamp) elif browser_name == 'Firefox': date = datetime.fromtimestamp(timestamp / 1000000) else: date = datetime.fromtimestamp(timestamp) return date.timestamp() def _get_dto(self, timestamp): """Helper function to convert a timestamp to a datetime object :param timestamp: Timestamp :return: Datetime object """ return datetime.fromtimestamp(timestamp / 1000000) def _min_date(self, num_days): """Helper function to determine the minimum date :param int num_days: Number days to go back in time :return: timestamp (UTC) """ today = datetime.today() days = timedelta(num_days) min_date = today - days return min_date.timestamp() def _stem_url(self, url): """Helper function to stem URLs. :param str url: URL :return str: Domain """ anonym_pattern = re.compile('anonymisiert-[\w]+\-[\w]+') stemmed_url = self._is_url(url, r=True) if stemmed_url: if stemmed_url[:4] == 'www.': return stemmed_url[4:] else: return stemmed_url else: # checking for domain made anonymous if re.findall(anonym_pattern, url): return re.findall(anonym_pattern, url)[0] else: # check if url is already stemmed if url[:-5:-1] == '***/': return url[:-4] else: return url def _is_url(self, url, r=False): """Helper function to check if a string is an URL. :param url: URL (string) :param r: Whether the URL should be return or not :return: URL (string) or boolean value """ url_pattern = re.compile('(?<=\:\/\/)[a-z0-9\.\-\:]+') match = re.findall(url_pattern, url) if match: if r: return match[0] else: return True else: return False def _root_path(self, relative_path): """Helper function for path handling after bundling with pyinstaller. :param str: relative path """ # Adapted from max' StackOverflow answer: # https://stackoverflow.com/questions/7674790/bundling-data-files-with-pyinstaller-onefile/13790741#13790741 # on 2017-07-31 try: base_path = sys._MEIPASS except Exception: base_path = os.path.abspath('.') return os.path.join(base_path, relative_path) def root_path(relative_path): """Helper function for path handling after app bundling :param str: relative path """ # Adapted from StackOverflow answer: https://stackoverflow.com/questions/7674790/bundling-data-files-with-pyinstaller-onefile/13790741#13790741; 2017-07-31 try: base_path = sys._MEIPASS except Exception: base_path = os.path.abspath('.') return os.path.join(base_path, relative_path) if not os.path.isdir(root_path('uploads')): os.mkdir(root_path('uploads')) ALLOWED_EXTENSIONS = set(['sqlite', 'dat', 'plist', 'History', 'db']) FILE_PATH = os.path.split(os.path.realpath(__file__))[0] bh = BrowsingHistory() app = Flask(__name__, root_path=root_path('.')) app.secret_key = '8927-bdjbj20AWER$_' #app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER def allowed_file(filename): # Taken from Flask doc example: http://flask.pocoo.org/docs/0.12/ if '.' in filename: return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS else: return filename in ALLOWED_EXTENSIONS def _os_name(_os_full): pat = re.compile(r'(?<=Darwin\s)\d{1,2}') match = re.findall(pat, _os_full) if match: if match[0] == '10': return 'Mac OS X Snow Leopard' elif match[0] == '11': return 'Mac OS X Lion' elif match[0] == '12': return 'OS X Mountain Lion' elif match[0] == '13': return 'OS X Mavericks' elif match[0] == '14': return 'OS X Yosemite' elif match[0] == '15': return 'OS X El Capitan' elif match[0] == '16': return 'macOS Sierra' elif match[0] == '17': return 'macOS High Sierra' else: return _os_full @app.route('/exit') def shutdown_server(): # Taken from Flask doc example: http://flask.pocoo.org/docs/0.12/ func = request.environ.get('werkzeug.server.shutdown') if func is None: raise RuntimeError('Not running with the Werkzeug Server') func() return 'Das Programm wurde beendet. Sie können das Fenster schliessen.' @app.route('/', methods=['GET', 'POST']) def index(): # Adapted from Flask doc example: http://flask.pocoo.org/docs/0.12/ os_name = _os_name(bh.os_full) if request.method == 'GET': if request.args.get('load'): return render_template('index.html', os=os_name) elif request.args.get('notFound'): flash('Der Browserverlauf wurde nicht gefunden. Bitte wählen Sie die Datei manuell aus.') not_found = True return render_template('index.html', os=os_name) elif request.args.get('fileError'): flash('Die Datei konnte nicht gelesen werden.') not_found = True return render_template('index.html', os=os_name) else: if bh.ready: return redirect(url_for('dashboard')) else: return render_template('index.html', os=os_name) elif request.method == 'POST': browser_name = request.form.get('browser_name') if 'file' in request.files: # check if the post request has the file part if 'file' not in request.files: flash('No file part') return redirect(url_for('index', notFound=True, os=os_name)) file = request.files['file'] # if user does not select file, browser also # submit a empty part without filename if file.filename == '': flash('No selected file') return redirect(url_for('index', notFound=True, os=os_name)) if file and allowed_file(file.filename): if len(os.listdir(root_path('uploads'))) >= 1: for f in os.listdir(root_path('uploads')): os.remove(os.path.join(root_path('uploads'), f)) filename = secure_filename(file.filename) file.save(os.path.join(root_path('uploads'), filename)) state = bh.load(file_path=os.path.join(root_path('uploads'), filename), browser_name=browser_name) else: return redirect(url_for('index', fileError=True, os=os_name)) else: state = bh.load(browser_name=browser_name) if state: return redirect(url_for('dashboard')) else: return redirect(url_for('index', notFound=True, client=browser_name, os=os_name)) @app.route('/load') def load(): return redirect(url_for('index', load=True)) @app.route('/dashboard') def dashboard(): if bh.ready == False: return redirect(url_for('index')) else: date_range = bh.date_range num_domains = bh.num_domains browser_name = bh._browser_name top_10 = bh.visits(date = False, n = 10, ascending = False, plot = False) top_10 = OrderedDict(sorted(top_10.items(), key=lambda t: t[1], reverse=True)) return render_template('dashboard.html', date_range=date_range, num_domains=num_domains, browser_name=browser_name, top_10=top_10) @app.route('/select', methods=['GET', 'POST']) def select(): if request.method == 'POST': selection = request.form.getlist('check') for domain in selection: bh._update_db(domain, kind='domains') domains = 
bh.select_domains() elif request.method == 'GET': if request.args.get('sort') or request.args.get('q'): domains = bh.select_domains(sort_by=request.args.get('sort'), q=request.args.get('q')) else: domains = bh.select_domains() return render_template('select_domains.html', domains=domains) @app.route('/search-terms', methods=['POST', 'GET']) def search_terms(): if request.method == 'POST': selection = request.form.getlist('check') for search_term in selection: bh._update_db(search_term, kind='keywords') search_terms = bh.search_terms() elif request.method == 'GET': if request.args.get('sort') or request.args.get('q'): search_terms = bh.search_terms(sort_by=request.args.get('sort'), q=request.args.get('q')) else: search_terms = bh.search_terms() return render_template('search_terms.html', search_terms=search_terms) @app.route('/export') def export(): # Adapted from Flask doc example: http://flask.pocoo.org/docs/0.12/ if bh.export(): return send_from_directory(os.path.join(FILE_PATH, 'tmp'), 'Export_Browserverlauf.csv', as_attachment=True) else: flash('Export nicht möglich. Bitte laden Sie zuerst einen Browserverlauf.') return render_template('index.html', os=' '.join([bh.os, bh.os_release])) @app.route('/log') def get_log(): # Adapted from Flask doc example: http://flask.pocoo.org/docs/0.12/ if 'server.log' in os.listdir(os.path.join(FILE_PATH, 'tmp')): return send_from_directory(os.path.join(FILE_PATH, 'tmp'), 'server.log', as_attachment=True) else: flash('Es ist kein Log-File gefunden worden.') return render_template('index.html', os=' '.join([bh.os, bh.os_release])) @app.route('/faqs') def faqs(): return render_template('faqs.html') @app.route('/contact') def contact(): return render_template('contact.html') @app.route('/entries', methods=['POST', 'GET']) def list_entries(): if request.method == 'GET': if request.args.get('sort') or request.args.get('q'): urls = bh.entries(sort_by=request.args.get('sort'), q=request.args.get('q')) else: urls = bh.entries() elif request.method == 'POST': selection = request.form.getlist('check') bh._update_db(selection, kind='urls') urls = bh.entries() return render_template('entries.html', domains=urls) @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 @app.errorhandler(405) def page_not_found(e): return render_template('405.html'), 405 @app.errorhandler(500) def page_not_found(e): return render_template('500.html'), 500 if __name__ == '__main__': print('STATUS: BrowsingHistoryEditor wird gestartet ...') if not app.debug: import logging from logging import FileHandler file_handler = FileHandler(os.path.join(FILE_PATH, 'tmp', 'server.log')) file_handler.setLevel(logging.WARNING) app.logger.addHandler(file_handler) logging.basicConfig(filename=os.path.join(FILE_PATH, 'tmp', 'server.log'), level=logging.DEBUG) webbrowser.open('http://localhost:5000', new=2) print('STATUS: BrowsingHistoryEditor läuft auf http://localhost:5000 (Drücken Sie CTRL+C, um das Programm zu beenden)') app.run(host='localhost', port=5000, debug=False)
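

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original program: how _update_db() and
# _hash_domain() above build the replacement value for an anonymised domain.
# The url column of the selected row is overwritten with an identifier of the
# form 'anonymisiert-<sha256(salt + domain)>-<salt>-<url id>', which is what
# the 'anonymisiert-...' pattern in _stem_url() recognises later on.
# `_demo_anonymise` is a hypothetical helper name.
def _demo_anonymise(domain, url_id):
    # Local imports so the sketch stands on its own; the application module
    # itself already relies on these in _hash_domain().
    import hashlib
    import uuid
    salt = uuid.uuid4().hex
    hashed = hashlib.sha256(salt.encode() + domain.encode()).hexdigest() + '-' + salt
    return '{0}-{1}-{2}'.format('anonymisiert', hashed, url_id)

# For example, _demo_anonymise('example.org', 42) yields something like
# 'anonymisiert-<64 hex chars>-<32 hex chars>-42'.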
{ "content_hash": "5fdce0d6ca23a5b3f8734f027bd63904", "timestamp": "", "source": "github", "line_count": 1058, "max_line_length": 273, "avg_line_length": 42.291115311909266, "alnum_prop": 0.5147058823529411, "repo_name": "grwllrnc/BrowsingHistoryEditor", "id": "9657f4d4240ba08fd015f17d8facc14e1e819a80", "size": "44796", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "main.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3329" }, { "name": "HTML", "bytes": "23259" }, { "name": "Python", "bytes": "44796" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_ips_global short_description: Configure IPS global parameter in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify ips feature and global category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true version_added: 2.9 ips_global: description: - Configure IPS global parameter. default: null type: dict suboptions: anomaly_mode: description: - Global blocking mode for rate-based anomalies. type: str choices: - periodical - continuous database: description: - Regular or extended IPS database. Regular protects against the latest common and in-the-wild attacks. Extended includes protection from legacy attacks. type: str choices: - regular - extended deep_app_insp_db_limit: description: - Limit on number of entries in deep application inspection database (1 - 2147483647, 0 = use recommended setting) type: int deep_app_insp_timeout: description: - Timeout for Deep application inspection (1 - 2147483647 sec., 0 = use recommended setting). type: int engine_count: description: - Number of IPS engines running. If set to the default value of 0, FortiOS sets the number to optimize performance depending on the number of CPU cores. type: int exclude_signatures: description: - Excluded signatures. type: str choices: - none - industrial fail_open: description: - Enable to allow traffic if the IPS process crashes. Default is disable and IPS traffic is blocked when the IPS process crashes. type: str choices: - enable - disable intelligent_mode: description: - Enable/disable IPS adaptive scanning (intelligent mode). 
Intelligent mode optimizes the scanning method for the type of traffic. type: str choices: - enable - disable session_limit_mode: description: - Method of counting concurrent sessions used by session limit anomalies. Choose between greater accuracy (accurate) or improved performance (heuristics). type: str choices: - accurate - heuristic skype_client_public_ipaddr: description: - Public IP addresses of your network that receive Skype sessions. Helps identify Skype sessions. Separate IP addresses with commas. type: str socket_size: description: - IPS socket buffer size (0 - 256 MB). Default depends on available memory. Can be changed to tune performance. type: int sync_session_ttl: description: - Enable/disable use of kernel session TTL for IPS sessions. type: str choices: - enable - disable traffic_submit: description: - Enable/disable submitting attack data found by this FortiGate to FortiGuard. type: str choices: - enable - disable ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure IPS global parameter. fortios_ips_global: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" ips_global: anomaly_mode: "periodical" database: "regular" deep_app_insp_db_limit: "5" deep_app_insp_timeout: "6" engine_count: "7" exclude_signatures: "none" fail_open: "enable" intelligent_mode: "enable" session_limit_mode: "accurate" skype_client_public_ipaddr: "<your_own_value>" socket_size: "13" sync_session_ttl: "enable" traffic_submit: "enable" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_ips_global_data(json): option_list = ['anomaly_mode', 'database', 'deep_app_insp_db_limit', 'deep_app_insp_timeout', 'engine_count', 'exclude_signatures', 'fail_open', 'intelligent_mode', 'session_limit_mode', 
'skype_client_public_ipaddr', 'socket_size', 'sync_session_ttl', 'traffic_submit'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def ips_global(data, fos): vdom = data['vdom'] ips_global_data = data['ips_global'] filtered_data = underscore_to_hyphen(filter_ips_global_data(ips_global_data)) return fos.set('ips', 'global', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_ips(data, fos): if data['ips_global']: resp = ips_global(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "ips_global": { "required": False, "type": "dict", "default": None, "options": { "anomaly_mode": {"required": False, "type": "str", "choices": ["periodical", "continuous"]}, "database": {"required": False, "type": "str", "choices": ["regular", "extended"]}, "deep_app_insp_db_limit": {"required": False, "type": "int"}, "deep_app_insp_timeout": {"required": False, "type": "int"}, "engine_count": {"required": False, "type": "int"}, "exclude_signatures": {"required": False, "type": "str", "choices": ["none", "industrial"]}, "fail_open": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "intelligent_mode": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "session_limit_mode": {"required": False, "type": "str", "choices": ["accurate", "heuristic"]}, "skype_client_public_ipaddr": {"required": False, "type": "str"}, "socket_size": {"required": False, "type": "int"}, "sync_session_ttl": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "traffic_submit": {"required": False, "type": "str", "choices": ["enable", "disable"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_ips(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_ips(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
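

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: how the task's
# ips_global options are shaped before being pushed to the device. The module
# drops unset options with filter_ips_global_data() and re-keys the rest with
# underscore_to_hyphen() before ips_global() calls
# fos.set('ips', 'global', data=...). `_example_payload` is a hypothetical
# helper and the option values below are made up.
def _example_payload():
    task_params = {
        'anomaly_mode': 'periodical',
        'database': None,             # unset options are dropped by the filter
        'socket_size': 13,
        'sync_session_ttl': 'enable',
    }
    return underscore_to_hyphen(filter_ips_global_data(task_params))
    # -> {'anomaly-mode': 'periodical', 'socket-size': 13,
    #     'sync-session-ttl': 'enable'}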
{ "content_hash": "f59291213b6dc93ae59722b4b8811150", "timestamp": "", "source": "github", "line_count": 400, "max_line_length": 158, "avg_line_length": 34.43, "alnum_prop": 0.5650595410978797, "repo_name": "thaim/ansible", "id": "43134ff3322fef92d0a5dcda448321acefbf9b44", "size": "13790", "binary": false, "copies": "14", "ref": "refs/heads/fix-broken-link", "path": "lib/ansible/modules/network/fortios/fortios_ips_global.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7" }, { "name": "Shell", "bytes": "246" } ], "symlink_target": "" }
from django.test import TestCase from django.core.urlresolvers import reverse from django.contrib.auth.models import User from friends.models import FriendshipRequest, Friendship, UserBlocks from friends.templatetags import friends_tags class BaseTestCase(TestCase): fixtures = ['test_data.json'] def setUp(self): for i in range(1, 5): setattr(self, 'user%d' % i, User.objects.get(username='testuser%d' % i)) class BlocksFilterTestCase(BaseTestCase): def test_blocks_filter(self): result = friends_tags.blocks(self.user4) self.assertTrue(isinstance(result, dict)) self.assertTrue('applied' in result) self.assertEqual(len(result['applied']), 2) self.assertTrue(self.user1 in result['applied']) self.assertTrue(self.user3 in result['applied']) self.assertTrue('received' in result) self.assertEqual(len(result['received']), 2) self.assertTrue(self.user1 in result['received']) self.assertTrue(self.user2 in result['received']) class FriendsFilterTestCase(BaseTestCase): def test_friends_filter(self): result = friends_tags.friends_(self.user1) self.assertEqual(len(result), 1) self.assertTrue(self.user2 in result) class FriendshipModelsTestCase(BaseTestCase): def test_friendship_request(self): are_friends = Friendship.objects.are_friends for method, result in [('decline', False), ('cancel', False), ('accept', True)]: friendship_request = FriendshipRequest.objects.create( from_user=self.user3, to_user=self.user4) self.assertEqual(are_friends(self.user3, self.user4), False) getattr(friendship_request, method)() self.assertEqual(are_friends(self.user3, self.user4), result) def test_friendship_manager_query_methods(self): self.assertEqual(Friendship.objects.are_friends(self.user1, self.user2), True) self.assertEqual(Friendship.objects.are_friends(self.user1, self.user3), False) self.assertEqual(Friendship.objects.are_friends(self.user2, self.user3), False) friends_of_user1 = Friendship.objects.friends_of(self.user1) friends_of_user2 = Friendship.objects.friends_of(self.user2) friends_of_user3 = Friendship.objects.friends_of(self.user3) self.assertEqual(list(friends_of_user1), [self.user2]) self.assertEqual(list(friends_of_user2), [self.user1]) self.assertEqual(list(friends_of_user3), []) def test_friendship_manager_befriend(self): Friendship.objects.befriend(self.user1, self.user4) self.assertEqual(Friendship.objects.are_friends(self.user1, self.user4), True) def test_friendship_manager_unfriend(self): Friendship.objects.unfriend(self.user1, self.user2) self.assertEqual(Friendship.objects.are_friends(self.user1, self.user2), False) class FriendshipRequestsFilterTestCase(BaseTestCase): def test_friendship_requests_filter(self): FriendshipRequest.objects.create(from_user=self.user1, to_user=self.user3) FriendshipRequest.objects.create(from_user=self.user4, to_user=self.user1) result = friends_tags.friendship_requests(self.user1) # result['sent'] shouldn't contain user2 because they # are already friends. 
        self.assertEqual(len(result['sent']), 1)
        self.assertTrue(self.user3 in result['sent'])
        self.assertFalse(self.user2 in result['sent'])
        self.assertEqual(len(result['received']), 1)
        self.assertTrue(self.user4 in result['received'])


class FriendshipViewsTestCase(BaseTestCase):
    urls = 'friends.urls'

    def test_friendship_request(self):
        self.client.login(username='testuser1', password='testuser1')
        self.client.get(reverse('friendship_request', args=('testuser3',)))
        self.assertEqual(Friendship.objects.are_friends(self.user1,
                                                        self.user3), False)
        self.assertEqual(FriendshipRequest.objects.filter(from_user=self.user1,
                                                          to_user=self.user3,
                                                          accepted=False).count(), 1)

    def test_friendship_accept(self):
        FriendshipRequest.objects.create(from_user=self.user1,
                                         to_user=self.user3)
        self.client.login(username='testuser3', password='testuser3')
        self.client.get(reverse('friendship_accept', args=('testuser1',)))
        self.assertEqual(FriendshipRequest.objects.filter(
                                              accepted=True).count(), 2)
        self.assertEqual(Friendship.objects.are_friends(self.user1,
                                                        self.user3), True)

    def test_friendship_cancel(self):
        FriendshipRequest.objects.create(from_user=self.user1,
                                         to_user=self.user3)
        self.client.login(username='testuser1', password='testuser1')
        self.client.get(reverse('friendship_cancel', args=('testuser3',)))
        self.assertEqual(FriendshipRequest.objects.filter(
                                              accepted=False).count(), 0)
        self.assertEqual(Friendship.objects.are_friends(self.user1,
                                                        self.user3), False)

    def test_friendship_decline(self):
        FriendshipRequest.objects.create(from_user=self.user1,
                                         to_user=self.user3)
        self.client.login(username='testuser3', password='testuser3')
        self.client.get(reverse('friendship_decline', args=('testuser1',)))
        self.assertEqual(FriendshipRequest.objects.filter(
                                              accepted=False).count(), 0)
        self.assertEqual(Friendship.objects.are_friends(self.user1,
                                                        self.user3), False)

    def test_friendship_delete(self):
        self.client.login(username='testuser1', password='testuser1')
        self.client.get(reverse('friendship_delete', args=('testuser2',)))
        self.assertEqual(Friendship.objects.are_friends(self.user1,
                                                        self.user2), False)

    def test_friendship_mutual_request(self):
        self.client.login(username='testuser1', password='testuser1')
        self.client.get(reverse('friendship_request', args=('testuser3',)))
        self.assertEqual(Friendship.objects.are_friends(self.user1,
                                                        self.user3), False)
        self.client.login(username='testuser3', password='testuser3')
        self.client.get(reverse('friendship_request', args=('testuser1',)))
        self.assertEqual(FriendshipRequest.objects.filter(from_user=self.user1,
                                                          to_user=self.user3,
                                                          accepted=True).count(), 1)
        self.assertEqual(Friendship.objects.are_friends(self.user1,
                                                        self.user3), True)


class UserBlockTestCase(BaseTestCase):
    def test_blocking_info_methods(self):
        self.user1.user_blocks.blocks.add(self.user3, self.user4)
        self.assertEqual(self.user1.user_blocks.block_count(), 2)
        summary = UserBlocks.objects.get(user=self.user1).block_summary()
        self.assertEqual(self.user3.username in summary, True)
        self.assertEqual(self.user4.username in summary, True)


class UserBlocksViewsTestCase(BaseTestCase):
    urls = 'friends.urls'

    def test_block(self):
        self.client.login(username='testuser1', password='testuser1')
        self.client.get(reverse('user_block', args=('testuser2',)))
        self.assertEqual(self.user2 in self.user1.user_blocks.blocks.all(),
                         True)

    def test_unblock(self):
        self.user1.user_blocks.blocks.add(self.user2)
        self.client.login(username='testuser1', password='testuser1')
        self.client.get(reverse('user_unblock', args=('testuser2',)))
        self.assertEqual(self.user2 in self.user1.user_blocks.blocks.all(),
                         False)
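

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test suite: the request/accept
# lifecycle that FriendshipModelsTestCase exercises above, condensed into one
# hypothetical helper. `alice` and `bob` stand in for two User instances.
def _demo_request_lifecycle(alice, bob):
    request = FriendshipRequest.objects.create(from_user=alice, to_user=bob)
    # While the request is pending the users are not friends yet.
    assert not Friendship.objects.are_friends(alice, bob)
    request.accept()   # .decline() or .cancel() would discard it instead
    assert Friendship.objects.are_friends(alice, bob)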
{ "content_hash": "229e0fb23e731819ad20517fa4ff3f70", "timestamp": "", "source": "github", "line_count": 173, "max_line_length": 79, "avg_line_length": 49.38150289017341, "alnum_prop": 0.5890202504974833, "repo_name": "muhuk/django-simple-friends", "id": "8728cb15aa382a53713a9f03303783cba920e6c1", "size": "8543", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "friends/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "51102" } ], "symlink_target": "" }
r""" Sparse distributed elements of free modules over multivariate (generalized) polynomial rings. This code and its data structures are very much like the distributed polynomials, except that the first "exponent" of the monomial is a module generator index. That is, the multi-exponent ``(i, e_1, ..., e_n)`` represents the "monomial" `x_1^{e_1} \cdots x_n^{e_n} f_i` of the free module `F` generated by `f_1, \ldots, f_r` over (a localization of) the ring `K[x_1, \ldots, x_n]`. A module element is simply stored as a list of terms ordered by the monomial order. Here a term is a pair of a multi-exponent and a coefficient. In general, this coefficient should never be zero (since it can then be omitted). The zero module element is stored as an empty list. The main routines are ``sdm_nf_mora`` and ``sdm_groebner`` which can be used to compute, respectively, weak normal forms and standard bases. They work with arbitrary (not necessarily global) monomial orders. In general, product orders have to be used to construct valid monomial orders for modules. However, ``lex`` can be used as-is. Note that the "level" (number of variables, i.e. parameter u+1 in distributedpolys.py) is never needed in this code. The main reference for this file is [SCA], "A Singular Introduction to Commutative Algebra". """ from __future__ import print_function, division from itertools import permutations from sympy.polys.monomials import ( monomial_mul, monomial_lcm, monomial_div, monomial_deg ) from sympy.polys.polytools import Poly from sympy.polys.polyutils import parallel_dict_from_expr from sympy import S, sympify from sympy.core.compatibility import range # Additional monomial tools. def sdm_monomial_mul(M, X): """ Multiply tuple ``X`` representing a monomial of `K[X]` into the tuple ``M`` representing a monomial of `F`. Examples ======== Multiplying `xy^3` into `x f_1` yields `x^2 y^3 f_1`: >>> from sympy.polys.distributedmodules import sdm_monomial_mul >>> sdm_monomial_mul((1, 1, 0), (1, 3)) (1, 2, 3) """ return (M[0],) + monomial_mul(X, M[1:]) def sdm_monomial_deg(M): """ Return the total degree of ``M``. Examples ======== For example, the total degree of `x^2 y f_5` is 3: >>> from sympy.polys.distributedmodules import sdm_monomial_deg >>> sdm_monomial_deg((5, 2, 1)) 3 """ return monomial_deg(M[1:]) def sdm_monomial_lcm(A, B): r""" Return the "least common multiple" of ``A`` and ``B``. IF `A = M e_j` and `B = N e_j`, where `M` and `N` are polynomial monomials, this returns `\lcm(M, N) e_j`. Note that ``A`` and ``B`` involve distinct monomials. Otherwise the result is undefined. Examples ======== >>> from sympy.polys.distributedmodules import sdm_monomial_lcm >>> sdm_monomial_lcm((1, 2, 3), (1, 0, 5)) (1, 2, 5) """ return (A[0],) + monomial_lcm(A[1:], B[1:]) def sdm_monomial_divides(A, B): """ Does there exist a (polynomial) monomial X such that XA = B? Examples ======== Positive examples: In the following examples, the monomial is given in terms of x, y and the generator(s), f_1, f_2 etc. The tuple form of that monomial is used in the call to sdm_monomial_divides. Note: the generator appears last in the expression but first in the tuple and other factors appear in the same order that they appear in the monomial expression. 
`A = f_1` divides `B = f_1` >>> from sympy.polys.distributedmodules import sdm_monomial_divides >>> sdm_monomial_divides((1, 0, 0), (1, 0, 0)) True `A = f_1` divides `B = x^2 y f_1` >>> sdm_monomial_divides((1, 0, 0), (1, 2, 1)) True `A = xy f_5` divides `B = x^2 y f_5` >>> sdm_monomial_divides((5, 1, 1), (5, 2, 1)) True Negative examples: `A = f_1` does not divide `B = f_2` >>> sdm_monomial_divides((1, 0, 0), (2, 0, 0)) False `A = x f_1` does not divide `B = f_1` >>> sdm_monomial_divides((1, 1, 0), (1, 0, 0)) False `A = xy^2 f_5` does not divide `B = y f_5` >>> sdm_monomial_divides((5, 1, 2), (5, 0, 1)) False """ return A[0] == B[0] and all(a <= b for a, b in zip(A[1:], B[1:])) # The actual distributed modules code. def sdm_LC(f, K): """Returns the leading coeffcient of ``f``. """ if not f: return K.zero else: return f[0][1] def sdm_to_dict(f): """Make a dictionary from a distributed polynomial. """ return dict(f) def sdm_from_dict(d, O): """ Create an sdm from a dictionary. Here ``O`` is the monomial order to use. Examples ======== >>> from sympy.polys.distributedmodules import sdm_from_dict >>> from sympy.polys import QQ, lex >>> dic = {(1, 1, 0): QQ(1), (1, 0, 0): QQ(2), (0, 1, 0): QQ(0)} >>> sdm_from_dict(dic, lex) [((1, 1, 0), 1), ((1, 0, 0), 2)] """ return sdm_strip(sdm_sort(list(d.items()), O)) def sdm_sort(f, O): """Sort terms in ``f`` using the given monomial order ``O``. """ return sorted(f, key=lambda term: O(term[0]), reverse=True) def sdm_strip(f): """Remove terms with zero coefficients from ``f`` in ``K[X]``. """ return [ (monom, coeff) for monom, coeff in f if coeff ] def sdm_add(f, g, O, K): """ Add two module elements ``f``, ``g``. Addition is done over the ground field ``K``, monomials are ordered according to ``O``. Examples ======== All examples use lexicographic order. `(xy f_1) + (f_2) = f_2 + xy f_1` >>> from sympy.polys.distributedmodules import sdm_add >>> from sympy.polys import lex, QQ >>> sdm_add([((1, 1, 1), QQ(1))], [((2, 0, 0), QQ(1))], lex, QQ) [((2, 0, 0), 1), ((1, 1, 1), 1)] `(xy f_1) + (-xy f_1)` = 0` >>> sdm_add([((1, 1, 1), QQ(1))], [((1, 1, 1), QQ(-1))], lex, QQ) [] `(f_1) + (2f_1) = 3f_1` >>> sdm_add([((1, 0, 0), QQ(1))], [((1, 0, 0), QQ(2))], lex, QQ) [((1, 0, 0), 3)] `(yf_1) + (xf_1) = xf_1 + yf_1` >>> sdm_add([((1, 0, 1), QQ(1))], [((1, 1, 0), QQ(1))], lex, QQ) [((1, 1, 0), 1), ((1, 0, 1), 1)] """ h = dict(f) for monom, c in g: if monom in h: coeff = h[monom] + c if not coeff: del h[monom] else: h[monom] = coeff else: h[monom] = c return sdm_from_dict(h, O) def sdm_LM(f): r""" Returns the leading monomial of ``f``. Only valid if `f \ne 0`. Examples ======== >>> from sympy.polys.distributedmodules import sdm_LM, sdm_from_dict >>> from sympy.polys import QQ, lex >>> dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(1), (4, 0, 1): QQ(1)} >>> sdm_LM(sdm_from_dict(dic, lex)) (4, 0, 1) """ return f[0][0] def sdm_LT(f): r""" Returns the leading term of ``f``. Only valid if `f \ne 0`. Examples ======== >>> from sympy.polys.distributedmodules import sdm_LT, sdm_from_dict >>> from sympy.polys import QQ, lex >>> dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(2), (4, 0, 1): QQ(3)} >>> sdm_LT(sdm_from_dict(dic, lex)) ((4, 0, 1), 3) """ return f[0] def sdm_mul_term(f, term, O, K): """ Multiply a distributed module element ``f`` by a (polynomial) term ``term``. Multiplication of coefficients is done over the ground field ``K``, and monomials are ordered according to ``O``. 
Examples ======== `0 f_1 = 0` >>> from sympy.polys.distributedmodules import sdm_mul_term >>> from sympy.polys import lex, QQ >>> sdm_mul_term([((1, 0, 0), QQ(1))], ((0, 0), QQ(0)), lex, QQ) [] `x 0 = 0` >>> sdm_mul_term([], ((1, 0), QQ(1)), lex, QQ) [] `(x) (f_1) = xf_1` >>> sdm_mul_term([((1, 0, 0), QQ(1))], ((1, 0), QQ(1)), lex, QQ) [((1, 1, 0), 1)] `(2xy) (3x f_1 + 4y f_2) = 8xy^2 f_2 + 6x^2y f_1` >>> f = [((2, 0, 1), QQ(4)), ((1, 1, 0), QQ(3))] >>> sdm_mul_term(f, ((1, 1), QQ(2)), lex, QQ) [((2, 1, 2), 8), ((1, 2, 1), 6)] """ X, c = term if not f or not c: return [] else: if K.is_one(c): return [ (sdm_monomial_mul(f_M, X), f_c) for f_M, f_c in f ] else: return [ (sdm_monomial_mul(f_M, X), f_c * c) for f_M, f_c in f ] def sdm_zero(): """Return the zero module element.""" return [] def sdm_deg(f): """ Degree of ``f``. This is the maximum of the degrees of all its monomials. Invalid if ``f`` is zero. Examples ======== >>> from sympy.polys.distributedmodules import sdm_deg >>> sdm_deg([((1, 2, 3), 1), ((10, 0, 1), 1), ((2, 3, 4), 4)]) 7 """ return max(sdm_monomial_deg(M[0]) for M in f) # Conversion def sdm_from_vector(vec, O, K, **opts): """ Create an sdm from an iterable of expressions. Coefficients are created in the ground field ``K``, and terms are ordered according to monomial order ``O``. Named arguments are passed on to the polys conversion code and can be used to specify for example generators. Examples ======== >>> from sympy.polys.distributedmodules import sdm_from_vector >>> from sympy.abc import x, y, z >>> from sympy.polys import QQ, lex >>> sdm_from_vector([x**2+y**2, 2*z], lex, QQ) [((1, 0, 0, 1), 2), ((0, 2, 0, 0), 1), ((0, 0, 2, 0), 1)] """ dics, gens = parallel_dict_from_expr(sympify(vec), **opts) dic = {} for i, d in enumerate(dics): for k, v in d.items(): dic[(i,) + k] = K.convert(v) return sdm_from_dict(dic, O) def sdm_to_vector(f, gens, K, n=None): """ Convert sdm ``f`` into a list of polynomial expressions. The generators for the polynomial ring are specified via ``gens``. The rank of the module is guessed, or passed via ``n``. The ground field is assumed to be ``K``. Examples ======== >>> from sympy.polys.distributedmodules import sdm_to_vector >>> from sympy.abc import x, y, z >>> from sympy.polys import QQ, lex >>> f = [((1, 0, 0, 1), QQ(2)), ((0, 2, 0, 0), QQ(1)), ((0, 0, 2, 0), QQ(1))] >>> sdm_to_vector(f, [x, y, z], QQ) [x**2 + y**2, 2*z] """ dic = sdm_to_dict(f) dics = {} for k, v in dic.items(): dics.setdefault(k[0], []).append((k[1:], v)) n = n or len(dics) res = [] for k in range(n): if k in dics: res.append(Poly(dict(dics[k]), gens=gens, domain=K).as_expr()) else: res.append(S.Zero) return res # Algorithms. def sdm_spoly(f, g, O, K, phantom=None): """ Compute the generalized s-polynomial of ``f`` and ``g``. The ground field is assumed to be ``K``, and monomials ordered according to ``O``. This is invalid if either of ``f`` or ``g`` is zero. If the leading terms of `f` and `g` involve different basis elements of `F`, their s-poly is defined to be zero. Otherwise it is a certain linear combination of `f` and `g` in which the leading terms cancel. See [SCA, defn 2.3.6] for details. If ``phantom`` is not ``None``, it should be a pair of module elements on which to perform the same operation(s) as on ``f`` and ``g``. The in this case both results are returned. 
Examples ======== >>> from sympy.polys.distributedmodules import sdm_spoly >>> from sympy.polys import QQ, lex >>> f = [((2, 1, 1), QQ(1)), ((1, 0, 1), QQ(1))] >>> g = [((2, 3, 0), QQ(1))] >>> h = [((1, 2, 3), QQ(1))] >>> sdm_spoly(f, h, lex, QQ) [] >>> sdm_spoly(f, g, lex, QQ) [((1, 2, 1), 1)] """ if not f or not g: return sdm_zero() LM1 = sdm_LM(f) LM2 = sdm_LM(g) if LM1[0] != LM2[0]: return sdm_zero() LM1 = LM1[1:] LM2 = LM2[1:] lcm = monomial_lcm(LM1, LM2) m1 = monomial_div(lcm, LM1) m2 = monomial_div(lcm, LM2) c = K.quo(-sdm_LC(f, K), sdm_LC(g, K)) r1 = sdm_add(sdm_mul_term(f, (m1, K.one), O, K), sdm_mul_term(g, (m2, c), O, K), O, K) if phantom is None: return r1 r2 = sdm_add(sdm_mul_term(phantom[0], (m1, K.one), O, K), sdm_mul_term(phantom[1], (m2, c), O, K), O, K) return r1, r2 def sdm_ecart(f): """ Compute the ecart of ``f``. This is defined to be the difference of the total degree of `f` and the total degree of the leading monomial of `f` [SCA, defn 2.3.7]. Invalid if f is zero. Examples ======== >>> from sympy.polys.distributedmodules import sdm_ecart >>> sdm_ecart([((1, 2, 3), 1), ((1, 0, 1), 1)]) 0 >>> sdm_ecart([((2, 2, 1), 1), ((1, 5, 1), 1)]) 3 """ return sdm_deg(f) - sdm_monomial_deg(sdm_LM(f)) def sdm_nf_mora(f, G, O, K, phantom=None): r""" Compute a weak normal form of ``f`` with respect to ``G`` and order ``O``. The ground field is assumed to be ``K``, and monomials ordered according to ``O``. Weak normal forms are defined in [SCA, defn 2.3.3]. They are not unique. This function deterministically computes a weak normal form, depending on the order of `G`. The most important property of a weak normal form is the following: if `R` is the ring associated with the monomial ordering (if the ordering is global, we just have `R = K[x_1, \ldots, x_n]`, otherwise it is a certain localization thereof), `I` any ideal of `R` and `G` a standard basis for `I`, then for any `f \in R`, we have `f \in I` if and only if `NF(f | G) = 0`. This is the generalized Mora algorithm for computing weak normal forms with respect to arbitrary monomial orders [SCA, algorithm 2.3.9]. If ``phantom`` is not ``None``, it should be a pair of "phantom" arguments on which to perform the same computations as on ``f``, ``G``, both results are then returned. """ from itertools import repeat h = f T = list(G) if phantom is not None: # "phantom" variables with suffix p hp = phantom[0] Tp = list(phantom[1]) phantom = True else: Tp = repeat([]) phantom = False while h: # TODO better data structure!!! Th = [(g, sdm_ecart(g), gp) for g, gp in zip(T, Tp) if sdm_monomial_divides(sdm_LM(g), sdm_LM(h))] if not Th: break g, _, gp = min(Th, key=lambda x: x[1]) if sdm_ecart(g) > sdm_ecart(h): T.append(h) if phantom: Tp.append(hp) if phantom: h, hp = sdm_spoly(h, g, O, K, phantom=(hp, gp)) else: h = sdm_spoly(h, g, O, K) if phantom: return h, hp return h def sdm_nf_buchberger(f, G, O, K, phantom=None): r""" Compute a weak normal form of ``f`` with respect to ``G`` and order ``O``. The ground field is assumed to be ``K``, and monomials ordered according to ``O``. This is the standard Buchberger algorithm for computing weak normal forms with respect to *global* monomial orders [SCA, algorithm 1.6.10]. If ``phantom`` is not ``None``, it should be a pair of "phantom" arguments on which to perform the same computations as on ``f``, ``G``, both results are then returned. 
""" from itertools import repeat h = f T = list(G) if phantom is not None: # "phantom" variables with suffix p hp = phantom[0] Tp = list(phantom[1]) phantom = True else: Tp = repeat([]) phantom = False while h: try: g, gp = next((g, gp) for g, gp in zip(T, Tp) if sdm_monomial_divides(sdm_LM(g), sdm_LM(h))) except StopIteration: break if phantom: h, hp = sdm_spoly(h, g, O, K, phantom=(hp, gp)) else: h = sdm_spoly(h, g, O, K) if phantom: return h, hp return h def sdm_nf_buchberger_reduced(f, G, O, K): r""" Compute a reduced normal form of ``f`` with respect to ``G`` and order ``O``. The ground field is assumed to be ``K``, and monomials ordered according to ``O``. In contrast to weak normal forms, reduced normal forms *are* unique, but their computation is more expensive. This is the standard Buchberger algorithm for computing reduced normal forms with respect to *global* monomial orders [SCA, algorithm 1.6.11]. The ``pantom`` option is not supported, so this normal form cannot be used as a normal form for the "extended" groebner algorithm. """ h = sdm_zero() g = f while g: g = sdm_nf_buchberger(g, G, O, K) if g: h = sdm_add(h, [sdm_LT(g)], O, K) g = g[1:] return h def sdm_groebner(G, NF, O, K, extended=False): """ Compute a minimal standard basis of ``G`` with respect to order ``O``. The algorithm uses a normal form ``NF``, for example ``sdm_nf_mora``. The ground field is assumed to be ``K``, and monomials ordered according to ``O``. Let `N` denote the submodule generated by elements of `G`. A standard basis for `N` is a subset `S` of `N`, such that `in(S) = in(N)`, where for any subset `X` of `F`, `in(X)` denotes the submodule generated by the initial forms of elements of `X`. [SCA, defn 2.3.2] A standard basis is called minimal if no subset of it is a standard basis. One may show that standard bases are always generating sets. Minimal standard bases are not unique. This algorithm computes a deterministic result, depending on the particular order of `G`. If ``extended=True``, also compute the transition matrix from the initial generators to the groebner basis. That is, return a list of coefficient vectors, expressing the elements of the groebner basis in terms of the elements of ``G``. This functions implements the "sugar" strategy, see Giovini et al: "One sugar cube, please" OR Selection strategies in Buchberger algorithm. """ # The critical pair set. # A critical pair is stored as (i, j, s, t) where (i, j) defines the pair # (by indexing S), s is the sugar of the pair, and t is the lcm of their # leading monomials. P = [] # The eventual standard basis. S = [] Sugars = [] def Ssugar(i, j): """Compute the sugar of the S-poly corresponding to (i, j).""" LMi = sdm_LM(S[i]) LMj = sdm_LM(S[j]) return max(Sugars[i] - sdm_monomial_deg(LMi), Sugars[j] - sdm_monomial_deg(LMj)) \ + sdm_monomial_deg(sdm_monomial_lcm(LMi, LMj)) ourkey = lambda p: (p[2], O(p[3]), p[1]) def update(f, sugar, P): """Add f with sugar ``sugar`` to S, update P.""" if not f: return P k = len(S) S.append(f) Sugars.append(sugar) LMf = sdm_LM(f) def removethis(pair): i, j, s, t = pair if LMf[0] != t[0]: return False tik = sdm_monomial_lcm(LMf, sdm_LM(S[i])) tjk = sdm_monomial_lcm(LMf, sdm_LM(S[j])) return tik != t and tjk != t and sdm_monomial_divides(tik, t) and \ sdm_monomial_divides(tjk, t) # apply the chain criterion P = [p for p in P if not removethis(p)] # new-pair set N = [(i, k, Ssugar(i, k), sdm_monomial_lcm(LMf, sdm_LM(S[i]))) for i in range(k) if LMf[0] == sdm_LM(S[i])[0]] # TODO apply the product criterion? 
N.sort(key=ourkey) remove = set() for i, p in enumerate(N): for j in range(i + 1, len(N)): if sdm_monomial_divides(p[3], N[j][3]): remove.add(j) # TODO mergesort? P.extend(reversed([p for i, p in enumerate(N) if not i in remove])) P.sort(key=ourkey, reverse=True) # NOTE reverse-sort, because we want to pop from the end return P # Figure out the number of generators in the ground ring. try: # NOTE: we look for the first non-zero vector, take its first monomial # the number of generators in the ring is one less than the length # (since the zeroth entry is for the module generators) numgens = len(next(x[0] for x in G if x)[0]) - 1 except StopIteration: # No non-zero elements in G ... if extended: return [], [] return [] # This list will store expressions of the elements of S in terms of the # initial generators coefficients = [] # First add all the elements of G to S for i, f in enumerate(G): P = update(f, sdm_deg(f), P) if extended and f: coefficients.append(sdm_from_dict({(i,) + (0,)*numgens: K(1)}, O)) # Now carry out the buchberger algorithm. while P: i, j, s, t = P.pop() f, g = S[i], S[j] if extended: sp, coeff = sdm_spoly(f, g, O, K, phantom=(coefficients[i], coefficients[j])) h, hcoeff = NF(sp, S, O, K, phantom=(coeff, coefficients)) if h: coefficients.append(hcoeff) else: h = NF(sdm_spoly(f, g, O, K), S, O, K) P = update(h, Ssugar(i, j), P) # Finally interreduce the standard basis. # (TODO again, better data structures) S = set((tuple(f), i) for i, f in enumerate(S)) for (a, ai), (b, bi) in permutations(S, 2): A = sdm_LM(a) B = sdm_LM(b) if sdm_monomial_divides(A, B) and (b, bi) in S and (a, ai) in S: S.remove((b, bi)) L = sorted(((list(f), i) for f, i in S), key=lambda p: O(sdm_LM(p[0])), reverse=True) res = [x[0] for x in L] if extended: return res, [coefficients[i] for _, i in L] return res
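

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: tying the conversion
# helpers and the two main routines together. `_example_standard_basis` is a
# hypothetical name; it builds two module elements, computes a standard basis
# with sdm_groebner()/sdm_nf_mora(), and reduces an element of the submodule
# to zero.
def _example_standard_basis():
    from sympy.abc import x, y
    from sympy.polys import QQ, lex
    f = sdm_from_vector([x**2], lex, QQ, gens=[x, y])
    g = sdm_from_vector([x*y], lex, QQ, gens=[x, y])
    G = sdm_groebner([f, g], sdm_nf_mora, lex, QQ)
    # x**2*y lies in the submodule generated by f and g, so its weak normal
    # form with respect to the standard basis G is the zero element [].
    h = sdm_from_vector([x**2*y], lex, QQ, gens=[x, y])
    return sdm_nf_mora(h, G, lex, QQ)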
{ "content_hash": "e5eb6f1a3518d3761a2ad1910c727f47", "timestamp": "", "source": "github", "line_count": 740, "max_line_length": 82, "avg_line_length": 29.56756756756757, "alnum_prop": 0.5593692870201097, "repo_name": "kaushik94/sympy", "id": "249bf57b83f51f6600f23cd4b108ef3eafff5e63", "size": "21880", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "sympy/polys/distributedmodules.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "5094" }, { "name": "Python", "bytes": "13553568" }, { "name": "Ruby", "bytes": "304" }, { "name": "Scheme", "bytes": "125" }, { "name": "Shell", "bytes": "4008" }, { "name": "TeX", "bytes": "32356" }, { "name": "XSLT", "bytes": "366202" } ], "symlink_target": "" }
import os import platform import re import subprocess import sys from distutils.core import Command from distutils.command.build import build from distutils.ccompiler import new_compiler # Don't force people to install setuptools unless # we have to. try: from setuptools import setup except ImportError: from ez_setup import use_setuptools use_setuptools() from setuptools import setup DEBUG = False VERSION = "0.3.0" # Hijack the build process by inserting specialized commands into # the list of build sub commands build.sub_commands = [ ("build_cmongo", None), ("build_cmonary", None) ] + build.sub_commands # Platform specific stuff if platform.system() == 'Windows': compiler_kw = {'compiler' : 'mingw32'} linker_kw = {'libraries' : ['ws2_32']} so_target = 'libcmonary.dll' else: compiler_kw = {} linker_kw = {'libraries' : []} so_target = 'libcmonary.so' compiler = new_compiler(**compiler_kw) MONARY_DIR = "monary" CMONGO_SRC = "mongodb-mongo-c-driver-1.0.0" CFLAGS = ["-fPIC", "-O2"] if not DEBUG: CFLAGS.append("-DNDEBUG") class BuildException(Exception): """Indicates an error occurred while compiling from source.""" pass # I suspect I could be using the build_clib command for this, but don't know how. class BuildCMongoDriver(Command): """Custom command to build the C Mongo driver. Relies on autotools.""" description = "builds the C Mongo driver" user_options = [ ] def initialize_options(self): pass def finalize_options(self): pass def run(self): try: os.chdir(CMONGO_SRC) env = os.environ.copy() env.setdefault('CFLAGS', '') env['CFLAGS'] += ' -fPIC' status = subprocess.call(["./configure", "--enable-static", "--without-documentation", "--disable-maintainer-mode", "--disable-tests", "--disable-examples"], env=env) if status != 0: raise BuildException("configure script failed with exit status %d" % status) status = subprocess.call(["make"]) if status != 0: raise BuildException("make failed with exit status %d" % status) # after configuring, add libs to linker_kw with open(os.path.join("src", "libmongoc-1.0.pc")) as f: libs = re.search(r"Libs:\s+(.+)$", f.read(), flags=re.MULTILINE).group(1) libs = [l[2:] for l in re.split(r"\s+", libs)[:-1] if l.startswith("-l")] linker_kw["libraries"] += libs finally: os.chdir("..") class BuildCMonary(Command): """Custom command to build the cmonary library, static linking to the cmongo drivers, a producing a .so library that can be loaded via ctypes. """ description = "builds the cmonary library (for ctypes)" user_options = [ ] def initialize_options(self): pass def finalize_options(self): pass def run(self): compiler.compile([os.path.join(MONARY_DIR, "cmonary.c")], extra_preargs=CFLAGS, include_dirs=[os.path.join(CMONGO_SRC, "src", "mongoc"), os.path.join(CMONGO_SRC, "src", "libbson", "src", "bson")]) compiler.link_shared_lib([os.path.join(MONARY_DIR, "cmonary.o"), os.path.join(CMONGO_SRC, ".libs", "libmongoc-1.0.a"), os.path.join(CMONGO_SRC, "src", "libbson", ".libs", "libbson-1.0.a")], "cmonary", "monary", **linker_kw) # Get README info try: with open("README.rst") as fd: readme_content = fd.read() except: readme_content = "" setup( name = "Monary", version = VERSION, packages = ["monary"], requires = ["pymongo", "numpy"], package_dir = {"monary": "monary"}, package_data = {"monary": [so_target]}, author = "David J. C. 
Beach", author_email = "info@djcinnovations.com", description = "Monary performs high-performance column queries from MongoDB.", long_description = readme_content, keywords = "monary pymongo mongo mongodb numpy array", classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: Unix", "Operating System :: Microsoft :: Windows", "Environment :: Console", "Programming Language :: Python", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Database" ], url = "http://bitbucket.org/djcbeach/monary/", cmdclass = { 'build_cmongo': BuildCMongoDriver, 'build_cmonary': BuildCMonary, } )
{ "content_hash": "b0e50196c3f3976d1b0ecefd41522189", "timestamp": "", "source": "github", "line_count": 145, "max_line_length": 104, "avg_line_length": 35.172413793103445, "alnum_prop": 0.5841176470588235, "repo_name": "ksuarz/monary", "id": "d5ca14c34038b30da3214386908f39f06dfbc6a7", "size": "5230", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1740646" }, { "name": "C++", "bytes": "43021" }, { "name": "CSS", "bytes": "53518" }, { "name": "JavaScript", "bytes": "129200" }, { "name": "Perl", "bytes": "7570" }, { "name": "Python", "bytes": "79040" }, { "name": "Shell", "bytes": "671166" } ], "symlink_target": "" }
"""Test cltk.corpus.""" from unicodedata import normalize import os import unittest from unittest.mock import patch import nltk from cltk.corpus.greek.alphabet import expand_iota_subscript from cltk.corpus.greek.alphabet import filter_non_greek from cltk.corpus.greek.beta_to_unicode import Replacer from cltk.corpus.greek.tlg.parse_tlg_indices import get_female_authors from cltk.corpus.greek.tlg.parse_tlg_indices import get_epithet_index from cltk.corpus.greek.tlg.parse_tlg_indices import get_epithets from cltk.corpus.greek.tlg.parse_tlg_indices import select_authors_by_epithet from cltk.corpus.greek.tlg.parse_tlg_indices import get_epithet_of_author from cltk.corpus.greek.tlg.parse_tlg_indices import get_geo_index from cltk.corpus.greek.tlg.parse_tlg_indices import get_geographies from cltk.corpus.greek.tlg.parse_tlg_indices import select_authors_by_geo from cltk.corpus.greek.tlg.parse_tlg_indices import get_geo_of_author from cltk.corpus.greek.tlg.parse_tlg_indices import get_lists from cltk.corpus.greek.tlg.parse_tlg_indices import get_id_author from cltk.corpus.greek.tlg.parse_tlg_indices import select_id_by_name from cltk.corpus.greek.tlg.parse_tlg_indices import get_works_by_id from cltk.corpus.greek.tlg.parse_tlg_indices import check_id from cltk.corpus.greek.tlg.parse_tlg_indices import get_date_author from cltk.corpus.greek.tlg.parse_tlg_indices import get_dates from cltk.corpus.greek.tlg.parse_tlg_indices import get_date_of_author from cltk.corpus.greek.tlg.parse_tlg_indices import _get_epoch from cltk.corpus.greek.tlg.parse_tlg_indices import _check_number from cltk.corpus.greek.tlg.parse_tlg_indices import _handle_splits from cltk.corpus.greek.tlgu import TLGU from cltk.corpus.middle_english.alphabet import normalize_middle_english from cltk.corpus.old_norse import runes from cltk.corpus.utils.formatter import assemble_phi5_author_filepaths from cltk.corpus.utils.formatter import assemble_phi5_works_filepaths from cltk.corpus.utils.formatter import assemble_tlg_author_filepaths from cltk.corpus.utils.formatter import assemble_tlg_works_filepaths from cltk.corpus.utils.formatter import phi5_plaintext_cleanup from cltk.corpus.utils.formatter import remove_non_ascii from cltk.corpus.utils.formatter import remove_non_latin from cltk.corpus.utils.formatter import tonos_oxia_converter from cltk.corpus.utils.formatter import tlg_plaintext_cleanup from cltk.corpus.utils.formatter import cltk_normalize from cltk.corpus.utils.importer import CorpusImporter from cltk.corpus.utils.importer import CorpusImportError from cltk.corpus.sanskrit.itrans.itrans_transliterator import * from cltk.corpus.sanskrit.itrans.unicode_transliterate import * from cltk.corpus.sanskrit.itrans.langinfo import * from cltk.corpus.sanskrit.itrans.sinhala_transliterator import \ SinhalaDevanagariTransliterator as sdt from cltk.corpus.punjabi.numerifier import punToEnglish_number from cltk.corpus.punjabi.numerifier import englishToPun_number from cltk.corpus.egyptian.transliterate_mdc import mdc_unicode from cltk.corpus.utils.formatter import normalize_fr from cltk.corpus.swadesh import Swadesh from cltk.corpus.readers import assemble_corpus, get_corpus_reader from cltk.corpus.latin.latin_library_corpus_types import corpus_texts_by_type, \ corpus_directories_by_type from cltk.utils.matrix_corpus_fun import distinct_words __license__ = 'MIT License. See LICENSE.' 
DISTRIBUTED_CORPUS_PATH_REL = get_cltk_data_dir() + '/test_distributed_corpora.yaml' DISTRIBUTED_CORPUS_PATH = os.path.expanduser(DISTRIBUTED_CORPUS_PATH_REL) class TestSequenceFunctions(unittest.TestCase): # pylint: disable=R0904 """Class for unittest""" @classmethod def setUpClass(self): try: corpus_importer = CorpusImporter('latin') corpus_importer.import_corpus('latin_text_latin_library') corpus_importer.import_corpus('latin_text_perseus') corpus_importer = CorpusImporter('greek') corpus_importer.import_corpus('greek_text_perseus') corpus_importer.import_corpus('greek_text_tesserae') nltk.download('punkt') nltk.download('averaged_perceptron_tagger') except: raise Exception('Failure to download test corpus') def test_greek_betacode_to_unicode(self): """Test converting Beta Code to Unicode. Note: assertEqual appears to not be correctly comparing certain characters (``ά`` and ``ί``, at least). """ replacer = Replacer() # Generic test beta_1 = r"""O(/PWS OU)=N MH\ TAU)TO\ """ unicode_1 = replacer.beta_code(beta_1) target_1 = 'ὅπως οὖν μὴ ταὐτὸ ' # Test for iota and diaeresis self.assertEqual(unicode_1, target_1) beta_2 = r"""*XALDAI+KH\N""" unicode_2 = replacer.beta_code(beta_2) target_2 = 'Χαλδαϊκὴν' self.assertEqual(unicode_2, target_2) # Test for upsilon and diaeresis beta_3 = r"""PROU+POTETAGME/NWN""" unicode_3 = replacer.beta_code(beta_3) target_3 = 'προϋποτεταγμένων' self.assertEqual(unicode_3, target_3) # Test for lowercase beta_4 = r"""proi+sxome/nwn""" unicode_4 = replacer.beta_code(beta_4) target_4 = 'προϊσχομένων' self.assertEqual(unicode_4, target_4) def test_tlgu_init(self): """Test constructors of TLGU module for check, import, and install.""" tlgu = TLGU(testing=True) self.assertTrue(tlgu) def test_import_greek_software_tlgu(self): """Test cloning TLGU.""" corpus_importer = CorpusImporter('greek') corpus_importer.import_corpus('greek_software_tlgu') file_rel = os.path.join(get_cltk_data_dir() + '/greek/software/greek_software_tlgu/README.md') _file = os.path.expanduser(file_rel) file_exists = os.path.isfile(_file) self.assertTrue(file_exists) def test_tlgu_convert(self): """Test TLGU convert. This reads the file ``tlgu_test_text_beta_code.txt``, which mimics a TLG file, and converts it. Note: assertEqual fails on some accented characters ('ή', 'ί'). """ in_test = os.path.abspath('cltk/tests/test_nlp/tlgu_test_text_beta_code.txt') out_test = os.path.normpath(get_cltk_data_dir() + '/tlgu_test_text_unicode.txt') tlgu = TLGU(testing=True) tlgu.convert(in_test, out_test) with open(out_test) as out_file: new_text = out_file.read() os.remove(out_test) target = """ βλλον δ' ἀλλλους χαλκρεσιν ἐγχεῃσιν. 
""" self.assertEqual(new_text, target) def test_tlgu_convert_fail(self): """Test the TLGU to fail when importing a corpus that doesn't exist.""" tlgu = TLGU(testing=True) with self.assertRaises(AssertionError): tlgu.convert('~/Downloads/corpora/TLG_E/bad_path.txt', '~/Documents/thucydides.txt') def test_tlgu_convert_corpus_fail(self): """Test the TLGU to fail when trying to convert an unsupported corpus.""" tlgu = TLGU(testing=True) with self.assertRaises(AssertionError): tlgu.convert_corpus(corpus='bad_corpus') def test_tlg_plaintext_cleanup(self): """Test post-TLGU cleanup of text of Greek TLG text.""" dirty = """{ΑΘΗΝΑΙΟΥ ΝΑΥΚΡΑΤΙΤΟΥ ΔΕΙΠΝΟΣΟΦΙΣΤΩΝ} LATIN Ἀθήναιος (μὲν) ὁ τῆς 999 βίβλου πατήρ: ποιεῖται δὲ τὸν λόγον πρὸς Τιμοκράτην.""" # pylint: disable=line-too-long clean = tlg_plaintext_cleanup(dirty, rm_punctuation=True, rm_periods=False) target = ' Ἀθήναιος ὁ τῆς βίβλου πατήρ ποιεῖται δὲ τὸν λόγον πρὸς Τιμοκράτην.' self.assertEqual(clean, target) def test_tlg_plaintext_cleanup_rm_periods(self): """Test post-TLGU cleanup of text of Greek TLG text.""" dirty = """{ΑΘΗΝΑΙΟΥ ΝΑΥΚΡΑΤΙΤΟΥ ΔΕΙΠΝΟΣΟΦΙΣΤΩΝ} LATIN Ἀθήναιος (μὲν) ὁ τῆς 999 βίβλου πατήρ: ποιεῖται δὲ τὸν λόγον πρὸς Τιμοκράτην.""" # pylint: disable=line-too-long clean = tlg_plaintext_cleanup(dirty, rm_punctuation=True, rm_periods=True) target = ' Ἀθήναιος ὁ τῆς βίβλου πατήρ ποιεῖται δὲ τὸν λόγον πρὸς Τιμοκράτην' self.assertEqual(clean, target) def test_phi5_plaintext_cleanup(self): """Test post-TLGU cleanup of text of Latin PHI5 text.""" dirty = """ {ODYSSIA} {Liber I} Virum áge 999 mihi, Camena, (insece) versutum. Pater noster, Saturni filie . . . Mea puera, quid verbi ex tuo ore supera fugit? argenteo polubro, aureo eclutro. """ clean = phi5_plaintext_cleanup(dirty, rm_punctuation=True, rm_periods=False) target = ' Virum áge mihi Camena versutum. Pater noster Saturni filie . . . Mea puera quid verbi ex tuo ore supera fugit argenteo polubro aureo eclutro. ' # pylint: disable=line-too-long self.assertEqual(clean, target) def test_phi5_plaintext_cleanup_rm_periods(self): """Test post-TLGU cleanup of text of Latin PHI5 text.""" dirty = """ {ODYSSIA} {Liber I} Virum áge 999 mihi, Camena, (insece) versutum. Pater noster, Saturni filie . . . Mea puera, quid verbi ex tuo ore supera fugit? argenteo polubro, aureo eclutro. """ clean = phi5_plaintext_cleanup(dirty, rm_punctuation=True, rm_periods=True) target = ' Virum áge mihi Camena versutum Pater noster Saturni filie Mea puera quid verbi ex tuo ore supera fugit argenteo polubro aureo eclutro ' # pylint: disable=line-too-long self.assertEqual(clean, target) def test_phi5_plaintext_cleanup_rm_periods_bytes(self): """Test post-TLGU cleanup of text of Latin PHI5 text.""" dirty = '\xcc\x81 Virum áge 999 mihi.' 
clean = phi5_plaintext_cleanup(dirty, rm_punctuation=True, rm_periods=True) target = '́ Virum áge mihi' self.assertEqual(clean, target) def test_cltk_normalize_compatible(self): """Test Normalizing Text with compatibility True""" s1 = 'café' s2 = 'cafe\u0301' normalized_text = cltk_normalize(s1, compatibility=True) target = normalize('NFKC', s2) self.assertEqual(normalized_text, target) def test_cltk_normalize_noncompatible(self): """Test Normalizing Text with compatibility False""" s1 = 'café' s2 = 'cafe\u0301' normalized_text = cltk_normalize(s1, compatibility=False) target = normalize('NFC', s2) self.assertEqual(normalized_text, target) def test_assemble_tlg_author(self): """Test building absolute filepaths from TLG index.""" paths = assemble_tlg_author_filepaths() self.assertEqual(len(paths), 1823) def test_assemble_phi5_author(self): """Test building absolute filepaths from TLG index.""" paths = assemble_phi5_author_filepaths() self.assertEqual(len(paths), 362) def test_assemble_tlg_works(self): """"Test building absolute filepaths from TLG works index.""" paths = assemble_tlg_works_filepaths() self.assertEqual(len(paths), 6625) def test_assemble_phi5_works(self): """"Test building absolute filepaths from PHI5 works index.""" paths = assemble_phi5_works_filepaths() self.assertEqual(len(paths), 836) def test_corpora_import_list_greek(self): """Test listing of available corpora.""" corpus_importer = CorpusImporter('greek') available_corpora = corpus_importer.list_corpora self.assertTrue(available_corpora) def test_corpora_import_list_latin(self): """Test listing of available corpora.""" corpus_importer = CorpusImporter('latin') available_corpora = corpus_importer.list_corpora self.assertTrue(available_corpora) def test_tonos_oxia_converter(self): """Test function converting tonos to oxia accent.""" char_tonos = "ά" # with tonos, for Modern Greek char_oxia = "ά" # with oxia, for Ancient Greek corrected = tonos_oxia_converter(char_tonos) self.assertEqual(char_oxia, corrected) def test_tonos_oxia_converter_reverse(self): """Test function converting tonos to oxia accent.""" char_tonos = "ά" # with tonos, for Modern Greek char_oxia = "ά" # with oxia, for Ancient Greek corrected = tonos_oxia_converter(char_oxia, reverse=True) self.assertEqual(char_tonos, corrected) def test_remove_non_ascii(self): """Test removing all non-ascii characters from a string.""" non_ascii_str = 'Ascii and some non-ascii: θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν' # pylint: disable=line-too-long ascii_str = remove_non_ascii(non_ascii_str) valid = 'Ascii and some non-ascii: ' self.assertEqual(ascii_str, valid) def test_remove_non_latin(self): """Test removing all non-Latin characters from a string.""" latin_str = '(1) Dices ἐστιν ἐμός pulchrum esse inimicos ulcisci.' # pylint: disable=line-too-long non_latin_str = remove_non_latin(latin_str) valid = ' Dices pulchrum esse inimicos ulcisci' self.assertEqual(non_latin_str, valid) def test_remove_non_latin_opt(self): """Test removing all non-Latin characters from a string, with `also_keep` parameter. """ latin_str = '(1) Dices ἐστιν ἐμός pulchrum esse inimicos ulcisci.' # pylint: disable=line-too-long non_latin_str = remove_non_latin(latin_str, also_keep=['.', ',']) valid = ' Dices pulchrum esse inimicos ulcisci.' 
self.assertEqual(non_latin_str, valid) def test_latin_library_reader_missing_corpus(self): """ Needs to precede (for now) the next two tests which load the corpus Provided by Patrick Burns """ corpus_importer = CorpusImporter('latin') # corpus_importer.import_corpus('latin_text_latin_library') corpus_importer.import_corpus('latin_models_cltk') def _import(): with patch('builtins.input', return_value='n'): from cltk.corpus.readers import latinlibrary self.assertRaises(OSError, _import) def test_import_lat_text_lat_lib(self): """Test cloning the Latin Library text corpus.""" corpus_importer = CorpusImporter('latin') corpus_importer.import_corpus('latin_text_latin_library') file_rel = os.path.join(get_cltk_data_dir() + '/latin/text/latin_text_latin_library/README.md') _file = os.path.expanduser(file_rel) file_exists = os.path.isfile(_file) self.assertTrue(file_exists) def test_import_latin_library_corpus_reader(self): """Test the Latin Library corpus reader.""" corpus_importer = CorpusImporter('latin') corpus_importer.import_corpus('latin_text_latin_library') reader = get_corpus_reader(language='latin', corpus_name='latin_text_latin_library') ALL_FILE_IDS = list(reader.fileids()) self.assertTrue(len(ALL_FILE_IDS) > 2100) def test_json_corpus_reader(self): """Test filtered corpus sents method.""" reader = get_corpus_reader(language='latin', corpus_name='latin_text_perseus') # this has simple sections reader._fileids = ['cicero__on-behalf-of-aulus-caecina__latin.json'] self.assertTrue(len(list(reader.paras())) >= 1) self.assertTrue(len(list(reader.sents())) > 400) self.assertTrue(len(list(reader.words())) > 12000) reader = get_corpus_reader(language='latin', corpus_name='latin_text_perseus') # this example has subsections reader._fileids = ['ausonius-decimus-magnus__eclogarum-liber__latin.json'] self.assertTrue(len(list(reader.docs())) == 1) self.assertTrue(len(list(reader.paras())) >= 1) self.assertTrue(len(list(reader.sents())) > 50) self.assertTrue(len(list(reader.words())) > 2750) # reader = get_corpus_reader(corpus_name='greek_text_perseus', language='greek') # reader._fileids = ['plato__apology__grc.json'] # self.assertTrue(len(list(reader.docs())) == 1) # self.assertTrue(len(list(reader.paras())) > 1) # self.assertTrue(len(list(reader.sents())) > 260) # self.assertTrue(len(list(reader.words())) > 9800) def test_tesserae_corpus_reader(self): """Test Tesserae corpus methods.""" # Update when corpus is add to CLTK reader = get_corpus_reader(language='greek', corpus_name='greek_text_tesserae') sample = reader.fileids()[0] self.assertTrue(len(list(reader.docs(sample))) >= 1) self.assertTrue(len(list(reader.texts(sample))) >= 1) self.assertTrue(len(list(reader.paras(sample))) >= 1) self.assertTrue(len(list(reader.sents(sample))) >= 1) self.assertTrue(len(list(reader.words(sample))) >= 1) self.assertTrue(len(list(reader.lines(sample))) >= 1) self.assertTrue(reader.describe()) self.assertTrue(len(list(reader.pos_tokenize(sample))) >= 1) def test_json_corpus_reader_sizes(self): """Test filtered corpus sizes method.""" reader = get_corpus_reader(language='latin', corpus_name='latin_text_perseus') self.assertTrue(len(list(reader.sizes())) > 290) def test_import_latin_models_cltk(self): """Test cloning the CLTK Latin models.""" corpus_importer = CorpusImporter('latin') corpus_importer.import_corpus('latin_models_cltk') file_rel = os.path.join(get_cltk_data_dir() + '/latin/model/latin_models_cltk/README.md') _file = os.path.expanduser(file_rel) file_exists = os.path.isfile(_file) 
self.assertTrue(file_exists) def test_import_greek_models_cltk(self): """Test pull (not clone) the CLTK Greek models. Import was run in ``setUp()``. """ corpus_importer = CorpusImporter('greek') corpus_importer.import_corpus('greek_models_cltk') file_rel = os.path.join(get_cltk_data_dir() + '/greek/model/greek_models_cltk/README.md') _file = os.path.expanduser(file_rel) file_exists = os.path.isfile(_file) self.assertTrue(file_exists) def test_show_corpora_bad_lang(self): """Test failure of importer upon selecting unsupported language.""" with self.assertRaises(CorpusImportError): CorpusImporter('bad_lang') def test_import_nonexistant_corpus(self): """Test that creating a CorpusImporter for a non existent lang fails smoothly """ with self.assertRaises(CorpusImportError): corpus_importer = CorpusImporter('greek') corpus_importer.import_corpus('euclids_book_of_recipes') def test_import_latin_text_antique_digiliblt(self): """Test cloning the Antique Latin from digilibLT.""" corpus_importer = CorpusImporter('latin') corpus_importer.import_corpus('latin_text_antique_digiliblt') file_rel = os.path.join(get_cltk_data_dir() + '/latin/text/latin_text_antique_digiliblt/README.md') _file = os.path.expanduser(file_rel) file_exists = os.path.isfile(_file) self.assertTrue(file_exists) def test_get_female_authors(self): """Test function to parse TLG female authors list.""" authors = get_female_authors() authors = sorted(authors)[:3] self.assertEqual(authors, ['0009', '0051', '0054']) def test_get_epithet_index(self): """Test get_epithet_index().""" ind = get_epithet_index() self.assertEqual(type(ind), dict) def test_get_epithets(self): """Test get_epithets().""" epithets = get_epithets() self.assertEqual(epithets[:2], ['Alchemistae', 'Apologetici']) def test_select_authors_by_epithet(self): """Test select_authors_by_epithet().""" authors = select_authors_by_epithet('Apologetici') self.assertEqual(len(authors), 9) def test_get_epithet_of_author(self): """Test get_epithet_of_author().""" epithet = get_epithet_of_author('0016') self.assertEqual(epithet, 'Historici/-ae') def test_get_geo_index(self): """Test get_geo_index().""" index = get_geo_index() self.assertEqual(type(index), dict) def test_get_geographies(self): """Test get_geographies().""" geos = get_geographies() self.assertEqual(type(geos), list) def test_select_authors_by_geo(self): """Test select_authors_by_geo().""" authors = select_authors_by_geo('Athenae') self.assertEqual(len(authors), 113) def test_get_geo_of_author(self): """Test get_geo_of_author().""" geo = get_geo_of_author('0008') self.assertEqual(geo, 'Naucratis') def test_get_lists(self): """Test get_lists().""" index = get_lists() self.assertEqual(type(index), dict) def test_get_id_author(self): """Test get_id_author().""" self.assertEqual(type(get_id_author()), dict) def test_select_id_by_name(self): """Test select_id_by_name().""" matches = select_id_by_name('hom') self.assertEqual(len(matches), 11) def test_get_works_by_id(self): """Test get_works_by_id().""" works = get_works_by_id("0007") self.assertEqual(len(works), 147) def test_check_id(self): """Test check_id""" author = check_id("0557") valid = "Epictetus Phil." self.assertEqual(author, valid) # #! Figure out why this test stopped working (actual function runs fine) # def test_get_date_author(self): # """Test get_date_author().""" # dates = get_date_author() # self.assertEqual(type(dates), dict) # #! 
Figure out why this test stopped working (actual function runs fine) # def test_get_dates(self): # """Test get_dates().""" # dates = get_dates() # self.assertEqual(type(dates), list) # self.assertEqual(len(dates), 183) # #! Figure out why this test stopped working (actual function runs fine) # def test_get_date_of_author(self): # """Test get_date_of_author().""" # self.assertEqual(get_date_of_author('1747'), '1 B.C./A.D. 1') # self.assertEqual(get_date_of_author('1143'), '2-1 B.C.') # self.assertEqual(get_date_of_author('0295'), 'Varia') # self.assertEqual(get_date_of_author('4304'), 'a. A.D. 10') # self.assertIsNone(get_date_of_author('123456')) def test_get_epoch(self): """Test _get_epoch().""" self.assertEqual(_get_epoch('A.D. 9-10'), 'ad') self.assertEqual(_get_epoch('p. A.D. 2'), 'ad') self.assertIsNone(_get_epoch('a. A.D. 2')) self.assertEqual(_get_epoch('3 B.C.'), 'bc') self.assertIsNone(_get_epoch('p. 7 B.C.')) self.assertEqual(_get_epoch('a. 1 B.C.'), 'bc') self.assertEqual(_get_epoch('a. 1 B.C.?'), 'bc') def test_check_number(self): """Test _check_number().""" self.assertTrue(_check_number('5')) self.assertTrue(_check_number('5?')) self.assertFalse(_check_number('A.D. 5')) self.assertFalse(_check_number('A.D. 5?')) self.assertFalse(_check_number('p. 4 B.C.')) def test_handle_splits(self): """Test _handle_splits().""" _dict = {'start_raw': 'A.D. 9', 'start_epoch': 'ad', \ 'stop_epoch': 'ad', 'stop_raw': 'A.D. 10'} self.assertEqual(_handle_splits('A.D. 9-10'), _dict) _dict = {'start_raw': 'A.D. 1?', 'start_epoch': 'ad', \ 'stop_epoch': 'ad', 'stop_raw': 'A.D. 6'} self.assertEqual(_handle_splits('A.D. 1?-6'), _dict) _dict = {'stop_raw': 'p. A.D. 2', 'start_raw': 'a. 4 B.C.', \ 'stop_epoch': 'ad', 'start_epoch': 'bc'} self.assertEqual(_handle_splits('a. 4 B.C.-p. A.D. 2'), _dict) _dict = {'stop_raw': 'A.D. 2?', 'start_raw': 'A.D. 2?', \ 'stop_epoch': 'ad', 'start_epoch': 'ad'} self.assertEqual(_handle_splits('A.D. 2?'), _dict) _dict = {'stop_raw': '1 B.C.?', 'start_raw': '2 B.C.?', \ 'stop_epoch': 'bc', 'start_epoch': 'bc'} self.assertEqual(_handle_splits('2/1 B.C.?'), _dict) def test_punjabi_to_english_number_conversion(self): str_test = '੧੨੩੪੫੬੭੮੯੦' self.assertEqual(1234567890, punToEnglish_number(str_test)) def test_englishToPun_number(self): str_test = '੧੨੩੪੫੬੭੮੯੦' self.assertEqual(str_test, englishToPun_number(1234567890)) def test_english_to_punjabi_number_conversion(self): """Test English to Punjabi number conversion.""" str_test = '੧੨੩੪੫੬੭੮੯੦' self.assertEqual(str_test, englishToPun_number(1234567890)) def make_distributed_corpora_testing_file(self): """Setup for some cloning tests, make file at get_cltk_data_dir() + '/test_distributed_corpora.yaml'. """ # ! Don't format this literal string, must be YAML-ish yaml_str_to_write = """example_distributed_latin_corpus: git_remote: git@github.com:kylepjohnson/latin_corpus_newton_example.git language: latin type: text example_distributed_fake_language_corpus: origin: git@github.com:kylepjohnson/doesntexistyet.git language: fake_language type: treebank """ cltk_data_dir = get_cltk_data_dir() if not os.path.isdir(cltk_data_dir): os.mkdir(cltk_data_dir) with open(DISTRIBUTED_CORPUS_PATH, 'w') as file_open: file_open.write(yaml_str_to_write) def remove_distributed_corpora_testing_file(self): """Remove ~/cltk_data/test_distributed_corpora.yaml.""" os.remove(DISTRIBUTED_CORPUS_PATH) def test_corpus_importer_variables_no_user_but_in_core(self): """Test function which checks for presence of ~/cltk_data/distributed_corpora.yaml. 
Look for a language not in core repos but not in user-defined. """ self.make_distributed_corpora_testing_file() corpus_importer = CorpusImporter('sanskrit', testing=True) self.assertIn('sanskrit_models_cltk', corpus_importer.list_corpora) self.remove_distributed_corpora_testing_file() def test_corpus_importer_variables_user_but_not_core(self): """Test function which checks for presence of `~/cltk_data/distributed_corpora.yaml`. Look for a language not in the core but in the user's custom file. """ self.make_distributed_corpora_testing_file() corpus_importer = CorpusImporter('fake_language', testing=True) corpus_name = corpus_importer.list_corpora target_name = 'example_distributed_fake_language_corpus' self.assertEqual(corpus_name[0], target_name) self.remove_distributed_corpora_testing_file() def test_corpus_importer_variables_no_user_but_yes_core(self): """Test function which checks for presence of `~/cltk_data/distributed_corpora.yaml`. Look for a language in the core but not in the user's custom file. """ self.make_distributed_corpora_testing_file() corpus_importer = CorpusImporter('pali', testing=True) corpora = corpus_importer.list_corpora self.assertIn('pali_text_ptr_tipitaka', corpora) self.remove_distributed_corpora_testing_file() def test_corpus_importer_variables_no_user_no_core(self): """Test function which checks for presence of `~/cltk_data/distributed_corpora.yaml`. Look for a language neither in the core or in the user's custom file. """ self.make_distributed_corpora_testing_file() with self.assertRaises(CorpusImportError): CorpusImporter('fake_language_nowhere') self.remove_distributed_corpora_testing_file() # # def test_import_punjabi_punjabi_text_gurban(self): # pun_import = CorpusImporter('punjabi') # corpora_list = pun_import.list_corpora # self.assertTrue('punjabi_text_gurban' in corpora_list) # pun_import.import_corpus('punjabi_text_gurban') # file_path = os.path.join(get_cltk_data_dir() + '/punjabi/text/punjabi_text_gurban/README.md') # _file = os.path.expanduser(file_path) # self.assertTrue(os.path.isfile(_file)) # # Ancient Egyptian Stuff ----------------------------- def test_egyptian_transliterate_mdc_to_unicode_q_kopf_True(self): """ test to transliterate mdc to unicode for ancient egyptian texts. q_kopf option True """ # mdc_string = """ink Smsw Sms nb=f bAk n ipt nswt irt pat wrt <Hswt> Hmt [nswt] snwsrt m Xnm-swt sAt nswt imn-m-HAt m qA-nfrw nfrw nbt imAx""" # test_result_string = mdc_unicode(mdc_string) # comparison_string = """i҆nk šmsw šms nb⸗f bꜣk n i҆pt nswt i҆rt pꜥt wrt 〈ḥswt〉 ḥmt [nswt] snwsrt m ẖnm-swt sꜣt nswt i҆mn-m-ḥꜣt m qꜣ-nfrw nfrw nbt i҆mꜣḫ""" # self.assertEqual(test_result_string, comparison_string) def test_egyptian_transliterate_mdc_to_unicode_q_kopf_False(self): """ test to transliterate mdc to unicode for ancient egyptian texts. 
q_kopf option False """ # mdc_string = """ink Smsw Sms nb=f bAk n ipt nswt irt pat wrt <Hswt> Hmt [nswt] snwsrt m Xnm-swt sAt nswt imn-m-HAt m qA-nfrw nfrw nbt imAx""" # test_result_string = mdc_unicode(mdc_string, q_kopf=False) # comparison_string = """i҆nk šmsw šms nb⸗f bꜣk n i҆pt nswt i҆rt pꜥt wrt 〈ḥswt〉 ḥmt [nswt] snwsrt m ẖnm-swt sꜣt nswt i҆mn-m-ḥꜣt m ḳꜣ-nfrw nfrw nbt i҆mꜣḫ""" # self.assertEqual(test_result_string, comparison_string) def test_expand_iota_subscript(self): """Test subscript expander.""" unexpanded = 'εἰ δὲ καὶ τῷ ἡγεμόνι πιστεύσομεν ὃν ἂν Κῦρος διδῷ' expanded = expand_iota_subscript(unexpanded) target = 'εἰ δὲ καὶ τῶΙ ἡγεμόνι πιστεύσομεν ὃν ἂν Κῦρος διδῶΙ' self.assertEqual(expanded, target) def test_expand_iota_subscript_lower(self): """Test subscript expander.""" unexpanded = 'εἰ δὲ καὶ τῷ ἡγεμόνι πιστεύσομεν ὃν ἂν Κῦρος διδῷ' expanded = expand_iota_subscript(unexpanded, lowercase=True) target = 'εἰ δὲ καὶ τῶι ἡγεμόνι πιστεύσομεν ὃν ἂν κῦρος διδῶι' self.assertEqual(expanded, target) # def test_filter_non_greek(self): """ Test filter non greek characters in a mixed string. """ test_input_string = "[Ἑκα]τόμανδ[ρος Αἰσχ]ρίωνος ⋮ Ἀρ[ιστείδη..c5..]" # PH247029, line 2 comparison_string = "Ἑκατμανδρος Αἰσχρωνος Ἀριστεδη" test_result_string = filter_non_greek(test_input_string) # self.assertEqual(test_result_string, comparison_string) def test_normalize(self): """ Test french normalizer """ text = "viw" normalized = normalize_fr(text) target = ['vieux'] self.assertEqual(normalized, target) def test_normalize_middle_english(self): """Tests ME normalizer""" in_test = "'Madame,' quod he, 'reule me As ȝ,e ly:k?eþ best.'" target = "'madame' quod he 'reule me as ye lyketh best'" test = normalize_middle_english(in_test) self.assertEqual(target, test) class TestFilteredCorpus(unittest.TestCase): """Test the Latin Library corpus reader filter""" @classmethod def setUpClass(self): try: corpus_importer = CorpusImporter('latin') corpus_importer.import_corpus('latin_models_cltk') corpus_importer.import_corpus('latin_text_latin_library') except: raise Exception('Failure to download test corpus') self.reader = get_corpus_reader(language='latin', corpus_name='latin_text_latin_library') self.reader._fileids = ['pervig.txt'] # Need a additional instance because tests below change internals #TO-DO Fix self.reader_2 = get_corpus_reader(language='latin', corpus_name='latin_text_latin_library') self.reader_3 = get_corpus_reader(language='latin', corpus_name='latin_text_latin_library') self.reader_4 = get_corpus_reader(language='latin', corpus_name='latin_text_latin_library') def test_import_latin_library_corpus_filter_by_file(self): """Test the Latin Library corpus reader filter by files.""" filtered_reader = assemble_corpus(self.reader_2, types_requested=['old'], type_files=corpus_texts_by_type) self.assertTrue(len(list(filtered_reader.fileids())) > 0) def test_import_latin_library_corpus_filter_by_dir(self): """Test the Latin Library corpus reader filter by directories.""" filtered_reader = assemble_corpus(self.reader_3, types_requested=['old'], type_dirs=corpus_directories_by_type) self.assertTrue(len(list(filtered_reader.fileids())) > 0) def test_import_latin_library_corpus_filter_by_file_and_dir(self): """Test the Latin Library corpus reader filter by directories.""" filtered_reader = assemble_corpus(self.reader_4, types_requested=['old'], type_dirs=corpus_directories_by_type, type_files=corpus_texts_by_type) self.assertTrue(len(list(filtered_reader.fileids())) > 0) def 
test_filtered_corpus_reader_sents(self): """Test filtered corpus sents method.""" sents = self.reader.sents() uniq_words = distinct_words(sents) # Curious—why the original test checked for two different words? if 'Library' in uniq_words: self.fail('Filtered word present!') # You can check for uniq_words because it implies that sents had content self.assertTrue(uniq_words) def test_filtered_corpus_reader_paras(self): """Test filtered corpus paras method.""" paras = self.reader.paras() sents = [sent for para in paras for sent in para] uniq_words = distinct_words(sents) if 'Library' in uniq_words: self.fail('Filtered word present!') self.assertTrue(uniq_words) def test_filtered_corpus_reader_words(self): """Test filtered corpus words method.""" words = self.reader.words() uniq_words = distinct_words(words) if 'Library' in uniq_words: self.fail('Filtered word present!') self.assertTrue(uniq_words) def test_filtered_corpus_reader_docs(self): """Test filtered corpus docs method.""" docs = list(self.reader.docs()) uniq_words = distinct_words(docs) if 'Library' in uniq_words: self.fail('Filtered word present!') self.assertTrue(len(docs) > 0) problem_files = ['caesar/bc3.txt', 'hymni.txt', 'varro.frag.txt', 'varro.ll10.txt', 'varro.ll5.txt', 'varro.ll6.txt', 'varro.ll7.txt', 'varro.ll8.txt', 'varro.ll9.txt'] for filename in problem_files: doc = list(self.reader.docs([filename])) assert(doc) assert(len(doc[0]) > 100) def test_filtered_corpus_reader_sizes(self): """Test filtered corpus sizes method.""" self.assertTrue(len(list(self.reader.sizes())) > 0) class TestUnicode(unittest.TestCase): "Test py23char" def test_py23char(self): self.assertEqual(py23char(0x92D), 'भ') self.assertFalse(py23char(0x93D) == 'भ') class TestTransliteration(unittest.TestCase): "Test the transliteration in corpus.sanskrit" def test_Indicization(self): # Test ItransTransliterator - Convert from Itrans to Devanagari x = ItransTransliterator.from_itrans('pitL^In', 'hi') y = ItransTransliterator.from_itrans('yogazcittavRttinirodhaH', 'hi') z = ItransTransliterator.from_itrans('yogazcittavRttinirodhaH', 'badVar') self.assertEqual(x, 'पितॣन्') self.assertEqual(y, 'योगश्चित्तव्ऱ्त्तिनिरोधः') self.assertEqual(z, 'yogazcittavRttinirodhaH') def test_ScriptConversion( self): # Test UnicodeIndicTransliterator - Convert between various scripts x = UnicodeIndicTransliterator.transliterate('राजस्थान', "hi", "pa") self.assertEqual(x, 'ਰਾਜਸ੍ਥਾਨ') y = UnicodeIndicTransliterator.transliterate('සිංහල අක්ෂර මාලාව', "si", "hi") self.assertEqual(y, 'सिंहल अक्षर मालाव') z = UnicodeIndicTransliterator.transliterate('सिंहल अक्षर मालाव', "hi", "si") self.assertEqual(z, 'සිංහල අක්ෂර මාලාව') t = UnicodeIndicTransliterator.transliterate('தமிழ் அரிச்சுவடி', 'ta', 'hi') self.assertEqual(t, 'तमिऴ् अरिच्चुवटि') h = UnicodeIndicTransliterator.transliterate('तमिऴ् अरिच्चुवटि', 'hi', 'ta') self.assertEqual(h, 'தமிழ் அரிச்சுவடி') def test_Romanization(self): x = ItransTransliterator.to_itrans('राजस्थान', 'hi') self.assertTrue(x == 'rAjasthAna' or x == 'raajasthaana') x = ItransTransliterator.to_itrans('राजस्थान', 'asdasd') self.assertEqual(x, 'राजस्थान') ml = ItransTransliterator.to_itrans('മല', 'ml') self.assertEqual(ml, 'mala') def test_SinhalaDevanagariTransliterator(self): sin = sdt.devanagari_to_sinhala('राजस्थान') self.assertEqual(sin, 'රාජස්ථාන') dev = sdt.sinhala_to_devanagari('රාජස්ථාන') self.assertEqual(dev, 'राजस्थान') class TestScriptInformation(unittest.TestCase): def test_IsVowel(self): self.assertFalse(is_vowel('क', 'hi')) 
self.assertTrue(is_vowel('अ', 'hi')) def test_IsConsonant(self): self.assertTrue(is_consonant('क', 'hi')) self.assertFalse(is_consonant('अ', 'hi')) def test_IsVelar(self): self.assertTrue(is_velar('क', 'hi')) self.assertFalse(is_velar('अ', 'hi')) def test_IsPalatal(self): self.assertTrue(is_palatal('च', 'hi')) self.assertFalse(is_palatal('त', 'hi')) def test_IsAspirated(self): self.assertTrue(is_aspirated('छ', 'hi')) self.assertFalse(is_aspirated('क', 'hi')) def test_IsUnvoiced(self): self.assertTrue(is_unvoiced('ट', 'hi')) self.assertFalse(is_unvoiced('ग', 'hi')) def test_IsNasal(self): self.assertTrue(is_nasal('ण', 'hi')) self.assertFalse(is_nasal('ड', 'hi')) def test_IsVowelSign(self): self.assertTrue(is_vowel_sign('ा', 'hi')) def test_IsNukta(self): self.assertTrue(is_nukta('़', 'hi')) def test_IsAum(self): self.assertTrue(is_aum('ॐ', 'hi')) def test_IsHalanta(self): self.assertTrue(is_halanta('्', 'hi')) def test_IsRetroflex(self): self.assertTrue(is_retroflex('ट', 'hi')) def test_IsDental(self): self.assertTrue(is_dental('त', 'hi')) def test_IsLabial(self): self.assertTrue(is_labial('प', 'hi')) def test_IsVoiced(self): self.assertTrue(is_voiced('ग', 'hi')) def test_IsUnAspirated(self): self.assertTrue(is_unaspirated('ज', 'hi')) def test_IsFricative(self): self.assertTrue(is_fricative('श', 'hi')) def test_IsApproximant(self): self.assertTrue(is_approximant('य', 'hi')) def test_IsNumber(self): self.assertTrue(is_number('२', 'hi')) def test_offset_to_char(self): self.assertEqual(offset_to_char(0x021, 'hi'), 'ड') def test_in_coordinated_range(self): self.assertTrue(in_coordinated_range(0x6e)) def test_is_indiclang_char(self): self.assertTrue(is_indiclang_char('क', 'hi')) def test_swadesh_greek(self): swadesh = Swadesh('gr') first_word = 'ἐγώ' match = swadesh.words()[0] self.assertEqual(first_word, match) def test_swadesh_latin(self): swadesh = Swadesh('la') first_word = 'ego' match = swadesh.words()[0] self.assertEqual(first_word, match) def test_swadesh_tocharianB(self): swadesh = Swadesh('txb') first_word = 'ñäś' match = swadesh.words()[0] self.assertEqual(first_word, match) def test_swadesh_old_portuguese(self): swadesh = Swadesh('pt_old') first_word = 'eu' match = swadesh.words()[0] self.assertEqual(first_word, match) def test_swadesh_sanskrit(self): swadesh = Swadesh('sa') first_word = 'अहम्' match = swadesh.words()[0] self.assertEqual(first_word, match) def test_swadesh_hindi(self): swadesh = Swadesh('hi') first_word = 'मैं' match = swadesh.words()[0] self.assertEqual(first_word, match) def test_swadesh_old_english(self): swadesh = Swadesh('eng_old') first_word = 'ic, iċċ, ih' match = swadesh.words()[0] self.assertEqual(first_word, match) def test_swadesh_old_norse(self): swadesh = Swadesh('old_norse') first_word = 'ek' match = swadesh.words()[0] self.assertEqual(first_word, match) def test_swadesh_arabic(self): swadesh = Swadesh('ar') first_word = "أنا" match = swadesh.words()[0] self.assertEqual(first_word, match) class TestRunes(unittest.TestCase): def test_rune_alphabet_name(self): self.assertEqual(runes.RunicAlphabetName.elder_futhark.value, "elder_futhark") def test_rune_definition(self): haglaz = runes.Rune(runes.RunicAlphabetName.elder_futhark, "\u16BA", "h", "h", "haglaz") self.assertEqual(haglaz.form, "ᚺ") def test_runic_transcription_definition(self): inscription = "ᚦᛁᛅᚴᚾ᛫ᛅᚢᚴ᛫ᚴᚢᚾᛅᚱ᛫ᚱᛅᛁᛋᛏᚢ᛫ᛋᛏᛅᛁᚾᛅ ᛅᚠᛏᛁᛦ᛫ᚢᛅᚱ᛫ᛒᚱᚢᚦᚢᚱ᛫ᛋᛁᚾ" transcription = runes.Transcriber.transcribe(inscription, runes.YOUNGER_FUTHARK) self.assertEqual(transcription, 
"þiakn᛫auk᛫kunar᛫raistu᛫staina᛫aftiR᛫uar᛫bruþur᛫sin") if __name__ == '__main__': unittest.main()
{ "content_hash": "8e38b40618ea89cbc1af17dd16a5f7bf", "timestamp": "", "source": "github", "line_count": 982, "max_line_length": 195, "avg_line_length": 43.32892057026476, "alnum_prop": 0.6335049002326729, "repo_name": "TylerKirby/cltk", "id": "a9147fe8d93c72fbf62b122545f479ad20e146e0", "size": "44094", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cltk/tests/test_corpus/test_corpus.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "4825" }, { "name": "Python", "bytes": "2987044" } ], "symlink_target": "" }
from django.core.exceptions import ImproperlyConfigured from django.contrib.gis.db.models.fields import GeometryField as django_GeometryField from django.contrib.gis.geos import Polygon from rest_framework.serializers import ModelSerializer, ListSerializer, LIST_SERIALIZER_KWARGS from rest_framework.utils.field_mapping import ClassLookupDict try: from collections import OrderedDict # python 2.6 except ImportError: from ordereddict import OrderedDict from .fields import GeometryField, GeometrySerializerMethodField # map drf-gis GeometryField to GeoDjango Geometry Field try: _geo_field_mapping = ModelSerializer._field_mapping.mapping except AttributeError: _geo_field_mapping = ModelSerializer.serializer_field_mapping _geo_field_mapping.update({ django_GeometryField: GeometryField }) class GeoModelSerializer(ModelSerializer): """ A subclass of DFR ModelSerializer that adds support for GeoDjango fields to be serialized as GeoJSON compatible data """ _field_mapping = ClassLookupDict(_geo_field_mapping) class GeoFeatureModelListSerializer(ListSerializer): @property def data(self): return super(ListSerializer, self).data def to_representation(self, data): """ Add GeoJSON compatible formatting to a serialized queryset list """ return OrderedDict(( ("type", "FeatureCollection"), ("features", super(GeoFeatureModelListSerializer, self).to_representation(data)) )) class GeoFeatureModelSerializer(GeoModelSerializer): """ A subclass of GeoModelSerializer that outputs geojson-ready data as features and feature collections """ @classmethod def many_init(cls, *args, **kwargs): child_serializer = cls(*args, **kwargs) list_kwargs = {'child': child_serializer} list_kwargs.update(dict([ (key, value) for key, value in kwargs.items() if key in LIST_SERIALIZER_KWARGS ])) meta = getattr(cls, 'Meta', None) list_serializer_class = getattr(meta, 'list_serializer_class', GeoFeatureModelListSerializer) return list_serializer_class(*args, **list_kwargs) def __init__(self, *args, **kwargs): super(GeoFeatureModelSerializer, self).__init__(*args, **kwargs) self.Meta.id_field = getattr(self.Meta, 'id_field', self.Meta.model._meta.pk.name) if self.Meta.geo_field is None: raise ImproperlyConfigured("You must define a 'geo_field'.") def check_excludes(field_name, field_role): """make sure the field is not excluded""" if hasattr(self.Meta, 'exclude'): if field_name in self.Meta.exclude: raise ImproperlyConfigured("You cannot exclude your '{0}'.".format(field_role)) def add_to_fields(field_name): """Make sure the field is included in the fields""" if hasattr(self.Meta, 'fields'): if field_name not in self.Meta.fields: if type(self.Meta.fields) is tuple: additional_fields = (field_name,) else: additional_fields = [field_name] self.Meta.fields += additional_fields check_excludes(self.Meta.geo_field, 'geo_field') add_to_fields(self.Meta.geo_field) self.Meta.bbox_geo_field = getattr(self.Meta, 'bbox_geo_field', None) if self.Meta.bbox_geo_field: check_excludes(self.Meta.bbox_geo_field, 'bbox_geo_field') add_to_fields(self.Meta.bbox_geo_field) self.Meta.auto_bbox = getattr(self.Meta, 'auto_bbox', False) if self.Meta.bbox_geo_field and self.Meta.auto_bbox: raise ImproperlyConfigured( "You must eiher define a 'bbox_geo_field' or 'auto_bbox', but you can not set both" ) def to_representation(self, instance): """ Serialize objects -> primitives. 
""" ret = OrderedDict() fields = [field for field in self.fields.values() if not field.write_only] # geo structure if self.Meta.id_field is not False: ret["id"] = "" ret["type"] = "Feature" ret["geometry"] = {} ret["properties"] = OrderedDict() if self.Meta.bbox_geo_field or self.Meta.auto_bbox: ret["bbox"] = None for field in fields: field_name = field.field_name if field.read_only and instance is None: continue value = field.get_attribute(instance) value_repr = None if value: if field_name == self.Meta.bbox_geo_field: # check for GEOSGeometry specfifc properties to generate the extent # of the geometry. if hasattr(value, 'extent'): value_repr = value.extent else: value_repr = field.to_representation(value) if self.Meta.id_field is not False and field_name == self.Meta.id_field: ret["id"] = value_repr elif field_name == self.Meta.geo_field: ret["geometry"] = value_repr if self.Meta.auto_bbox and value: ret['bbox'] = value.extent elif field_name == self.Meta.bbox_geo_field: ret["bbox"] = value_repr elif not getattr(field, 'write_only', False): ret["properties"][field_name] = value_repr return ret def to_internal_value(self, data): """ Override the parent method to first remove the GeoJSON formatting """ def make_unformated_data(feature): _dict = feature["properties"] if 'geometry' in feature: geom = {self.Meta.geo_field: feature["geometry"]} _dict.update(geom) if self.Meta.bbox_geo_field and 'bbox' in feature: # build a polygon from the bbox _dict.update({self.Meta.bbox_geo_field: Polygon.from_bbox(feature['bbox'])}) return _dict if 'features' in data: _unformatted_data = [] features = data['features'] for feature in features: _unformatted_data.append(make_unformated_data(feature)) elif 'properties' in data: _unformatted_data = make_unformated_data(data) else: _unformatted_data = data return super(GeoFeatureModelSerializer, self).to_internal_value(_unformatted_data)
{ "content_hash": "679dd10bcd8dc9d88682493c2b085af9", "timestamp": "", "source": "github", "line_count": 173, "max_line_length": 101, "avg_line_length": 38.40462427745665, "alnum_prop": 0.6055087296809151, "repo_name": "pglotov/django-rest-framework-gis", "id": "9e463c2130f9f43b0efe4dd0f2841840c43c74b8", "size": "6644", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rest_framework_gis/serializers.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "74207" } ], "symlink_target": "" }
import psycopg2
import time

from catdb.postgres import Postgres
from boto.s3.connection import S3Connection


class Redshift(Postgres):
    def __init__(self, params):
        super(Redshift, self).__init__(params)

    def export_to_file(self, filename, table=None, schema=None, delimiter='|', null_value='\\N'):
        """Export a table by UNLOADing it to S3 and downloading the result.

        TODO: Support explicit export to S3

        :param filename: local path the exported data is written to
        :param table: name of the table to export
        :param schema: schema the table belongs to
        :param delimiter: column delimiter (not yet passed to UNLOAD)
        :param null_value: NULL placeholder string (not yet passed to UNLOAD)
        :return: None
        """
        aws_config = self._params['aws']
        key = aws_config['access_key_id']
        secret = aws_config['secret_access_key']
        bucket_name = aws_config['temp_bucket']
        prefix = aws_config['temp_prefix']

        s3_conn = S3Connection(key, secret)
        bucket = s3_conn.get_bucket(bucket_name)

        # Timestamp-based prefix so concurrent exports do not collide.
        temp_file_prefix = 'catdb_{ts}'.format(ts=int(time.time() * 1000000))

        s3_path_prefix = 's3://{bucket}/{prefix}/{file}'.format(
            bucket=bucket_name,
            prefix=prefix,
            file=temp_file_prefix
        )
        # UNLOAD with PARALLEL OFF writes a single file suffixed with '000'.
        s3_file = temp_file_prefix + '000'

        with psycopg2.connect(**self._get_connect_params()) as conn:
            with conn.cursor() as cursor:
                cursor.execute(
                    """
                    UNLOAD ('SELECT * FROM {schema}.{table}')
                    TO '{filename}'
                    CREDENTIALS 'aws_access_key_id={aws_key};aws_secret_access_key={aws_secret}'
                    PARALLEL OFF
                    """.format(schema=schema, table=table, filename=s3_path_prefix,
                               aws_key=key, aws_secret=secret))

        s3_key = bucket.get_key('{prefix}/{file}'.format(prefix=prefix, file=s3_file))
        s3_key.get_contents_to_filename(filename)
        s3_key.delete()
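
# Example usage (a sketch). The 'aws' keys match what export_to_file() reads
# above; the top-level connection keys are assumptions for illustration only,
# see catdb.postgres.Postgres for the names it actually expects.
if __name__ == '__main__':
    params = {
        'hostname': 'example-cluster.redshift.amazonaws.com',  # assumed key names
        'port': 5439,
        'database': 'analytics',
        'username': 'analyst',
        'password': 'secret',
        'aws': {
            'access_key_id': 'AKIAEXAMPLE',
            'secret_access_key': 'EXAMPLESECRET',
            'temp_bucket': 'my-temp-bucket',
            'temp_prefix': 'catdb-unloads',
        },
    }
    Redshift(params).export_to_file('/tmp/events.psv', table='events', schema='public')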
{ "content_hash": "2116bcd0c3ee34deaf209fcdf7c58294", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 116, "avg_line_length": 36.326530612244895, "alnum_prop": 0.5657303370786517, "repo_name": "chimpler/catdb", "id": "34ec53c27601d50e506dbff7c56326cdeb15a50c", "size": "1780", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "catdb/redshift.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "24090" } ], "symlink_target": "" }
""" Revision ID: 0234_ft_billing_postage Revises: 0233_updated_first_class_dates Create Date: 2018-09-28 14:43:26.100884 """ import sqlalchemy as sa from alembic import op revision = "0234_ft_billing_postage" down_revision = "0233_updated_first_class_dates" def upgrade(): op.add_column("ft_billing", sa.Column("postage", sa.String(), nullable=True)) op.execute("UPDATE ft_billing SET postage = (CASE WHEN notification_type = 'letter' THEN 'second' ELSE 'none' END)") def downgrade(): op.drop_column("ft_billing", "postage")
{ "content_hash": "e743cea65e346c71367724883d57540f", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 120, "avg_line_length": 25.904761904761905, "alnum_prop": 0.7132352941176471, "repo_name": "alphagov/notifications-api", "id": "447c39a0463a6f4754608e712aa7a52f029a84f3", "size": "544", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "migrations/versions/0234_ft_billing_postage.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "719" }, { "name": "Jinja", "bytes": "5543" }, { "name": "Makefile", "bytes": "6627" }, { "name": "Mako", "bytes": "361" }, { "name": "Procfile", "bytes": "35" }, { "name": "Python", "bytes": "3506225" }, { "name": "Shell", "bytes": "13179" } ], "symlink_target": "" }
from CIM15.IEC61970.LoadModel.SeasonDayTypeSchedule import SeasonDayTypeSchedule

class ConformLoadSchedule(SeasonDayTypeSchedule):
    """A curve of load versus time (X-axis) showing the active power values (Y1-axis) and reactive power (Y2-axis) for each unit of the period covered. This curve represents a typical pattern of load over the time period for a given day type and season.
    """

    def __init__(self, ConformLoadGroup=None, *args, **kw_args):
        """Initialises a new 'ConformLoadSchedule' instance.

        @param ConformLoadGroup: The ConformLoadGroup where the ConformLoadSchedule belongs.
        """
        self._ConformLoadGroup = None
        self.ConformLoadGroup = ConformLoadGroup

        super(ConformLoadSchedule, self).__init__(*args, **kw_args)

    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["ConformLoadGroup"]
    _many_refs = []

    def getConformLoadGroup(self):
        """The ConformLoadGroup where the ConformLoadSchedule belongs.
        """
        return self._ConformLoadGroup

    def setConformLoadGroup(self, value):
        # Detach from the previous group, if any, then attach to the new one,
        # keeping the group's ConformLoadSchedules back-reference consistent.
        if self._ConformLoadGroup is not None:
            filtered = [x for x in self.ConformLoadGroup.ConformLoadSchedules if x != self]
            self._ConformLoadGroup._ConformLoadSchedules = filtered

        self._ConformLoadGroup = value
        if self._ConformLoadGroup is not None:
            if self not in self._ConformLoadGroup._ConformLoadSchedules:
                self._ConformLoadGroup._ConformLoadSchedules.append(self)

    ConformLoadGroup = property(getConformLoadGroup, setConformLoadGroup)
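
# Example (a sketch): the property setter above keeps the group's
# ConformLoadSchedules back-reference in sync. ConformLoadGroup is assumed to
# be importable from CIM15.IEC61970.LoadModel.ConformLoadGroup, mirroring the
# layout of this module.
if __name__ == '__main__':
    from CIM15.IEC61970.LoadModel.ConformLoadGroup import ConformLoadGroup

    group = ConformLoadGroup()
    schedule = ConformLoadSchedule(ConformLoadGroup=group)
    assert schedule in group.ConformLoadSchedules      # attached on assignment

    schedule.ConformLoadGroup = None
    assert schedule not in group.ConformLoadSchedules  # detached cleanly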
{ "content_hash": "537a28460763169e3ff54b0911c02585", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 501, "avg_line_length": 46.85, "alnum_prop": 0.7017075773745998, "repo_name": "rwl/PyCIM", "id": "e9773f039c45c24b300c6069ff849b46a2604b7a", "size": "2974", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "CIM15/IEC61970/LoadModel/ConformLoadSchedule.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7420564" } ], "symlink_target": "" }
from lxml import etree import webob from nova.api.openstack.compute.contrib import quota_classes from nova.api.openstack import wsgi from nova import test from nova.tests.api.openstack import fakes def quota_set(class_name): return {'quota_class_set': {'id': class_name, 'metadata_items': 128, 'volumes': 10, 'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'instances': 10, 'injected_files': 5, 'cores': 20, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100, 'injected_file_path_bytes': 255}} class QuotaClassSetsTest(test.TestCase): def setUp(self): super(QuotaClassSetsTest, self).setUp() self.controller = quota_classes.QuotaClassSetsController() def test_format_quota_set(self): raw_quota_set = { 'instances': 10, 'cores': 20, 'ram': 51200, 'volumes': 10, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'gigabytes': 1000, 'injected_files': 5, 'injected_file_path_bytes': 255, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100, } quota_set = self.controller._format_quota_set('test_class', raw_quota_set) qs = quota_set['quota_class_set'] self.assertEqual(qs['id'], 'test_class') self.assertEqual(qs['instances'], 10) self.assertEqual(qs['cores'], 20) self.assertEqual(qs['ram'], 51200) self.assertEqual(qs['volumes'], 10) self.assertEqual(qs['gigabytes'], 1000) self.assertEqual(qs['floating_ips'], 10) self.assertEqual(qs['fixed_ips'], -1) self.assertEqual(qs['metadata_items'], 128) self.assertEqual(qs['injected_files'], 5) self.assertEqual(qs['injected_file_path_bytes'], 255) self.assertEqual(qs['injected_file_content_bytes'], 10240) self.assertEqual(qs['security_groups'], 10) self.assertEqual(qs['security_group_rules'], 20) self.assertEqual(qs['key_pairs'], 100) def test_quotas_show_as_admin(self): req = fakes.HTTPRequest.blank( '/v2/fake4/os-quota-class-sets/test_class', use_admin_context=True) res_dict = self.controller.show(req, 'test_class') self.assertEqual(res_dict, quota_set('test_class')) def test_quotas_show_as_unauthorized_user(self): req = fakes.HTTPRequest.blank( '/v2/fake4/os-quota-class-sets/test_class') self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, req, 'test_class') def test_quotas_update_as_admin(self): body = {'quota_class_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'volumes': 10, 'gigabytes': 1000, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100, }} req = fakes.HTTPRequest.blank( '/v2/fake4/os-quota-class-sets/test_class', use_admin_context=True) res_dict = self.controller.update(req, 'test_class', body) self.assertEqual(res_dict, body) def test_quotas_update_as_user(self): body = {'quota_class_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'volumes': 10, 'gigabytes': 1000, 'floating_ips': 10, 'metadata_items': 128, 'injected_files': 5, 'fixed_ips': -1, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100, }} req = fakes.HTTPRequest.blank( '/v2/fake4/os-quota-class-sets/test_class') self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, req, 'test_class', body) class QuotaTemplateXMLSerializerTest(test.TestCase): def setUp(self): super(QuotaTemplateXMLSerializerTest, self).setUp() self.serializer = quota_classes.QuotaClassTemplate() self.deserializer = wsgi.XMLDeserializer() def 
test_serializer(self): exemplar = dict(quota_class_set=dict( id='test_class', metadata_items=10, injected_file_path_bytes=255, injected_file_content_bytes=20, volumes=30, gigabytes=40, ram=50, floating_ips=60, fixed_ips=-1, instances=70, injected_files=80, security_groups=10, security_group_rules=20, key_pairs=100, cores=90)) text = self.serializer.serialize(exemplar) print text tree = etree.fromstring(text) self.assertEqual('quota_class_set', tree.tag) self.assertEqual('test_class', tree.get('id')) self.assertEqual(len(exemplar['quota_class_set']) - 1, len(tree)) for child in tree: self.assertTrue(child.tag in exemplar['quota_class_set']) self.assertEqual(int(child.text), exemplar['quota_class_set'][child.tag]) def test_deserializer(self): exemplar = dict(quota_class_set=dict( metadata_items='10', injected_file_content_bytes='20', volumes='30', gigabytes='40', ram='50', floating_ips='60', fixed_ips='-1', instances='70', injected_files='80', security_groups='10', security_group_rules='20', key_pairs='100', cores='90')) intext = ("<?xml version='1.0' encoding='UTF-8'?>\n" '<quota_class_set>' '<metadata_items>10</metadata_items>' '<injected_file_content_bytes>20' '</injected_file_content_bytes>' '<volumes>30</volumes>' '<gigabytes>40</gigabytes>' '<ram>50</ram>' '<floating_ips>60</floating_ips>' '<fixed_ips>-1</fixed_ips>' '<instances>70</instances>' '<injected_files>80</injected_files>' '<cores>90</cores>' '<security_groups>10</security_groups>' '<security_group_rules>20</security_group_rules>' '<key_pairs>100</key_pairs>' '</quota_class_set>') result = self.deserializer.deserialize(intext)['body'] self.assertEqual(result, exemplar)
{ "content_hash": "724cec3ce031bbe8ed8efdb11cc174aa", "timestamp": "", "source": "github", "line_count": 186, "max_line_length": 79, "avg_line_length": 40.62903225806452, "alnum_prop": 0.5028450443297605, "repo_name": "paulmathews/nova", "id": "f0ade4f90c726e4d6dd27095457fda2f0aee4f1b", "size": "8187", "binary": false, "copies": "1", "ref": "refs/heads/stable/folsom", "path": "nova/tests/api/openstack/compute/contrib/test_quota_classes.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "16002" }, { "name": "JavaScript", "bytes": "7403" }, { "name": "Python", "bytes": "7293434" }, { "name": "Shell", "bytes": "16910" } ], "symlink_target": "" }
""" The FilterScheduler is for creating instances locally. You can customize this scheduler by specifying your own Host Filters and Weighing Functions. """ import random from oslo_config import cfg from oslo_log import log as logging from six.moves import range from nova import exception from nova.i18n import _ from nova import objects from nova import rpc from nova.scheduler import driver from nova.scheduler import scheduler_options CONF = cfg.CONF LOG = logging.getLogger(__name__) filter_scheduler_opts = [ cfg.IntOpt('scheduler_host_subset_size', default=1, help='New instances will be scheduled on a host chosen ' 'randomly from a subset of the N best hosts. This ' 'property defines the subset size that a host is ' 'chosen from. A value of 1 chooses the ' 'first host returned by the weighing functions. ' 'This value must be at least 1. Any value less than 1 ' 'will be ignored, and 1 will be used instead') ] CONF.register_opts(filter_scheduler_opts) class FilterScheduler(driver.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.options = scheduler_options.SchedulerOptions() self.notifier = rpc.get_notifier('scheduler') def select_destinations(self, context, request_spec, filter_properties): """Selects a filtered set of hosts and nodes.""" self.notifier.info(context, 'scheduler.select_destinations.start', dict(request_spec=request_spec)) num_instances = request_spec['num_instances'] selected_hosts = self._schedule(context, request_spec, filter_properties) # Couldn't fulfill the request_spec if len(selected_hosts) < num_instances: # NOTE(Rui Chen): If multiple creates failed, set the updated time # of selected HostState to None so that these HostStates are # refreshed according to database in next schedule, and release # the resource consumed by instance in the process of selecting # host. for host in selected_hosts: host.obj.updated = None # Log the details but don't put those into the reason since # we don't want to give away too much information about our # actual environment. LOG.debug('There are %(hosts)d hosts available but ' '%(num_instances)d instances requested to build.', {'hosts': len(selected_hosts), 'num_instances': num_instances}) reason = _('There are not enough hosts available.') raise exception.NoValidHost(reason=reason) dests = [dict(host=host.obj.host, nodename=host.obj.nodename, limits=host.obj.limits) for host in selected_hosts] self.notifier.info(context, 'scheduler.select_destinations.end', dict(request_spec=request_spec)) return dests def _get_configuration_options(self): """Fetch options dictionary. Broken out for testing.""" return self.options.get_configuration() def _schedule(self, context, request_spec, filter_properties, more=True): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ elevated = context.elevated() instance_properties = request_spec['instance_properties'] # NOTE(danms): Instance here is still a dict, which is converted from # an object. The pci_requests are a dict as well. Convert this when # we get an object all the way to this path. 
# TODO(sbauza): Will be fixed later by the RequestSpec object pci_requests = instance_properties.get('pci_requests') if pci_requests: pci_requests = ( objects.InstancePCIRequests.from_request_spec_instance_props( pci_requests)) instance_properties['pci_requests'] = pci_requests instance_type = request_spec.get("instance_type", None) update_group_hosts = filter_properties.get('group_updated', False) config_options = self._get_configuration_options() filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'instance_type': instance_type}) # Find our local list of acceptable hosts by repeatedly # filtering and weighing our options. Each time we choose a # host, we virtually consume resources on it so subsequent # selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. This can bite you if the hosts # are being scanned in a filter or weighing function. hosts = self._get_all_host_states(elevated) max_loops = 2 selected_hosts = [] num_instances = request_spec.get('num_instances', 1) loop_count = 0 if more: max_loops = 2 * num_instances while loop_count < max_loops: loop_count += 1 for num in range(num_instances): # Filter local hosts based on requirements ... print "calling get_filtered_hosts with hosts = %s" % hosts filtered_hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties, index=num) print "filtered_hosts = %s" % filtered_hosts if not filtered_hosts: if more: hosts = self._get_all_host_states(elevated, more_hosts=1) continue # Can't get any more locally. break hosts = filtered_hosts print "filtered_hosts = %s" % filtered_hosts LOG.debug("Filtered %(hosts)s", {'hosts': hosts}) weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts}) scheduler_host_subset_size = CONF.scheduler_host_subset_size if scheduler_host_subset_size > len(weighed_hosts): scheduler_host_subset_size = len(weighed_hosts) if scheduler_host_subset_size < 1: scheduler_host_subset_size = 1 chosen_host = random.choice( weighed_hosts[0:scheduler_host_subset_size]) LOG.debug("Selected host: %(host)s", {'host': chosen_host}) selected_hosts.append(chosen_host) num_instances -= 1 # Now consume the resources so the filter/weights # will change for the next instance. chosen_host.obj.consume_from_instance(instance_properties) if update_group_hosts is True: # NOTE(sbauza): Group details are serialized into a list now # that they are populated by the conductor, we need to # deserialize them if isinstance(filter_properties['group_hosts'], list): filter_properties['group_hosts'] = set( filter_properties['group_hosts']) filter_properties['group_hosts'].add(chosen_host.obj.host) # FIXME Add code to ask balancer if all selected_hosts are free to be used by OpenStack return selected_hosts def _get_all_host_states(self, context, more_hosts=0): """Template method, so a subclass can implement caching.""" return self.host_manager.get_all_host_states(context, more_hosts=more_hosts)
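
# Standalone illustration (not called by the scheduler above) of the host
# subset policy used in _schedule(): clamp the configured subset size into
# [1, len(weighed_hosts)] and pick randomly among the best-weighed hosts.
# It assumes at least one weighed host, as the filtering step guarantees.
def _illustrate_subset_choice(weighed_hosts, subset_size):
    if subset_size > len(weighed_hosts):
        subset_size = len(weighed_hosts)
    if subset_size < 1:
        subset_size = 1
    return random.choice(weighed_hosts[0:subset_size])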
{ "content_hash": "4b6cc43c5922d69dc700889e963e11e3", "timestamp": "", "source": "github", "line_count": 188, "max_line_length": 95, "avg_line_length": 43.41489361702128, "alnum_prop": 0.5891938250428816, "repo_name": "Francis-Liu/animated-broccoli", "id": "4f833cd07108fad99f3f9d3e44a962f3c2edad46", "size": "8802", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nova/scheduler/filter_scheduler.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "16961288" }, { "name": "Shell", "bytes": "20716" }, { "name": "Smarty", "bytes": "351433" } ], "symlink_target": "" }
""" Django settings for test_project project. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '+ln!mmswhbemdn@*v8sbic_n+i&j4+ct8(n=y09s81c)7fyyf2' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( ) MIDDLEWARE_CLASSES = ( ) ROOT_URLCONF = 'test_app.urls' WSGI_APPLICATION = 'test_project.wsgi.application' # Database # https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:', } } # Internationalization # https://docs.djangoproject.com/en/dev/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/dev/howto/static-files/ STATIC_URL = '/static/'
{ "content_hash": "dd33c0a36997d32a171ab8f69cce95cb", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 71, "avg_line_length": 20.442857142857143, "alnum_prop": 0.7148846960167715, "repo_name": "ionelmc/django-uwsgi-cache", "id": "d86cc74e7ac1ed2392c31d4c416ae200c64a9969", "size": "1431", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_project/vanilla.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "15553" } ], "symlink_target": "" }
import os import os.path import re import codecs from setuptools import setup, find_packages NAME = "versioned" DESCRIPTION = "Versioned data format with CRDT capabilities " CLASSIFIERS = """ Programming Language :: Python """.strip().split("\n") AUTHOR = "Eugene Chernyshov" EMAIL = "chernyshov.eugene@gmail.com" URL = "https://github.com/Evgenus/versioned-data" KEYWORDS = "" here = os.path.abspath(os.path.dirname(__file__)) with codecs.open(os.path.join(here, NAME, '__init__.py'), 'r') as fp: try: VERSION = re.findall(r"^__version__ = '([^']+)'$", fp.read(), re.M)[0] except IndexError: raise RuntimeError('Unable to determine version.') README = open(os.path.join(here, 'README.md')).read() CHANGES = open(os.path.join(here, 'CHANGES.md')).read() LICENSE = open(os.path.join(here, 'LICENSE')).read() TODO = open(os.path.join(here, 'TODO')).read() requires = open(os.path.join(here, 'requirements.txt')).readlines() setup(name=NAME, version=VERSION, description=DESCRIPTION, long_description=README + '\n\n' + CHANGES + '\n\n' + TODO, classifiers=CLASSIFIERS, author=AUTHOR, author_email=EMAIL, url=URL, license=LICENSE, keywords=KEYWORDS, packages=find_packages(), include_package_data=True, zip_safe=False, install_requires=requires, tests_require=requires, test_suite=NAME+".tests", )
{ "content_hash": "d1056d24da7a1cf69d03c3ea9af294c2", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 78, "avg_line_length": 28.46, "alnum_prop": 0.6535488404778637, "repo_name": "Evgenus/versioned-data", "id": "873e12b5c19171f7f9c0cc99ec1796c6a2599132", "size": "1423", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "572" }, { "name": "Python", "bytes": "7439" } ], "symlink_target": "" }
import os APPLICATION_MODE_TEST = "test" APPLICATION_MODE_DEVELOPMENT = "development" APPLICATION_MODE_PRODUCTION = "production" APPLICATION_MODE = None def application_mode(): global APPLICATION_MODE if APPLICATION_MODE is None: server_software = os.environ.get("SERVER_SOFTWARE") if server_software is None: APPLICATION_MODE = APPLICATION_MODE_PRODUCTION elif server_software.startswith("Development"): APPLICATION_MODE = APPLICATION_MODE_DEVELOPMENT else: APPLICATION_MODE = APPLICATION_MODE_PRODUCTION print "mode: ", APPLICATION_MODE return APPLICATION_MODE
{ "content_hash": "1be3e9eb16b29e5b292084f9c6ba27bb", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 59, "avg_line_length": 26.16, "alnum_prop": 0.6957186544342507, "repo_name": "augment00/a00_registry", "id": "064bd229b1f9e1b548776058d2771d8e7e69639d", "size": "654", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/utilities/mode.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1155" }, { "name": "HTML", "bytes": "24099" }, { "name": "JavaScript", "bytes": "627" }, { "name": "Python", "bytes": "46112" } ], "symlink_target": "" }
""" Simple command line interface (CLI) for working with Redmine. Relies heavily on the python-redmine package (http://python-redmine.readthedocs.org/). """ import os,sys import logging import argparse from wikitools import DESRedmine from wikitools import __version__ description = __doc__ parser = argparse.ArgumentParser(description=description) parser.add_argument('url',help="URL of the page to edit.") parser.add_argument('input',nargs='+',help="Input file(s), pattern(s), or text.") parser.add_argument('-f','--force',action='store_true', help="Force execution.") parser.add_argument('-s','--section',default='redmine-des', help="") parser.add_argument('-v','--verbose',action='store_true', help="Output verbosity (via logging).") parser.add_argument('--version',action='version',version='%(prog)s '+str(__version__), help="Print version and exit.") parser.add_argument('-q','--quiet',action='store_true', help="Silence output (via logging).") parser.add_argument('-y','--yes',action='store_true', help="Do not ask for confirmation (NOT IMPLEMENTED).") group =parser.add_mutually_exclusive_group(required=True) group.add_argument('--attach',action='store_true', help="Attach a file to a wiki page.") group.add_argument('--create',action='store_true', help="Create a new wiki page (CAUTION).") group.add_argument('--delete',action='store_true', help="Delete a wiki page (CAUTION).") group.add_argument('--detach',action='store_true', help="Remove an attachment from wiki page.") group.add_argument('--download',action='store_true', help="Download attachment from wiki page.") opts = parser.parse_args() logging.basicConfig(stream=sys.stdout,level=logging.INFO) logging.captureWarnings(True) if opts.quiet: logging.getLogger().setLevel(logging.CRITICAL) elif opts.verbose: logging.getLogger().setLevel(logging.DEBUG) ### # Prints requests ### import httplib ### httplib.HTTPConnection.debuglevel = 1 else: logging.getLogger('urllib3').setLevel(logging.WARNING) logging.getLogger('redmine.packages.requests.packages.urllib3').setLevel(logging.WARNING) # Create the interface redmine = DESRedmine(section=opts.section) # Parse the actions if opts.attach: status = redmine.add_attachments(opts.url,opts.input) if opts.create: redmine.create_wiki_page(opts.url,opts.force,text=opts.input) if opts.delete: redmine.delete_wiki_page(opts.url,opts.force) if opts.detach: status = redmine.delete_attachments(opts.url,opts.input,force=opts.force) if opts.download: status = redmine.download_attachments(opts.url,opts.input)
{ "content_hash": "e9e4fb53075188af55e943c418d6c217", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 93, "avg_line_length": 38.763888888888886, "alnum_prop": 0.6804012898602652, "repo_name": "kadrlica/wikitools", "id": "a40441ccc6e4a0e336b5aa42a14d888c69128475", "size": "2813", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wikitools/redmine-cli.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "97596" } ], "symlink_target": "" }
class Build(object): def __init__(self, datetime, commit): self.datetime = datetime self.commit = commit def __repr__(self): return '--datetime=%s --commit=%s' % (repr(self.datetime), repr(self.commit)) def tuple(self): return (self.datetime, self.commit)
{ "content_hash": "bda7018caebb045d192ae37058665995", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 81, "avg_line_length": 28, "alnum_prop": 0.6428571428571429, "repo_name": "alancutter/web-animations-perf-bot", "id": "a959f3a2b1a89771245228ae9dd71f6f9c45bc82", "size": "877", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "build.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "26766" } ], "symlink_target": "" }
import urllib,urllib2 from bs4 import BeautifulSoup import os, time url = 'http://ematch.scatt.com/download/' file_name_source = 'ematch_web.txt' out_folder = 'ematch/' time_delay_sec = 1 file(file_name_source, "w").write(urllib2.urlopen(url).read()) conn = urllib2.urlopen(url) html = conn.read() soup = BeautifulSoup(html) links = soup.find_all('a') #for tag in links: # link = tag.get('href',None) # if link != None: # print link if not os.path.exists(out_folder): os.mkdir(out_folder) #for sk in range(5,len(links)+1): for sk in range(5,len(links)): link = links[sk].get('href',None) if link != None: print sk,link urllib.urlretrieve(url+link, out_folder+link) time.sleep(time_delay_sec)
{ "content_hash": "48dc6294299e523c50426324375194db", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 62, "avg_line_length": 20.88235294117647, "alnum_prop": 0.6929577464788732, "repo_name": "zliobaite/Scatt-analysis", "id": "7ca3dfa74448524b06d5ff73025e0d16234dc1ab", "size": "710", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "3_data_scatt/run_download_scatts1.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "173" }, { "name": "Python", "bytes": "15615" }, { "name": "R", "bytes": "2593" }, { "name": "Visual Basic", "bytes": "11898" } ], "symlink_target": "" }
import os
import sys
import webbrowser
from subprocess import Popen, PIPE
import json

# MacOS Chrome path
chrome_path = 'open -a /Applications/Google\ Chrome.app %s'


def run_this_scpt(scpt, args=[]):
    p = Popen(['osascript', '-'] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate(scpt)
    return stdout


def new_chrome_window():
    """ Runs Apple script to get new chrome window """
    run_this_scpt("""
        tell application "Google Chrome"
            make new window
            activate
        end tell
        """)


def is_file_format(my_file, format):
    if len(my_file.split(".")) >= 2:
        if my_file.split(".")[-1] == format:
            return True
    return False


def do_scan_files(base_path):
    def scan_file(path):
        if is_file_format(path, 'json'):
            try:
                load_json(path)
            except:
                return None
        else:
            return None
        return path

    # Check if path is a single file
    if os.path.isfile(base_path):
        jfile = scan_file(base_path)
        return [jfile]

    files = []
    for item in os.listdir(base_path):
        new_path = base_path + item
        # exclude .git folder
        if item == ".git":
            continue
        if os.path.isfile(new_path):
            jfile = scan_file(new_path)
            if jfile is None:
                continue
            files.append(jfile)
        elif os.path.isdir(new_path):
            files = files + do_scan_files(new_path + "/")
    return files


def load_json(json_file):
    try:
        with open(json_file) as f:
            return json.load(f)
    except Exception as e:
        print (e)
        raise e


if __name__ == '__main__':
    print 'chrometabs', len(sys.argv)
    files = list()
    # Setup files to scan
    if len(sys.argv) != 2:
        if len(sys.argv) == 1:
            json_path = "tabs/"
        else:
            print "Usage: python src/chrometabs.py [path|json]"
            exit(2)
    else:
        json_path = sys.argv[1]

    # Attach absolute path
    path = os.getcwd() + "/"
    json_path = path + json_path
    print json_path
    files = do_scan_files(json_path)

    for f in files:
        try:
            f_json = load_json(f)
        except:
            continue
        if f_json is None:
            continue
        new_chrome_window()
        for name in sorted(f_json, key=f_json.get, reverse=False):
            url = f_json[name]
            print "%s => %s" % (name, url)
            webbrowser.open(url)
    print
{ "content_hash": "6ad00f3c428e35518372d1c8ecd108ef", "timestamp": "", "source": "github", "line_count": 112, "max_line_length": 79, "avg_line_length": 23.276785714285715, "alnum_prop": 0.5247410817031071, "repo_name": "jhvictoria/chrometabs", "id": "ac80d5080f269ef2815c7b3a8c7b2b81e0e35292", "size": "2607", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/chrometabs.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2607" } ], "symlink_target": "" }
from . import views
from django.conf.urls import url

urlpatterns = [
    url(r"^$", views.rdp, name="RDP")
]
{ "content_hash": "d77e072b7ad3a61da82db5dbaf7bb190", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 37, "avg_line_length": 18.333333333333332, "alnum_prop": 0.6545454545454545, "repo_name": "cuckoobox/cuckoo", "id": "0aed796d38270301b1c1e4555d1e71c4ca461a32", "size": "279", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cuckoo/web/rdp/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "9652" }, { "name": "CSS", "bytes": "6810" }, { "name": "DTrace", "bytes": "8609" }, { "name": "HTML", "bytes": "233053" }, { "name": "JavaScript", "bytes": "21397" }, { "name": "Makefile", "bytes": "58" }, { "name": "Mako", "bytes": "1078" }, { "name": "Python", "bytes": "1101334" }, { "name": "Shell", "bytes": "59602" }, { "name": "Visual Basic", "bytes": "1101" } ], "symlink_target": "" }
"""Functional tests using WebTest. See: http://webtest.readthedocs.org/ """ from innovator.public.models import News class TestNews: def test_add_news(self, admin, testapp): # Goes to homepage res = testapp.get('/') # Fills out login form in navbar form = res.forms['loginForm'] form['username'] = admin.username form['password'] = '12345678' # Submits res = form.submit().follow() news_old_count = len(News.query.all()) res = testapp.get('/admin/news/compose-new/') form = res.forms['composeForm'] form['title'] = 'testnews1' form['summary'] = 'testsummary1' form['content'] = 'testcontent1' res = form.submit().follow() news_new_count = len(News.query.all()) assert news_new_count - news_old_count == 1 assert res.status_code == 200 assert 'News posted.' in res def test_delete_news(self, admin, news, testapp): # Goes to homepage res = testapp.get('/') # Fills out login form in navbar form = res.forms['loginForm'] form['username'] = admin.username form['password'] = '12345678' # Submits res = form.submit().follow() news_old_count = len(News.query.all()) # Go to edit page res = testapp.get('/admin/news/').click('unique') form = res.forms['deleteForm'] res = form.submit().follow() assert res.status_code == 200 assert 'News deleted.' in res news_new_count = len(News.query.all()) assert news_old_count - news_new_count == 1 def test_edit_news(self, admin, news, testapp): # Goes to homepage res = testapp.get('/') # Fills out login form in navbar form = res.forms['loginForm'] form['username'] = admin.username form['password'] = '12345678' # Submits res = form.submit().follow() assert len(News.query.filter_by(title='unique').all()) == 1 assert len(News.query.filter_by(title='new title').all()) == 0 # Go to edit page res = testapp.get('/admin/news/').click('unique') form = res.forms['editForm'] form['title'] = 'new title' res = form.submit().follow() assert res.status_code == 200 assert 'News edited.' in res assert len(News.query.filter_by(title='unique').all()) == 0 assert len(News.query.filter_by(title='new title').all()) == 1 def test_news_display(self, admin, news, testapp): res = testapp.get('/news/') assert res.status_code == 200 assert 'unique' in res assert 'summary' in res res = res.click('unique') assert res.status_code == 200 assert 'unique' in res assert 'content' in res assert 'summary' not in res
{ "content_hash": "0bec89b6ce7c512131259a4abecb282f", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 70, "avg_line_length": 35.53086419753087, "alnum_prop": 0.5687977762334955, "repo_name": "B-O-P/innovator", "id": "686b9e0dc91adb9c39c00827eedd91dd92dd20e4", "size": "2902", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_news.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "43496" }, { "name": "Python", "bytes": "116721" } ], "symlink_target": "" }
"""Services API Tests for Version 1.0. This is a testing template for the generated ServicesAPI Class. """ import unittest import requests import secrets from py3canvas.apis.services import ServicesAPI class TestServicesAPI(unittest.TestCase): """Tests for the ServicesAPI.""" def setUp(self): self.client = ServicesAPI(secrets.instance_address, secrets.access_token) def test_get_kaltura_config(self): """Integration test for the ServicesAPI.get_kaltura_config method.""" r = self.client.get_kaltura_config() def test_start_kaltura_session(self): """Integration test for the ServicesAPI.start_kaltura_session method.""" # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration. pass
{ "content_hash": "0c568b9336ad50de3565406869164cf1", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 126, "avg_line_length": 32.64, "alnum_prop": 0.7242647058823529, "repo_name": "tylerclair/py3canvas", "id": "2218ba28eb7dc3cad27039e78ec7eb8d768bd4de", "size": "816", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "py3canvas/tests/services.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1988347" } ], "symlink_target": "" }
from selenium import webdriver from lxml.cssselect import CSSSelector import selenium.webdriver.support.ui as ui import MySQLdb import time class MyDatabase: host = '' user = '' password = '' db = '' def __init__(self): self.connection = MySQLdb.connect(self.host, self.user, self.password, self.db,use_unicode=1,charset="utf8") self.cursor = self.connection.cursor() def insert_row(self, query, params): try: self.cursor.execute(query, params) self.connection.commit() except: self.connection.rollback() def query(self, query): cursor = self.connection.cursor( MySQLdb.cursors.DictCursor ) cursor.execute(query) return cursor.fetchall() def queryp(self, query, params): cursor = self.connection.cursor( MySQLdb.cursors.DictCursor ) cursor.execute(query, params) return cursor.fetchall() def __del__(self): self.connection.close() def get_id_from_table(db, x_id, tablename, url, colname): query = "SELECT "+x_id+" from "+str(tablename)+" where "+colname+"='"+str(url)+"'" x = db.query(query) return x def check_duplicate(db, hname): query = 'SELECT * from ta_hotel WHERE name=%s' params = (hname) x = db.queryp(query,params) if x: return True else: return False def get_hotels(url): br = webdriver.PhantomJS('phantomjs') wait = ui.WebDriverWait(br,9) db = MyDatabase() br.get(url) hotels_url_sel = u'.hotels a' ta_hotels_button = br.find_element_by_css_selector(hotels_url_sel) ta_hotels_button.click() time.sleep(1) destination_id_tupl = get_id_from_table(db, 'destination_id', 'destination', url, 'tripadvisor_url_main') destination_id = destination_id_tupl[0].get('destination_id') #now for the ta_hotel table ta_hotel_listing_sel = u'.listing' ta_hotel_url_sel = u'.property_title' ta_hotel_class_sel = u'.sprite-ratings-gry' next_button_sel = u'.sprite-pageNext' ta_hotel_url = [] ta_hotel_names = [] DUPLICATE_ERR = False x=0 while True: ta_hotel_class = [] ta_hotel_name = [] ta_hotel_listings = br.find_elements_by_css_selector(ta_hotel_listing_sel) for rowx in ta_hotel_listings: ta_hotel_name.append( rowx.find_element_by_css_selector(ta_hotel_url_sel).text ) ta_hotel_names.append( rowx.find_element_by_css_selector(ta_hotel_url_sel).text ) ta_hotel_url.append( rowx.find_element_by_css_selector(ta_hotel_url_sel).get_attribute("href") ) try: ta_hotelx_class = rowx.find_element_by_css_selector(ta_hotel_class_sel) ta_hotel_class.append( ta_hotelx_class.get_attribute("alt") ) except: ta_hotel_class.append( "no rating" ) m=0 for rowx in ta_hotel_listings: #check for duplicates duplicate = check_duplicate(db, ta_hotel_name[m]) if not duplicate: try: params = (None, None, destination_id, ta_hotel_name[m], ta_hotel_class[m], None, None, ta_hotel_url[m]) query = 'INSERT INTO ta_hotel VALUES (%s,%s,%s,%s,%s,%s,%s,%s)' db.insert_row(query, params) except: print "Error: restaurant not added to db" else: #DUPLICATE_ERR = True pass m+=1 #check for more pages of reviews try: ta_more_button = br.find_element_by_css_selector(next_button_sel) ta_more_button.click() time.sleep(5) except: break print "hotel page"+str(x) x+=1 ta_hotel_website_sel = ".test" ta_hotel_address_sel = ".standard_width .format_address" #print ta_hotel_url #if not DUPLICATE_ERR: n=0 for urlx in ta_hotel_url: try: br.get(urlx) time.sleep(4) ta_hotel_address = br.find_element_by_css_selector(ta_hotel_address_sel).text ta_hotel_site_button = br.find_elements_by_css_selector(ta_hotel_website_sel) ta_hotel_site_button[1].click() time.sleep(3) handles = br.window_handles br.switch_to_window(handles[2]); hotel_site_url = br.current_url #print str(hotel_site_url) 
#print str(ta_hotel_address) query = "UPDATE ta_hotel SET website=%s, address=%s WHERE name=%s" params = (hotel_site_url, ta_hotel_address, ta_hotel_names[n]) db.insert_row(query,params) print "updating hotel website + address..." br.close() br.switch_to_window(handles[1]); except: pass n+=1 print "get_hotels() success!" #get_restaurants('http://www.tripadvisor.co.uk/Tourism-g150807-Cancun_Yucatan_Peninsula-Vacations.html') #get_hotels('http://www.tripadvisor.com/Tourism-g150812-Playa_del_Carmen_Yucatan_Peninsula-Vacations.html')
{ "content_hash": "34298554c1a8529779a66f01312f1d1c", "timestamp": "", "source": "github", "line_count": 178, "max_line_length": 110, "avg_line_length": 24.724719101123597, "alnum_prop": 0.6846171324698932, "repo_name": "pablocastelo/tripadvisor_scraping", "id": "2fe74c8eb19e53b449dfb0f2ae82447023166576", "size": "4423", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "get_hotels.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "30571" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.shortcuts import render from rest_framework.response import Response from rest_framework.views import APIView from todo.serializers import TodoSerializer from todo.models import Todo # Create your views here. class TodoView(APIView): """ TodoView: used to handle the incoming requests relating to `todo` items """ def get(self, request): """ Retrieve a complete list of `todo` items from the Todo model, serialize them to JSON and return the serialized todo items """ todo_items = Todo.objects.all() # Serialize the data retrieved from the DB and serialize # them using the `TodoSerializer` serializer = TodoSerializer(todo_items, many=True) # Store the serialized data `serialized_data` serialized_data = serializer.data return Response(serialized_data)
{ "content_hash": "897ef1430931425c9acb6300f5d72766", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 75, "avg_line_length": 32.964285714285715, "alnum_prop": 0.6923076923076923, "repo_name": "GunnerJnr/_CodeInstitute", "id": "f79744a2bc2b32f9331530f531e9a4543b000663", "size": "947", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Stream-3/Full-Stack-Development/21.Django REST Framework/2.Serializers-And-Class-Based-Views/django_todo/todo/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "336" }, { "name": "CSS", "bytes": "2545480" }, { "name": "HTML", "bytes": "708226" }, { "name": "JavaScript", "bytes": "1984479" }, { "name": "Python", "bytes": "1727585" }, { "name": "Shell", "bytes": "75780" }, { "name": "TSQL", "bytes": "642" } ], "symlink_target": "" }
# -*- coding: utf-8 -*-
__author__ = 'Miguel Ángel García Calderón - tonsquemike@outlook.com'

import re
import sys, getopt

# Store input and output file names
ifile = ''
ofile = ''

# Read command line args
myopts, args = getopt.getopt(sys.argv[1:], "i:o:")

###############################
# o == option
# a == argument passed to the o
###############################
for o, a in myopts:
    if o == '-i':
        ifile = a
    elif o == '-o':
        ofile = a
    else:
        print("Usage: %s -i input -o output" % sys.argv[0])

# Display input and output file name passed as the args
print ("Input file : %s and output file: %s" % (ifile, ofile))

with open(ifile, 'r') as myfile:
    data = myfile.read()

print data

# Program Start
# Capture if/while/for statements together with their brace-delimited block;
# the block pattern tolerates one level of nested braces.
p = re.compile(ur'((?:(?:if|while|for)))\s*\(.*?\)\s*({(?:{[^{}]*}|.)*?})', re.DOTALL)
match = p.findall(data)

print match
{ "content_hash": "b396c9606ceced4fee558e308afb595f", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 86, "avg_line_length": 24.41176470588235, "alnum_prop": 0.5566265060240964, "repo_name": "tonsquemike/match-statements", "id": "fb8a21a0423d67570bb1b87604a5d29997b89027", "size": "866", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "match.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "196" }, { "name": "Python", "bytes": "866" } ], "symlink_target": "" }