import os
import os.path
import sys
import ast
import imp
# Functions & classes =========================================================
def filter_private(tree):
"""Filter private AST elements."""
return filter(lambda x: not x.name.startswith("_"), tree)
def get_func(tree):
"""Pick functions from AST `tree`."""
out = []
tree = filter(lambda x: isinstance(x, ast.FunctionDef), tree)
for el in filter_private(tree):
variables = map(lambda x: x.id, el.args.args)
out.append("%s(%s)" % (el.name, ", ".join(variables)))
return out
def get_classes(tree):
"""Pick classes from AST `tree`."""
tree = filter(lambda x: isinstance(x, ast.ClassDef), tree)
return map(lambda x: "class " + x.name, tree)
def get_properties(class_name, mod):
"""Pick properties which belongs to `class_name` in module `mod`."""
out = []
# look into module for given class_name
if not hasattr(mod, class_name):
return []
cls = getattr(mod, class_name)
# well, this is useless, but you never know..
if not cls:
return []
methods = []
properties = []
static_methods = []
for el in dir(cls):
if el.startswith("_"):
continue
obj = getattr(cls, el)
name = type(obj).__name__
        # rewrite to use the ast module (you can't read a property's value
        # from the class itself)
        if name == "instancemethod":
            methods.append("." + obj.__name__ + "()")
        elif name == "function":
            static_methods.append(class_name + "." + obj.__name__ + "()")
        elif name != "method_descriptor":
            # anything that isn't a method descriptor is treated as a property
            properties.append("." + el)
out.extend(properties)
out.extend(methods)
out.extend(static_methods)
return out
def add_import_path(path):
"""
Adds new import `path` to current path list.
"""
if path not in sys.path:
sys.path.insert(
0,
os.path.abspath(path)
)
def import_module(filename, path):
"""
Import `filename` as module.
Args:
filename (str): Local filename.
path (str): Full path to the file.
Returns:
module obj: Imported module.
"""
add_import_path(os.path.dirname(path))
# try to import module, doesn't work for packages with relative imports
try:
return imp.load_source(filename, path)
except ValueError as e:
if "Attempted relative import in non-package" not in e:
raise
# handling of the 'ValueError: Attempted relative import in non-package'
# problem
import_path = os.path.dirname(os.path.dirname(path))
package_name = os.path.basename(os.path.dirname(path))
sub_package_name = os.path.splitext(os.path.basename(path))[0]
add_import_path(import_path)
pkg = __import__(package_name)
return getattr(pkg, sub_package_name)
def load_data_from_module(clsn, info_dict, path):
"""
Get data for given element.
"""
out = []
user_path = info_dict["filename"]
# convert module path to file path
full_path = os.path.join(path, user_path)
if not user_path.endswith(".py"):
full_path += ".py"
    # remove the invisible Unicode zero-width space character (U+200B);
    # the original literal contained the character itself
    full_path = full_path.replace(u"\u200b".encode("utf-8"), "")
if not os.path.exists(full_path):
        sys.stderr.write("'%s' doesn't exist!\n" % full_path)
return []
if info_dict["type"] in ["module", "mod"]:
        with open(full_path) as f:
            mod = ast.parse(f.read()).body
out.extend(get_func(mod))
out.extend(get_classes(mod))
elif info_dict["type"] in ["struct", "structure"]:
out.extend(
get_properties(
clsn,
import_module(user_path, full_path)
)
)
else:
return []
return out
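# Usage sketch (hypothetical input; Python 2, where function arguments are
# ast.Name nodes carrying an `.id`): private names are filtered out.
if __name__ == "__main__":
    src = "def add(x, y):\n    return x + y\n\ndef _hidden(z):\n    return z\n"
    print(get_func(ast.parse(src).body))  # -> ['add(x, y)']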
| {
"content_hash": "ca611916b1b56b6bdcf6341cfd665659",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.575591985428051,
"repo_name": "edeposit/uml_templater",
"id": "38be0bdb1ad699e50bfc6520081519831f4d0cce",
"size": "4010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/uml_templater/inspector.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "911"
},
{
"name": "Python",
"bytes": "10789"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from builtins import str
# from lino.utils.test import DocTest
from lino.utils.djangotest import WebIndexTestCase
from django.db import models
from django.conf import settings
from lino.api import rt
from lino.utils.djangotest import TestCase
class TestCase(TestCase):
maxDiff = None
def test01(self):
"""We create a member, and three GFK-related objects whose `owner`
fields point to that member. And then we try to delete that
member.
"""
Member = rt.models.gfktest.Member
Note = rt.models.gfktest.Note
Memo = rt.models.gfktest.Memo
Comment = rt.models.gfktest.Comment
BrokenGFKs = rt.models.gfks.BrokenGFKs
def check_status(*args):
for i, m in enumerate((Member, Comment, Note, Memo)):
n = m.objects.all().count()
if n != args[i]:
msg = "Expected %d objects in %s but found %d"
msg %= (args[i], m.__name__, n)
self.fail(msg)
gfklist = [
(f.model, f.fk_field, f.ct_field)
for f in settings.SITE.kernel.GFK_LIST]
self.assertEqual(gfklist, [
(Comment, 'owner_id', 'owner_type'),
(Memo, 'owner_id', 'owner_type'),
(Note, 'owner_id', 'owner_type')])
def create_objects():
mbr = Member(name="John",id=1)
mbr.save()
self.assertEqual(mbr.name, "John")
Comment(owner=mbr, text="Just a comment...").save()
Note(owner=mbr, text="John owes us 100€").save()
Memo(owner=mbr, text="More about John and his friends").save()
return mbr
mbr = create_objects()
check_status(1, 1, 1, 1)
try:
mbr.delete()
except Warning as e:
self.assertEqual(
str(e), "Cannot delete member John because 1 notes refer to it.")
else:
self.fail("Expected an exception")
# they are all still there:
check_status(1, 1, 1, 1)
# delete the note manually
Note.objects.all().delete()
check_status(1, 1, 0, 1)
mbr.delete()
# the memo remains:
check_status(0, 0, 0, 1)
Memo.objects.all().delete()
# The above behaviour is thanks to a `pre_delete_handler`
# which Lino adds automatically. Theoretically it is no longer
# possible to produce broken GFKs. But now we disable this
# `pre_delete_handler` and use Django's raw `delete` method in
# order to produce some broken GFKs:
from django.db.models.signals import pre_delete
from lino.core.model import pre_delete_handler
pre_delete.disconnect(pre_delete_handler)
check_status(0, 0, 0, 0)
mbr = create_objects()
check_status(1, 1, 1, 1)
models.Model.delete(mbr)
pre_delete.connect(pre_delete_handler)
# The member has been deleted, but all generic related objects
# are still there:
check_status(0, 1, 1, 1)
# That's what the BrokenGFKs table is supposed to show:
# rst = BrokenGFKs.request().table2rst()
rst = BrokenGFKs.request().to_rst()
# print rst
self.assertEqual(rst, """\
====================== ================== ======================================================== ========
Database model Database object Message Action
---------------------- ------------------ -------------------------------------------------------- --------
`comment <Detail>`__ *Comment object* Invalid primary key 1 for gfktest.Member in `owner_id` delete
`note <Detail>`__ *Note object* Invalid primary key 1 for gfktest.Member in `owner_id` manual
`memo <Detail>`__ *Memo object* Invalid primary key 1 for gfktest.Member in `owner_id` clear
====================== ================== ======================================================== ========
""")
| {
"content_hash": "a22b34d6ceb8dfd3a5450f4a20c2081d",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 107,
"avg_line_length": 37.054054054054056,
"alnum_prop": 0.5280816921954777,
"repo_name": "khchine5/book",
"id": "f7911baf3528b0e8243101cf38404c6801a6d909",
"size": "4261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_book/projects/gfktest/test_gfk.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "Python",
"bytes": "486198"
},
{
"name": "Shell",
"bytes": "702"
}
],
"symlink_target": ""
} |
from oauth2_provider.settings import oauth2_settings
from oauthlib.common import generate_token
from django.http import JsonResponse
from oauth2_provider.models import AccessToken, Application, RefreshToken
from django.utils.timezone import now, timedelta
def get_token_json(access_token):
"""
Takes an AccessToken instance as an argument
and returns a JsonResponse instance from that
AccessToken
"""
token = {
'access_token': access_token.token,
'expires_in': oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,
'token_type': 'Bearer',
'refresh_token': access_token.refresh_token.token,
'scope': access_token.scope
}
return JsonResponse(token)
def get_access_token(user):
"""
    Takes a user instance and returns an access_token as a JsonResponse
instance.
"""
# our oauth2 app
app = Application.objects.get(name="myapp")
# We delete the old access_token and refresh_token
try:
old_access_token = AccessToken.objects.get(
user=user, application=app)
old_refresh_token = RefreshToken.objects.get(
user=user, access_token=old_access_token
)
    except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):
pass
else:
old_access_token.delete()
old_refresh_token.delete()
# we generate an access token
token = generate_token()
# we generate a refresh token
refresh_token = generate_token()
    expires = now() + timedelta(
        seconds=oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
scope = "read write"
# we create the access token
    access_token = AccessToken.objects.create(
        user=user,
        application=app,
        expires=expires,
        token=token,
        scope=scope)
# we create the refresh token
    RefreshToken.objects.create(
        user=user,
        application=app,
        token=refresh_token,
        access_token=access_token)
    # we call get_token_json and return the access token as JSON
    return get_token_json(access_token)
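# Usage sketch: a minimal endpoint (hypothetical name) that returns a fresh
# token pair for the logged-in user; assumes the "myapp" Application queried
# in get_access_token() exists.
from django.contrib.auth.decorators import login_required
@login_required
def token_view(request):
    return get_access_token(request.user)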
"content_hash": "a2488e1254d4c29a86fef3c572253783",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 73,
"avg_line_length": 29.661971830985916,
"alnum_prop": 0.6324786324786325,
"repo_name": "wasit7/tutorials",
"id": "931b01d7beb045a7e348342cad52a488f9e71926",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/demo/sign/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "161779"
},
{
"name": "Batchfile",
"bytes": "1953"
},
{
"name": "C",
"bytes": "580699"
},
{
"name": "C++",
"bytes": "500977"
},
{
"name": "CMake",
"bytes": "14548"
},
{
"name": "CSS",
"bytes": "12348"
},
{
"name": "Cuda",
"bytes": "16475"
},
{
"name": "Elixir",
"bytes": "391"
},
{
"name": "HTML",
"bytes": "81272"
},
{
"name": "JavaScript",
"bytes": "389"
},
{
"name": "Jupyter Notebook",
"bytes": "1175781"
},
{
"name": "Makefile",
"bytes": "8294"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Processing",
"bytes": "10267"
},
{
"name": "Python",
"bytes": "193149"
},
{
"name": "Shell",
"bytes": "559"
},
{
"name": "XSLT",
"bytes": "2042"
}
],
"symlink_target": ""
} |
import unittest
from airflow.utils.weight_rule import WeightRule
class TestWeightRule(unittest.TestCase):
def test_valid_weight_rules(self):
self.assertTrue(WeightRule.is_valid(WeightRule.DOWNSTREAM))
self.assertTrue(WeightRule.is_valid(WeightRule.UPSTREAM))
self.assertTrue(WeightRule.is_valid(WeightRule.ABSOLUTE))
self.assertEqual(len(WeightRule.all_weight_rules()), 3)
| {
"content_hash": "8be8e8435165902383e8109a53006fb2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 67,
"avg_line_length": 37.45454545454545,
"alnum_prop": 0.7524271844660194,
"repo_name": "owlabs/incubator-airflow",
"id": "c862263387cc31e2152a8aa4520cdc842278d218",
"size": "1224",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tests/utils/test_weight_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
} |
import unittest
from . import db
class TestDB(unittest.TestCase):
def test_in_db(self):
self.assertIn(3, db)
def test_index(self):
self.assertEqual(db.index(2), 1)
| {
"content_hash": "dd4332d7bf663280f803d0523fb9e2aa",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 40,
"avg_line_length": 16.083333333333332,
"alnum_prop": 0.6373056994818653,
"repo_name": "weijarz/python-testpkg",
"id": "a6a5e79a493ac15d594c3b645349568138f6745f",
"size": "193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/mytests/test_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5143"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.disconnect import DisconnectData
from iris_sdk.models.notes import Notes
from iris_sdk.models.disconnect_order_response import DisconnectOrderResponse
XML_NAME_DISCONNECT = "DisconnectTelephoneNumberOrder"
XPATH_DISCONNECT = "/{}"
class Disconnect(BaseResource, DisconnectData):
"""Disconnect telephone numbers order for account"""
_node_name = XML_NAME_DISCONNECT
_xpath = XPATH_DISCONNECT
@property
def id(self):
return self.order_id
@id.setter
def id(self, id):
self.order_id = id
@property
def notes(self):
return self._notes
def __init__(self, parent=None, client=None):
super().__init__(parent, client)
DisconnectData.__init__(self)
self._notes = Notes(self, client)
def get(self, id, params=None):
_id = id
order_response = DisconnectOrderResponse(self._parent)
self.clear()
order_response.order_request = self
return order_response.get(_id, params=params)
    def save(self):
        xml = self._save(True)  # avoid shadowing the builtin ``str``
        order_response = DisconnectOrderResponse(self._parent)
        self.clear()
        order_response.order_request = self
        order_response._from_xml(self._element_from_string(xml))
        self.order_status = order_response.order_status
        return True
| {
"content_hash": "5a75baf0bcc8fa808419c10c0a5c58f2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 30.714285714285715,
"alnum_prop": 0.6757475083056479,
"repo_name": "bandwidthcom/python-bandwidth-iris",
"id": "25b06c6e27aedf1b0fbd93855854e056883f457f",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iris_sdk/models/disconnect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308732"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# Import spotlight only after the path tweak, so a source checkout in the
# parent directory is found even when the package is not installed.
import spotlight
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Spotlight'
copyright = u'2017, Maciej Kula'
author = u'Maciej Kula'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = spotlight.__version__
# The full version, including alpha/beta/rc tags.
release = spotlight.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
}
html_context = {
'css_files': [
'_static/css/spotlight_theme.css'
],
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/img/spotlight.svg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'spotlightdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'spotlight.tex', u'Spotlight Documentation',
u'Maciej Kula', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'spotlight', u'Spotlight Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Spotlight', u'Spotlight Documentation',
author, 'Spotlight', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Compact attribute lists
napoleon_use_ivar = True
| {
"content_hash": "6e3aaa6df0199b5e29b7c9beab704a24",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 80,
"avg_line_length": 31.92517006802721,
"alnum_prop": 0.7028553164287237,
"repo_name": "maciejkula/spotlight",
"id": "ba04298eda058f1adb5e08a2a39e4424da073c76",
"size": "9808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147302"
},
{
"name": "Shell",
"bytes": "3427"
}
],
"symlink_target": ""
} |
import requests
url = "https://maps.googleapis.com/maps/api/directions/json?origin=Sydney%2C%20AU&destination=Perth%2C%20AU&waypoints=side_of_road%3Avia%3Aenc%3AlexeF%7B~wsZejrPjtye%40%3A&key=YOUR_API_KEY"
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
# [END maps_http_directions_sydney_perth_waypoints_mixed]
| {
"content_hash": "6170d768f890d5784e34dc0002be6db1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 189,
"avg_line_length": 32,
"alnum_prop": 0.7734375,
"repo_name": "googlemaps/openapi-specification",
"id": "354dfda4a3403a67983705936dc678a92ec292e5",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dist/snippets/maps_http_directions_sydney_perth_waypoints_mixed/maps_http_directions_sydney_perth_waypoints_mixed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Starlark",
"bytes": "11394"
},
{
"name": "TypeScript",
"bytes": "71469"
}
],
"symlink_target": ""
} |
import operator
from django.utils.six import iteritems
from bson import SON, DBRef
from mongoengine.base import ALLOW_INHERITANCE, BaseDocument
from mongoengine.common import _import_class
from mongoengine import fields
from django_town.mongoengine_extension.fields.base import *
def to_dict_document(self, serializer=None):
data = SON()
data["_id"] = None
# data['_cls'] = self._class_name
for field_name in self:
value = self._data.get(field_name, None)
field = self._fields.get(field_name)
if field is None and self._dynamic:
field = self._dynamic_fields.get(field_name)
if value and isinstance(field, (ResourceField, ResourceIntField, DynamicResourceField)):
pass
# value = value.to_dict(fields=field.fields)
if value is not None:
value = field.to_dict(value, serializer=serializer)
# Handle self generating fields
if value is None and field._auto_gen:
value = field.generate()
self._data[field_name] = value
if value is not None:
data[field.db_field] = value
# If "_id" has not been set, then try and set it
Document = _import_class("Document")
if isinstance(self, Document):
if data["_id"] is None:
data["_id"] = self._data.get("id", None)
if data['_id'] is None:
data.pop('_id')
# Only add _cls if allow_inheritance is True
# if (not hasattr(self, '_meta') or
# not self._meta.get('allow_inheritance', ALLOW_INHERITANCE)):
# data.pop('_cls')
return data
def to_dict_for_default(self, value, serializer=None):
return self.to_mongo(value)
def to_dict_for_complex(self, value, serializer=None):
"""Convert a Python type to a MongoDB-compatible type.
"""
Document = _import_class("Document")
EmbeddedDocument = _import_class("EmbeddedDocument")
GenericReferenceField = _import_class("GenericReferenceField")
if isinstance(value, basestring):
return value
if hasattr(value, 'to_dict'):
if isinstance(value, Document):
return GenericReferenceField().to_dict(value, serializer=serializer)
cls = value.__class__
val = value.to_dict(serializer=serializer)
        # If it's a document that's not inherited, add _cls
# if (isinstance(value, EmbeddedDocument)):
# val['_cls'] = cls.__name__
return val
is_list = False
if not hasattr(value, 'items'):
try:
is_list = True
value = dict([(k, v) for k, v in enumerate(value)])
except TypeError: # Not iterable return the value
return value
if self.field:
value_dict = dict([(key, self.field.to_dict(item, serializer=serializer))
for key, item in iteritems(value)])
else:
value_dict = {}
for k, v in iteritems(value):
if isinstance(v, Document):
# We need the id from the saved object to create the DBRef
if v.pk is None:
self.error('You can only reference documents once they'
' have been saved to the database')
# If its a document that is not inheritable it won't have
# any _cls data so make it a generic reference allows
# us to dereference
meta = getattr(v, '_meta', {})
allow_inheritance = (
meta.get('allow_inheritance', ALLOW_INHERITANCE)
is True)
if not allow_inheritance and not self.field:
value_dict[k] = GenericReferenceField().to_dict(v, serializer=serializer)
else:
collection = v._get_collection_name()
value_dict[k] = DBRef(collection, v.pk)
elif hasattr(v, 'to_dict'):
cls = v.__class__
val = v.to_dict(serializer=serializer)
                # If it's a document that's not inherited, add _cls
# if (isinstance(v, (Document, EmbeddedDocument))):
# val['_cls'] = cls.__name__
value_dict[k] = val
else:
value_dict[k] = self.to_dict(v, serializer=serializer)
if is_list: # Convert back to a list
return [v for k, v in sorted(value_dict.items(),
key=operator.itemgetter(0))]
return value_dict
def to_dict_for_point(self, value, serializer=None):
if not isinstance(value, list):
point = value['coordinates']
else:
point = value
return {"latitude": point[1], "longitude": point[0]}
def to_dict_for_embedded_field(self, value, serializer=None):
if not isinstance(value, self.document_type):
return value
return self.document_type.to_dict(value, serializer=serializer)
setattr(BaseDocument, 'to_dict', to_dict_document)
for field in fields.__all__:
cls = getattr(fields, field)
if field in ["ComplexBaseField", "ListField"]:
setattr(cls, 'to_dict', to_dict_for_complex)
elif field in ["PointField"]:
setattr(cls, 'to_dict', to_dict_for_point)
elif field in ["EmbeddedDocumentField"]:
setattr(cls, 'to_dict', to_dict_for_embedded_field)
elif not hasattr(cls, 'to_dict'):
setattr(cls, 'to_dict', to_dict_for_default)
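# Usage sketch (hypothetical model; assumes Python 2 plus a mongoengine
# version contemporary with this module, as the basestring/iteritems usage
# above implies): once the hooks are installed, every document gains a
# to_dict() serializer.
if __name__ == "__main__":
    from mongoengine import Document, StringField, ListField
    class Tag(Document):
        name = StringField()
        aliases = ListField(StringField())
    print(Tag(name="db", aliases=["database", "mongo"]).to_dict())
    # -> SON([('name', 'db'), ('aliases', ['database', 'mongo'])])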
| {
"content_hash": "4de0dcfc196d905dea50c52ec0e42d9a",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 96,
"avg_line_length": 36.153333333333336,
"alnum_prop": 0.5898948921261294,
"repo_name": "uptown/django-town",
"id": "a2560aaeb9692059319f1dd4de0acb280d4abb07",
"size": "5423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_town/mongoengine_extension/fields/hook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "382389"
}
],
"symlink_target": ""
} |
from typing import Any, Dict
from ..utils.projector import register_projector_element
# Important: All functions have to be pure. This means that they can only
#            access the data they get as arguments and must not have any
#            side effects. They are called from an async context, so they
#            have to be fast!
def mediafile(
element: Dict[str, Any], all_data: Dict[str, Dict[int, Dict[str, Any]]]
) -> Dict[str, Any]:
"""
Slide for Mediafile.
"""
return {"error": "TODO"}
def register_projector_elements() -> None:
register_projector_element("mediafiles/mediafile", mediafile)
| {
"content_hash": "85b6339305744e5447217355a1b35afd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.6625,
"repo_name": "boehlke/OpenSlides",
"id": "5779059a2937b384f2f384a2169bf01981ebe0be",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openslides/mediafiles/projector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43810"
},
{
"name": "Dockerfile",
"bytes": "855"
},
{
"name": "HTML",
"bytes": "210347"
},
{
"name": "JavaScript",
"bytes": "86897"
},
{
"name": "Python",
"bytes": "894871"
},
{
"name": "Smarty",
"bytes": "3818"
},
{
"name": "TypeScript",
"bytes": "1230305"
}
],
"symlink_target": ""
} |
from django.contrib.gis import admin as geoadmin
from website.models import ApartmentsNY
class ApartmentsNYAdmin(geoadmin.OSMGeoAdmin):
list_display = ('name', 'city', 'price', 'wifi', 'breakfast')
search_fields = ['name']
list_filter = ('city',)
default_lon = -8236306.48
default_lat = 5028376.23
default_zoom = 5
geoadmin.site.register(ApartmentsNY, ApartmentsNYAdmin)
| {
"content_hash": "f4c490eafc7348a11c971f4c9303e305",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 31.076923076923077,
"alnum_prop": 0.7004950495049505,
"repo_name": "pvsalgado/maps_web_gsa",
"id": "458b83145545027077012075dff8e93bd7a4662e",
"size": "404",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_project/website/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87371"
},
{
"name": "HTML",
"bytes": "8726"
},
{
"name": "JavaScript",
"bytes": "10472"
},
{
"name": "Python",
"bytes": "7404"
}
],
"symlink_target": ""
} |
from io import BytesIO
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.detail import SingleObjectMixin
from django.core.files.images import ImageFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.forms.utils import ErrorList
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.views.decorators.csrf import csrf_protect
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
from django.views.generic.list import ListView
from buzzit_app.forms import RegistrationForm
from buzzit_models.models import *
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth import login, logout as authlogout
from django.contrib.auth.views import password_change as _pw_change_
import logging
from django.forms.fields import FileField, ClearableFileInput
from django.core.exceptions import ObjectDoesNotExist
from PIL import Image
import imghdr
import os
from django.contrib import messages
from django.core.mail import send_mail
import hashlib
from os import urandom
def start(request):
"""
Controls the behaviour of guests visiting the page and the login procedure.
If a user is already authenticated he gets redirected to his home.
Else a login form is provided.
:param request: The request object
:return: start.html template rendered with a login form element "form"
"""
if request.user.is_authenticated():
if not request.user.is_active:
messages.error(request, "Sie sind deaktiviert!")
return render(request, "guest/start.html", {"form": AuthenticationForm()})
return HttpResponseRedirect(reverse("home"))
if request.method == "POST":
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
if user:
if not user.is_active:
messages.error(request, "Sie sind deaktiviert!")
return render(request, "guest/start.html", {"form": AuthenticationForm()})
login(request, user)
messages.success(request, "Sie wurden eingeloggt!")
redirect_to = request.REQUEST.get("next", False)
if (redirect_to):
if not is_safe_url(url=redirect_to, host=request.get_host()):
return HttpResponseRedirect(reverse("home"))
return HttpResponseRedirect(redirect_to)
return HttpResponseRedirect(reverse("home"))
else:
messages.error(request, "Benutzername/Passwort falsch!")
else:
form = AuthenticationForm()
return render(request, "guest/start.html", {"form": form})
def __get_home_posts__(request):
circles_of_which_we_are_member = Circle.objects.filter(members=request.user.pk)
message_list = []
    # messages from users we follow and whose circles we are in:
for circle in circles_of_which_we_are_member:
message_list += (circle.messages.filter(answer_to=None).all())
    # public messages from users we follow:
followed_profiles = request.user.profile.follows.all()
for followed_profile in followed_profiles:
public_messages_of_user = Circle_message.objects.filter(creator=followed_profile.user, public=True, answer_to=None)
message_list += public_messages_of_user.all()
(settings, created) = Settings.objects.get_or_create(owner=request.user)
if settings.show_own_messages_on_home_screen:
message_list += Circle_message.objects.filter(creator=request.user, answer_to=None).all()
message_list.sort(key=lambda m: m.created, reverse=True)
return message_list
@login_required
def home(request):
"""
The start page of logged in users.
:param request: The request object.
:return: the home.html template rendered with a user object "user" and a profile object "profile"
"""
message_list = __get_home_posts__(request)
return render(request, "logged_in/home.html", {"user": request.user,
"profile": Profile.objects.get(user=request.user.pk),
"message_list": message_list,
"circles": Circle.objects.filter(owner=request.user.pk)})
@login_required
def view_profile(request, slug):
profile = Profile.objects.get(pk=slug)
profile.i_am_following = request.user.profile.follows.all().filter(pk=profile.user)
if profile == request.user.profile:
messages.info(request, "Das ist Dein eigenes oeffentliches Profil")
circles_im_in = Circle.objects.filter(members=request.user, owner=profile.user)
message_list = []
    # messages in circles that I have been assigned to
for circle in circles_im_in:
message_list += (circle.messages.filter(answer_to=None).all())
    # messages that are not assigned to any circle - i.e. public ones
    # 1. all circles
circles_of_user = Circle.objects.filter(owner=profile.user)
    # 2. all public messages from the user
messages_of_user = Circle_message.objects.filter(creator=profile.user, public=True, answer_to=None)
message_list += (messages_of_user.all())
message_list.sort(key=lambda m: m.created, reverse=True)
return render(request, "logged_in/view_profile.html", {"profile": profile,
"message_list": message_list,
"user": request.user})
class EditProfileView(SuccessMessageMixin, UpdateView):
"""
Controls the behaviour if a logged in user want to edit his profile.
If an image is uploaded, then a smaller version of this is created.
Returns the edit_own_profile.html rendered with a profile object.
"""
model = Profile
template_name = "logged_in/edit_own_profile.html"
fields = ["gender", "description"]
success_url = reverse_lazy("update_profile")
success_message = "Profil wurde gespeichert"
def get_form(self, form_class=None):
"""
Normal form, just with an added File Upload for a picture
:param form_class:
:return:
"""
form = super(EditProfileView, self).get_form(form_class)
form.fields['image_file'] = FileField(widget=ClearableFileInput(attrs={"accept": "image/*"}))
form.fields['image_file'].required = False
return form
    @staticmethod
    def __create_small_picture__(request):
        """
        Generates a smaller, standard-size (128x128) picture from the
        user's full profile picture and stores it in
        `profile.profile_picture_small`.
        If this fails, the leftover `<profile_picture_full>_sm` file is
        removed!
        :param request: the original request object
        :return: True on success, False else
        """
profile = request.user.profile
outfile = profile.profile_picture_full.path + "_sm"
try:
im = Image.open(request.user.profile.profile_picture_full.path)
im.thumbnail((128, 128))
thumb_io = BytesIO()
im.save(thumb_io, format='JPEG')
thumb_file = InMemoryUploadedFile(thumb_io, None, 'pp.jpg', 'image/jpeg',
thumb_io.getbuffer().nbytes, None)
profile.profile_picture_small = thumb_file
profile.save()
return True
except IOError:
logging.error("Fehler beim speichern des thumbnails")
try:
os.remove(outfile)
except IOError:
pass
return False
def form_valid(self, form):
"""
Handles the Image upload, verify, saving, etc.
:param form:
:return:
"""
instance = form.save(commit=False)
instance.user = self.request.user
image_file = self.request.FILES.get('image_file', False)
if image_file:
imgtype = imghdr.what(image_file)
if imgtype in ["jpeg", "png", "gif"]:
instance.profile_picture_full = image_file
instance.save()
if not EditProfileView.__create_small_picture__(self.request):
errors = form._errors.setdefault('image_file', ErrorList())
messages.warning(self.request, "Bild nicht gespeichert - altes Bild wurde geloescht")
errors.append(
"Das Thumbnail konnte nicht erzeugt werden; benutzen Sie ein anderes (jpg,png,gif(nicht animiert)) Bild.")
return super(EditProfileView, self).form_invalid(form)
return super(EditProfileView, self).form_valid(form)
def get_object(self, queryset=None):
return Profile.objects.get(user=self.request.user.pk)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(EditProfileView, self).dispatch(request, *args, **kwargs)
class EditUserdataView(SuccessMessageMixin, UpdateView):
"""
View to edit user data.
Template is "edit_own_userdata.html"
"""
model = User
template_name = "logged_in/edit_own_userdata.html"
fields = ["first_name", "last_name", "email"]
success_url = "/updateuser"
success_message = "Daten gespeichert!"
def get_object(self, queryset=None):
return self.request.user
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(EditUserdataView, self).dispatch(request, *args, **kwargs)
class UserSearchResultsView(ListView):
"""
Handles the results of a user search.
"""
model = User
template_name = "logged_in/usersearch_results.html"
# context_object_name = "O"
def get_context_data(self, **kwargs):
context = super(UserSearchResultsView, self).get_context_data(**kwargs)
ownprofile = self.request.user.profile
ownprofile.follows_list = ownprofile.follows.all()
context["ownprofile"] = ownprofile
return context
def get_queryset(self):
ownprofile = self.request.user.profile
ownprofile.follows_list = ownprofile.follows.all()
usrname = self.request.GET.get("q", False)
if usrname and len(usrname) > 0:
userset = User.objects.filter(username__contains=usrname).order_by("username")
else:
userset = User.objects.all().order_by("username")
for user in userset:
user.i_am_following = ownprofile.follows.all().filter(pk=user)
return userset
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(UserSearchResultsView, self).dispatch(request, *args, **kwargs)
@csrf_protect
def register(request):
"""
    Handle user registration and create a profile for the user.
    Uses the registration form to validate all fields; with valid data a
    User object is created and all its attributes are stored.
    :param request:
    :return: HttpResponse object
"""
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
user = User.objects.create_user(
username=form.cleaned_data['username'],
password=form.cleaned_data['password1'],
email=form.cleaned_data['email'],
first_name=form.cleaned_data.get('first_name', ''),
last_name=form.cleaned_data.get('last_name', ''),
)
user.is_active = False
user.save()
new_profile = Profile()
new_profile.user = user
new_profile.gender = ""
new_profile.description = ""
new_profile.save()
token = hashlib.sha1()
token.update(urandom(64))
token = token.hexdigest()
at = AccountActivation()
at.username = user
at.token = token
at.save()
activation_address = request.build_absolute_uri(reverse("activate_account", args=(at.username.username, at.token)))
send_mail("Aktiviere Deinen Account",
message= "Gehe zu: '%s' um Deinen Account zu aktivieren. Danach kannst Du Dich einloggen!" % activation_address,
html_message="<html><h3>Dein neues Passwort:</h3>" +
"<a href='%s'>Klicke hier um den Account zu aktivieren!</a>." % activation_address +
"</html>" ,from_email="AccountAktivierung@vps146949.ovh.net", recipient_list=(user.email,))
messages.success(request, "Sie sind registriert und haben eine EMail bekommen!\\nBestaetigen Sie dort die EMail Adresse")
return HttpResponseRedirect(reverse("start"))
else:
messages.error(request, "Sie haben ungueltige Daten angegeben!")
else:
form = RegistrationForm()
variables = {
'form': form
}
return render(request,
'guest/register.html',
variables,
)
def register_success(request):
return render_to_response(
'registration/success.html',
)
@login_required
def logout(request):
"""
Logout the current user.
:param request:
:return:
"""
authlogout(request)
messages.success(request, "Sie sind ausgeloggt!")
return HttpResponseRedirect(reverse("start"))
def __create_dummy_pic_response__():
"""
    Creates a red dummy image for cases where a profile picture should be available but reading it from disk fails.
:return:
"""
red = Image.new('RGBA', (128, 128), (255, 0, 0, 0))
response = HttpResponse(content_type="image/jpeg")
red.save(response, "JPEG")
return response
@login_required
def profilepicture_full(request, slug):
"""
Returns the full size profile image or the dummy, if not present/on IOError.
:param request:
:param slug:
:return:
"""
try:
profile = Profile.objects.get(pk=slug)
except ObjectDoesNotExist:
return __create_dummy_pic_response__()
if profile.profile_picture_full:
image = profile.profile_picture_full.path
else:
image = Profile.objects.get(user__username="SYSTEM").profile_picture_full.path
try:
with open(image, "rb") as f:
return HttpResponse(f.read(), content_type="image/jpeg")
except IOError:
return __create_dummy_pic_response__()
@login_required
def profilepicture_small(request, slug):
"""
Returns the standard 128x128 profile image or the dummy if not present/on IO error.
:param request:
:param slug:
:return:
"""
try:
profile = Profile.objects.get(pk=slug)
except ObjectDoesNotExist:
return __create_dummy_pic_response__()
if profile.profile_picture_small:
image = profile.profile_picture_small.path
else:
image = Profile.objects.get(user__username="SYSTEM").profile_picture_small.path
try:
with open(image, "rb") as f:
return HttpResponse(f.read(), content_type="image/jpeg")
except IOError:
return __create_dummy_pic_response__()
def password_change(request):
if request.method == "POST":
messages.info(request, "Wenn keine Fehler angezeigt wurden, wurde das Passwort geaendert")
return _pw_change_(request,
template_name='logged_in/change_password.html',
post_change_redirect=reverse("home"))
def impressum(request):
if request.user.is_authenticated():
return render(request, "logged_in/impressum.html")
else:
return render(request, "guest/impressum.html")
def reset_password(request):
if request.method == "POST":
username = request.POST.get("username", False)
email = request.POST.get("email", False)
if not (username or email):
return render(request, "forgot_password/forgot_password.html", {"errors": "Benutzername oder EMail fehlen"})
try:
user = User.objects.get(username=username, email=email)
except ObjectDoesNotExist:
return render(request, "forgot_password/forgot_password.html",
{"errors": "Benutzername oder Email stimmen nicht"})
new_pwd = hashlib.sha1()
new_pwd.update(urandom(64))
new_pwd = new_pwd.hexdigest()
user.set_password(new_pwd)
user.save()
send_mail("Dein neues Password",
message= "Deine neues Passwort lautet: '%s'. Log Dich ein, um es direkt zu aendern!" % new_pwd,
html_message="<html><h3>Dein neues Passwort:</h3>" +
"<p>%s</p><br />" % new_pwd +
"<a href='%s'>Log Dich ein und aendere es!</a>." % request.build_absolute_uri(reverse("start")) +
"</html>" ,from_email="PasswortAenderung@vps146949.ovh.net", recipient_list=(user.email,))
return render(request, "forgot_password/message_password_sent.html")
return render(request, "forgot_password/forgot_password.html")
def activateaccount(request, username, token):
try:
activation_data = AccountActivation.objects.get(username_id=username, token=token)
except ObjectDoesNotExist:
messages.error(request, "Dieser Link ist ungueltig!")
return HttpResponseRedirect(reverse("start"))
user = activation_data.username
user.is_active = True
user.save()
activation_data.delete()
messages.success(request, "Dein Account wurde aktiviert! Logge Dich jetzt ein")
return HttpResponseRedirect(reverse("start")) | {
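# Standalone sketch of the thumbnail step in __create_small_picture__
# (assumes Pillow; the function name and paths here are hypothetical):
# convert('RGB') guards against RGBA sources that JPEG cannot store.
from io import BytesIO
from PIL import Image
def make_thumbnail_bytes(path, size=(128, 128)):
    im = Image.open(path).convert('RGB')
    im.thumbnail(size)  # in-place, keeps aspect ratio
    buf = BytesIO()
    im.save(buf, format='JPEG')
    return buf.getvalue()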
"content_hash": "8c706b0b72f0b9e4e22792450e98248f",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 134,
"avg_line_length": 40.32222222222222,
"alnum_prop": 0.6342243042160375,
"repo_name": "jmennen/group5",
"id": "ed2c05c08cd2494b8f8f2068dd54200fa1a9f967",
"size": "18145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/buzzit/buzzit_app/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "136971"
},
{
"name": "HTML",
"bytes": "224970"
},
{
"name": "JavaScript",
"bytes": "187926"
},
{
"name": "PHP",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "95098"
}
],
"symlink_target": ""
} |
from itertools import groupby
# TODO: BNODE_KEY used to be '$bnode', but this *should* be more compliant with
# "de-facto json-with-refs"(?). All that is left is to "stdize" uri, lang and
# datatype..
URI_KEY = '$uri'
BNODE_KEY = '$id'
DATATYPE_KEY = '$datatype'
VALUE_KEY = '$value'
LANG_TAG = '@'
LAX_ONE = True
XSD = "http://www.w3.org/2001/XMLSchema#"
# NOTE: Only convert deterministically..
TYPE_CONVERSION = {
XSD+'boolean': lambda v: (v != "false" and v == "true"),
XSD+'integer': int,
XSD+'float': float,
}
def treeify_results(results, root={}):
"""
Takes an object isomorphic to a parsed SPARQL JSON result and creates a
tree object (suitable for JSON serialization).
"""
varmodel = _var_tree_model(results['head']['vars'])
bindings = results["results"]["bindings"]
root = root or {}
_fill_nodes(varmodel, root, bindings)
return root
def _var_tree_model(rqvars, sep="__"):
    """Compute a tree model from variable names following the `sep` convention."""
    vartree = {}
for var in sorted(rqvars):
currtree = vartree
for key in var.split(sep):
use_one = False
if key.startswith('1_'):
use_one = True
key = key[2:]
currtree = currtree.setdefault(key, (use_one, var, {}))[-1]
return vartree
def _fill_nodes(varmodel, tree, bindings):
"""Computing a tree model from var names following a given convention."""
for key, namedmodel in varmodel.items():
use_one, varname, subvarmodel = namedmodel
nodes = [] #tree.setdefault(key, [])
for keybinding, gbindings in groupby(bindings, lambda b: b.get(varname)):
if not keybinding:
continue
node = _make_node(keybinding)
# TODO:Ok? Duplicates (due to join combinations) may occur in res,
# but should be filtered here by not continuing if keyed value
# exists in an already added node..
if any(n for n in nodes if n == node or
isinstance(node, dict) and isinstance(n, dict) and
[n.get(k) for k in node] == node.values()):
continue
nodes.append(node)
# NOTE: if node is "literal", subvarmodel should be falsy
if subvarmodel:
_fill_nodes(subvarmodel, node, list(gbindings))
tree[key] = _oneify(nodes) if use_one else nodes
return tree
def _make_node(binding):
node = {}
vtype = binding['type']
value = binding['value']
if vtype == 'uri':
node[URI_KEY] = value
elif vtype == 'bnode':
node[BNODE_KEY] = value
elif vtype == 'literal':
lang = binding.get('xml:lang')
if lang:
node[LANG_TAG+lang] = value
else:
node = value
elif vtype == 'typed-literal':
datatype = binding.get('datatype')
converter = TYPE_CONVERSION.get(datatype)
if converter:
node = converter(value)
else:
node[VALUE_KEY] = value
node[DATATYPE_KEY] = datatype
else:
raise TypeError("Unknown value type: %s" % vtype)
return node
def _oneify(nodes, lax_one=None):
if lax_one is None:
lax_one = LAX_ONE
if not nodes:
return None
first = nodes[0]
if is_lang_node(first):
# TODO: warn if a node isn't a dict:
# "value was expected to be a lang dict but was %r."
first = dict(
node.items()[0] if isinstance(node, dict) else ('', node)
for node in nodes if node
)
elif not lax_one and len(nodes) > 1:
raise CardinalityError(nodes)
return first
def is_lang_node(obj):
return isinstance(obj, dict) and any(
key.startswith(LANG_TAG) for key in obj)
def is_datatype_node(obj):
return isinstance(obj, dict) and DATATYPE_KEY in obj
def is_literal(obj):
return not isinstance(obj, dict) or is_datatype_node(obj) or is_lang_node(obj)
def is_resource(obj):
#return isinstance(obj, dict) and (URI_KEY in obj or BNODE_KEY in obj)
# FIXME: currently we do allow for "pure" anonymous nodes (w/o BNODE_KEY:s)
# but this check is expensive(!):
return not is_literal(obj)
class CardinalityError(ValueError):
pass
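# Usage sketch (hypothetical data): variables named with the "__" path
# convention are folded into a tree; a "1_" segment marks a to-one value.
if __name__ == '__main__':
    results = {
        "head": {"vars": ["book", "book__1_title"]},
        "results": {"bindings": [
            {"book": {"type": "uri", "value": "http://example.org/b1"},
             "book__1_title": {"type": "literal", "value": "A Tale"}},
        ]},
    }
    print(treeify_results(results))
    # -> {'book': [{'$uri': 'http://example.org/b1', 'title': 'A Tale'}]}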
| {
"content_hash": "a9497bba00a8dc56c680f6e98310ad90",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 82,
"avg_line_length": 30.676258992805757,
"alnum_prop": 0.5884146341463414,
"repo_name": "tectronics/oort.python",
"id": "49f8be83ef19582e051b0f22e4ba1f4b9385c258",
"size": "4264",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "oort/sparqltree/autotree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "100048"
}
],
"symlink_target": ""
} |
import datetime
import os
from decimal import Decimal
from unittest import mock, skipUnless
from django import forms
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
ModelChoiceIterator, ModelFormMetaclass, construct_instance,
fields_for_model, model_to_dict, modelform_factory,
)
from django.forms.widgets import CheckboxSelectMultiple
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import (
Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt, Book,
Category, Character, Colour, ColourfulItem, CustomErrorMessage, CustomFF,
CustomFieldForExclusionModel, DateTimePost, DerivedBook, DerivedPost,
Document, ExplicitPK, FilePathModel, FlexibleDatePost, Homepage,
ImprovedArticle, ImprovedArticleWithParentLink, Inventory,
NullableUniqueCharFieldModel, Person, Photo, Post, Price, Product,
Publication, PublicationDefaults, StrictAssignmentAll,
StrictAssignmentFieldSpecific, Student, StumpJoke, TextFile, Triple,
Writer, WriterProfile, test_images,
)
if test_images:
    from .models import ImageFile, OptionalImageFile, NoExtensionImageFile
    class ImageFileForm(forms.ModelForm):
        class Meta:
            model = ImageFile
            fields = '__all__'
    class OptionalImageFileForm(forms.ModelForm):
        class Meta:
            model = OptionalImageFile
            fields = '__all__'
    class NoExtensionImageFileForm(forms.ModelForm):
        class Meta:
            model = NoExtensionImageFile
            fields = '__all__'
class ProductForm(forms.ModelForm):
class Meta:
model = Product
fields = '__all__'
class PriceForm(forms.ModelForm):
class Meta:
model = Price
fields = '__all__'
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = '__all__'
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
fields = '__all__'
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = '__all__'
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
fields = '__all__'
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = '__all__'
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
fields = '__all__'
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
fields = '__all__'
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
fields = '__all__'
class SelectInventoryForm(forms.Form):
items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ['name', 'markup']
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
fields = '__all__'
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
fields = '__all__'
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
class Meta:
model = TextFile
fields = '__all__'
class CustomErrorMessageForm(forms.ModelForm):
name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})
class Meta:
fields = '__all__'
model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_no_model_class(self):
class NoModelModelForm(forms.ModelForm):
pass
with self.assertRaisesMessage(ValueError, 'ModelForm has no model class specified.'):
NoModelModelForm()
def test_empty_fields_to_fields_for_model(self):
"""
An argument of fields=() to fields_for_model should return an empty dictionary
"""
field_dict = fields_for_model(Person, fields=())
self.assertEqual(len(field_dict), 0)
def test_empty_fields_on_modelform(self):
"""
No fields on a ModelForm should actually result in no fields.
"""
class EmptyPersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ()
form = EmptyPersonForm()
self.assertEqual(len(form.fields), 0)
def test_empty_fields_to_construct_instance(self):
"""
No fields should be set on a model instance if construct_instance receives fields=().
"""
form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
self.assertTrue(form.is_valid())
instance = construct_instance(form, Person(), fields=())
self.assertEqual(instance.name, '')
def test_blank_with_null_foreign_key_field(self):
"""
#13776 -- ModelForm's with models having a FK set to null=False and
required=False should be valid.
"""
class FormForTestingIsValid(forms.ModelForm):
class Meta:
model = Student
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['character'].required = False
char = Character.objects.create(username='user',
last_action=datetime.datetime.today())
data = {'study': 'Engineering'}
data2 = {'study': 'Engineering', 'character': char.pk}
# form is valid because required=False for field 'character'
f1 = FormForTestingIsValid(data)
self.assertTrue(f1.is_valid())
f2 = FormForTestingIsValid(data2)
self.assertTrue(f2.is_valid())
obj = f2.save()
self.assertEqual(obj.character, char)
def test_blank_false_with_null_true_foreign_key_field(self):
"""
A ModelForm with a model having ForeignKey(blank=False, null=True)
and the form field set to required=False should allow the field to be
unset.
"""
class AwardForm(forms.ModelForm):
class Meta:
model = Award
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['character'].required = False
character = Character.objects.create(username='user', last_action=datetime.datetime.today())
award = Award.objects.create(name='Best sprinter', character=character)
data = {'name': 'Best tester', 'character': ''} # remove character
form = AwardForm(data=data, instance=award)
self.assertTrue(form.is_valid())
award = form.save()
self.assertIsNone(award.character)
def test_save_blank_false_with_required_false(self):
"""
A ModelForm with a model with a field set to blank=False and the form
field set to required=False should allow the field to be unset.
"""
obj = Writer.objects.create(name='test')
form = CustomWriterForm(data={'name': ''}, instance=obj)
self.assertTrue(form.is_valid())
obj = form.save()
self.assertEqual(obj.name, '')
def test_save_blank_null_unique_charfield_saves_null(self):
form_class = modelform_factory(model=NullableUniqueCharFieldModel, fields=['codename'])
empty_value = '' if connection.features.interprets_empty_strings_as_nulls else None
form = form_class(data={'codename': ''})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.instance.codename, empty_value)
# Save a second form to verify there isn't a unique constraint violation.
form = form_class(data={'codename': ''})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.instance.codename, empty_value)
def test_missing_fields_attribute(self):
message = (
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form "
"MissingFieldsForm needs updating."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
class MissingFieldsForm(forms.ModelForm):
class Meta:
model = Category
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(list(ExtraFields.base_fields),
['name', 'slug', 'url', 'some_extra_field'])
def test_extra_field_model_form(self):
with self.assertRaisesMessage(FieldError, 'no-field'):
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'no-field')
def test_extra_declared_field_model_form(self):
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'age')
def test_extra_field_modelform_factory(self):
with self.assertRaisesMessage(FieldError, 'Unknown field(s) (no-field) specified for Person'):
modelform_factory(Person, fields=['no-field', 'name'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
def test_replace_field_variant_2(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = ['url']
self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
def test_replace_field_variant_3(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = [] # url will still appear, since it is explicit above
self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_nonexistent_field(self):
expected_msg = 'Unknown field(s) (nonexistent) specified for Category'
with self.assertRaisesMessage(FieldError, expected_msg):
class InvalidCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ['nonexistent']
def test_limit_fields_with_string(self):
expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ('url') # note the missing comma
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug'])
def test_exclude_nonexistent_field(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['nonexistent']
self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug', 'url'])
def test_exclude_fields_with_string(self):
expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
exclude = ('url') # note the missing comma
def test_exclude_and_validation(self):
# This Price instance generated by this form is not valid because the quantity
# field is required, but the form is valid because the field is excluded from
# the form. This is for backwards compatibility.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ('quantity',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertTrue(form.is_valid())
price = form.save(commit=False)
msg = "{'quantity': ['This field cannot be null.']}"
with self.assertRaisesMessage(ValidationError, msg):
price.full_clean()
# The form should not validate fields that it doesn't contain even if they are
# specified using 'fields', not 'exclude'.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
fields = ('price',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertTrue(form.is_valid())
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal('6.00'))
self.assertIsNone(form.instance.quantity)
self.assertIsNone(form.instance.pk)
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(list(ConfusedForm.base_fields),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
fields = '__all__'
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
list(MixModelForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
list(ArticleForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
# First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
list(BadForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_invalid_meta_model(self):
class InvalidModelForm(forms.ModelForm):
class Meta:
pass # no model
# Can't create new form
msg = 'ModelForm has no model class specified.'
with self.assertRaisesMessage(ValueError, msg):
InvalidModelForm()
# Even if you provide a model instance
with self.assertRaisesMessage(ValueError, msg):
InvalidModelForm(instance=Category())
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(list(SubCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
class SubclassMeta(SomeCategoryForm):
""" We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ['url']
self.assertHTMLEqual(
str(SubclassMeta()),
"""<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" required /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th>
<td><input type="checkbox" name="checkbox" id="id_checkbox" required /></td></tr>"""
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url', 'name']
self.assertEqual(list(OrderFields.base_fields),
['url', 'name'])
self.assertHTMLEqual(
str(OrderFields()),
"""<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required /></td></tr>
<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required /></td></tr>"""
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(list(OrderFields2.base_fields),
['slug', 'name'])
def test_default_populated_on_optional_field(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(max_length=255, required=False)
class Meta:
model = PublicationDefaults
fields = ('mode',)
# Empty data uses the model field default.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, 'di')
self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
# Blank data doesn't use the model field default.
mf2 = PubForm({'mode': ''})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.mode, '')
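# Hedged aside (assumption about internals, for illustration): the split
# between "empty data" and "blank data" above comes from
# Widget.value_omitted_from_data(); construct_instance() applies the model
# field default only when the widget reports the value as omitted.
def _sketch_value_omitted_from_data(self):
    widget = forms.TextInput()
    omitted = widget.value_omitted_from_data({}, {}, 'mode')  # True -> default 'di' used
    blank = widget.value_omitted_from_data({'mode': ''}, {}, 'mode')  # False -> '' kept
    return omitted, blank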
def test_default_not_populated_on_optional_checkbox_input(self):
class PubForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ('active',)
# Empty data doesn't use the model default because CheckboxInput
# doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertIs(m1.active, False)
self.assertIsInstance(mf1.fields['active'].widget, forms.CheckboxInput)
self.assertIs(m1._meta.get_field('active').get_default(), True)
def test_default_not_populated_on_checkboxselectmultiple(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(required=False, widget=forms.CheckboxSelectMultiple)
class Meta:
model = PublicationDefaults
fields = ('mode',)
# Empty data doesn't use the model default because an unchecked
# CheckboxSelectMultiple doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, '')
self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
def test_default_not_populated_on_selectmultiple(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(required=False, widget=forms.SelectMultiple)
class Meta:
model = PublicationDefaults
fields = ('mode',)
# Empty data doesn't use the model default because an unselected
# SelectMultiple doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, '')
self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
def test_prefixed_form_with_default_field(self):
class PubForm(forms.ModelForm):
prefix = 'form-prefix'
class Meta:
model = PublicationDefaults
fields = ('mode',)
mode = 'de'
self.assertNotEqual(mode, PublicationDefaults._meta.get_field('mode').get_default())
mf1 = PubForm({'form-prefix-mode': mode})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, mode)
def test_default_splitdatetime_field(self):
class PubForm(forms.ModelForm):
datetime_published = forms.SplitDateTimeField(required=False)
class Meta:
model = PublicationDefaults
fields = ('datetime_published',)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.datetime_published, datetime.datetime(2000, 1, 1))
mf2 = PubForm({'datetime_published_0': '2010-01-01', 'datetime_published_1': '0:00:00'})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.datetime_published, datetime.datetime(2010, 1, 1))
def test_default_filefield(self):
class PubForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ('file',)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.file.name, 'default.txt')
mf2 = PubForm({}, {'file': SimpleUploadedFile('name', b'foo')})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.file.name, 'name')
def test_default_selectdatewidget(self):
class PubForm(forms.ModelForm):
date_published = forms.DateField(required=False, widget=forms.SelectDateWidget)
class Meta:
model = PublicationDefaults
fields = ('date_published',)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.date_published, datetime.date.today())
mf2 = PubForm({'date_published_year': '2010', 'date_published_month': '1', 'date_published_day': '1'})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.date_published, datetime.date(2010, 1, 1))
class FieldOverridesByFormMetaForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name', 'url', 'slug']
widgets = {
'name': forms.Textarea,
'url': forms.TextInput(attrs={'class': 'url'})
}
labels = {
'name': 'Title',
}
help_texts = {
'slug': 'Watch out! Letters, numbers, underscores and hyphens only.',
}
error_messages = {
'slug': {
'invalid': (
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!"
)
}
}
field_classes = {
'url': forms.URLField,
}
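# Hedged sketch (not exercised by the tests below): the same Meta-level
# overrides can be produced at runtime with modelform_factory(); the keyword
# arguments mirror the Meta attributes on FieldOverridesByFormMetaForm.
def _field_overrides_via_factory():
    return modelform_factory(
        Category,
        fields=['name', 'url', 'slug'],
        widgets={'name': forms.Textarea, 'url': forms.TextInput(attrs={'class': 'url'})},
        labels={'name': 'Title'},
        help_texts={'slug': 'Watch out! Letters, numbers, underscores and hyphens only.'},
        field_classes={'url': forms.URLField},
    )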
class TestFieldOverridesByFormMeta(SimpleTestCase):
def test_widget_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form['name']),
'<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20" required></textarea>',
)
self.assertHTMLEqual(
str(form['url']),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" required />',
)
self.assertHTMLEqual(
str(form['slug']),
'<input id="id_slug" type="text" name="slug" maxlength="20" required />',
)
def test_label_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form['name'].label_tag()),
'<label for="id_name">Title:</label>',
)
self.assertHTMLEqual(
str(form['url'].label_tag()),
'<label for="id_url">The URL:</label>',
)
self.assertHTMLEqual(
str(form['slug'].label_tag()),
'<label for="id_slug">Slug:</label>',
)
def test_help_text_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertEqual(
form['slug'].help_text,
'Watch out! Letters, numbers, underscores and hyphens only.',
)
def test_error_messages_overrides(self):
form = FieldOverridesByFormMetaForm(data={
'name': 'Category',
'url': 'http://www.example.com/category/',
'slug': '!%#*@',
})
form.full_clean()
error = [
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!",
]
self.assertEqual(form.errors, {'slug': error})
def test_field_type_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertIs(Category._meta.get_field('url').__class__, models.CharField)
self.assertIsInstance(form.fields['url'], forms.URLField)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
class ValidationTest(SimpleTestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
self.assertTrue(form.is_valid())
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
self.assertTrue(form.is_valid())
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
self.assertTrue(form.is_valid())
class UniqueTest(TestCase):
"""
unique/unique_together validation.
"""
def setUp(self):
self.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
def test_multiple_field_unique_together(self):
"""
When the same field is involved in multiple unique_together
constraints, we need to make sure we don't remove the data for it
before doing all the validation checking (not just failing after
the first one).
"""
class TripleForm(forms.ModelForm):
class Meta:
model = Triple
fields = '__all__'
Triple.objects.create(left=1, middle=2, right=3)
form = TripleForm({'left': '1', 'middle': '2', 'right': '3'})
self.assertFalse(form.is_valid())
form = TripleForm({'left': '1', 'middle': '3', 'right': '1'})
self.assertTrue(form.is_valid())
@skipUnlessDBFeature('supports_nullable_unique_constraints')
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other', 'author': self.writer.pk, 'isbn': isbn,
'suffix1': '1', 'suffix2': '2',
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other',
'author': self.writer.pk,
'isbn': '9876',
'suffix1': '0',
'suffix2': '0'
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'],
['Derived book with this Suffix1 and Suffix2 already exists.'])
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({'key': '', 'desc': ''})
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertFalse(form.is_valid())
if connection.features.interprets_empty_strings_as_nulls:
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
else:
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.date(2008, 9, 3),
)
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'}
form = PostForm(data, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], ['This field is required.'])
def test_unique_for_date_in_exclude(self):
"""
If the date for unique_for_* constraints is excluded from the
ModelForm (in this case 'posted' has editable=False), then the
constraint should be ignored.
"""
class DateTimePostForm(forms.ModelForm):
class Meta:
model = DateTimePost
fields = '__all__'
DateTimePost.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.datetime(2008, 9, 3, 10, 10, 1),
)
# 'title' has unique_for_date='posted'
form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
# 'slug' has unique_for_year='posted'
form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertTrue(form.is_valid())
# 'subtitle' has unique_for_month='posted'
form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertTrue(form.is_valid())
def test_inherited_unique_for_date(self):
p = Post.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.date(2008, 9, 3),
)
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'}
form = DerivedPostForm(data, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
fields = '__all__'
p = FlexibleDatePost.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.date(2008, 9, 3),
)
form = FlexDatePostForm({'title': "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'slug': "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally"})
self.assertTrue(form.is_valid())
data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0"}
form = FlexDatePostForm(data, instance=p)
self.assertTrue(form.is_valid())
def test_override_unique_message(self):
class CustomProductForm(ProductForm):
class Meta(ProductForm.Meta):
error_messages = {
'slug': {
'unique': "%(model_name)s's %(field_label)s not unique.",
}
}
Product.objects.create(slug='teddy-bear-blue')
form = CustomProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ["Product's Slug not unique."])
def test_override_unique_together_message(self):
class CustomPriceForm(PriceForm):
class Meta(PriceForm.Meta):
error_messages = {
NON_FIELD_ERRORS: {
'unique_together': "%(model_name)s's %(field_labels)s not unique.",
}
}
Price.objects.create(price=6.00, quantity=1)
form = CustomPriceForm({'price': '6.00', 'quantity': '1'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."])
def test_override_unique_for_date_message(self):
class CustomPostForm(PostForm):
class Meta(PostForm.Meta):
error_messages = {
'title': {
'unique_for_date': (
"%(model_name)s's %(field_label)s not unique "
"for %(date_field_label)s date."
),
}
}
Post.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.date(2008, 9, 3),
)
form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."])
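# Hedged reference (collected from the override tests above): the
# interpolation placeholders used when customizing uniqueness error
# messages, keyed by error code.
UNIQUE_MESSAGE_PLACEHOLDERS = {
    'unique': ('%(model_name)s', '%(field_label)s'),
    'unique_together': ('%(model_name)s', '%(field_labels)s'),
    'unique_for_date': ('%(model_name)s', '%(field_label)s', '%(date_field_label)s'),
}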
class ModelFormBasicTests(TestCase):
def create_basic_data(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment")
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test")
self.c3 = Category.objects.create(
name="Third test", slug="third-test", url="third")
self.w_royko = Writer.objects.create(name='Mike Royko')
self.w_woodward = Writer.objects.create(name='Bob Woodward')
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
"""<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" required /></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required /></td></tr>"""
)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" required /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" required /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" required /></li>"""
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" required />""")
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" required /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" required /></li>
<li>The URL: <input type="text" name="url" maxlength="40" required /></li>"""
)
def test_initial_values(self):
self.create_basic_data()
# Initial values can be provided for model forms
f = ArticleForm(
auto_id=False,
initial={
'headline': 'Your headline here',
'categories': [str(self.c1.id), str(self.c2.id)]
})
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" required /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required /></li>
<li>Pub date: <input type="text" name="pub_date" required /></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s" selected>It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
f = RoykoForm(auto_id=False, instance=self.w_royko)
self.assertHTMLEqual(
str(f),
'''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" required /><br />
<span class="helptext">Use both first and last names.</span></td></tr>'''
)
art = Article.objects.create(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=self.w_royko,
article='Hello.'
)
art_id_1 = art.id
f = ArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" required /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" required /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required /></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
f = ArticleForm({
'headline': 'Test headline',
'slug': 'test-headline',
'pub_date': '1984-02-06',
'writer': str(self.w_royko.pk),
'article': 'Hello.'
}, instance=art)
self.assertEqual(f.errors, {})
self.assertTrue(f.is_valid())
test_art = f.save()
self.assertEqual(test_art.id, art_id_1)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, 'Test headline')
def test_m2m_initial_callable(self):
"""
Regression for #10349: A callable can be provided as the initial value for an m2m field
"""
self.maxDiff = 1200
self.create_basic_data()
# Set up a callable initial value
def formfield_for_dbfield(db_field, **kwargs):
if db_field.name == 'categories':
kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2]
return db_field.formfield(**kwargs)
# Create a ModelForm, instantiate it, and check that the output is as expected
ModelForm = modelform_factory(Article, fields=['headline', 'categories'],
formfield_callback=formfield_for_dbfield)
form = ModelForm()
self.assertHTMLEqual(
form.as_ul(),
"""<li><label for="id_headline">Headline:</label>
<input id="id_headline" type="text" name="headline" maxlength="50" required /></li>
<li><label for="id_categories">Categories:</label>
<select multiple="multiple" name="categories" id="id_categories">
<option value="%d" selected>Entertainment</option>
<option value="%d" selected>It&39;s a test</option>
<option value="%d">Third test</option>
</select></li>"""
% (self.c1.pk, self.c2.pk, self.c3.pk))
def test_basic_creation(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
# Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(Category.objects.count(), 1)
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
def test_save_commit_false(self):
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
self.assertTrue(f.is_valid())
c1 = f.save(commit=False)
self.assertEqual(c1.name, "Third test")
self.assertEqual(Category.objects.count(), 0)
c1.save()
self.assertEqual(Category.objects.count(), 1)
def test_save_with_data_errors(self):
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], ['This field is required.'])
self.assertEqual(
f.errors['slug'],
["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."]
)
self.assertEqual(f.cleaned_data, {'url': 'foo'})
msg = "The Category could not be created because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaisesMessage(ValueError, msg):
f.save()
def test_multi_fields(self):
self.create_basic_data()
self.maxDiff = None
# ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
# fields with the 'choices' attribute are represented by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
str(f),
'''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" required /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" required /></td></tr>
<tr><th>Writer:</th><td><select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article" required></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
# Add some categories and test the many-to-many form output.
new_art = Article.objects.create(
article="Hello.", headline="New headline", slug="new-headline",
pub_date=datetime.date(1988, 1, 4), writer=self.w_royko)
new_art.categories.add(Category.objects.get(name='Entertainment'))
self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
f = ArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required /></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
def test_subset_fields(self):
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'pub_date')
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(
str(f),
'''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" required /></td></tr>''')
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'slug', 'pub_date')
w_royko = Writer.objects.create(name='Mike Royko')
art = Article.objects.create(
article="Hello.", headline="New headline", slug="new-headline",
pub_date=datetime.date(1988, 1, 4), writer=w_royko)
f = PartialArticleFormWithSlug({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04'
}, auto_id=False, instance=art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required /></li>'''
)
self.assertTrue(f.is_valid())
new_art = f.save()
self.assertEqual(new_art.id, art.id)
new_art = Article.objects.get(id=art.id)
self.assertEqual(new_art.headline, 'New headline')
def test_m2m_editing(self):
self.create_basic_data()
form_data = {
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04',
'writer': str(self.w_royko.pk),
'article': 'Hello.',
'categories': [str(self.c1.id), str(self.c2.id)]
}
# Create a new article, with categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
new_art = Article.objects.get(id=new_art.id)
art_id_1 = new_art.id
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
# Now, submit form data with no categories. This clears the article's
# existing category relations (the Category objects themselves remain).
form_data['categories'] = []
f = ArticleForm(form_data, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id, art_id_1)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with no categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
art_id_2 = new_art.id
self.assertNotIn(art_id_2, (None, art_id_1))
new_art = Article.objects.get(id=art_id_2)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
form_data['categories'] = [str(self.c1.id), str(self.c2.id)]
f = ArticleForm(form_data)
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_3 = new_art.id
self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_3)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Save the m2m data on the form
f.save_m2m()
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
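# Hedged aside (behavioral note, not from the original suite): save_m2m is
# only attached by save(commit=False); with commit=True the m2m data has
# already been persisted, so no deferred hook is needed.
def _sketch_save_m2m_hook(self, form_data):
    f = ArticleForm(form_data)
    f.save(commit=False)
    return hasattr(f, 'save_m2m')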
def test_custom_form_fields(self):
# Here, we define a custom ModelForm. Because it happens to have the same fields as
# the Category model, we can just call the form's save() to apply its changes to an
# existing Category instance.
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class Meta:
model = Category
fields = '__all__'
cat = Category.objects.create(name='Third test')
form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
self.assertEqual(form.save().name, 'Third')
self.assertEqual(Category.objects.get(id=cat.id).name, 'Third')
def test_runtime_choicefield_populated(self):
self.maxDiff = None
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
self.create_basic_data()
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" maxlength="50" required /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required /></li>
<li>Pub date: <input type="text" name="pub_date" required /></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> </li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
c4 = Category.objects.create(name='Fourth', url='4th')
w_bernstein = Writer.objects.create(name='Carl Bernstein')
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" maxlength="50" required /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required /></li>
<li>Pub date: <input type="text" name="pub_date" required /></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
<option value="%s">Fourth</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk))
def test_recleaning_model_form_instance(self):
"""
Re-cleaning an instance that was added via a ModelForm shouldn't raise
a pk uniqueness error.
"""
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = '__all__'
form = AuthorForm({'full_name': 'Bob'})
self.assertTrue(form.is_valid())
obj = form.save()
obj.full_name = 'Alice'
obj.full_clean()
class ModelChoiceFieldTests(TestCase):
def setUp(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment")
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test")
self.c3 = Category.objects.create(
name="Third", slug="third-test", url="third")
# ModelChoiceField ############################################################
def test_modelchoicefield(self):
f = forms.ModelChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
('', '---------'),
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test"),
(self.c3.pk, 'Third')])
with self.assertRaises(ValidationError):
f.clean('')
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean(0)
# Invalid types that require TypeError to be caught (#22808).
with self.assertRaises(ValidationError):
f.clean([['fail']])
with self.assertRaises(ValidationError):
f.clean([{'foo': 'bar'}])
self.assertEqual(f.clean(self.c2.id).name, "It's a test")
self.assertEqual(f.clean(self.c3.id).name, 'Third')
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() queries the database when it runs,
# rather than caching results at instantiation time.
c4 = Category.objects.create(name='Fourth', url='4th')
self.assertEqual(f.clean(c4.id).name, 'Fourth')
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() queries the database when it runs,
# rather than caching results at instantiation time.
Category.objects.get(url='4th').delete()
msg = "['Select a valid choice. That choice is not one of the available choices.']"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(c4.id)
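# Hedged aside (assumption about internals): clean() reflects later inserts
# and deletes because it resolves the value with a fresh queryset lookup,
# roughly equivalent to the helper below, rather than a cached choice list.
def _sketch_clean_lookup(self, field, value):
    key = field.to_field_name or 'pk'
    return field.queryset.get(**{key: value})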
def test_modelchoicefield_choices(self):
f = forms.ModelChoiceField(Category.objects.filter(pk=self.c1.id), required=False)
self.assertIsNone(f.clean(''))
self.assertEqual(f.clean(str(self.c1.id)).name, "Entertainment")
with self.assertRaises(ValidationError):
f.clean('100')
# len can be called on choices
self.assertEqual(len(f.choices), 2)
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Third')
self.assertEqual(list(f.choices), [
('', '---------'),
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test")])
self.assertEqual(f.clean(self.c2.id).name, "It's a test")
with self.assertRaises(ValidationError):
f.clean(self.c3.id)
# check that we can safely iterate choices repeatedly
gen_one = list(f.choices)
gen_two = f.choices
self.assertEqual(gen_one[2], (self.c2.pk, "It's a test"))
self.assertEqual(list(gen_two), [
('', '---------'),
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test")])
# check that we can override the label_from_instance method to print custom labels (#4620)
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "category " + str(obj)
self.assertEqual(list(f.choices), [
('', '---------'),
(self.c1.pk, 'category Entertainment'),
(self.c2.pk, "category It's a test"),
(self.c3.pk, 'category Third')])
def test_modelchoicefield_11183(self):
"""
Regression test for ticket #11183.
"""
class ModelChoiceForm(forms.Form):
category = forms.ModelChoiceField(Category.objects.all())
form1 = ModelChoiceForm()
field1 = form1.fields['category']
# To allow the widget to change the queryset of field1.widget.choices correctly,
# without affecting other forms, the following must hold:
self.assertIsNot(field1, ModelChoiceForm.base_fields['category'])
self.assertIs(field1.widget.choices.field, field1)
def test_modelchoicefield_result_cache_not_shared(self):
class ModelChoiceForm(forms.Form):
category = forms.ModelChoiceField(Category.objects.all())
form1 = ModelChoiceForm()
self.assertCountEqual(form1.fields['category'].queryset, [self.c1, self.c2, self.c3])
form2 = ModelChoiceForm()
self.assertIsNone(form2.fields['category'].queryset._result_cache)
def test_modelchoicefield_queryset_none(self):
class ModelChoiceForm(forms.Form):
category = forms.ModelChoiceField(queryset=None)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['category'].queryset = Category.objects.filter(slug__contains='test')
form = ModelChoiceForm()
self.assertCountEqual(form.fields['category'].queryset, [self.c2, self.c3])
def test_modelchoicefield_22745(self):
"""
#22745 -- Make sure that ModelChoiceField with RadioSelect widget
doesn't produce unnecessary db queries when accessing its BoundField's
attrs.
"""
class ModelChoiceForm(forms.Form):
category = forms.ModelChoiceField(Category.objects.all(), widget=forms.RadioSelect)
form = ModelChoiceForm()
field = form['category'] # BoundField
template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
with self.assertNumQueries(1):
template.render(Context({'field': field}))
def test_disabled_modelchoicefield(self):
class ModelChoiceForm(forms.ModelForm):
author = forms.ModelChoiceField(Author.objects.all(), disabled=True)
class Meta:
model = Book
fields = ['author']
book = Book.objects.create(author=Writer.objects.create(name='Test writer'))
form = ModelChoiceForm({}, instance=book)
self.assertEqual(
form.errors['author'],
['Select a valid choice. That choice is not one of the available choices.']
)
def test_disabled_modelchoicefield_has_changed(self):
field = forms.ModelChoiceField(Author.objects.all(), disabled=True)
self.assertIs(field.has_changed('x', 'y'), False)
def test_disabled_multiplemodelchoicefield(self):
class ArticleForm(forms.ModelForm):
categories = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
class Meta:
model = Article
fields = ['categories']
category1 = Category.objects.create(name='cat1')
category2 = Category.objects.create(name='cat2')
article = Article.objects.create(
pub_date=datetime.date(1988, 1, 4),
writer=Writer.objects.create(name='Test writer'),
)
article.categories.set([category1.pk])
form = ArticleForm(data={'categories': [category2.pk]}, instance=article)
self.assertEqual(form.errors, {})
self.assertEqual([x.pk for x in form.cleaned_data['categories']], [category2.pk])
# Disabled fields use the value from `instance` rather than `data`.
form = ArticleForm(data={'categories': [category2.pk]}, instance=article)
form.fields['categories'].disabled = True
self.assertEqual(form.errors, {})
self.assertEqual([x.pk for x in form.cleaned_data['categories']], [category1.pk])
def test_disabled_modelmultiplechoicefield_has_changed(self):
field = forms.ModelMultipleChoiceField(Author.objects.all(), disabled=True)
self.assertIs(field.has_changed('x', 'y'), False)
def test_modelchoicefield_iterator(self):
"""
Iterator defaults to ModelChoiceIterator and can be overridden with
the iterator attribute on a ModelChoiceField subclass.
"""
field = forms.ModelChoiceField(Category.objects.all())
self.assertIsInstance(field.choices, ModelChoiceIterator)
class CustomModelChoiceIterator(ModelChoiceIterator):
pass
class CustomModelChoiceField(forms.ModelChoiceField):
iterator = CustomModelChoiceIterator
field = CustomModelChoiceField(Category.objects.all())
self.assertIsInstance(field.choices, CustomModelChoiceIterator)
def test_modelchoicefield_iterator_pass_model_to_widget(self):
class CustomModelChoiceValue:
def __init__(self, value, obj):
self.value = value
self.obj = obj
def __str__(self):
return str(self.value)
class CustomModelChoiceIterator(ModelChoiceIterator):
def choice(self, obj):
value, label = super().choice(obj)
return CustomModelChoiceValue(value, obj), label
class CustomCheckboxSelectMultiple(CheckboxSelectMultiple):
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
option = super().create_option(name, value, label, selected, index, subindex=subindex, attrs=attrs)
# Modify the HTML based on the object being rendered.
c = value.obj
option['attrs']['data-slug'] = c.slug
return option
class CustomModelMultipleChoiceField(forms.ModelMultipleChoiceField):
iterator = CustomModelChoiceIterator
widget = CustomCheckboxSelectMultiple
field = CustomModelMultipleChoiceField(Category.objects.all())
self.assertHTMLEqual(
field.widget.render('name', []),
'''<ul>
<li><label><input type="checkbox" name="name" value="%d" data-slug="entertainment" />Entertainment</label></li>
<li><label><input type="checkbox" name="name" value="%d" data-slug="its-test" />It's a test</label></li>
<li><label><input type="checkbox" name="name" value="%d" data-slug="third-test" />Third</label></li>
</ul>''' % (self.c1.pk, self.c2.pk, self.c3.pk),
)
def test_modelchoicefield_num_queries(self):
"""
Widgets that render multiple subwidgets shouldn't make more than one
database query.
"""
categories = Category.objects.all()
class CategoriesForm(forms.Form):
radio = forms.ModelChoiceField(queryset=categories, widget=forms.RadioSelect)
checkbox = forms.ModelMultipleChoiceField(queryset=categories, widget=forms.CheckboxSelectMultiple)
template = Template("""
{% for widget in form.checkbox %}{{ widget }}{% endfor %}
{% for widget in form.radio %}{{ widget }}{% endfor %}
""")
with self.assertNumQueries(2):
template.render(Context({'form': CategoriesForm()}))
class ModelMultipleChoiceFieldTests(TestCase):
def setUp(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment")
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test")
self.c3 = Category.objects.create(
name="Third", slug="third-test", url="third")
def test_model_multiple_choice_field(self):
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test"),
(self.c3.pk, 'Third')])
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertQuerysetEqual(f.clean([self.c1.id]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
self.assertQuerysetEqual(f.clean([str(self.c1.id)]), ["Entertainment"])
self.assertQuerysetEqual(
f.clean([str(self.c1.id), str(self.c2.id)]),
["Entertainment", "It's a test"], ordered=False
)
self.assertQuerysetEqual(
f.clean([self.c1.id, str(self.c2.id)]),
["Entertainment", "It's a test"], ordered=False
)
self.assertQuerysetEqual(
f.clean((self.c1.id, str(self.c2.id))),
["Entertainment", "It's a test"], ordered=False
)
with self.assertRaises(ValidationError):
f.clean(['100'])
with self.assertRaises(ValidationError):
f.clean('hello')
with self.assertRaises(ValidationError):
f.clean(['fail'])
# Invalid types that require TypeError to be caught (#22808).
with self.assertRaises(ValidationError):
f.clean([['fail']])
with self.assertRaises(ValidationError):
f.clean([{'foo': 'bar'}])
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
# Note, we are using an id of 1006 here since tests that run before
# this may create categories with primary keys up to 6. Use
# a number that will not conflict.
c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='6th').delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
def test_model_multiple_choice_required_false(self):
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertIsInstance(f.clean([]), EmptyQuerySet)
self.assertIsInstance(f.clean(()), EmptyQuerySet)
with self.assertRaises(ValidationError):
f.clean(['0'])
with self.assertRaises(ValidationError):
f.clean([str(self.c3.id), '0'])
with self.assertRaises(ValidationError):
f.clean([str(self.c1.id), '0'])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Third')
self.assertEqual(list(f.choices), [
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test")])
self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
with self.assertRaises(ValidationError):
f.clean([self.c3.id])
with self.assertRaises(ValidationError):
f.clean([str(self.c2.id), str(self.c3.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertEqual(list(f.choices), [
(self.c1.pk, 'multicategory Entertainment'),
(self.c2.pk, "multicategory It's a test"),
(self.c3.pk, 'multicategory Third')])
def test_model_multiple_choice_number_of_queries(self):
"""
ModelMultipleChoiceField does O(1) queries instead of O(n) (#10156).
"""
persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]
f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])
def test_model_multiple_choice_run_validators(self):
"""
        ModelMultipleChoiceField runs given validators (#14144).
"""
for i in range(30):
Writer.objects.create(name="Person %s" % i)
self._validator_run = False
def my_validator(value):
self._validator_run = True
f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(),
validators=[my_validator])
f.clean([p.pk for p in Writer.objects.all()[8:9]])
self.assertTrue(self._validator_run)
def test_model_multiple_choice_show_hidden_initial(self):
"""
Test support of show_hidden_initial by ModelMultipleChoiceField.
"""
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(show_hidden_initial=True,
queryset=Writer.objects.all())
person1 = Writer.objects.create(name="Person 1")
person2 = Writer.objects.create(name="Person 2")
form = WriterForm(initial={'persons': [person1, person2]},
data={'initial-persons': [str(person1.pk), str(person2.pk)],
'persons': [str(person1.pk), str(person2.pk)]})
self.assertTrue(form.is_valid())
self.assertFalse(form.has_changed())
form = WriterForm(initial={'persons': [person1, person2]},
data={'initial-persons': [str(person1.pk), str(person2.pk)],
'persons': [str(person2.pk)]})
self.assertTrue(form.is_valid())
self.assertTrue(form.has_changed())
def test_model_multiple_choice_field_22745(self):
"""
#22745 -- Make sure that ModelMultipleChoiceField with
CheckboxSelectMultiple widget doesn't produce unnecessary db queries
when accessing its BoundField's attrs.
"""
class ModelMultipleChoiceForm(forms.Form):
categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple)
form = ModelMultipleChoiceForm()
field = form['categories'] # BoundField
template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
with self.assertNumQueries(1):
template.render(Context({'field': field}))
def test_show_hidden_initial_changed_queries_efficiently(self):
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(
show_hidden_initial=True, queryset=Writer.objects.all())
writers = (Writer.objects.create(name=str(x)) for x in range(0, 50))
writer_pks = tuple(x.pk for x in writers)
form = WriterForm(data={'initial-persons': writer_pks})
with self.assertNumQueries(1):
self.assertTrue(form.has_changed())
def test_clean_does_deduplicate_values(self):
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
person1 = Writer.objects.create(name="Person 1")
form = WriterForm(data={})
queryset = form.fields['persons'].clean([str(person1.pk)] * 50)
sql, params = queryset.query.sql_with_params()
self.assertEqual(len(params), 1)
def test_to_field_name_with_initial_data(self):
class ArticleCategoriesForm(forms.ModelForm):
categories = forms.ModelMultipleChoiceField(Category.objects.all(), to_field_name='slug')
class Meta:
model = Article
fields = ['categories']
article = Article.objects.create(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=Writer.objects.create(name='Test writer'),
article='Hello.',
)
article.categories.add(self.c2, self.c3)
form = ArticleCategoriesForm(instance=article)
self.assertCountEqual(form['categories'].value(), [self.c2.slug, self.c3.slug])
class ModelOneToOneFieldTests(TestCase):
def test_modelform_onetoonefield(self):
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
fields = '__all__'
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
fields = '__all__'
self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
def test_modelform_subclassed_model(self):
class BetterWriterForm(forms.ModelForm):
class Meta:
# BetterWriter model is a subclass of Writer with an additional `score` field
model = BetterWriter
fields = '__all__'
bw = BetterWriter.objects.create(name='Joe Better', score=10)
self.assertEqual(sorted(model_to_dict(bw)),
['id', 'name', 'score', 'writer_ptr'])
form = BetterWriterForm({'name': 'Some Name', 'score': 12})
self.assertTrue(form.is_valid())
bw2 = form.save()
self.assertEqual(bw2.score, 12)
def test_onetoonefield(self):
class WriterProfileForm(forms.ModelForm):
class Meta:
# WriterProfile has a OneToOneField to Writer
model = WriterProfile
fields = '__all__'
self.w_royko = Writer.objects.create(name='Mike Royko')
self.w_woodward = Writer.objects.create(name='Bob Woodward')
form = WriterProfileForm()
self.assertHTMLEqual(
form.as_p(),
'''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" required /></p>''' % (
self.w_woodward.pk, self.w_royko.pk,
)
)
data = {
'writer': str(self.w_woodward.pk),
'age': '65',
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(str(instance), 'Bob Woodward is 65')
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(
form.as_p(),
'''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required>
<option value="">---------</option>
<option value="%s" selected>Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label>
<input type="number" name="age" value="65" id="id_age" min="0" required /></p>''' % (
self.w_woodward.pk, self.w_royko.pk,
)
)
def test_assignment_of_none(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = ['publication', 'full_name']
publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22))
author = Author.objects.create(publication=publication, full_name='John Doe')
form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
self.assertTrue(form.is_valid())
self.assertIsNone(form.cleaned_data['publication'])
author = form.save()
# author object returned from form still retains original publication object
# that's why we need to retrieve it from database again
new_author = Author.objects.get(pk=author.pk)
self.assertIsNone(new_author.publication)
def test_assignment_of_none_null_false(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author1
fields = ['publication', 'full_name']
publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22))
author = Author1.objects.create(publication=publication, full_name='John Doe')
form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
self.assertFalse(form.is_valid())
class FileAndImageFieldTests(TestCase):
def test_clean_false(self):
"""
If the ``clean`` method on a non-required FileField receives False as
the data (meaning clear the field value), it returns False, regardless
of the value of ``initial``.
"""
f = forms.FileField(required=False)
self.assertIs(f.clean(False), False)
self.assertIs(f.clean(False, 'initial'), False)
def test_clean_false_required(self):
"""
If the ``clean`` method on a required FileField receives False as the
data, it has the same effect as None: initial is returned if non-empty,
otherwise the validation catches the lack of a required value.
"""
f = forms.FileField(required=True)
self.assertEqual(f.clean(False, 'initial'), 'initial')
with self.assertRaises(ValidationError):
f.clean(False)
def test_full_clear(self):
"""
Integration happy-path test that a model FileField can actually be set
and cleared via a ModelForm.
"""
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
form = DocumentForm()
self.assertIn('name="myfile"', str(form))
self.assertNotIn('myfile-clear', str(form))
form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
self.assertEqual(doc.myfile.name, 'something.txt')
form = DocumentForm(instance=doc)
self.assertIn('myfile-clear', str(form))
form = DocumentForm(instance=doc, data={'myfile-clear': 'true'})
doc = form.save(commit=False)
self.assertFalse(doc.myfile)
def test_clear_and_file_contradiction(self):
"""
If the user submits a new file upload AND checks the clear checkbox,
they get a validation error, and the bound redisplay of the form still
includes the current file and the clear checkbox.
"""
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
form = DocumentForm(instance=doc,
files={'myfile': SimpleUploadedFile('something.txt', b'content')},
data={'myfile-clear': 'true'})
        self.assertFalse(form.is_valid())
self.assertEqual(form.errors['myfile'],
['Please either submit a file or check the clear checkbox, not both.'])
rendered = str(form)
self.assertIn('something.txt', rendered)
self.assertIn('myfile-clear', rendered)
def test_render_empty_file_field(self):
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
doc = Document.objects.create()
form = DocumentForm(instance=doc)
self.assertHTMLEqual(
str(form['myfile']),
'<input id="id_myfile" name="myfile" type="file" />'
)
def test_file_field_data(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={'description': 'Assistance'})
self.assertFalse(f.is_valid())
f = TextFileForm(data={'description': 'Assistance'}, files={})
self.assertFalse(f.is_valid())
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
instance.file.delete()
# If the previous file has been deleted, the file name can be reused
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
self.assertFalse(f.is_valid())
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm(
data={'description': 'Assistance'},
instance=instance)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_filefield_required_false(self):
# Test the non-required FileField
f = TextFileForm(data={'description': 'Assistance'})
f.fields['file'].required = False
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, '')
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
        # Instance can be edited without re-uploading the file, and the existing file should be preserved.
f = TextFileForm(
data={'description': 'New Description'},
instance=instance)
f.fields['file'].required = False
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_custom_file_field_save(self):
"""
Regression for #11149: save_form_data should be called only once
"""
class CFFForm(forms.ModelForm):
class Meta:
model = CustomFF
fields = '__all__'
# It's enough that the form saves without error -- the custom save routine will
# generate an AssertionError if it is called more than once during save.
form = CFFForm(data={'f': None})
form.save()
def test_file_field_multiple_save(self):
"""
Simulate a file upload and check how many times Model.save() gets
called. Test for bug #639.
"""
class PhotoForm(forms.ModelForm):
class Meta:
model = Photo
fields = '__all__'
# Grab an image for testing.
filename = os.path.join(os.path.dirname(__file__), 'test.png')
with open(filename, "rb") as fp:
img = fp.read()
# Fake a POST QueryDict and FILES MultiValueDict.
data = {'title': 'Testing'}
files = {"image": SimpleUploadedFile('test.png', img, 'image/png')}
form = PhotoForm(data=data, files=files)
p = form.save()
try:
# Check the savecount stored on the object (see the model).
self.assertEqual(p._savecount, 1)
finally:
# Delete the "uploaded" file to avoid clogging /tmp.
p = Photo.objects.get()
p.image.delete(save=False)
def test_file_path_field_blank(self):
"""
Regression test for #8842: FilePathField(blank=True)
"""
class FPForm(forms.ModelForm):
class Meta:
model = FilePathModel
fields = '__all__'
form = FPForm()
names = [p[1] for p in form['path'].field.choices]
names.sort()
self.assertEqual(names, ['---------', '__init__.py', 'models.py', 'test_uuid.py', 'tests.py'])
@skipUnless(test_images, "Pillow not installed")
def test_image_field(self):
# ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
with open(os.path.join(os.path.dirname(__file__), 'test.png'), 'rb') as fp:
image_data = fp.read()
with open(os.path.join(os.path.dirname(__file__), 'test2.png'), 'rb') as fp:
image_data2 = fp.read()
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)})
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = ''
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={'description': 'Test'})
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertIsNone(instance.width)
self.assertIsNone(instance.height)
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect
# the image or its width/height properties.
f = OptionalImageFileForm(
data={'description': 'New Description'},
instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test4.png', image_data2)}
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test4.png')
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
f = ImageFileForm(
data={'description': 'And a final one', 'path': 'foo'},
files={'image': SimpleUploadedFile('test4.png', image_data)})
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'foo/test4.png')
instance.delete()
# Editing an instance that has an image without an extension shouldn't
# fail validation. First create:
f = NoExtensionImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)},
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/no_extension')
# Then edit:
f = NoExtensionImageFileForm(data={'description': 'Edited image'}, instance=instance)
self.assertTrue(f.is_valid())
class ModelOtherFieldTests(SimpleTestCase):
def test_big_integer_field(self):
bif = BigIntForm({'biggie': '-9223372036854775808'})
self.assertTrue(bif.is_valid())
bif = BigIntForm({'biggie': '-9223372036854775809'})
self.assertFalse(bif.is_valid())
self.assertEqual(
bif.errors,
{'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']}
)
bif = BigIntForm({'biggie': '9223372036854775807'})
self.assertTrue(bif.is_valid())
bif = BigIntForm({'biggie': '9223372036854775808'})
self.assertFalse(bif.is_valid())
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
def test_url_on_modelform(self):
"Check basic URL field validation on model forms"
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = '__all__'
self.assertFalse(HomepageForm({'url': 'foo'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid())
def test_modelform_non_editable_field(self):
"""
When explicitly including a non-editable field in a ModelForm, the
error message should be explicit.
"""
# 'created', non-editable, is excluded by default
self.assertNotIn('created', ArticleForm().fields)
msg = "'created' cannot be specified for Article model form as it is a non-editable field"
with self.assertRaisesMessage(FieldError, msg):
class InvalidArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'created')
def test_http_prefixing(self):
"""
        If the http:// prefix is omitted on form input, the field adds it (refs #13613).
"""
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = '__all__'
form = HomepageForm({'url': 'example.com'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['url'], 'http://example.com')
form = HomepageForm({'url': 'example.com/test'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['url'], 'http://example.com/test')
class OtherModelFormTests(TestCase):
def test_media_on_modelform(self):
        # As with a regular Form class, you can define custom media to be
        # used with the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(
str(f.media),
'''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>'''
)
def test_choices_type(self):
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('42')
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('z')
def test_prefetch_related_queryset(self):
"""
ModelChoiceField should respect a prefetch_related() on its queryset.
"""
blue = Colour.objects.create(name='blue')
red = Colour.objects.create(name='red')
multicolor_item = ColourfulItem.objects.create()
multicolor_item.colours.add(blue, red)
red_item = ColourfulItem.objects.create()
red_item.colours.add(red)
class ColorModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return ', '.join(c.name for c in obj.colours.all())
field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related('colours'))
with self.assertNumQueries(4): # would be 5 if prefetch is ignored
self.assertEqual(tuple(field.choices), (
('', '---------'),
(multicolor_item.pk, 'blue, red'),
(red_item.pk, 'red'),
))
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name='Apple')
Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), (
('', '---------'),
(86, 'Apple'),
(87, 'Core'),
(22, 'Pear')))
form = InventoryForm(instance=core)
self.assertHTMLEqual(str(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected>Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
data = model_to_dict(core)
data['parent'] = '22'
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, 'Pear')
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ['description', 'url']
self.assertEqual(list(CategoryForm.base_fields),
['description', 'url'])
self.assertHTMLEqual(
str(CategoryForm()),
'''<tr><th><label for="id_description">Description:</label></th>
<td><input type="text" name="description" id="id_description" required /></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required /></td></tr>'''
)
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
self.assertQuerysetEqual(field.clean([86]), ['Apple'])
form = SelectInventoryForm({'items': [87, 22]})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.cleaned_data), 1)
self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(list(CustomFieldForExclusionForm.base_fields),
['name'])
self.assertHTMLEqual(
str(CustomFieldForExclusionForm()),
'''<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="10" required /></td></tr>'''
)
def test_iterable_model_m2m(self):
class ColourfulItemForm(forms.ModelForm):
class Meta:
model = ColourfulItem
fields = '__all__'
colour = Colour.objects.create(name='Blue')
form = ColourfulItemForm()
self.maxDiff = 1024
self.assertHTMLEqual(
form.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" required /></p>
<p><label for="id_colours">Colours:</label>
<select multiple="multiple" name="colours" id="id_colours" required>
<option value="%(blue_pk)s">Blue</option>
</select></p>"""
% {'blue_pk': colour.pk})
def test_callable_field_default(self):
class PublicationDefaultsForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ('title', 'date_published', 'mode', 'category')
self.maxDiff = 2000
form = PublicationDefaultsForm()
today_str = str(datetime.date.today())
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_title">Title:</label>
<input id="id_title" maxlength="30" name="title" type="text" required /></p>
<p><label for="id_date_published">Date published:</label>
<input id="id_date_published" name="date_published" type="text" value="{0}" required />
<input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}" /></p>
<p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
<option value="di" selected>direct</option>
<option value="de">delayed</option></select>
<input id="initial-id_mode" name="initial-mode" type="hidden" value="di" /></p>
<p><label for="id_category">Category:</label> <select id="id_category" name="category">
<option value="1">Games</option>
<option value="2">Comics</option>
<option value="3" selected>Novel</option></select>
<input id="initial-id_category" name="initial-category" type="hidden" value="3" />
""".format(today_str)
)
empty_data = {
'title': '',
'date_published': today_str,
'initial-date_published': today_str,
'mode': 'di',
'initial-mode': 'di',
'category': '3',
'initial-category': '3',
}
bound_form = PublicationDefaultsForm(empty_data)
self.assertFalse(bound_form.has_changed())
class ModelFormCustomErrorTests(SimpleTestCase):
def test_custom_error_messages(self):
data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'}
errors = CustomErrorMessageForm(data).errors
self.assertHTMLEqual(
str(errors['name1']),
'<ul class="errorlist"><li>Form custom error message.</li></ul>'
)
self.assertHTMLEqual(
str(errors['name2']),
'<ul class="errorlist"><li>Model custom error message.</li></ul>'
)
def test_model_clean_error_messages(self):
data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors['name1']),
'<ul class="errorlist"><li>Model.clean() error messages.</li></ul>'
)
data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors['name1']),
'<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>'
)
data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['__all__'], ['Global error message.'])
class CustomCleanTests(TestCase):
def test_override_clean(self):
"""
Regression for #12596: Calling super from ModelForm.clean() should be
optional.
"""
class TripleFormWithCleanOverride(forms.ModelForm):
class Meta:
model = Triple
fields = '__all__'
def clean(self):
if not self.cleaned_data['left'] == self.cleaned_data['right']:
raise forms.ValidationError('Left and right should be equal')
return self.cleaned_data
form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1})
self.assertTrue(form.is_valid())
# form.instance.left will be None if the instance was not constructed
# by form.full_clean().
self.assertEqual(form.instance.left, 1)
def test_model_form_clean_applies_to_model(self):
"""
Regression test for #12960. Make sure the cleaned_data returned from
ModelForm.clean() is applied to the model instance.
"""
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
def clean(self):
self.cleaned_data['name'] = self.cleaned_data['name'].upper()
return self.cleaned_data
data = {'name': 'Test', 'slug': 'test', 'url': '/test'}
form = CategoryForm(data)
category = form.save()
self.assertEqual(category.name, 'TEST')
class ModelFormInheritanceTests(SimpleTestCase):
def test_form_subclass_inheritance(self):
class Form(forms.Form):
age = forms.IntegerField()
class ModelForm(forms.ModelForm, Form):
class Meta:
model = Writer
fields = '__all__'
self.assertEqual(list(ModelForm().fields), ['name', 'age'])
def test_field_removal(self):
class ModelForm(forms.ModelForm):
class Meta:
model = Writer
fields = '__all__'
class Mixin:
age = None
class Form(forms.Form):
age = forms.IntegerField()
class Form2(forms.Form):
foo = forms.IntegerField()
self.assertEqual(list(ModelForm().fields), ['name'])
self.assertEqual(list(type('NewForm', (Mixin, Form), {})().fields), [])
self.assertEqual(list(type('NewForm', (Form2, Mixin, Form), {})().fields), ['foo'])
self.assertEqual(list(type('NewForm', (Mixin, ModelForm, Form), {})().fields), ['name'])
self.assertEqual(list(type('NewForm', (ModelForm, Mixin, Form), {})().fields), ['name'])
self.assertEqual(list(type('NewForm', (ModelForm, Form, Mixin), {})().fields), ['name', 'age'])
self.assertEqual(list(type('NewForm', (ModelForm, Form), {'age': None})().fields), ['name'])
def test_field_removal_name_clashes(self):
"""
Form fields can be removed in subclasses by setting them to None
(#22510).
"""
class MyForm(forms.ModelForm):
media = forms.CharField()
class Meta:
model = Writer
fields = '__all__'
class SubForm(MyForm):
media = None
self.assertIn('media', MyForm().fields)
self.assertNotIn('media', SubForm().fields)
self.assertTrue(hasattr(MyForm, 'media'))
self.assertTrue(hasattr(SubForm, 'media'))
class StumpJokeForm(forms.ModelForm):
class Meta:
model = StumpJoke
fields = '__all__'
class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field):
queryset = 42
class StumpJokeWithCustomFieldForm(forms.ModelForm):
custom = CustomFieldWithQuerysetButNoLimitChoicesTo()
class Meta:
model = StumpJoke
fields = ()
class LimitChoicesToTests(TestCase):
"""
Tests the functionality of ``limit_choices_to``.
"""
@classmethod
def setUpTestData(cls):
cls.threepwood = Character.objects.create(
username='threepwood',
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
cls.marley = Character.objects.create(
username='marley',
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
def test_limit_choices_to_callable_for_fk_rel(self):
"""
A ForeignKey can use limit_choices_to as a callable (#2554).
"""
stumpjokeform = StumpJokeForm()
self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood])
def test_limit_choices_to_callable_for_m2m_rel(self):
"""
A ManyToManyField can use limit_choices_to as a callable (#2554).
"""
stumpjokeform = StumpJokeForm()
self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood])
def test_custom_field_with_queryset_but_no_limit_choices_to(self):
"""
A custom field with a `queryset` attribute but no `limit_choices_to`
works (#23795).
"""
f = StumpJokeWithCustomFieldForm()
self.assertEqual(f.fields['custom'].queryset, 42)
def test_fields_for_model_applies_limit_choices_to(self):
fields = fields_for_model(StumpJoke, ['has_fooled_today'])
self.assertSequenceEqual(fields['has_fooled_today'].queryset, [self.threepwood])
def test_callable_called_each_time_form_is_instantiated(self):
field = StumpJokeForm.base_fields['most_recently_fooled']
with mock.patch.object(field, 'limit_choices_to') as today_callable_dict:
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 1)
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 2)
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 3)
class FormFieldCallbackTests(SimpleTestCase):
def test_baseform_with_widgets_in_meta(self):
"""Regression for #13095: Using base forms with widgets defined in Meta should not raise errors."""
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {'name': widget}
fields = "__all__"
Form = modelform_factory(Person, form=BaseForm)
self.assertIsInstance(Form.base_fields['name'].widget, forms.Textarea)
def test_factory_with_widget_argument(self):
""" Regression for #15315: modelform_factory should accept widgets
argument
"""
widget = forms.Textarea()
        # Without a widgets argument, the name field's widget should not be a Textarea.
Form = modelform_factory(Person, fields="__all__")
self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
        # With a widgets argument, the name field's widget should be a Textarea.
Form = modelform_factory(Person, fields="__all__", widgets={'name': widget})
self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
def test_modelform_factory_without_fields(self):
""" Regression for #19733 """
message = (
"Calling modelform_factory without defining 'fields' or 'exclude' "
"explicitly is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
modelform_factory(Person)
def test_modelform_factory_with_all_fields(self):
""" Regression for #19733 """
form = modelform_factory(Person, fields="__all__")
self.assertEqual(list(form.base_fields), ["name"])
def test_custom_callback(self):
"""A custom formfield_callback is used if provided"""
callback_args = []
def callback(db_field, **kwargs):
callback_args.append((db_field, kwargs))
return db_field.formfield(**kwargs)
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {'name': widget}
fields = "__all__"
modelform_factory(Person, form=BaseForm, formfield_callback=callback)
id_field, name_field = Person._meta.fields
self.assertEqual(callback_args, [(id_field, {}), (name_field, {'widget': widget})])
def test_bad_callback(self):
        # A bad callback provided by the user still raises an error.
with self.assertRaises(TypeError):
modelform_factory(Person, fields="__all__", formfield_callback='not a function or callable')
def test_inherit_after_custom_callback(self):
def callback(db_field, **kwargs):
if isinstance(db_field, models.CharField):
return forms.CharField(widget=forms.Textarea)
return db_field.formfield(**kwargs)
class BaseForm(forms.ModelForm):
class Meta:
model = Person
fields = '__all__'
NewForm = modelform_factory(Person, form=BaseForm, formfield_callback=callback)
class InheritedForm(NewForm):
pass
for name in NewForm.base_fields:
self.assertEqual(
type(InheritedForm.base_fields[name].widget),
type(NewForm.base_fields[name].widget)
)
class LocalizedModelFormTest(TestCase):
def test_model_form_applies_localize_to_some_fields(self):
class PartiallyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = ('left', 'right',)
fields = '__all__'
f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields['left'].localize)
self.assertFalse(f.fields['middle'].localize)
self.assertTrue(f.fields['right'].localize)
def test_model_form_applies_localize_to_all_fields(self):
class FullyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = '__all__'
fields = '__all__'
f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields['left'].localize)
self.assertTrue(f.fields['middle'].localize)
self.assertTrue(f.fields['right'].localize)
def test_model_form_refuses_arbitrary_string(self):
msg = (
"BrokenLocalizedTripleForm.Meta.localized_fields "
"cannot be a string. Did you mean to type: ('foo',)?"
)
with self.assertRaisesMessage(TypeError, msg):
class BrokenLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = "foo"
class CustomMetaclass(ModelFormMetaclass):
def __new__(cls, name, bases, attrs):
new = super().__new__(cls, name, bases, attrs)
new.base_fields = {}
return new
class CustomMetaclassForm(forms.ModelForm, metaclass=CustomMetaclass):
pass
class CustomMetaclassTestCase(SimpleTestCase):
def test_modelform_factory_metaclass(self):
new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
self.assertEqual(new_cls.base_fields, {})
class StrictAssignmentTests(TestCase):
"""
Should a model do anything special with __setattr__() or descriptors which
raise a ValidationError, a model form should catch the error (#24706).
"""
def test_setattr_raises_validation_error_field_specific(self):
"""
A model ValidationError using the dict form should put the error
message into the correct key of form.errors.
"""
form_class = modelform_factory(model=StrictAssignmentFieldSpecific, fields=['title'])
form = form_class(data={'title': 'testing setattr'}, files=None)
        # Turn on the ValidationError only now, after form.instance has been
        # created, so the model doesn't raise inside its own __init__().
form.instance._should_error = True
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'title': ['Cannot set attribute', 'This field cannot be blank.']
})
def test_setattr_raises_validation_error_non_field(self):
"""
A model ValidationError not using the dict form should put the error
message into __all__ (i.e. non-field errors) on the form.
"""
form_class = modelform_factory(model=StrictAssignmentAll, fields=['title'])
form = form_class(data={'title': 'testing setattr'}, files=None)
        # Turn on the ValidationError only now, after form.instance has been
        # created, so the model doesn't raise inside its own __init__().
form.instance._should_error = True
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'__all__': ['Cannot set attribute'],
'title': ['This field cannot be blank.']
})
class ModelToDictTests(TestCase):
def test_many_to_many(self):
"""Data for a ManyToManyField is a list rather than a lazy QuerySet."""
blue = Colour.objects.create(name='blue')
red = Colour.objects.create(name='red')
item = ColourfulItem.objects.create()
item.colours.set([blue])
data = model_to_dict(item)['colours']
self.assertEqual(data, [blue])
item.colours.set([red])
# If data were a QuerySet, it would be reevaluated here and give "red"
# instead of the original value.
self.assertEqual(data, [blue])
| {
"content_hash": "265108aecad201596c30b0a2c5c966ef",
"timestamp": "",
"source": "github",
"line_count": 3130,
"max_line_length": 129,
"avg_line_length": 40.777955271565496,
"alnum_prop": 0.6000626787323227,
"repo_name": "tomchristie/django",
"id": "7af2aff825db49613261d8fa267a41e82eda5197",
"size": "127635",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "tests/model_forms/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55975"
},
{
"name": "HTML",
"bytes": "219349"
},
{
"name": "JavaScript",
"bytes": "252940"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12092827"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import org.vertx.java.platform.impl.JythonVerticleFactory
import org.vertx.java.core.buffer.Buffer
import org.vertx.java.core.Handler
import org.vertx.java.core.AsyncResultHandler
import org.vertx.java.core.json.JsonObject
import java.lang
import java.util  # for java.util.UUID, used when generating handler ids
from core.javautils import map_to_java, map_from_java
from core.buffer import Buffer
__author__ = "Scott Horn"
__email__ = "scott@hornmicro.com"
__credits__ = "Based entirely on work by Tim Fox http://tfox.org"
class EventBus(object):
"""This class represents a distributed lightweight event bus which can encompass multiple vert.x instances.
    It lets otherwise isolated vert.x application instances communicate with each other.
The event bus implements a distributed publish / subscribe network.
Messages are sent to an address. There can be multiple handlers registered against that address.
    All messages sent over the bus are transient. In the event of failure of all or part of the event bus,
    messages may be lost. Applications should be coded to cope with lost messages, e.g. by resending them,
    and by making application services idempotent.
The order of messages received by any specific handler from a specific sender will match the order of messages
sent from that sender.
When sending a message, a reply handler can be provided. If so, it will be called when the reply from the receiver
has been received.
"""
handler_dict = {}
@staticmethod
def send(address, message, reply_handler=None):
"""Send a message on the event bus
Keyword arguments:
@param address: the address to send to
@param message: The message to send
@param reply_handler: An optional reply handler.
It will be called when the reply from a receiver is received.
"""
EventBus.send_or_pub(True, address, message, None, reply_handler)
@staticmethod
def send_with_timeout(address, message, timeout, reply_handler):
"""Send a message on the event bus with a reply timeout
Keyword arguments:
@param address: the address to send to
@param message: The message to send
@param timeout: A reply timeout
@param reply_handler: An optional reply handler.
It will be called when the reply from a receiver is received.
"""
EventBus.send_or_pub(True, address, message, timeout, reply_handler)
@staticmethod
def publish(address, message):
"""Publish a message on the event bus
Keyword arguments:
@param address: the address to publish to
@param message: The message to publish
"""
EventBus.send_or_pub(False, address, message)
@staticmethod
def send_or_pub(send, address, message, timeout=None, reply_handler=None):
if not address:
raise RuntimeError("An address must be specified")
if message is None:
raise RuntimeError("A message must be specified")
message = EventBus.convert_msg(message)
if send:
            if reply_handler is not None:
if timeout is not None:
EventBus.java_eventbus().sendWithTimeout(address, message, timeout, AsyncInternalHandler(reply_handler))
else:
EventBus.java_eventbus().send(address, message, InternalHandler(reply_handler))
else:
EventBus.java_eventbus().send(address, message)
else:
EventBus.java_eventbus().publish(address, message)
@staticmethod
def register_handler(address, local_only=False, handler=None):
""" Register a handler.
Keyword arguments:
@param address: the address to register for. A single handler can be registered against many addresses.
@param local_only: if True then handler won't be propagated across cluster
@param handler: The handler
@return: id of the handler which can be used in EventBus.unregister_handler
"""
if handler is None:
raise RuntimeError("handler is required")
internal = InternalHandler(handler)
if local_only:
EventBus.java_eventbus().registerLocalHandler(address, internal)
else:
EventBus.java_eventbus().registerHandler(address, internal)
id = java.util.UUID.randomUUID().toString()
EventBus.handler_dict[id] = address, internal
return id
@staticmethod
def register_simple_handler(local_only=False, handler=None):
"""
        Registers a handler against a uniquely generated address; the generated
        address is returned as the handler id. A single handler can be registered against many addresses.
Keyword arguments:
        @param local_only: if True then handler won't be propagated across cluster
@param handler: The handler
@return: id of the handler which can be used in EventBus.unregister_handler
"""
if handler is None:
raise RuntimeError("Handler is required")
internal = InternalHandler(handler)
id = java.util.UUID.randomUUID().toString()
if local_only:
EventBus.java_eventbus().registerLocalHandler(id, internal)
else:
EventBus.java_eventbus().registerHandler(id, internal)
EventBus.handler_dict[id] = id, internal
return id
@staticmethod
def unregister_handler(handler_id):
"""Unregisters a handler
Keyword arguments:
@param handler_id: the id of the handler to unregister. Returned from EventBus.register_handler
"""
        address, handler = EventBus.handler_dict.pop(handler_id)
EventBus.java_eventbus().unregisterHandler(address, handler)
@staticmethod
def convert_msg(message):
if isinstance(message, dict):
message = org.vertx.java.core.json.JsonObject(map_to_java(message))
elif isinstance(message, Buffer):
message = message._to_java_buffer()
elif isinstance(message, long):
message = java.lang.Long(message)
elif isinstance(message, float):
message = java.lang.Double(message)
elif isinstance(message, int):
message = java.lang.Integer(message)
else:
message = map_to_java(message)
return message
@staticmethod
def java_eventbus():
return org.vertx.java.platform.impl.JythonVerticleFactory.vertx.eventBus()
# Allow the event bus default reply timeout to be read and set directly as
# a property of EventBus instances. Note: property getters/setters receive
# the instance, hence the explicit self parameters.
EventBus.default_reply_timeout = property(
    lambda self: EventBus.java_eventbus().getDefaultReplyTimeout(),
    lambda self, x: EventBus.java_eventbus().setDefaultReplyTimeout(x))
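# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal echo example showing how the pieces above fit together:
# register a handler, send a message with a reply handler, and reply.
# It assumes this module runs inside a deployed Jython verticle; the
# address 'demo.echo' and the function names below are hypothetical.
def _example_echo():
    def on_message(message):
        # Echo the body back to the sender; reply() is a no-op if the
        # sender did not provide a reply handler.
        message.reply({'echo': message.body})

    handler_id = EventBus.register_handler('demo.echo', handler=on_message)

    def on_reply(reply):
        print 'got reply: %s' % reply.body
        # Handlers stay registered until explicitly removed.
        EventBus.unregister_handler(handler_id)

    EventBus.send('demo.echo', {'hello': 'world'}, on_reply)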
class InternalHandler(org.vertx.java.core.Handler):
def __init__(self, handler):
self.handler = handler
def handle(self, message):
self.handler(Message(message))
class AsyncInternalHandler(org.vertx.java.core.AsyncResultHandler):
def __init__(self, handler):
self.handler = handler
def handle(self, result):
if result.failed():
self.handler(result.cause(), None)
else:
self.handler(None, Message(result.result()))
class Message(object):
"""Represents a message received from the event bus"""
def __init__(self, message):
self.java_obj = message
if isinstance(message.body(), org.vertx.java.core.json.JsonObject):
self.body = map_from_java(message.body().toMap())
elif isinstance(message.body(), org.vertx.java.core.buffer.Buffer):
self.body = Buffer(message.body())
else:
self.body = map_from_java(message.body())
def reply(self, reply, handler=None):
"""Reply to this message. If the message was sent specifying a receipt handler, that handler will be
called when it has received a reply. If the message wasn't sent specifying a receipt handler
this method does nothing.
Keyword arguments:
@param reply: message to send as reply
@param handler: the reply handler
"""
reply = EventBus.convert_msg(reply)
if handler is None:
self.java_obj.reply(reply)
else:
self.java_obj.reply(reply, InternalHandler(handler))
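# --- Usage sketch for reply timeouts (illustrative) ---
# With EventBus.send_with_timeout() the reply handler follows the
# (error, message) convention of AsyncInternalHandler above: exactly one
# of the two arguments is None. The address and timeout are hypothetical.
def _example_timeout():
    def on_reply(error, message):
        if error is not None:
            print 'no reply within 5s: %s' % error
        else:
            print 'reply body: %s' % message.body

    EventBus.send_with_timeout('demo.slow-service', 'ping', 5000, on_reply)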
| {
"content_hash": "7257724979f46e75cbe503a8ef247d80",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 162,
"avg_line_length": 39.344339622641506,
"alnum_prop": 0.6633497182592015,
"repo_name": "fhg-fokus-nubomedia/signaling-plane",
"id": "8c32e6943d414905a2a895016c90dc778a051a07",
"size": "8938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sys-mods/io.vertx~lang-jython~2.1.0-CUSTOM-PYTHON-2.7/core/event_bus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12152"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "2637100"
},
{
"name": "Java",
"bytes": "5622899"
},
{
"name": "JavaScript",
"bytes": "3448641"
},
{
"name": "Python",
"bytes": "161709"
},
{
"name": "Shell",
"bytes": "8658"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 0, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0); | {
"content_hash": "aac36b1d860cba616562a679894a7c0b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 174,
"avg_line_length": 39.142857142857146,
"alnum_prop": 0.7153284671532847,
"repo_name": "antoinecarme/pyaf",
"id": "7a7bdaaa571df0ad7a5d11a5b52cbf7b0fa4bef8",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_RelativeDifference/trend_LinearTrend/cycle_0/ar_/test_artificial_32_RelativeDifference_LinearTrend_0__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import requests
from ..exceptions import ClientError
class PGDBClient(object):
"""
Simple client for interacting with ISCAN servers.
"""
def __init__(self, host, token=None, corpus_name=None):
self.host = host
self.token = token
if self.host.endswith('/'):
self.host = self.host[:-1]
self.corpus_name = corpus_name
self.query_behavior = 'speaker'
def login(self, user_name, password):
"""
Get an authentication token from the ISCAN server using the specified credentials
Parameters
----------
user_name : str
User name
password : str
Password
Returns
-------
str
Authentication token to use in future requests
"""
end_point = '/'.join([self.host, 'api', 'rest-auth', 'login', ''])
resp = requests.post(end_point, {'username': user_name, 'password': password})
token = resp.json()['key']
self.token = token
return token
def create_database(self, database_name):
"""
Create a new database with the specified name
Parameters
----------
database_name : str
Name of the database to be created
Returns
-------
dict
Database information
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
raise ClientError('Could not create database, already exists.')
end_point = '/'.join([self.host, 'api', 'databases', ''])
data = {'name': database_name}
resp = requests.post(end_point, data=data, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not create database: {}'.format(resp.text))
return resp.json()
def delete_database(self, database_name):
"""
Delete a database and all associated content
Parameters
----------
database_name : str
Name of database to be deleted
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not delete database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])
resp = requests.delete(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code != 204:
raise ClientError('Could not delete database.')
def database_status(self, database_name=None):
"""
Get the current status of a specified database, or all databases on the server.
Parameters
----------
database_name : str
Name of database to get status of, if not specified, will get status of all databases
Returns
-------
dict
Database status JSON
"""
if database_name is not None:
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
else:
end_point = '/'.join([self.host, 'api', 'databases', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def get_directory(self, database_name):
"""
Get the directory of a local database
Parameters
----------
database_name : str
Name of database
Returns
-------
str
Database data directory
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'data_directory', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def get_ports(self, database_name):
"""
Get the ports of a locally running database
Parameters
----------
database_name : str
Name of database
Returns
-------
dict
Ports of the database
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'ports', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def list_databases(self):
"""
Get a list of all databases
Returns
-------
list
Database information
"""
end_point = '/'.join([self.host, 'api', 'databases', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code != 200:
raise ClientError('Encountered error getting list of databases: {}'.format(resp.json()))
return resp.json()
def list_corpora(self, database_name=None):
"""
Get a list of all corpora
Parameters
----------
database_name : str
Name of the database to restrict corpora list to, optional
Returns
-------
list
Corpora information
"""
if database_name is not None:
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'corpora', ''])
else:
end_point = '/'.join([self.host, 'api', 'corpora', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def start_database(self, database_name):
"""
Start a database
Parameters
----------
database_name : str
Database to start
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'start', ''])
resp = requests.post(end_point, data={}, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not start database: {}'.format(resp.text))
def stop_database(self, database_name):
"""
Stop a database
Parameters
----------
database_name : str
Database to stop
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'stop', ''])
resp = requests.post(end_point, data={}, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not stop database: {}'.format(resp.text))
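# --- Hedged usage sketch (illustration only, not part of the original module).
# Assumes `client` is an instance of the client class defined above, already
# constructed with a host URL and auth token; 'demo' is a made-up name.
def _example_database_lifecycle(client):
    client.create_database('demo')     # POST /api/databases/
    client.start_database('demo')      # POST /api/databases/<id>/start/
    ports = client.get_ports('demo')   # GET  /api/databases/<id>/ports/
    client.stop_database('demo')       # POST /api/databases/<id>/stop/
    client.delete_database('demo')     # DELETE /api/databases/<id>/
    return ports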
| {
"content_hash": "c4906d51073107f3974afec247f0a7dd",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 108,
"avg_line_length": 33.426877470355734,
"alnum_prop": 0.5263095660399669,
"repo_name": "PhonologicalCorpusTools/PyAnnotationGraph",
"id": "75fe4ffed842895823f432c3592116337d923fac",
"size": "8457",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "polyglotdb/client/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308936"
},
{
"name": "Shell",
"bytes": "1157"
}
],
"symlink_target": ""
} |
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from bgpvpn_dashboard.api import bgpvpn as bgpvpn_api
LOG = logging.getLogger(__name__)
class AddRouterParametersInfoAction(workflows.Action):
advertise_extra_routes = forms.BooleanField(
label=_("Advertise Extra Routes"),
initial=True,
required=False,
help_text="Boolean flag controlling whether or not the routes "
"specified in the routes attribute of the router will be "
"advertised to the BGPVPN (default: true).")
class Meta(object):
name = _("Optional Parameters")
slug = "add_router_parameters"
def __init__(self, request, context, *args, **kwargs):
super(AddRouterParametersInfoAction, self).__init__(
request, context, *args, **kwargs)
if 'with_parameters' in context:
self.fields['with_parameters'] = forms.BooleanField(
initial=context['with_parameters'],
required=False,
widget=forms.HiddenInput()
)
class CreateRouterAssociationInfoAction(workflows.Action):
router_resource = forms.ChoiceField(
label=_("Associate Router"),
widget=forms.ThemableSelectWidget(
data_attrs=('name', 'id'),
transform=lambda x: "%s" % x.name_or_id))
class Meta(object):
name = _("Create Association")
help_text = _("Create a new router association.")
slug = "create_router_association"
def __init__(self, request, context, *args, **kwargs):
super(CreateRouterAssociationInfoAction, self).__init__(
request, context, *args, **kwargs)
# when an admin user uses the project panel BGPVPN, there is no
# tenant_id in context because bgpvpn_get doesn't return it
if request.user.is_superuser and context.get("project_id"):
tenant_id = context.get("project_id")
else:
tenant_id = self.request.user.tenant_id
try:
routers = api.neutron.router_list(request, tenant_id=tenant_id)
if routers:
choices = [('', _("Choose a router"))] + [(r.id, r) for r in
routers]
self.fields['router_resource'].choices = choices
else:
self.fields['router_resource'].choices = [('', _("No router"))]
except Exception:
exceptions.handle(request, _("Unable to retrieve routers"))
if api.neutron.is_extension_supported(request,
'bgpvpn-routes-control'):
self.fields['with_parameters'] = forms.BooleanField(
label=_("Optional parameters"),
initial=False,
required=False,
widget=forms.CheckboxInput(attrs={
'class': 'switchable',
'data-hide-tab': 'router_association__'
'add_router_parameters',
'data-hide-on-checked': 'false'
}))
class AddRouterParametersInfo(workflows.Step):
action_class = AddRouterParametersInfoAction
depends_on = ("bgpvpn_id", "name")
contributes = ("advertise_extra_routes",)
class CreateRouterAssociationInfo(workflows.Step):
action_class = CreateRouterAssociationInfoAction
contributes = ("router_resource", "with_parameters")
class RouterAssociation(workflows.Workflow):
slug = "router_association"
name = _("Associate a BGPVPN to a Router")
finalize_button_name = _("Create")
success_message = _('Router association with "%s" created.')
failure_message = _('Unable to create a router association with "%s".')
success_url = "horizon:project:bgpvpn:index"
default_steps = (CreateRouterAssociationInfo,
AddRouterParametersInfo)
wizard = True
def format_status_message(self, message):
name = self.context['name'] or self.context['bgpvpn_id']
return message % name
def handle(self, request, context):
bgpvpn_id = context['bgpvpn_id']
router_id = context["router_resource"]
msg_error = _("Unable to associate router %s") % router_id
try:
router_association = bgpvpn_api.router_association_create(
request, bgpvpn_id, router_id=router_id)
except Exception:
exceptions.handle(request, msg_error)
return False
if not context["with_parameters"]:
return True
asso_id = router_association['router_association']['id']
try:
bgpvpn_api.router_association_update(
request, bgpvpn_id, asso_id,
advertise_extra_routes=context['advertise_extra_routes'])
return True
        except Exception:
bgpvpn_api.router_association_delete(request, asso_id, bgpvpn_id)
exceptions.handle(request, msg_error)
return False
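# Hedged illustration (not part of the original module): handle() above uses a
# compensating-action shape -- create the association first, then apply the
# optional update, and delete the new association if that update fails. A
# generic sketch of the pattern, with create/update/delete passed as callables:
def _create_with_rollback(create, update, delete):
    resource = create()
    try:
        update(resource)
    except Exception:
        delete(resource)
        raise
    return resource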
| {
"content_hash": "3f761de922258f64ec5fe6282419dc66",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 38.32592592592592,
"alnum_prop": 0.5983764978739853,
"repo_name": "openstack/networking-bgpvpn",
"id": "b3f3aad71aae0dab60bfed9183fbfb3058f555ef",
"size": "5800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bgpvpn_dashboard/dashboards/project/bgpvpn/workflows.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5665"
},
{
"name": "Mako",
"bytes": "1055"
},
{
"name": "Python",
"bytes": "477528"
},
{
"name": "Shell",
"bytes": "4835"
}
],
"symlink_target": ""
} |
from typing import Dict, List, Optional
from joueur.base_game import BaseGame
# import game objects
from games.pirates.game_object import GameObject
from games.pirates.player import Player
from games.pirates.port import Port
from games.pirates.tile import Tile
from games.pirates.unit import Unit
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Game(BaseGame):
"""The class representing the Game in the Pirates game.
Steal from merchants and become the most infamous pirate.
"""
def __init__(self):
"""Initializes a Game with basic logic as provided by the Creer code generator.
"""
BaseGame.__init__(self)
# private attributes to hold the properties so they appear read only
self._bury_interest_rate = 0
self._crew_cost = 0
self._crew_damage = 0
self._crew_health = 0
self._crew_moves = 0
self._crew_range = 0
self._current_player = None
self._current_turn = 0
self._game_objects = {}
self._heal_factor = 0
self._map_height = 0
self._map_width = 0
self._max_turns = 100
self._merchant_gold_rate = 0
self._merchant_interest_rate = 0
self._min_interest_distance = 0
self._players = []
self._ports = []
self._rest_range = 0
self._session = ""
self._ship_cost = 0
self._ship_damage = 0
self._ship_health = 0
self._ship_moves = 0
self._ship_range = 0
self._tiles = []
self._time_added_per_turn = 0
self._units = []
self.name = "Pirates"
self._game_object_classes = {
'GameObject': GameObject,
'Player': Player,
'Port': Port,
'Tile': Tile,
'Unit': Unit
}
@property
def bury_interest_rate(self) -> float:
"""float: The rate buried gold increases each turn.
"""
return self._bury_interest_rate
@property
def crew_cost(self) -> int:
"""int: How much gold it costs to construct a single crew.
"""
return self._crew_cost
@property
def crew_damage(self) -> int:
"""int: How much damage crew deal to each other.
"""
return self._crew_damage
@property
def crew_health(self) -> int:
"""int: The maximum amount of health a crew member can have.
"""
return self._crew_health
@property
def crew_moves(self) -> int:
"""int: The number of moves Units with only crew are given each turn.
"""
return self._crew_moves
@property
def crew_range(self) -> float:
"""float: A crew's attack range. Range is circular.
"""
return self._crew_range
@property
def current_player(self) -> 'games.pirates.player.Player':
"""games.pirates.player.Player: The player whose turn it is currently. That player can send commands. Other players cannot.
"""
return self._current_player
@property
def current_turn(self) -> int:
"""int: The current turn number, starting at 0 for the first player's turn.
"""
return self._current_turn
@property
def game_objects(self) -> Dict[str, 'games.pirates.game_object.GameObject']:
"""dict[str, games.pirates.game_object.GameObject]: A mapping of every game object's ID to the actual game object. Primarily used by the server and client to easily refer to the game objects via ID.
"""
return self._game_objects
@property
def heal_factor(self) -> float:
"""float: How much health a Unit recovers when they rest.
"""
return self._heal_factor
@property
def map_height(self) -> int:
"""int: The number of Tiles in the map along the y (vertical) axis.
"""
return self._map_height
@property
def map_width(self) -> int:
"""int: The number of Tiles in the map along the x (horizontal) axis.
"""
return self._map_width
@property
def max_turns(self) -> int:
"""int: The maximum number of turns before the game will automatically end.
"""
return self._max_turns
@property
def merchant_gold_rate(self) -> float:
"""float: How much gold merchant Ports get each turn.
"""
return self._merchant_gold_rate
@property
def merchant_interest_rate(self) -> float:
"""float: When a merchant ship spawns, the amount of additional gold it has relative to the Port's investment.
"""
return self._merchant_interest_rate
@property
def min_interest_distance(self) -> float:
"""float: The Euclidean distance buried gold must be from the Player's Port to accumulate interest.
"""
return self._min_interest_distance
@property
def players(self) -> List['games.pirates.player.Player']:
"""list[games.pirates.player.Player]: List of all the players in the game.
"""
return self._players
@property
def ports(self) -> List['games.pirates.port.Port']:
"""list[games.pirates.port.Port]: Every Port in the game. Merchant ports have owner set to None.
"""
return self._ports
@property
def rest_range(self) -> float:
"""float: How far a Unit can be from a Port to rest. Range is circular.
"""
return self._rest_range
@property
def session(self) -> str:
"""str: A unique identifier for the game instance that is being played.
"""
return self._session
@property
def ship_cost(self) -> int:
"""int: How much gold it costs to construct a ship.
"""
return self._ship_cost
@property
def ship_damage(self) -> int:
"""int: How much damage ships deal to ships and ports.
"""
return self._ship_damage
@property
def ship_health(self) -> int:
"""int: The maximum amount of health a ship can have.
"""
return self._ship_health
@property
def ship_moves(self) -> int:
"""int: The number of moves Units with ships are given each turn.
"""
return self._ship_moves
@property
def ship_range(self) -> float:
"""float: A ship's attack range. Range is circular.
"""
return self._ship_range
@property
def tiles(self) -> List['games.pirates.tile.Tile']:
"""list[games.pirates.tile.Tile]: All the tiles in the map, stored in Row-major order. Use `x + y * mapWidth` to access the correct index.
"""
return self._tiles
@property
def time_added_per_turn(self) -> int:
"""int: The amount of time (in nano-seconds) added after each player performs a turn.
"""
return self._time_added_per_turn
@property
def units(self) -> List['games.pirates.unit.Unit']:
"""list[games.pirates.unit.Unit]: Every Unit in the game. Merchant units have targetPort set to a port.
"""
return self._units
def get_tile_at(self, x: int, y: int) -> Optional['games.pirates.tile.Tile']:
"""Gets the Tile at a specified (x, y) position.
Args:
x (int): An integer between 0 and the map_width.
y (int): An integer between 0 and the map_height.
Returns:
games.pirates.tile.Tile or None: The Tile at (x, y) or None if out of bounds.
"""
if x < 0 or y < 0 or x >= self.map_width or y >= self.map_height:
# out of bounds
return None
return self.tiles[x + y * self.map_width]
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
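    # Hedged illustration (added here, not generated by Creer): with row-major
    # storage, the Tile at (x, y) lives at index x + y * map_width, so on a
    # 4-wide map the tile at (1, 2) sits at self.tiles[9].
    def _index_of(self, x: int, y: int) -> int:
        """Row-major index of the Tile at (x, y); mirrors get_tile_at above."""
        return x + y * self.map_width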
# <<-- /Creer-Merge: functions -->>
| {
"content_hash": "022014bb5bff6f133c5a9bf3fec07971",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 206,
"avg_line_length": 32.22222222222222,
"alnum_prop": 0.5954433497536946,
"repo_name": "JacobFischer/Joueur.py",
"id": "91babe791e091a6b8573d0a632a57f29c56cfe6d",
"size": "8383",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "games/pirates/game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "80"
},
{
"name": "Python",
"bytes": "91770"
},
{
"name": "Shell",
"bytes": "225"
}
],
"symlink_target": ""
} |
from svtplay_dl.service.aftonbladet import Aftonbladet
from svtplay_dl.service.aftonbladet import Aftonbladettv
from svtplay_dl.service.angelstudios import Angelstudios
from svtplay_dl.service.barnkanalen import Barnkanalen
from svtplay_dl.service.bigbrother import Bigbrother
from svtplay_dl.service.cmore import Cmore
from svtplay_dl.service.disney import Disney
from svtplay_dl.service.dplay import Discoveryplus
from svtplay_dl.service.dr import Dr
from svtplay_dl.service.efn import Efn
from svtplay_dl.service.eurosport import Eurosport
from svtplay_dl.service.expressen import Expressen
from svtplay_dl.service.facebook import Facebook
from svtplay_dl.service.filmarkivet import Filmarkivet
from svtplay_dl.service.flowonline import Flowonline
from svtplay_dl.service.koket import Koket
from svtplay_dl.service.lemonwhale import Lemonwhale
from svtplay_dl.service.mtvnn import Mtvnn
from svtplay_dl.service.mtvservices import Mtvservices
from svtplay_dl.service.nhl import NHL
from svtplay_dl.service.nrk import Nrk
from svtplay_dl.service.oppetarkiv import OppetArkiv
from svtplay_dl.service.picsearch import Picsearch
from svtplay_dl.service.plutotv import Plutotv
from svtplay_dl.service.pokemon import Pokemon
from svtplay_dl.service.radioplay import Radioplay
from svtplay_dl.service.raw import Raw
from svtplay_dl.service.riksdagen import Riksdagen
from svtplay_dl.service.ruv import Ruv
from svtplay_dl.service.solidtango import Solidtango
from svtplay_dl.service.sportlib import Sportlib
from svtplay_dl.service.sr import Sr
from svtplay_dl.service.svt import Svt
from svtplay_dl.service.svtplay import Svtplay
from svtplay_dl.service.tv4play import Tv4
from svtplay_dl.service.tv4play import Tv4play
from svtplay_dl.service.twitch import Twitch
from svtplay_dl.service.urplay import Urplay
from svtplay_dl.service.vg import Vg
from svtplay_dl.service.viaplay import Viafree
from svtplay_dl.service.viasatsport import Viasatsport
from svtplay_dl.service.vimeo import Vimeo
from svtplay_dl.service.youplay import Youplay
sites = [
Aftonbladet,
Aftonbladettv,
Angelstudios,
Barnkanalen,
Bigbrother,
Cmore,
Disney,
Discoveryplus,
Dr,
Efn,
Eurosport,
Expressen,
Facebook,
Filmarkivet,
Flowonline,
Koket,
Twitch,
Lemonwhale,
Mtvservices,
Mtvnn,
NHL,
Nrk,
Picsearch,
Plutotv,
Pokemon,
Ruv,
Radioplay,
Solidtango,
Sportlib,
Sr,
Svt,
Svtplay,
OppetArkiv,
Tv4,
Tv4play,
Urplay,
Viafree,
Viasatsport,
Vimeo,
Vg,
Youplay,
Riksdagen,
Raw,
]
| {
"content_hash": "4eabff28db9a75e98077458adea43a31",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 56,
"avg_line_length": 29.40449438202247,
"alnum_prop": 0.7833397019487963,
"repo_name": "spaam/svtplay-dl",
"id": "3869c4c008018fb166fbb7a11207b4828d5b8c16",
"size": "2617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/svtplay_dl/service/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "372"
},
{
"name": "Makefile",
"bytes": "2958"
},
{
"name": "Python",
"bytes": "446163"
},
{
"name": "Shell",
"bytes": "2423"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2013 onwards University of Deusto
All rights reserved.
This software is licensed as described in the file COPYING, which
you should have received as part of this distribution.
This software consists of contributions made by many individuals,
listed below:
@author: Aitor Gómez Goiri <aitor.gomez@deusto.es>
'''
from rdflib.plugins.sparql import prepareQuery
from actuation.api.space import Space, AbstractSubscriptionTemplate
from otsopy.dataaccess.store import DataAccess
from actuation.utils.conversors import QueryLanguageConversor
class CoordinationSpace(Space):
GRAPH_LEVEL = 0
SPACE_LEVEL = 1
def __init__(self, space_name):
self._da = DataAccess( defaultSpace = space_name )
self._subscriptions = [] # tuple: (subscription template, callback, level)
self._observers = []
def write(self, triples, ignore_subscriptions = False):
ret = self._da.write(triples)
if not ignore_subscriptions: # only used to shorten the evaluation initialization process
# TODO do it in another thread!
activated = self._get_activated_subscriptions( triples )
if activated:
for ac in activated:
ac.call()
return ret
def _get_activated_subscriptions(self, graph):
ret = []
for template, callback, level in self._subscriptions:
if level == CoordinationSpace.GRAPH_LEVEL:
if template.matches( graph ):
ret.append( callback )
elif level == CoordinationSpace.SPACE_LEVEL:
# over all the space!
if template.matches( self._da.get_space(None).graphs ):
ret.append( callback )
else:
raise Exception( "Level %d does not exist" % level )
return ret
def read_by_wildcard(self, template):
return self._da.read_wildcard( *template )
def read_by_sparql(self, query):
return self._da.read_sparql( query )
def take_by_wildcard(self, template):
return self._da.take_wildcard( *template )
def take_by_sparql(self, query):
return self._da.take_sparql( query )
def take_by_uri(self, graph_uri):
return self._da.take_uri( graph_uri )
def query_by_sparql(self, query):
return self._da.query_sparql( query )
def subscribe(self, template, callback, level = 0 ):
self._subscriptions.append( (template, callback, level) )
# warn to the observers if any
for observer in self._observers:
if level == CoordinationSpace.SPACE_LEVEL: # not necessarily, but to filter in this scenario...
observer.notify_subscription( template )
def add_subscription_observer(self, observer):
self._observers.append( observer )
class SimpleSubscriptionTemplate(AbstractSubscriptionTemplate):
def __init__(self, template):
self.template = template
    def __has_next(self, generator):
        try:
            next(generator)
            return True
        except StopIteration:
            # no triple in the generator
            return False
def matches(self, graph):
t = graph.triples( self.template )
return self.__has_next(t)
# TODO deprecate
class AggregationSubscriptionTemplate(AbstractSubscriptionTemplate):
def __init__(self, templates):
"""
@param templates: A list of SimpleSubscriptionTemplate objects
"""
self.templates = templates
def matches(self, graph):
for t in self.templates:
if not t.matches( graph ):
return False
return True
class SPARQLSubscriptionTemplate(AbstractSubscriptionTemplate):
def __init__(self, query):
"""
@param query: A SPARQL query
"""
# E.g. 'select ?s where { ?person <http://xmlns.com/foaf/0.1/knows> ?s .}'
self.query = prepareQuery( query )
def matches(self, graph):
if not graph.query( self.query ):
return False
return True
class N3QLSubscriptionTemplate(SPARQLSubscriptionTemplate):
def __init__(self, n3ql_query):
"""
@param templates: A N3QL query
"""
# E.g. 'select ?s where { ?person <http://xmlns.com/foaf/0.1/knows> ?s .}'
super(N3QLSubscriptionTemplate, self).__init__( QueryLanguageConversor.n3ql_to_sparql( n3ql_query) ) | {
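# Hedged usage sketch (illustration only, not part of the original module):
# wiring a subscription that fires when a matching triple is written. The
# space name and FOAF query are assumptions; per _get_activated_subscriptions,
# the callback object must expose a call() method.
def _example_subscription(callback):
    space = CoordinationSpace("demo_space")
    template = SPARQLSubscriptionTemplate(
        "select ?s where { ?person <http://xmlns.com/foaf/0.1/knows> ?s . }")
    space.subscribe(template, callback, level=CoordinationSpace.GRAPH_LEVEL)
    return space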
"content_hash": "6feaeb3aad8eb7b7333f7e37767f119f",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 108,
"avg_line_length": 32.219858156028366,
"alnum_prop": 0.611930442438917,
"repo_name": "gomezgoiri/reusingWebActuatorsFromSemanticSpace",
"id": "939a790e0d87be22696b1fe2053a662c47afaf1c",
"size": "4568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actuation/impl/space/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "133730"
},
{
"name": "Shell",
"bytes": "1416"
}
],
"symlink_target": ""
} |
__author__ = 'thorwhalen'
import numpy as np
def idf_log10(num_of_docs_containing_term, num_of_docs):
return np.log10(float(num_of_docs) / np.array(num_of_docs_containing_term))
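# Hedged usage sketch (not part of the original module): with 1000 documents,
# a term found in 10 of them scores log10(1000/10) = 2.0, and one found in
# 100 of them scores 1.0.
if __name__ == "__main__":
    print(idf_log10([10, 100], 1000))  # -> [2. 1.]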
| {
"content_hash": "aa6f97fe60699591fe6154b870e41453",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 79,
"avg_line_length": 23.25,
"alnum_prop": 0.7043010752688172,
"repo_name": "thorwhalen/ut",
"id": "49aa0dfe7e4f5c342700e4b388236247f5ffc035",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ut/semantics/math.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1174"
},
{
"name": "Python",
"bytes": "2258941"
}
],
"symlink_target": ""
} |
from . import bottle, werkzeug # noqa
| {
"content_hash": "00c7ebe620130d761fa810d647832af7",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 38,
"avg_line_length": 39,
"alnum_prop": 0.717948717948718,
"repo_name": "lucuma/authcode",
"id": "f5e142e1cd5c351a17820189baabbf31bc9fe72a",
"size": "54",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "authcode/wsgi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5028"
},
{
"name": "HTML",
"bytes": "5444"
},
{
"name": "Makefile",
"bytes": "683"
},
{
"name": "Python",
"bytes": "128840"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import pytest
@pytest.mark.xfail(reason="Deleting an existing pet always returns a 500")
def test_200_success(petstore):
pets = petstore.pet.findPetsByStatus(status=['available']).result()
if not pets:
        pytest.skip("No pets to delete")
pet_to_delete = pets.pop()
print(pet_to_delete.id)
result = petstore.pet.deletePet(petId=pet_to_delete.id).result()
print(result)
@pytest.mark.xfail(reason="Don't know how to induce a 400")
def test_400_invalid_pet_value(petstore):
result = petstore.pet.deletePet(petId=999).result()
print(result)
| {
"content_hash": "cc8e8532cd6e109be4eab8f547f9626e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 33.05263157894737,
"alnum_prop": 0.7101910828025477,
"repo_name": "vi4m/bravado",
"id": "5037c3b0ca3c210b48a75395e04208405f38f4c2",
"size": "628",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/petstore/pet/deletePet_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "281"
},
{
"name": "Python",
"bytes": "99546"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('../..')
import web
from web.contrib.template import render_jinja
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from social.utils import setting_name
from social.apps.webpy_app.utils import strategy, backends
from social.apps.webpy_app import app as social_app
import local_settings
web.config.debug = False
web.config[setting_name('USER_MODEL')] = 'models.User'
web.config[setting_name('AUTHENTICATION_BACKENDS')] = (
'social.backends.open_id.OpenIdAuth',
'social.backends.google.GoogleOpenId',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GoogleOAuth',
'social.backends.twitter.TwitterOAuth',
'social.backends.yahoo.YahooOpenId',
'social.backends.stripe.StripeOAuth2',
'social.backends.persona.PersonaAuth',
'social.backends.facebook.FacebookOAuth2',
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.yahoo.YahooOAuth',
'social.backends.angel.AngelOAuth2',
'social.backends.behance.BehanceOAuth2',
'social.backends.bitbucket.BitbucketOAuth',
'social.backends.box.BoxOAuth2',
'social.backends.linkedin.LinkedinOAuth',
'social.backends.github.GithubOAuth2',
'social.backends.foursquare.FoursquareOAuth2',
'social.backends.instagram.InstagramOAuth2',
'social.backends.live.LiveOAuth2',
'social.backends.vk.VKOAuth2',
'social.backends.dailymotion.DailymotionOAuth2',
'social.backends.disqus.DisqusOAuth2',
'social.backends.dropbox.DropboxOAuth',
'social.backends.evernote.EvernoteSandboxOAuth',
'social.backends.fitbit.FitbitOAuth',
'social.backends.flickr.FlickrOAuth',
'social.backends.livejournal.LiveJournalOpenId',
'social.backends.soundcloud.SoundcloudOAuth2',
'social.backends.thisismyjam.ThisIsMyJamOAuth1',
'social.backends.stocktwits.StocktwitsOAuth2',
'social.backends.tripit.TripItOAuth',
'social.backends.twilio.TwilioAuth',
'social.backends.xing.XingOAuth',
'social.backends.yandex.YandexOAuth2',
'social.backends.podio.PodioOAuth2',
)
web.config[setting_name('LOGIN_REDIRECT_URL')] = '/done/'
urls = (
'^/$', 'main',
'^/done/$', 'done',
'', social_app.app_social
)
render = render_jinja('templates/')
class main(object):
def GET(self):
return render.home()
class done(social_app.BaseViewClass):
@strategy()
def GET(self):
user = self.get_current_user()
return render.done(user=user, backends=backends(user))
engine = create_engine('sqlite:///test.db', echo=True)
def load_sqla(handler):
web.ctx.orm = scoped_session(sessionmaker(bind=engine))
try:
return handler()
except web.HTTPError:
web.ctx.orm.commit()
raise
except:
web.ctx.orm.rollback()
raise
finally:
web.ctx.orm.commit()
# web.ctx.orm.expunge_all()
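# Hedged usage sketch (not part of the original app): with the load_sqla
# processor installed, handlers reach the request-scoped session through
# web.ctx.orm; `model` stands in for any mapped class and is hypothetical.
def count_rows(model):
    return web.ctx.orm.query(model).count()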
Session = sessionmaker(bind=engine)
Session.configure(bind=engine)
app = web.application(urls, locals())
app.add_processor(load_sqla)
session = web.session.Session(app, web.session.DiskStore('sessions'))
web.db_session = Session()
web.web_session = session
if __name__ == "__main__":
app.run()
| {
"content_hash": "84d2451708cfcbaf66985ded3dac7968",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 69,
"avg_line_length": 28.723214285714285,
"alnum_prop": 0.7118433322971712,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "5321190b7c937e1d6e08cafe13f7290f8c1031cd",
"size": "3217",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Dataset/python/app (2).py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
} |
import unittest
import pika
from tavrida import config
class ConnectionConfigTestCase(unittest.TestCase):
def setUp(self):
super(ConnectionConfigTestCase, self).setUp()
self.user = "user"
self.password = "password"
self.credentials = config.Credentials(self.user, self.password)
self.host = "host"
self.config = config.ConnectionConfig(self.host, self.credentials)
def test_to_pika_params_no_client_reconenct(self):
res = self.config.to_pika_params()
        self.assertIsInstance(res, pika.ConnectionParameters)
| {
"content_hash": "7725c9117d8e93fe6167deb1c68652c2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.6194690265486725,
"repo_name": "sbunatyan/tavrida",
"id": "39a47b5c08350d21d64efff2e78d1f4cad914977",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "228935"
}
],
"symlink_target": ""
} |
import os
import sys
import psycopg2
import json
from bson import json_util
from pymongo import MongoClient
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
def create_app():
app = Flask(__name__)
return app
app = create_app()
# REPLACE WITH YOUR DATABASE NAME
MONGODATABASE = "e4"
MONGOSERVER = "localhost"
MONGOPORT = 27017
client = MongoClient(MONGOSERVER, MONGOPORT)
mongodb = client[MONGODATABASE]
# Uncomment for postgres connection
# REPLACE WITH YOUR DATABASE NAME, USER AND PASS
POSTGRESDATABASE = "administrator"
POSTGRESUSER = "administrator"
POSTGRESPASS = "h59Ws*Rd2"
postgresdb = psycopg2.connect(
database=POSTGRESDATABASE,
user=POSTGRESUSER,
password=POSTGRESPASS)
QUERIES_FILENAME = '/var/www/flaskr/flaskr/queries'
@app.route("/")
def home():
with open(QUERIES_FILENAME, 'r') as queries_file:
json_file = json.load(queries_file)
pairs = [(x["name"],
x["database"],
x["description"],
x["query"]) for x in json_file]
return render_template('file.html', results=pairs)
@app.route("/mongo")
def mongo():
query = request.args.get("query")
results = eval('mongodb.'+query)
results = json_util.dumps(results, sort_keys=True, indent=4)
if "find" in query:
return render_template('mongo.html', results=results)
else:
return "ok"
@app.route("/postgres")
def postgres():
query = request.args.get("query")
cursor = postgresdb.cursor()
cursor.execute(query)
results = [[a for a in result] for result in cursor]
print(results)
return render_template('postgres.html', results=results)
@app.route("/example")
def example():
return render_template('example.html')
if __name__ == "__main__":
app.run()
| {
"content_hash": "28634273b0579fecc033bda1141c6368",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 24.546666666666667,
"alnum_prop": 0.6653992395437263,
"repo_name": "bregonesi/IIC2413-FlaskDB",
"id": "6c384da2657ce197038fbdd2867f0af6a2abc5af",
"size": "1886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskr/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1049"
},
{
"name": "HTML",
"bytes": "3526"
},
{
"name": "JavaScript",
"bytes": "812"
},
{
"name": "Python",
"bytes": "5622"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class LibvirtNetVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, host):
super(LibvirtNetVolumeDriver,
self).__init__(host, is_block_dev=False)
def _get_secret_uuid(self, conf, password=None):
# TODO(mriedem): Add delegation methods to connection (LibvirtDriver)
# to call through for these secret CRUD operations so the volume driver
# doesn't need to know the internal attributes of the connection
# object.
secret = self.host.find_secret(conf.source_protocol,
conf.source_name)
if secret is None:
secret = self.host.create_secret(conf.source_protocol,
conf.source_name,
password)
return secret.UUIDString()
def _delete_secret_by_name(self, connection_info):
source_protocol = connection_info['driver_volume_type']
netdisk_properties = connection_info['data']
if source_protocol == 'rbd':
return
elif source_protocol == 'iscsi':
usage_type = 'iscsi'
usage_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
self.host.delete_secret(usage_type, usage_name)
def _set_auth_config_rbd(self, conf, netdisk_properties):
# The rbd volume driver in cinder sets auth_enabled if the rbd_user is
# set in cinder. The rbd auth values from the cinder connection take
# precedence over any local nova config values in case the cinder ceph
# backend is configured differently than the nova rbd ephemeral storage
# configuration.
auth_enabled = netdisk_properties.get('auth_enabled')
if auth_enabled:
conf.auth_username = netdisk_properties['auth_username']
# We started preferring Cinder config for rbd auth values starting
# in Ocata, but if we have a guest connection from before that when
# secret_uuid wasn't configured in Cinder, we need to fallback to
# get it from local nova.conf.
if netdisk_properties['secret_uuid'] is not None:
conf.auth_secret_uuid = netdisk_properties['secret_uuid']
else:
# If we're using the rbd_secret_uuid from nova.conf we need to
# use the rbd_user from nova.conf as well.
LOG.debug('Falling back to Nova configuration for RBD auth '
'secret_uuid and username values.')
conf.auth_username = CONF.libvirt.rbd_user
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
# secret_type is always hard-coded to 'ceph' in cinder
conf.auth_secret_type = netdisk_properties['secret_type']
elif CONF.libvirt.rbd_secret_uuid:
# Anyone relying on falling back to nova config is probably having
# this work accidentally and we'll remove that support in the
# future.
# NOTE(mriedem): We'll have to be extra careful about this in case
# the reason we got here is due to an old volume connection created
# before we started preferring the Cinder settings in Ocata.
LOG.warning('Falling back to Nova configuration values for '
'RBD authentication. Cinder should be configured '
'for auth with Ceph volumes. This fallback will '
'be dropped in a future release.')
# use the nova config values
conf.auth_username = CONF.libvirt.rbd_user
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
# secret_type is always hard-coded to 'ceph' in cinder
conf.auth_secret_type = netdisk_properties['secret_type']
def _set_auth_config_iscsi(self, conf, netdisk_properties):
if netdisk_properties.get('auth_method') == 'CHAP':
conf.auth_secret_type = 'iscsi'
password = netdisk_properties.get('auth_password')
conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
conf.auth_username = netdisk_properties['auth_username']
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
if conf.source_protocol == 'rbd':
self._set_auth_config_rbd(conf, netdisk_properties)
elif conf.source_protocol == 'iscsi':
try:
conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
target_portal = netdisk_properties['target_portal']
except KeyError:
raise exception.InternalError(_("Invalid volume source data"))
ip, port = utils.parse_server_string(target_portal)
if ip == '' or port == '':
raise exception.InternalError(_("Invalid target_lun"))
conf.source_hosts = [ip]
conf.source_ports = [port]
self._set_auth_config_iscsi(conf, netdisk_properties)
return conf
def disconnect_volume(self, connection_info, instance):
"""Detach the volume from instance_name."""
super(LibvirtNetVolumeDriver,
self).disconnect_volume(connection_info, instance)
self._delete_secret_by_name(connection_info)
| {
"content_hash": "52b478135594adf71ca90b2cd58b3cdb",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 49.904,
"alnum_prop": 0.6059634498236615,
"repo_name": "mikalstill/nova",
"id": "fdc6842d14597f0d12cfe94a1e726c416eecd66a",
"size": "6811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/volume/net.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22797282"
},
{
"name": "Shell",
"bytes": "32969"
},
{
"name": "Smarty",
"bytes": "418399"
}
],
"symlink_target": ""
} |
def main(request, response):
response.headers.set(b"Access-Control-Allow-Origin", b"*")
response.headers.set(b"Access-Control-Max-Age", 0)
response.headers.set(b'Access-Control-Allow-Headers', b"x-test")
if request.method == u"OPTIONS":
if not request.headers.get(b"User-Agent"):
response.content = b"FAIL: User-Agent header missing in preflight request."
response.status = 400
else:
if request.headers.get(b"User-Agent"):
response.content = b"PASS"
else:
response.content = b"FAIL: User-Agent header missing in request"
response.status = 400
| {
"content_hash": "95ec2d8e886e82374e8332384a885f9b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 87,
"avg_line_length": 43.2,
"alnum_prop": 0.6358024691358025,
"repo_name": "chromium/chromium",
"id": "ac6af13afe0fd6d8d44df51998f84100cf6f3414",
"size": "648",
"binary": false,
"copies": "23",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/external/wpt/xhr/resources/header-user-agent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Compatibility module defining operations on duck numpy-arrays.
Currently, this means Dask or NumPy arrays. None of these functions should
accept or return xarray objects.
"""
from __future__ import annotations
import contextlib
import datetime
import inspect
import warnings
from functools import partial
from importlib import import_module
import numpy as np
import pandas as pd
from numpy import all as array_all # noqa
from numpy import any as array_any # noqa
from numpy import zeros_like # noqa
from numpy import around, broadcast_to # noqa
from numpy import concatenate as _concatenate
from numpy import ( # noqa
einsum,
gradient,
isclose,
isin,
isnat,
take,
tensordot,
transpose,
unravel_index,
)
from numpy.lib.stride_tricks import sliding_window_view # noqa
from . import dask_array_ops, dtypes, nputils
from .nputils import nanfirst, nanlast
from .pycompat import array_type, is_duck_dask_array
from .utils import is_duck_array, module_available
dask_available = module_available("dask")
def get_array_namespace(x):
if hasattr(x, "__array_namespace__"):
return x.__array_namespace__()
else:
return np
def _dask_or_eager_func(
name,
eager_module=np,
dask_module="dask.array",
):
"""Create a function that dispatches to dask for dask array inputs."""
def f(*args, **kwargs):
if any(is_duck_dask_array(a) for a in args):
mod = (
import_module(dask_module)
if isinstance(dask_module, str)
else dask_module
)
wrapped = getattr(mod, name)
else:
wrapped = getattr(eager_module, name)
return wrapped(*args, **kwargs)
return f
def fail_on_dask_array_input(values, msg=None, func_name=None):
if is_duck_dask_array(values):
if msg is None:
msg = "%r is not yet a valid method on dask arrays"
if func_name is None:
func_name = inspect.stack()[1][3]
raise NotImplementedError(msg % func_name)
# Requires special-casing because pandas won't automatically dispatch to dask.isnull via NEP-18
pandas_isnull = _dask_or_eager_func("isnull", eager_module=pd, dask_module="dask.array")
# np.around has failing doctests, overwrite it so they pass:
# https://github.com/numpy/numpy/issues/19759
around.__doc__ = str.replace(
    around.__doc__ or "",
    "array([0.,  2.])",
    "array([0., 2.])",
)
around.__doc__ = str.replace(
    around.__doc__ or "",
    "array([0.,  2.])",
    "array([0., 2.])",
)
around.__doc__ = str.replace(
    around.__doc__ or "",
    "array([0.4,  1.6])",
    "array([0.4, 1.6])",
)
around.__doc__ = str.replace(
    around.__doc__ or "",
    "array([0.,  2.,  2.,  4.,  4.])",
    "array([0., 2., 2., 4., 4.])",
)
around.__doc__ = str.replace(
around.__doc__ or "",
(
' .. [2] "How Futile are Mindless Assessments of\n'
' Roundoff in Floating-Point Computation?", William Kahan,\n'
" https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf\n"
),
"",
)
def isnull(data):
data = asarray(data)
scalar_type = data.dtype.type
if issubclass(scalar_type, (np.datetime64, np.timedelta64)):
# datetime types use NaT for null
# note: must check timedelta64 before integers, because currently
# timedelta64 inherits from np.integer
return isnat(data)
elif issubclass(scalar_type, np.inexact):
# float types use NaN for null
xp = get_array_namespace(data)
return xp.isnan(data)
elif issubclass(scalar_type, (np.bool_, np.integer, np.character, np.void)):
# these types cannot represent missing values
return zeros_like(data, dtype=bool)
else:
# at this point, array should have dtype=object
if isinstance(data, np.ndarray):
return pandas_isnull(data)
else:
# Not reachable yet, but intended for use with other duck array
# types. For full consistency with pandas, we should accept None as
# a null value as well as NaN, but it isn't clear how to do this
# with duck typing.
return data != data
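# Hedged illustration (not part of the original module) of the dtype dispatch
# above; the sample arrays are assumptions.
def _isnull_examples():
    assert isnull(np.array(["NaT"], dtype="datetime64[ns]"))[0]  # NaT via isnat
    assert isnull(np.array([np.nan, 1.0]))[0]                    # NaN via isnan
    assert not isnull(np.array([1, 2])).any()                    # ints are never null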
def notnull(data):
return ~isnull(data)
# TODO replace with simply np.ma.masked_invalid once numpy/numpy#16022 is fixed
masked_invalid = _dask_or_eager_func(
"masked_invalid", eager_module=np.ma, dask_module="dask.array.ma"
)
def trapz(y, x, axis):
if axis < 0:
axis = y.ndim + axis
x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1)
x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1)
slice1 = (slice(None),) * axis + (slice(1, None),)
slice2 = (slice(None),) * axis + (slice(None, -1),)
dx = x[x_sl1] - x[x_sl2]
integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)])
return sum(integrand, axis=axis, skipna=False)
def cumulative_trapezoid(y, x, axis):
if axis < 0:
axis = y.ndim + axis
x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1)
x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1)
slice1 = (slice(None),) * axis + (slice(1, None),)
slice2 = (slice(None),) * axis + (slice(None, -1),)
dx = x[x_sl1] - x[x_sl2]
integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)])
# Pad so that 'axis' has same length in result as it did in y
pads = [(1, 0) if i == axis else (0, 0) for i in range(y.ndim)]
integrand = np.pad(integrand, pads, mode="constant", constant_values=0.0)
return cumsum(integrand, axis=axis, skipna=False)
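# Hedged worked example (not part of the original module): integrating y = x
# over [0, 1] from three samples; the running integral pads a leading zero so
# the output keeps the input length along `axis`.
def _trapezoid_example():
    x = np.array([0.0, 0.5, 1.0])
    y = x.copy()
    total = trapz(y, x, axis=0)                   # 0.5
    running = cumulative_trapezoid(y, x, axis=0)  # array([0., 0.125, 0.5])
    return total, running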
def astype(data, dtype, **kwargs):
if hasattr(data, "__array_namespace__"):
xp = get_array_namespace(data)
return xp.astype(data, dtype, **kwargs)
return data.astype(dtype, **kwargs)
def asarray(data, xp=np):
return data if is_duck_array(data) else xp.asarray(data)
def as_shared_dtype(scalars_or_arrays, xp=np):
"""Cast a arrays to a shared dtype using xarray's type promotion rules."""
if any(isinstance(x, array_type("cupy")) for x in scalars_or_arrays):
import cupy as cp
arrays = [asarray(x, xp=cp) for x in scalars_or_arrays]
else:
arrays = [asarray(x, xp=xp) for x in scalars_or_arrays]
# Pass arrays directly instead of dtypes to result_type so scalars
# get handled properly.
# Note that result_type() safely gets the dtype from dask arrays without
# evaluating them.
out_type = dtypes.result_type(*arrays)
return [astype(x, out_type, copy=False) for x in arrays]
def lazy_array_equiv(arr1, arr2):
"""Like array_equal, but doesn't actually compare values.
Returns True when arr1, arr2 identical or their dask tokens are equal.
Returns False when shapes are not equal.
    Returns None when equality cannot be determined: one or both of arr1, arr2 are numpy arrays;
or their dask tokens are not equal
"""
if arr1 is arr2:
return True
arr1 = asarray(arr1)
arr2 = asarray(arr2)
if arr1.shape != arr2.shape:
return False
if dask_available and is_duck_dask_array(arr1) and is_duck_dask_array(arr2):
from dask.base import tokenize
# GH3068, GH4221
if tokenize(arr1) == tokenize(arr2):
return True
else:
return None
return None
def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8):
"""Like np.allclose, but also allows values to be NaN in both arrays"""
arr1 = asarray(arr1)
arr2 = asarray(arr2)
lazy_equiv = lazy_array_equiv(arr1, arr2)
if lazy_equiv is None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
return bool(isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=True).all())
else:
return lazy_equiv
def array_equiv(arr1, arr2):
"""Like np.array_equal, but also allows values to be NaN in both arrays"""
arr1 = asarray(arr1)
arr2 = asarray(arr2)
lazy_equiv = lazy_array_equiv(arr1, arr2)
if lazy_equiv is None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
flag_array = (arr1 == arr2) | (isnull(arr1) & isnull(arr2))
return bool(flag_array.all())
else:
return lazy_equiv
def array_notnull_equiv(arr1, arr2):
"""Like np.array_equal, but also allows values to be NaN in either or both
arrays
"""
arr1 = asarray(arr1)
arr2 = asarray(arr2)
lazy_equiv = lazy_array_equiv(arr1, arr2)
if lazy_equiv is None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
flag_array = (arr1 == arr2) | isnull(arr1) | isnull(arr2)
return bool(flag_array.all())
else:
return lazy_equiv
def count(data, axis=None):
"""Count the number of non-NA in this array along the given axis or axes"""
return np.sum(np.logical_not(isnull(data)), axis=axis)
def sum_where(data, axis=None, dtype=None, where=None):
xp = get_array_namespace(data)
if where is not None:
a = where_method(xp.zeros_like(data), where, data)
else:
a = data
result = xp.sum(a, axis=axis, dtype=dtype)
return result
def where(condition, x, y):
"""Three argument where() with better dtype promotion rules."""
xp = get_array_namespace(condition)
return xp.where(condition, *as_shared_dtype([x, y], xp=xp))
def where_method(data, cond, other=dtypes.NA):
if other is dtypes.NA:
other = dtypes.get_fill_value(data.dtype)
return where(cond, data, other)
def fillna(data, other):
# we need to pass data first so pint has a chance of returning the
# correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
return where(notnull(data), data, other)
def concatenate(arrays, axis=0):
"""concatenate() with better dtype promotion rules."""
return _concatenate(as_shared_dtype(arrays), axis=axis)
def stack(arrays, axis=0):
"""stack() with better dtype promotion rules."""
xp = get_array_namespace(arrays[0])
return xp.stack(as_shared_dtype(arrays, xp=xp), axis=axis)
def reshape(array, shape):
xp = get_array_namespace(array)
return xp.reshape(array, shape)
@contextlib.contextmanager
def _ignore_warnings_if(condition):
if condition:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield
else:
yield
def _create_nan_agg_method(name, coerce_strings=False, invariant_0d=False):
from . import nanops
def f(values, axis=None, skipna=None, **kwargs):
if kwargs.pop("out", None) is not None:
raise TypeError(f"`out` is not valid for {name}")
# The data is invariant in the case of 0d data, so do not
# change the data (and dtype)
# See https://github.com/pydata/xarray/issues/4885
if invariant_0d and axis == ():
return values
values = asarray(values)
if coerce_strings and values.dtype.kind in "SU":
values = values.astype(object)
func = None
if skipna or (skipna is None and values.dtype.kind in "cfO"):
nanname = "nan" + name
func = getattr(nanops, nanname)
else:
if name in ["sum", "prod"]:
kwargs.pop("min_count", None)
xp = get_array_namespace(values)
func = getattr(xp, name)
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "All-NaN slice encountered")
return func(values, axis=axis, **kwargs)
except AttributeError:
if not is_duck_dask_array(values):
raise
try: # dask/dask#3133 dask sometimes needs dtype argument
# if func does not accept dtype, then raises TypeError
return func(values, axis=axis, dtype=values.dtype, **kwargs)
except (AttributeError, TypeError):
raise NotImplementedError(
f"{name} is not yet implemented on dask arrays"
)
f.__name__ = name
return f
# Attributes `numeric_only`, `available_min_count` is used for docs.
# See ops.inject_reduce_methods
argmax = _create_nan_agg_method("argmax", coerce_strings=True)
argmin = _create_nan_agg_method("argmin", coerce_strings=True)
max = _create_nan_agg_method("max", coerce_strings=True, invariant_0d=True)
min = _create_nan_agg_method("min", coerce_strings=True, invariant_0d=True)
sum = _create_nan_agg_method("sum", invariant_0d=True)
sum.numeric_only = True
sum.available_min_count = True
std = _create_nan_agg_method("std")
std.numeric_only = True
var = _create_nan_agg_method("var")
var.numeric_only = True
median = _create_nan_agg_method("median", invariant_0d=True)
median.numeric_only = True
prod = _create_nan_agg_method("prod", invariant_0d=True)
prod.numeric_only = True
prod.available_min_count = True
cumprod_1d = _create_nan_agg_method("cumprod", invariant_0d=True)
cumprod_1d.numeric_only = True
cumsum_1d = _create_nan_agg_method("cumsum", invariant_0d=True)
cumsum_1d.numeric_only = True
_mean = _create_nan_agg_method("mean", invariant_0d=True)
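# Hedged illustration (not part of the original module): for float input the
# factory-built reductions skip NaN by default and only see it when
# skipna=False forces the plain numpy reduction.
def _nan_agg_example():
    data = np.array([1.0, np.nan, 2.0])
    assert sum(data) == 3.0                   # dispatches to nanops.nansum
    assert np.isnan(sum(data, skipna=False))  # plain np.sum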
def _datetime_nanmin(array):
"""nanmin() function for datetime64.
Caveats that this function deals with:
- In numpy < 1.18, min() on datetime64 incorrectly ignores NaT
- numpy nanmin() don't work on datetime64 (all versions at the moment of writing)
- dask min() does not work on datetime64 (all versions at the moment of writing)
"""
assert array.dtype.kind in "mM"
dtype = array.dtype
# (NaT).astype(float) does not produce NaN...
array = where(pandas_isnull(array), np.nan, array.astype(float))
array = min(array, skipna=True)
if isinstance(array, float):
array = np.array(array)
# ...but (NaN).astype("M8") does produce NaT
return array.astype(dtype)
def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):
"""Convert an array containing datetime-like data to numerical values.
Convert the datetime array to a timedelta relative to an offset.
Parameters
----------
array : array-like
Input data
offset : None, datetime or cftime.datetime
Datetime offset. If None, this is set by default to the array's minimum
value to reduce round off errors.
datetime_unit : {None, Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as}
If not None, convert output to a given datetime unit. Note that some
conversions are not allowed due to non-linear relationships between units.
dtype : dtype
Output dtype.
Returns
-------
array
Numerical representation of datetime object relative to an offset.
Notes
-----
Some datetime unit conversions won't work, for example from days to years, even
though some calendars would allow for them (e.g. no_leap). This is because there
is no `cftime.timedelta` object.
"""
# Set offset to minimum if not given
if offset is None:
if array.dtype.kind in "Mm":
offset = _datetime_nanmin(array)
else:
offset = min(array)
# Compute timedelta object.
# For np.datetime64, this can silently yield garbage due to overflow.
# One option is to enforce 1970-01-01 as the universal offset.
# This map_blocks call is for backwards compatibility.
# dask == 2021.04.1 does not support subtracting object arrays
# which is required for cftime
if is_duck_dask_array(array) and np.issubdtype(array.dtype, object):
array = array.map_blocks(lambda a, b: a - b, offset, meta=array._meta)
else:
array = array - offset
# Scalar is converted to 0d-array
if not hasattr(array, "dtype"):
array = np.array(array)
# Convert timedelta objects to float by first converting to microseconds.
if array.dtype.kind in "O":
return py_timedelta_to_float(array, datetime_unit or "ns").astype(dtype)
# Convert np.NaT to np.nan
elif array.dtype.kind in "mM":
# Convert to specified timedelta units.
if datetime_unit:
array = array / np.timedelta64(1, datetime_unit)
return np.where(isnull(array), np.nan, array.astype(dtype))
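# Hedged usage sketch (not part of the original module): with no explicit
# offset the array's minimum is subtracted, so the result starts at zero.
def _datetime_to_numeric_example():
    times = np.array(["2000-01-01", "2000-01-03"], dtype="datetime64[ns]")
    return datetime_to_numeric(times, datetime_unit="D")  # array([0., 2.])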
def timedelta_to_numeric(value, datetime_unit="ns", dtype=float):
"""Convert a timedelta-like object to numerical values.
Parameters
----------
value : datetime.timedelta, numpy.timedelta64, pandas.Timedelta, str
Time delta representation.
datetime_unit : {Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as}
The time units of the output values. Note that some conversions are not allowed due to
non-linear relationships between units.
dtype : type
The output data type.
"""
import datetime as dt
if isinstance(value, dt.timedelta):
out = py_timedelta_to_float(value, datetime_unit)
elif isinstance(value, np.timedelta64):
out = np_timedelta64_to_float(value, datetime_unit)
elif isinstance(value, pd.Timedelta):
out = pd_timedelta_to_float(value, datetime_unit)
elif isinstance(value, str):
try:
a = pd.to_timedelta(value)
except ValueError:
raise ValueError(
f"Could not convert {value!r} to timedelta64 using pandas.to_timedelta"
)
return py_timedelta_to_float(a, datetime_unit)
else:
raise TypeError(
f"Expected value of type str, pandas.Timedelta, datetime.timedelta "
f"or numpy.timedelta64, but received {type(value).__name__}"
)
return out.astype(dtype)
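# Hedged usage sketch (not part of the original module): the accepted
# timedelta representations all reduce to the same numeric value.
def _timedelta_to_numeric_example():
    assert timedelta_to_numeric(datetime.timedelta(minutes=90), datetime_unit="h") == 1.5
    assert timedelta_to_numeric(np.timedelta64(90, "s"), datetime_unit="m") == 1.5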
def _to_pytimedelta(array, unit="us"):
return array.astype(f"timedelta64[{unit}]").astype(datetime.timedelta)
def np_timedelta64_to_float(array, datetime_unit):
"""Convert numpy.timedelta64 to float.
Notes
-----
    The array is first converted to nanoseconds and then cast to float.
"""
array = array.astype("timedelta64[ns]").astype(np.float64)
conversion_factor = np.timedelta64(1, "ns") / np.timedelta64(1, datetime_unit)
return conversion_factor * array
def pd_timedelta_to_float(value, datetime_unit):
"""Convert pandas.Timedelta to float.
Notes
-----
Built on the assumption that pandas timedelta values are in nanoseconds,
which is also the numpy default resolution.
"""
value = value.to_timedelta64()
return np_timedelta64_to_float(value, datetime_unit)
def _timedelta_to_seconds(array):
if isinstance(array, datetime.timedelta):
return array.total_seconds() * 1e6
else:
return np.reshape([a.total_seconds() for a in array.ravel()], array.shape) * 1e6
def py_timedelta_to_float(array, datetime_unit):
"""Convert a timedelta object to a float, possibly at a loss of resolution."""
array = asarray(array)
if is_duck_dask_array(array):
array = array.map_blocks(
_timedelta_to_seconds, meta=np.array([], dtype=np.float64)
)
else:
array = _timedelta_to_seconds(array)
conversion_factor = np.timedelta64(1, "us") / np.timedelta64(1, datetime_unit)
return conversion_factor * array
def mean(array, axis=None, skipna=None, **kwargs):
"""inhouse mean that can handle np.datetime64 or cftime.datetime
dtypes"""
from .common import _contains_cftime_datetimes
array = asarray(array)
if array.dtype.kind in "Mm":
offset = _datetime_nanmin(array)
# xarray always uses np.datetime64[ns] for np.datetime64 data
dtype = "timedelta64[ns]"
return (
_mean(
datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs
).astype(dtype)
+ offset
)
elif _contains_cftime_datetimes(array):
offset = min(array)
timedeltas = datetime_to_numeric(array, offset, datetime_unit="us")
mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs)
return _to_pytimedelta(mean_timedeltas, unit="us") + offset
else:
return _mean(array, axis=axis, skipna=skipna, **kwargs)
mean.numeric_only = True # type: ignore[attr-defined]
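# Hedged illustration (not part of the original module): the datetime branch
# averages offsets from the minimum and adds the offset back.
def _datetime_mean_example():
    times = np.array(["2000-01-01", "2000-01-03"], dtype="datetime64[ns]")
    return mean(times)  # numpy.datetime64('2000-01-02T00:00:00.000000000')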
def _nd_cum_func(cum_func, array, axis, **kwargs):
array = asarray(array)
if axis is None:
axis = tuple(range(array.ndim))
if isinstance(axis, int):
axis = (axis,)
out = array
for ax in axis:
out = cum_func(out, axis=ax, **kwargs)
return out
def cumprod(array, axis=None, **kwargs):
"""N-dimensional version of cumprod."""
return _nd_cum_func(cumprod_1d, array, axis, **kwargs)
def cumsum(array, axis=None, **kwargs):
"""N-dimensional version of cumsum."""
return _nd_cum_func(cumsum_1d, array, axis, **kwargs)
_fail_on_dask_array_input_skipna = partial(
fail_on_dask_array_input,
msg="%r with skipna=True is not yet implemented on dask arrays",
)
def first(values, axis, skipna=None):
"""Return the first non-NA elements in this array along the given axis"""
if (skipna or skipna is None) and values.dtype.kind not in "iSU":
# only bother for dtypes that can hold NaN
_fail_on_dask_array_input_skipna(values)
return nanfirst(values, axis)
return take(values, 0, axis=axis)
def last(values, axis, skipna=None):
"""Return the last non-NA elements in this array along the given axis"""
if (skipna or skipna is None) and values.dtype.kind not in "iSU":
# only bother for dtypes that can hold NaN
_fail_on_dask_array_input_skipna(values)
return nanlast(values, axis)
return take(values, -1, axis=axis)
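# Hedged sketch (illustration only): for float data with skipna unset,
# NaNs are skipped when picking the first/last element.
def _demo_first_last():
    vals = np.array([np.nan, 1.0, 2.0, np.nan])
    assert first(vals, 0) == 1.0
    assert last(vals, 0) == 2.0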
def least_squares(lhs, rhs, rcond=None, skipna=False):
"""Return the coefficients and residuals of a least-squares fit."""
if is_duck_dask_array(rhs):
return dask_array_ops.least_squares(lhs, rhs, rcond=rcond, skipna=skipna)
else:
return nputils.least_squares(lhs, rhs, rcond=rcond, skipna=skipna)
def push(array, n, axis):
    from bottleneck import push
    # The local import shadows this function's own name, so the call below
    # dispatches to bottleneck.push for non-dask arrays.
    if is_duck_dask_array(array):
        return dask_array_ops.push(array, n, axis)
    else:
        return push(array, n, axis)
| {
"content_hash": "b38878e29b105fb0a9082647519d1cd7",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 95,
"avg_line_length": 33.3532041728763,
"alnum_prop": 0.6370866845397677,
"repo_name": "jhamman/xarray",
"id": "29ab44bb8ba3878608e14e44134380cf01d52005",
"size": "22380",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "xarray/core/duck_array_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "HTML",
"bytes": "1343"
},
{
"name": "Python",
"bytes": "4753591"
},
{
"name": "Shell",
"bytes": "1262"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from pandas import DataFrame, Series, SparseDataFrame, bdate_range
from pandas.core import nanops
from pandas.core.sparse.api import SparseDtype
from pandas.util import testing as tm
@pytest.fixture
def dates():
return bdate_range('1/1/2011', periods=10)
@pytest.fixture
def empty():
return SparseDataFrame()
@pytest.fixture
def frame(dates):
data = {'A': [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=np.float64),
'D': [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan]}
return SparseDataFrame(data, index=dates)
@pytest.fixture
def fill_frame(frame):
values = frame.values.copy()
values[np.isnan(values)] = 2
return SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2,
index=frame.index)
def test_apply(frame):
applied = frame.apply(np.sqrt)
assert isinstance(applied, SparseDataFrame)
tm.assert_almost_equal(applied.values, np.sqrt(frame.values))
# agg / broadcast
with tm.assert_produces_warning(FutureWarning):
broadcasted = frame.apply(np.sum, broadcast=True)
assert isinstance(broadcasted, SparseDataFrame)
with tm.assert_produces_warning(FutureWarning):
exp = frame.to_dense().apply(np.sum, broadcast=True)
tm.assert_frame_equal(broadcasted.to_dense(), exp)
applied = frame.apply(np.sum)
tm.assert_series_equal(applied,
frame.to_dense().apply(nanops.nansum).to_sparse())
def test_apply_fill(fill_frame):
applied = fill_frame.apply(np.sqrt)
assert applied['A'].fill_value == np.sqrt(2)
def test_apply_empty(empty):
assert empty.apply(np.sqrt) is empty
def test_apply_nonuq():
orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=['a', 'a', 'c'])
sparse = orig.to_sparse()
res = sparse.apply(lambda s: s[0], axis=1)
exp = orig.apply(lambda s: s[0], axis=1)
# dtype must be kept
assert res.dtype == SparseDtype(np.int64)
# ToDo: apply must return subclassed dtype
assert isinstance(res, Series)
tm.assert_series_equal(res.to_dense(), exp)
# df.T breaks
sparse = orig.T.to_sparse()
res = sparse.apply(lambda s: s[0], axis=0) # noqa
exp = orig.T.apply(lambda s: s[0], axis=0)
# TODO: no non-unique columns supported in sparse yet
# tm.assert_series_equal(res.to_dense(), exp)
def test_applymap(frame):
# just test that it works
result = frame.applymap(lambda x: x * 2)
assert isinstance(result, SparseDataFrame)
def test_apply_keep_sparse_dtype():
# GH 23744
sdf = SparseDataFrame(np.array([[0, 1, 0], [0, 0, 0], [0, 0, 1]]),
columns=['b', 'a', 'c'], default_fill_value=1)
df = DataFrame(sdf)
expected = sdf.apply(np.exp)
result = df.apply(np.exp)
tm.assert_frame_equal(expected, result)
| {
"content_hash": "ff2c247116178c872dd40ef04499e422",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 77,
"avg_line_length": 28.742857142857144,
"alnum_prop": 0.621272365805169,
"repo_name": "MJuddBooth/pandas",
"id": "b5ea0a5c90e1ab6cdf56b45aa9392a93149e3433",
"size": "3018",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/sparse/frame/test_apply.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406766"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14858932"
},
{
"name": "Shell",
"bytes": "29575"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
"""
Created on 28 Mar 2014
@author: Max Demian
"""
# Ideally, we would do this with collections.Counter or just str.count, but
# here are some ways to use data structures to count letters in a string.
#==============================================================================
# Dicts
#==============================================================================
# Using setdefault to count letters.
def letter_frequency(sentence):
    frequencies = {}
    for letter in sentence:
        # setdefault ensures the key exists (with 0) before incrementing.
        frequencies.setdefault(letter, 0)
        frequencies[letter] += 1
    return frequencies
# We could instead use the collections module.
# Passing int as the default factory works because int() returns 0 when
# called without arguments.
from collections import defaultdict
def letter_freq(sentence):
frequencies = defaultdict(int)
for letter in sentence:
frequencies[letter] += 1
return frequencies
print letter_frequency("banana")
print letter_freq("banana")
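# As noted at the top of this file, collections.Counter does the same job
# in one line. A hedged equivalent sketch (not in the original):
from collections import Counter
print Counter("banana")
# Counter({'a': 3, 'n': 2, 'b': 1})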
#==============================================================================
# Lists.& Tuples
#==============================================================================
import string
CHARS = list(string.ascii_letters) + [" "]
def letter_fre(sentence):
    # Create our base list with (letter, 0) tuples as items.
frequencies = [(c, 0) for c in CHARS]
for letter in sentence:
index = CHARS.index(letter)
frequencies[index] = (letter, frequencies[index][1] + 1)
return [i for i in frequencies if i[1] > 0]
print letter_fre("the quick brown fox jumps over the lazy dog")
class WeirdSortee(object):
def __init__(self, string, number, sort_num):
self.string = string
self.number = number
self.sort_num = sort_num
    def __lt__(self, other):
        if self.sort_num:
            return self.number < other.number
        return self.string < other.string
def __repr__(self):
return "{}:{}".format(self.string, self.number)
a = WeirdSortee("a", 4, True)
b = WeirdSortee("b", 3, True)
c = WeirdSortee("c", 2, True)
d = WeirdSortee("d", 1, True)
L = [a, b, c, d]
L.sort()
print L
for i in L:
i.sort_num = False
L.sort()
print L
# Sorting tuple pairs by either the first or second value.
x = [(1, "c"), (2, "a"), (3, "b")]
print sorted(x)
# Return the index 1 instead of 0 as sort argument/key.
x.sort(key=lambda i: i[1])
print x
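# A hedged aside (not in the original): operator.itemgetter(1) is the
# idiomatic equivalent of the lambda key above.
from operator import itemgetter
x.sort(key=itemgetter(1))
print x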
#==============================================================================
# Sets
#==============================================================================
my_artists = {"Sarah Brightman", "Guns N' Roses", "Opeth", "Vixy and Tony"}
auburns_artists = {"Nickelback", "Guns N' Roses", "Savage Garden"}
song_library = [("Phantom Of The Opera", "Sarah Brightman"),
("Knocking On Heaven's Door", "Guns N' Roses"),
("Captain Nemo", "Sarah Brightman"),
("Patterns In The Ivy", "Opeth"),
("November Rain", "Guns N' Roses"),
("Beautiful", "Sarah Brightman"),
("Mal's Song", "Vixy and Tony")]
artists = set()
for song, artist in song_library:
artists.add(artist)
print(artists)
{'key': 'value', 'key2': 'value2'} # Dict
{'key', 'value', 'key2', 'value2'} # Set
print "Opiates" in artists # yeah, right
# Get the common item (intersection) from sets with "&".
set1 = {1, 2, 3}
set2 = {3, 4, 5}
set3 = {0, 3, 6}
print set1 & set2 & set3
# The three major uses of sets: intersection, union and difference.
print("Both: {}".format(auburns_artists.intersection(my_artists)))
print("All: {}".format(my_artists.union(auburns_artists)))
print("Either but not both: {}".format(
my_artists.symmetric_difference(auburns_artists)))
# Subsets and supersets:
my_artists = {"Sarah Brightman", "Guns N' Roses",
"Opeth", "Vixy and Tony"}
bands = {"Guns N' Roses", "Opeth"}
print
print("my_artists is to bands:")
print("issuperset: {}".format(my_artists.issuperset(bands)))
print("issubset: {}".format(my_artists.issubset(bands)))
print("difference: {}".format(my_artists.difference(bands)))
print("*"*20)
print("bands is to my_artists:")
print("issuperset: {}".format(bands.issuperset(my_artists)))
print("issubset: {}".format(bands.issubset(my_artists)))
print("difference: {}".format(bands.difference(my_artists)))
# Extending the dictionary type to remember the order the keys are added in.
from collections import KeysView, ItemsView, ValuesView
class DictSorted(dict):
    def __new__(*args, **kwargs):
        # Note: the class itself arrives implicitly as args[0] here.
        new_dict = dict.__new__(*args, **kwargs)
        new_dict.ordered_keys = []
        return new_dict
def __setitem__(self, key, value):
'''self[key] = value syntax'''
if key not in self.ordered_keys:
self.ordered_keys.append(key)
super(DictSorted, self).__setitem__(key, value)
def setdefault(self, key, value):
if key not in self.ordered_keys:
self.ordered_keys.append(key)
return super(DictSorted, self).setdefault(key, value)
def keys(self):
return KeysView(self)
def values(self):
return ValuesView(self)
def items(self):
return ItemsView(self)
def __iter__(self):
'''for x in self syntax'''
return self.ordered_keys.__iter__()
ds = DictSorted()
d = {}
ds["a"] = 1
ds["b"] = 2
ds.setdefault("c", 3)
d["a"] = 1
d["b"] = 2
d.setdefault("c", 3)
for k, v in ds.items():
print k, v
for k, v in d.items():
print k, v
# Note: collections actually has this functionality with OrderedDict (along with
# other interesting data structures like namedtuple).
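# A hedged sketch of the stdlib equivalent mentioned above (not in the
# original): OrderedDict preserves insertion order just like DictSorted.
from collections import OrderedDict
od = OrderedDict()
od["a"] = 1
od["b"] = 2
od.setdefault("c", 3)
print od.keys()
# ['a', 'b', 'c']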
| {
"content_hash": "89209061f0aab20758ac8f3d537b2ff3",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 80,
"avg_line_length": 31.410112359550563,
"alnum_prop": 0.5845108209622608,
"repo_name": "mikar/60-days-of-python",
"id": "255ef6754b36d34d1658629fa13309ea0d1ae0c5",
"size": "5591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oop/datastructs/various.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10104"
},
{
"name": "Python",
"bytes": "349650"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
} |
import copy
import datetime
import hashlib
import os
class Struct(dict):
"""Like dict but does not whine for non-existent attributes"""
def __getattr__(self, name):
if name == 'id':
name = '_id'
try:
return self[name]
except KeyError:
return None
def __setattr__(self, name, value):
if name == 'id':
name = '_id'
self[name] = value
def md5(s):
return hashlib.md5(s).hexdigest()
def generate_token(length=32):
return os.urandom(length).encode('hex')
# Datetime utils
def daterange(start_date, end_date):
for n in range((end_date - start_date).days + 1):
yield start_date + datetime.timedelta(n)
def hours(date):
for n in range(0, 24):
yield date + datetime.timedelta(hours=n)
def start_of_day(date):
return datetime.datetime(*date.timetuple()[0:3])
def start_of_tomorrow(date):
return start_of_day(date + datetime.timedelta(days=1))
def start_of_hour(date):
return datetime.datetime(*date.timetuple()[0:4])
def start_of_month(date):
return datetime.datetime(*list(date.timetuple()[:2]) + [1])
def last_day_of_month(date):
date = start_of_month(date)
if date.month == 12:
return date.replace(day=31)
return date.replace(month=date.month+1, day=1) - datetime.timedelta(days=1)
def start_of_next_month(date):
return last_day_of_month(date) + datetime.timedelta(days=1)
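# Hedged doctest-style sketch for the date helpers above (added for
# illustration, not part of the original module):
# >>> list(daterange(datetime.date(2011, 1, 1), datetime.date(2011, 1, 3)))
# [datetime.date(2011, 1, 1), datetime.date(2011, 1, 2), datetime.date(2011, 1, 3)]
# >>> last_day_of_month(datetime.datetime(2011, 2, 1))
# datetime.datetime(2011, 2, 28, 0, 0)
# >>> start_of_next_month(datetime.datetime(2011, 2, 15))
# datetime.datetime(2011, 3, 1, 0, 0)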
# Copied from django with some modifications
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            # Django raises MultiValueDictKeyError here; that class is not
            # defined in this trimmed copy, so fall back to plain KeyError.
            raise KeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"""
Returns the list of values for the passed key. If key doesn't exist,
then an empty list is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key, [])
super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
    def copy(self):
        """Returns a shallow copy of this object."""
        return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key, []).append(value)
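# A hedged doctest-style sketch of update() (not in the original):
# >>> d = MultiValueDict({'name': ['Adrian']})
# >>> d.update({'name': 'Simon'})  # extends rather than replaces
# >>> d.getlist('name')
# ['Adrian', 'Simon']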
| {
"content_hash": "a06b5301a66fbfd2cf04fff118b2924c",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 97,
"avg_line_length": 28.004444444444445,
"alnum_prop": 0.6246627519441359,
"repo_name": "haldun/tornado-mongodb-template",
"id": "69dae9e7d4a9724aa8b44194a26e43a6d8905ab7",
"size": "6301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10420"
}
],
"symlink_target": ""
} |
from django.contrib.gis.db.models import PointField
from location_field.forms import spatial as forms
from location_field.models.base import BaseLocationField
class LocationField(BaseLocationField, PointField):
formfield_class = forms.LocationField
    def __init__(self, *args, **kwargs):
        super(LocationField, self).__init__(*args, **kwargs)
        # Strip the widget-specific options before re-initializing
        # PointField, which would not accept them.
        kwargs.pop('based_fields', None)
        kwargs.pop('zoom', None)
        kwargs.pop('suffix', None)
        PointField.__init__(self, *args, **kwargs)
| {
"content_hash": "5976d5b14ca88ace5860dff39fad2b8f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 60,
"avg_line_length": 30.647058823529413,
"alnum_prop": 0.6871401151631478,
"repo_name": "voodmania/django-location-field",
"id": "451a35f806aa93b8b0f77fd05c89b214cd8a3e34",
"size": "521",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "location_field/models/spatial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10373"
},
{
"name": "HTML",
"bytes": "168"
},
{
"name": "JavaScript",
"bytes": "36268"
},
{
"name": "Python",
"bytes": "12811"
}
],
"symlink_target": ""
} |
"""Supporting definitions for the Python regression tests."""
# if __name__ != 'test.test_support':
# raise ImportError('test_support must be imported from the test package')
import contextlib
# import errno
# import functools
# import gc
# import socket
import sys
import os
# import platform
# import shutil
import warnings
import unittest
# import importlib
import UserDict
import re  # used by _filterwarnings below
import time  # used by _waitfor on Windows
# import struct
# import sysconfig
try:
import thread
except ImportError:
thread = None
__all__ = [
"Error", "TestFailed", "have_unicode", "BasicTestRunner", "run_unittest",
"check_warnings", "check_py3k_warnings", "CleanImport",
"EnvironmentVarGuard"
]
# __all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
# "verbose", "use_resources", "max_memuse", "record_original_stdout",
# "get_original_stdout", "unload", "unlink", "rmtree", "forget",
# "is_resource_enabled", "requires", "requires_mac_ver",
# "find_unused_port", "bind_port",
# "fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
# "SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error",
# "open_urlresource", "check_warnings", "check_py3k_warnings",
# "CleanImport", "EnvironmentVarGuard", "captured_output",
# "captured_stdout", "TransientResource", "transient_internet",
# "run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
# "BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
# "threading_cleanup", "reap_threads", "start_threads", "cpython_only",
# "check_impl_detail", "get_attribute", "py3k_bytes",
# "import_fresh_module", "threading_cleanup", "reap_children",
# "strip_python_stderr", "IPV6_ENABLED", "run_with_tz"]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
# class ResourceDenied(unittest.SkipTest):
# """Test skipped because it requested a disallowed resource.
# This is raised when a test calls requires() for a resource that
# has not been enabled. It is used to distinguish between expected
# and unexpected skips.
# """
# @contextlib.contextmanager
# def _ignore_deprecated_imports(ignore=True):
# """Context manager to suppress package and module deprecation
# warnings when importing them.
# If ignore is False, this context manager has no effect."""
# if ignore:
# with warnings.catch_warnings():
# warnings.filterwarnings("ignore", ".+ (module|package)",
# DeprecationWarning)
# yield
# else:
# yield
# def import_module(name, deprecated=False):
# """Import and return the module to be tested, raising SkipTest if
# it is not available.
# If deprecated is True, any module or package deprecation messages
# will be suppressed."""
# with _ignore_deprecated_imports(deprecated):
# try:
# return importlib.import_module(name)
# except ImportError, msg:
# raise unittest.SkipTest(str(msg))
# def _save_and_remove_module(name, orig_modules):
# """Helper function to save and remove a module from sys.modules
# Raise ImportError if the module can't be imported."""
# # try to import the module and raise an error if it can't be imported
# if name not in sys.modules:
# __import__(name)
# del sys.modules[name]
# for modname in list(sys.modules):
# if modname == name or modname.startswith(name + '.'):
# orig_modules[modname] = sys.modules[modname]
# del sys.modules[modname]
# def _save_and_block_module(name, orig_modules):
# """Helper function to save and block a module in sys.modules
# Return True if the module was in sys.modules, False otherwise."""
# saved = True
# try:
# orig_modules[name] = sys.modules[name]
# except KeyError:
# saved = False
# sys.modules[name] = None
# return saved
# def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
# """Imports and returns a module, deliberately bypassing the sys.modules cache
# and importing a fresh copy of the module. Once the import is complete,
# the sys.modules cache is restored to its original state.
# Modules named in fresh are also imported anew if needed by the import.
# If one of these modules can't be imported, None is returned.
# Importing of modules named in blocked is prevented while the fresh import
# takes place.
# If deprecated is True, any module or package deprecation messages
# will be suppressed."""
# # NOTE: test_heapq, test_json, and test_warnings include extra sanity
# # checks to make sure that this utility function is working as expected
# with _ignore_deprecated_imports(deprecated):
# # Keep track of modules saved for later restoration as well
# # as those which just need a blocking entry removed
# orig_modules = {}
# names_to_remove = []
# _save_and_remove_module(name, orig_modules)
# try:
# for fresh_name in fresh:
# _save_and_remove_module(fresh_name, orig_modules)
# for blocked_name in blocked:
# if not _save_and_block_module(blocked_name, orig_modules):
# names_to_remove.append(blocked_name)
# fresh_module = importlib.import_module(name)
# except ImportError:
# fresh_module = None
# finally:
# for orig_name, module in orig_modules.items():
# sys.modules[orig_name] = module
# for name_to_remove in names_to_remove:
# del sys.modules[name_to_remove]
# return fresh_module
# def get_attribute(obj, name):
# """Get an attribute, raising SkipTest if AttributeError is raised."""
# try:
# attribute = getattr(obj, name)
# except AttributeError:
# raise unittest.SkipTest("module %s has no attribute %s" % (
# obj.__name__, name))
# else:
# return attribute
verbose = 1 # Flag set to 0 by regrtest.py
# use_resources = None # Flag set to [] by regrtest.py
# max_memuse = 0 # Disable bigmem tests (they will still be run with
# # small sizes, to make sure they work.)
# real_max_memuse = 0
# # _original_stdout is meant to hold stdout at the time regrtest began.
# # This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# # The point is to have some flavor of stdout the user can actually see.
# _original_stdout = None
# def record_original_stdout(stdout):
# global _original_stdout
# _original_stdout = stdout
# def get_original_stdout():
# return _original_stdout or sys.stdout
# def unload(name):
# try:
# del sys.modules[name]
# except KeyError:
# pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
        # required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
# _rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except OSError:
pass
# def rmdir(dirname):
# try:
# _rmdir(dirname)
# except OSError as error:
# # The directory need not exist.
# if error.errno != errno.ENOENT:
# raise
# def rmtree(path):
# try:
# _rmtree(path)
# except OSError, e:
# # Unix returns ENOENT, Windows returns ESRCH.
# if e.errno not in (errno.ENOENT, errno.ESRCH):
# raise
# def forget(modname):
# '''"Forget" a module was ever imported by removing it from sys.modules and
# deleting any .pyc and .pyo files.'''
# unload(modname)
# for dirname in sys.path:
# unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# # Deleting the .pyo file cannot be within the 'try' for the .pyc since
# # the chance exists that there is no .pyc (and thus the 'try' statement
# # is exited) but there is a .pyo file.
# unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
# # Check whether a gui is actually available
# def _is_gui_available():
# if hasattr(_is_gui_available, 'result'):
# return _is_gui_available.result
# reason = None
# if sys.platform.startswith('win'):
# # if Python is running as a service (such as the buildbot service),
# # gui interaction may be disallowed
# import ctypes
# import ctypes.wintypes
# UOI_FLAGS = 1
# WSF_VISIBLE = 0x0001
# class USEROBJECTFLAGS(ctypes.Structure):
# _fields_ = [("fInherit", ctypes.wintypes.BOOL),
# ("fReserved", ctypes.wintypes.BOOL),
# ("dwFlags", ctypes.wintypes.DWORD)]
# dll = ctypes.windll.user32
# h = dll.GetProcessWindowStation()
# if not h:
# raise ctypes.WinError()
# uof = USEROBJECTFLAGS()
# needed = ctypes.wintypes.DWORD()
# res = dll.GetUserObjectInformationW(h,
# UOI_FLAGS,
# ctypes.byref(uof),
# ctypes.sizeof(uof),
# ctypes.byref(needed))
# if not res:
# raise ctypes.WinError()
# if not bool(uof.dwFlags & WSF_VISIBLE):
# reason = "gui not available (WSF_VISIBLE flag not set)"
# elif sys.platform == 'darwin':
# # The Aqua Tk implementations on OS X can abort the process if
# # being called in an environment where a window server connection
# # cannot be made, for instance when invoked by a buildbot or ssh
# # process not running under the same user id as the current console
# # user. To avoid that, raise an exception if the window manager
# # connection is not available.
# from ctypes import cdll, c_int, pointer, Structure
# from ctypes.util import find_library
# app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
# if app_services.CGMainDisplayID() == 0:
# reason = "gui tests cannot run without OS X window manager"
# else:
# class ProcessSerialNumber(Structure):
# _fields_ = [("highLongOfPSN", c_int),
# ("lowLongOfPSN", c_int)]
# psn = ProcessSerialNumber()
# psn_p = pointer(psn)
# if ( (app_services.GetCurrentProcess(psn_p) < 0) or
# (app_services.SetFrontProcess(psn_p) < 0) ):
# reason = "cannot run without OS X gui process"
# # check on every platform whether tkinter can actually do anything
# if not reason:
# try:
# from Tkinter import Tk
# root = Tk()
# root.update()
# root.destroy()
# except Exception as e:
# err_string = str(e)
# if len(err_string) > 50:
# err_string = err_string[:50] + ' [...]'
# reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
# err_string)
# _is_gui_available.reason = reason
# _is_gui_available.result = not reason
# return _is_gui_available.result
# def is_resource_enabled(resource):
# """Test whether a resource is enabled.
# Known resources are set by regrtest.py. If not running under regrtest.py,
# all resources are assumed enabled unless use_resources has been set.
# """
# return use_resources is None or resource in use_resources
# def requires(resource, msg=None):
# """Raise ResourceDenied if the specified resource is not available."""
# if resource == 'gui' and not _is_gui_available():
# raise ResourceDenied(_is_gui_available.reason)
# if not is_resource_enabled(resource):
# if msg is None:
# msg = "Use of the `%s' resource not enabled" % resource
# raise ResourceDenied(msg)
# def requires_mac_ver(*min_version):
# """Decorator raising SkipTest if the OS is Mac OS X and the OS X
# version if less than min_version.
# For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
# is lesser than 10.5.
# """
# def decorator(func):
# @functools.wraps(func)
# def wrapper(*args, **kw):
# if sys.platform == 'darwin':
# version_txt = platform.mac_ver()[0]
# try:
# version = tuple(map(int, version_txt.split('.')))
# except ValueError:
# pass
# else:
# if version < min_version:
# min_version_txt = '.'.join(map(str, min_version))
# raise unittest.SkipTest(
# "Mac OS X %s or higher required, not %s"
# % (min_version_txt, version_txt))
# return func(*args, **kw)
# wrapper.min_version = min_version
# return wrapper
# return decorator
# # Don't use "localhost", since resolving it uses the DNS under recent
# # Windows versions (see issue #18792).
# HOST = "127.0.0.1"
# HOSTv6 = "::1"
# def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
# """Returns an unused port that should be suitable for binding. This is
# achieved by creating a temporary socket with the same family and type as
# the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
# the specified host address (defaults to 0.0.0.0) with the port set to 0,
# eliciting an unused ephemeral port from the OS. The temporary socket is
# then closed and deleted, and the ephemeral port is returned.
# Either this method or bind_port() should be used for any tests where a
# server socket needs to be bound to a particular port for the duration of
# the test. Which one to use depends on whether the calling code is creating
# a python socket, or if an unused port needs to be provided in a constructor
# or passed to an external program (i.e. the -accept argument to openssl's
# s_server mode). Always prefer bind_port() over find_unused_port() where
# possible. Hard coded ports should *NEVER* be used. As soon as a server
# socket is bound to a hard coded port, the ability to run multiple instances
# of the test simultaneously on the same host is compromised, which makes the
# test a ticking time bomb in a buildbot environment. On Unix buildbots, this
# may simply manifest as a failed test, which can be recovered from without
# intervention in most cases, but on Windows, the entire python process can
# completely and utterly wedge, requiring someone to log in to the buildbot
# and manually kill the affected process.
# (This is easy to reproduce on Windows, unfortunately, and can be traced to
# the SO_REUSEADDR socket option having different semantics on Windows versus
# Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
# listen and then accept connections on identical host/ports. An EADDRINUSE
# socket.error will be raised at some point (depending on the platform and
# the order bind and listen were called on each socket).
# However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
# will ever be raised when attempting to bind two identical host/ports. When
# accept() is called on each socket, the second caller's process will steal
# the port from the first caller, leaving them both in an awkwardly wedged
# state where they'll no longer respond to any signals or graceful kills, and
# must be forcibly killed via OpenProcess()/TerminateProcess().
# The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
# instead of SO_REUSEADDR, which effectively affords the same semantics as
# SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
# Source world compared to Windows ones, this is a common mistake. A quick
# look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
# openssl.exe is called with the 's_server' option, for example. See
# http://bugs.python.org/issue2550 for more info. The following site also
# has a very thorough description about the implications of both REUSEADDR
# and EXCLUSIVEADDRUSE on Windows:
# http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
# XXX: although this approach is a vast improvement on previous attempts to
# elicit unused ports, it rests heavily on the assumption that the ephemeral
# port returned to us by the OS won't immediately be dished back out to some
# other process when we close and delete our temporary socket but before our
# calling code has a chance to bind the returned port. We can deal with this
# issue if/when we come across it."""
# tempsock = socket.socket(family, socktype)
# port = bind_port(tempsock)
# tempsock.close()
# del tempsock
# return port
# def bind_port(sock, host=HOST):
# """Bind the socket to a free port and return the port number. Relies on
# ephemeral ports in order to ensure we are using an unbound port. This is
# important as many tests may be running simultaneously, especially in a
# buildbot environment. This method raises an exception if the sock.family
# is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
# or SO_REUSEPORT set on it. Tests should *never* set these socket options
# for TCP/IP sockets. The only case for setting these options is testing
# multicasting via multiple UDP sockets.
# Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
# on Windows), it will be set on the socket. This will prevent anyone else
# from bind()'ing to our host/port for the duration of the test.
# """
# if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
# if hasattr(socket, 'SO_REUSEADDR'):
# if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
# raise TestFailed("tests should never set the SO_REUSEADDR " \
# "socket option on TCP/IP sockets!")
# if hasattr(socket, 'SO_REUSEPORT'):
# try:
# if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
# raise TestFailed("tests should never set the SO_REUSEPORT " \
# "socket option on TCP/IP sockets!")
# except EnvironmentError:
# # Python's socket module was compiled using modern headers
# # thus defining SO_REUSEPORT but this process is running
# # under an older kernel that does not support SO_REUSEPORT.
# pass
# if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
# sock.bind((host, 0))
# port = sock.getsockname()[1]
# return port
# def _is_ipv6_enabled():
# """Check whether IPv6 is enabled on this host."""
# if socket.has_ipv6:
# sock = None
# try:
# sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
# sock.bind((HOSTv6, 0))
# return True
# except socket.error:
# pass
# finally:
# if sock:
# sock.close()
# return False
# IPV6_ENABLED = _is_ipv6_enabled()
# def system_must_validate_cert(f):
# """Skip the test on TLS certificate validation failures."""
# @functools.wraps(f)
# def dec(*args, **kwargs):
# try:
# f(*args, **kwargs)
# except IOError as e:
# if "CERTIFICATE_VERIFY_FAILED" in str(e):
# raise unittest.SkipTest("system does not contain "
# "necessary certificates")
# raise
# return dec
# FUZZ = 1e-6
# def fcmp(x, y): # fuzzy comparison function
# if isinstance(x, float) or isinstance(y, float):
# try:
# fuzz = (abs(x) + abs(y)) * FUZZ
# if abs(x-y) <= fuzz:
# return 0
# except:
# pass
# elif type(x) == type(y) and isinstance(x, (tuple, list)):
# for i in range(min(len(x), len(y))):
# outcome = fcmp(x[i], y[i])
# if outcome != 0:
# return outcome
# return (len(x) > len(y)) - (len(x) < len(y))
# return (x > y) - (x < y)
# # A constant likely larger than the underlying OS pipe buffer size, to
# # make writes blocking.
# # Windows limit seems to be around 512 B, and many Unix kernels have a
# # 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# # (see issue #17835 for a discussion of this number).
# PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
# # A constant likely larger than the underlying OS socket buffer size, to make
# # writes blocking.
# # The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
# # on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
# # for a discussion of this number).
# SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
# is_jython = sys.platform.startswith('java')
try:
unicode
have_unicode = True
except NameError:
have_unicode = False
requires_unicode = unittest.skipUnless(have_unicode, 'no unicode support')
# def u(s):
# return unicode(s, 'unicode-escape')
# FS_NONASCII: non-ASCII Unicode character encodable by
# sys.getfilesystemencoding(), or None if there is no such character.
FS_NONASCII = None
# if have_unicode:
# for character in (
# # First try printable and common characters to have a readable filename.
# # For each character, the encoding list are just example of encodings able
# # to encode the character (the list is not exhaustive).
# # U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
# unichr(0x00E6),
# # U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
# unichr(0x0130),
# # U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
# unichr(0x0141),
# # U+03C6 (Greek Small Letter Phi): cp1253
# unichr(0x03C6),
# # U+041A (Cyrillic Capital Letter Ka): cp1251
# unichr(0x041A),
# # U+05D0 (Hebrew Letter Alef): Encodable to cp424
# unichr(0x05D0),
# # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
# unichr(0x060C),
# # U+062A (Arabic Letter Teh): cp720
# unichr(0x062A),
# # U+0E01 (Thai Character Ko Kai): cp874
# unichr(0x0E01),
# # Then try more "special" characters. "special" because they may be
# # interpreted or displayed differently depending on the exact locale
# # encoding and the font.
# # U+00A0 (No-Break Space)
# unichr(0x00A0),
# # U+20AC (Euro Sign)
# unichr(0x20AC),
# ):
# try:
# character.encode(sys.getfilesystemencoding())\
# .decode(sys.getfilesystemencoding())
# except UnicodeError:
# pass
# else:
# FS_NONASCII = character
# break
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# # Unicode name only used if TEST_FN_ENCODING exists for the platform.
# if have_unicode:
# # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# # TESTFN_UNICODE is a filename that can be encoded using the
# # file system encoding, but *not* with the default (ascii) encoding
# if isinstance('', unicode):
# # python -U
# # XXX perhaps unicode() should accept Unicode strings?
# TESTFN_UNICODE = "@test-\xe0\xf2"
# else:
# # 2 latin characters.
# TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
# TESTFN_ENCODING = sys.getfilesystemencoding()
# # TESTFN_UNENCODABLE is a filename that should *not* be
# # able to be encoded by *either* the default or filesystem encoding.
# # This test really only makes sense on Windows NT platforms
# # which have special Unicode support in posixmodule.
# if (not hasattr(sys, "getwindowsversion") or
# sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
# TESTFN_UNENCODABLE = None
# else:
# # Japanese characters (I think - from bug 846133)
# TESTFN_UNENCODABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
# try:
# # XXX - Note - should be using TESTFN_ENCODING here - but for
# # Windows, "mbcs" currently always operates as if in
# # errors=ignore' mode - hence we get '?' characters rather than
# # the exception. 'Latin1' operates as we expect - ie, fails.
# # See [ 850997 ] mbcs encoding ignores errors
# TESTFN_UNENCODABLE.encode("Latin1")
# except UnicodeEncodeError:
# pass
# else:
# print \
# 'WARNING: The filename %r CAN be encoded by the filesystem. ' \
# 'Unicode filename tests may not be effective' \
# % TESTFN_UNENCODABLE
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "%s_%s_tmp" % (TESTFN, os.getpid())
# # Save the initial cwd
# SAVEDCWD = os.getcwd()
# @contextlib.contextmanager
# def change_cwd(path, quiet=False):
# """Return a context manager that changes the current working directory.
# Arguments:
# path: the directory to use as the temporary current working directory.
# quiet: if False (the default), the context manager raises an exception
# on error. Otherwise, it issues only a warning and keeps the current
# working directory the same.
# """
# saved_dir = os.getcwd()
# try:
# os.chdir(path)
# except OSError:
# if not quiet:
# raise
# warnings.warn('tests may fail, unable to change CWD to: ' + path,
# RuntimeWarning, stacklevel=3)
# try:
# yield os.getcwd()
# finally:
# os.chdir(saved_dir)
# @contextlib.contextmanager
# def temp_cwd(name='tempcwd', quiet=False):
# """
# Context manager that creates a temporary directory and set it as CWD.
# The new CWD is created in the current directory and it's named *name*.
# If *quiet* is False (default) and it's not possible to create or change
# the CWD, an error is raised. If it's True, only a warning is raised
# and the original CWD is used.
# """
# if (have_unicode and isinstance(name, unicode) and
# not os.path.supports_unicode_filenames):
# try:
# name = name.encode(sys.getfilesystemencoding() or 'ascii')
# except UnicodeEncodeError:
# if not quiet:
# raise unittest.SkipTest('unable to encode the cwd name with '
# 'the filesystem encoding.')
# saved_dir = os.getcwd()
# is_temporary = False
# try:
# os.mkdir(name)
# os.chdir(name)
# is_temporary = True
# except OSError:
# if not quiet:
# raise
# warnings.warn('tests may fail, unable to change the CWD to ' + name,
# RuntimeWarning, stacklevel=3)
# try:
# yield os.getcwd()
# finally:
# os.chdir(saved_dir)
# if is_temporary:
# rmtree(name)
# def findfile(file, here=__file__, subdir=None):
# """Try to find a file on sys.path and the working directory. If it is not
# found the argument passed to the function is returned (this does not
# necessarily signal failure; could still be the legitimate path)."""
# if os.path.isabs(file):
# return file
# if subdir is not None:
# file = os.path.join(subdir, file)
# path = sys.path
# path = [os.path.dirname(here)] + path
# for dn in path:
# fn = os.path.join(dn, file)
# if os.path.exists(fn): return fn
# return file
# def sortdict(dict):
# "Like repr(dict), but in sorted order."
# items = dict.items()
# items.sort()
# reprpairs = ["%r: %r" % pair for pair in items]
# withcommas = ", ".join(reprpairs)
# return "{%s}" % withcommas
# def make_bad_fd():
# """
# Create an invalid file descriptor by opening and closing a file and return
# its fd.
# """
# file = open(TESTFN, "wb")
# try:
# return file.fileno()
# finally:
# file.close()
# unlink(TESTFN)
# def check_syntax_error(testcase, statement):
# testcase.assertRaises(SyntaxError, compile, statement,
# '<test string>', 'exec')
# def open_urlresource(url, check=None):
# import urlparse, urllib2
# filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
# fn = os.path.join(os.path.dirname(__file__), "data", filename)
# def check_valid_file(fn):
# f = open(fn)
# if check is None:
# return f
# elif check(f):
# f.seek(0)
# return f
# f.close()
# if os.path.exists(fn):
# f = check_valid_file(fn)
# if f is not None:
# return f
# unlink(fn)
# # Verify the requirement before downloading the file
# requires('urlfetch')
# print >> get_original_stdout(), '\tfetching %s ...' % url
# f = urllib2.urlopen(url, timeout=15)
# try:
# with open(fn, "wb") as out:
# s = f.read()
# while s:
# out.write(s)
# s = f.read()
# finally:
# f.close()
# f = check_valid_file(fn)
# if f is not None:
# return f
# raise TestFailed('invalid resource "%s"' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
# frame = sys._getframe(2)
# registry = frame.f_globals.get('__warningregistry__')
# if registry:
# registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swap the module, we need to look up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = [warning.message for warning in w]
missing = []
for msg, cat in filters:
seen = False
for exc in reraise[:]:
message = str(exc)
# Filter out the matching messages
if (re.match(msg, message, re.I) and
issubclass(exc.__class__, cat)):
seen = True
reraise.remove(exc)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %r" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
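# Hedged usage sketch (illustration only; mirrors the docstring above):
#   with check_warnings(("ignored", UserWarning)):
#       warnings.warn("ignored warning", UserWarning)
# The recorded warning matches the ("ignored", UserWarning) filter, so the
# block exits cleanly; an unmatched warning would raise AssertionError.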
@contextlib.contextmanager
def check_py3k_warnings(*filters, **kwargs):
"""Context manager to silence py3k warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default False)
Without argument, it defaults to:
check_py3k_warnings(("", DeprecationWarning), quiet=False)
"""
if sys.py3kwarning:
if not filters:
filters = (("", DeprecationWarning),)
else:
# It should not raise any py3k warning
filters = ()
return _filterwarnings(filters, kwargs.get('quiet'))
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
                # It is possible that module_name is just an alias for
                # another module (e.g. stub for modules renamed in 3.x).
                # In that case, we also need to delete the real module to
                # clear the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(UserDict.DictMixin):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
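# Hedged usage sketch (illustration only):
#   with EnvironmentVarGuard() as env:
#       env.set('PYTHONDONTWRITEBYTECODE', '1')
#       pass  # code that depends on the variable
# On exit, os.environ is restored to its prior state.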
# class DirsOnSysPath(object):
# """Context manager to temporarily add directories to sys.path.
# This makes a copy of sys.path, appends any directories given
# as positional arguments, then reverts sys.path to the copied
# settings when the context ends.
# Note that *all* sys.path modifications in the body of the
# context manager, including replacement of the object,
# will be reverted at the end of the block.
# """
# def __init__(self, *paths):
# self.original_value = sys.path[:]
# self.original_object = sys.path
# sys.path.extend(paths)
# def __enter__(self):
# return self
# def __exit__(self, *ignore_exc):
# sys.path = self.original_object
# sys.path[:] = self.original_value
# class TransientResource(object):
# """Raise ResourceDenied if an exception is raised while the context manager
# is in effect that matches the specified exception and attributes."""
# def __init__(self, exc, **kwargs):
# self.exc = exc
# self.attrs = kwargs
# def __enter__(self):
# return self
# def __exit__(self, type_=None, value=None, traceback=None):
# """If type_ is a subclass of self.exc and value has attributes matching
# self.attrs, raise ResourceDenied. Otherwise let the exception
# propagate (if any)."""
# if type_ is not None and issubclass(self.exc, type_):
# for attr, attr_value in self.attrs.iteritems():
# if not hasattr(value, attr):
# break
# if getattr(value, attr) != attr_value:
# break
# else:
# raise ResourceDenied("an optional resource is not available")
# @contextlib.contextmanager
# def transient_internet(resource_name, timeout=30.0, errnos=()):
# """Return a context manager that raises ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions."""
# default_errnos = [
# ('ECONNREFUSED', 111),
# ('ECONNRESET', 104),
# ('EHOSTUNREACH', 113),
# ('ENETUNREACH', 101),
# ('ETIMEDOUT', 110),
# ]
# default_gai_errnos = [
# ('EAI_AGAIN', -3),
# ('EAI_FAIL', -4),
# ('EAI_NONAME', -2),
# ('EAI_NODATA', -5),
# # Windows defines EAI_NODATA as 11001 but idiotic getaddrinfo()
# # implementation actually returns WSANO_DATA i.e. 11004.
# ('WSANO_DATA', 11004),
# ]
# denied = ResourceDenied("Resource '%s' is not available" % resource_name)
# captured_errnos = errnos
# gai_errnos = []
# if not captured_errnos:
# captured_errnos = [getattr(errno, name, num)
# for (name, num) in default_errnos]
# gai_errnos = [getattr(socket, name, num)
# for (name, num) in default_gai_errnos]
# def filter_error(err):
# n = getattr(err, 'errno', None)
# if (isinstance(err, socket.timeout) or
# (isinstance(err, socket.gaierror) and n in gai_errnos) or
# n in captured_errnos):
# if not verbose:
# sys.stderr.write(denied.args[0] + "\n")
# raise denied
# old_timeout = socket.getdefaulttimeout()
# try:
# if timeout is not None:
# socket.setdefaulttimeout(timeout)
# yield
# except IOError as err:
# # urllib can wrap original socket errors multiple times (!), we must
# # unwrap to get at the original error.
# while True:
# a = err.args
# if len(a) >= 1 and isinstance(a[0], IOError):
# err = a[0]
# # The error can also be wrapped as args[1]:
# # except socket.error as msg:
# # raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
# elif len(a) >= 2 and isinstance(a[1], IOError):
# err = a[1]
# else:
# break
# filter_error(err)
# raise
# # XXX should we catch generic exceptions and look for their
# # __cause__ or __context__?
# finally:
# socket.setdefaulttimeout(old_timeout)
# @contextlib.contextmanager
# def captured_output(stream_name):
# """Return a context manager used by captured_stdout and captured_stdin
# that temporarily replaces the sys stream *stream_name* with a StringIO."""
# import StringIO
# orig_stdout = getattr(sys, stream_name)
# setattr(sys, stream_name, StringIO.StringIO())
# try:
# yield getattr(sys, stream_name)
# finally:
# setattr(sys, stream_name, orig_stdout)
# def captured_stdout():
# """Capture the output of sys.stdout:
# with captured_stdout() as s:
# print "hello"
# self.assertEqual(s.getvalue(), "hello")
# """
# return captured_output("stdout")
# def captured_stderr():
# return captured_output("stderr")
# def captured_stdin():
# return captured_output("stdin")
# def gc_collect():
# """Force as many objects as possible to be collected.
# In non-CPython implementations of Python, this is needed because timely
# deallocation is not guaranteed by the garbage collector. (Even in CPython
# this can be the case in case of reference cycles.) This means that __del__
# methods may be called later than expected and weakrefs may remain alive for
# longer than expected. This function tries its best to force all garbage
# objects to disappear.
# """
# gc.collect()
# if is_jython:
# time.sleep(0.1)
# gc.collect()
# gc.collect()
# _header = '2P'
# if hasattr(sys, "gettotalrefcount"):
# _header = '2P' + _header
# _vheader = _header + 'P'
# def calcobjsize(fmt):
# return struct.calcsize(_header + fmt + '0P')
# def calcvobjsize(fmt):
# return struct.calcsize(_vheader + fmt + '0P')
# _TPFLAGS_HAVE_GC = 1<<14
# _TPFLAGS_HEAPTYPE = 1<<9
# def check_sizeof(test, o, size):
# import _testcapi
# result = sys.getsizeof(o)
# # add GC header size
# if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
# ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
# size += _testcapi.SIZEOF_PYGC_HEAD
# msg = 'wrong size for %s: got %d, expected %d' \
# % (type(o), result, size)
# test.assertEqual(result, size, msg)
# #=======================================================================
# # Decorator for running a function in a different locale, correctly resetting
# # it afterwards.
# def run_with_locale(catstr, *locales):
# def decorator(func):
# def inner(*args, **kwds):
# try:
# import locale
# category = getattr(locale, catstr)
# orig_locale = locale.setlocale(category)
# except AttributeError:
# # if the test author gives us an invalid category string
# raise
# except:
# # cannot retrieve original locale, so do nothing
# locale = orig_locale = None
# else:
# for loc in locales:
# try:
# locale.setlocale(category, loc)
# break
# except:
# pass
# # now run the function, resetting the locale on exceptions
# try:
# return func(*args, **kwds)
# finally:
# if locale and orig_locale:
# locale.setlocale(category, orig_locale)
# inner.func_name = func.func_name
# inner.__doc__ = func.__doc__
# return inner
# return decorator
# #=======================================================================
# # Decorator for running a function in a specific timezone, correctly
# # resetting it afterwards.
# def run_with_tz(tz):
# def decorator(func):
# def inner(*args, **kwds):
# try:
# tzset = time.tzset
# except AttributeError:
# raise unittest.SkipTest("tzset required")
# if 'TZ' in os.environ:
# orig_tz = os.environ['TZ']
# else:
# orig_tz = None
# os.environ['TZ'] = tz
# tzset()
# # now run the function, resetting the tz on exceptions
# try:
# return func(*args, **kwds)
# finally:
# if orig_tz is None:
# del os.environ['TZ']
# else:
# os.environ['TZ'] = orig_tz
# time.tzset()
# inner.__name__ = func.__name__
# inner.__doc__ = func.__doc__
# return inner
# return decorator
# #=======================================================================
# # Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# # Some handy shorthands. Note that these are used for byte-limits as well
# # as size-limits, in the various bigmem tests
# _1M = 1024*1024
# _1G = 1024 * _1M
# _2G = 2 * _1G
# _4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
# def set_memlimit(limit):
# global max_memuse
# global real_max_memuse
# sizes = {
# 'k': 1024,
# 'm': _1M,
# 'g': _1G,
# 't': 1024*_1G,
# }
# m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
# re.IGNORECASE | re.VERBOSE)
# if m is None:
# raise ValueError('Invalid memory limit %r' % (limit,))
# memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
# real_max_memuse = memlimit
# if memlimit > MAX_Py_ssize_t:
# memlimit = MAX_Py_ssize_t
# if memlimit < _2G - 1:
# raise ValueError('Memory limit %r too low to be useful' % (limit,))
# max_memuse = memlimit
# def bigmemtest(minsize, memuse, overhead=5*_1M):
# """Decorator for bigmem tests.
# 'minsize' is the minimum useful size for the test (in arbitrary,
# test-interpreted units.) 'memuse' is the number of 'bytes per size' for
# the test, or a good estimate of it. 'overhead' specifies fixed overhead,
# independent of the testsize, and defaults to 5Mb.
# The decorator tries to guess a good value for 'size' and passes it to
# the decorated test function. If minsize * memuse is more than the
# allowed memory use (as defined by max_memuse), the test is skipped.
# Otherwise, minsize is adjusted upward to use up to max_memuse.
# """
# def decorator(f):
# def wrapper(self):
# if not max_memuse:
# # If max_memuse is 0 (the default),
# # we still want to run the tests with size set to a few kb,
# # to make sure they work. We still want to avoid using
# # too much memory, though, but we do that noisily.
# maxsize = 5147
# self.assertFalse(maxsize * memuse + overhead > 20 * _1M)
# else:
# maxsize = int((max_memuse - overhead) / memuse)
# if maxsize < minsize:
# # Really ought to print 'test skipped' or something
# if verbose:
# sys.stderr.write("Skipping %s because of memory "
# "constraint\n" % (f.__name__,))
# return
# # Try to keep some breathing room in memory use
# maxsize = max(maxsize - 50 * _1M, minsize)
# return f(self, maxsize)
# wrapper.minsize = minsize
# wrapper.memuse = memuse
# wrapper.overhead = overhead
# return wrapper
# return decorator
# def precisionbigmemtest(size, memuse, overhead=5*_1M, dry_run=True):
# def decorator(f):
# def wrapper(self):
# if not real_max_memuse:
# maxsize = 5147
# else:
# maxsize = size
# if ((real_max_memuse or not dry_run)
# and real_max_memuse < maxsize * memuse):
# if verbose:
# sys.stderr.write("Skipping %s because of memory "
# "constraint\n" % (f.__name__,))
# return
# return f(self, maxsize)
# wrapper.size = size
# wrapper.memuse = memuse
# wrapper.overhead = overhead
# return wrapper
# return decorator
# def bigaddrspacetest(f):
# """Decorator for tests that fill the address space."""
# def wrapper(self):
# if max_memuse < MAX_Py_ssize_t:
# if verbose:
# sys.stderr.write("Skipping %s because of memory "
# "constraint\n" % (f.__name__,))
# else:
# return f(self)
# return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner(object):
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
# def requires_resource(resource):
# if resource == 'gui' and not _is_gui_available():
# return unittest.skip(_is_gui_available.reason)
# if is_resource_enabled(resource):
# return _id
# else:
# return unittest.skip("resource {0!r} is not enabled".format(resource))
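# Stub: in this trimmed copy of test_support, cpython_only turns the
# decorated test into a no-op instead of running it; the upstream
# implementation is preserved, commented out, below.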
def cpython_only(test):
return lambda *arg, **kw: None
# def cpython_only(test):
# """
# Decorator for tests only applicable on CPython.
# """
# return impl_detail(cpython=True)(test)
# def impl_detail(msg=None, **guards):
# if check_impl_detail(**guards):
# return _id
# if msg is None:
# guardnames, default = _parse_guards(guards)
# if default:
# msg = "implementation detail not available on {0}"
# else:
# msg = "implementation detail specific to {0}"
# guardnames = sorted(guardnames.keys())
# msg = msg.format(' or '.join(guardnames))
# return unittest.skip(msg)
# def _parse_guards(guards):
# # Returns a tuple ({platform_name: run_me}, default_value)
# if not guards:
# return ({'cpython': True}, False)
# is_true = guards.values()[0]
# assert guards.values() == [is_true] * len(guards) # all True or all False
# return (guards, not is_true)
# # Use the following check to guard CPython's implementation-specific tests --
# # or to run them only on the implementation(s) guarded by the arguments.
# def check_impl_detail(**guards):
# """This function returns True or False depending on the host platform.
# Examples:
# if check_impl_detail(): # only on CPython (default)
# if check_impl_detail(jython=True): # only on Jython
# if check_impl_detail(cpython=False): # everywhere except on CPython
# """
# guards, default = _parse_guards(guards)
# return guards.get(platform.python_implementation().lower(), default)
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose:
err += "; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
_run_suite(suite)
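# Hedged usage sketch (not part of the original module): a test module
# typically hands its TestCase classes to run_unittest() from a test_main()
# entry point, e.g.:
#
#   class ExampleTest(unittest.TestCase):
#       def test_truth(self):
#           self.assertTrue(True)
#
#   def test_main():
#       run_unittest(ExampleTest)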
# #=======================================================================
# # Check for the presence of docstrings.
# HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
# sys.platform == 'win32' or
# sysconfig.get_config_var('WITH_DOC_STRINGS'))
# requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
# "test requires docstrings")
# #=======================================================================
# # doctest driver.
# def run_doctest(module, verbosity=None):
# """Run doctest on the given module. Return (#failures, #tests).
# If optional argument verbosity is not specified (or is None), pass
# test_support's belief about verbosity on to doctest. Else doctest's
# usual behavior is used (it searches sys.argv for -v).
# """
# import doctest
# if verbosity is None:
# verbosity = verbose
# else:
# verbosity = None
# # Direct doctest output (normally just errors) to real stdout; doctest
# # output shouldn't be compared by regrtest.
# save_stdout = sys.stdout
# sys.stdout = get_original_stdout()
# try:
# f, t = doctest.testmod(module, verbose=verbosity)
# if f:
# raise TestFailed("%d of %d doctests failed" % (f, t))
# finally:
# sys.stdout = save_stdout
# if verbose:
# print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
# return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
if thread:
return (thread._count(),)
else:
return (1,)
def threading_cleanup(nb_threads):
if not thread:
return
_MAX_COUNT = 10
for count in range(_MAX_COUNT):
n = thread._count()
if n == nb_threads:
break
time.sleep(0.1)
# XXX print a warning in case of failure?
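# Hedged usage sketch (mirrors the commented-out reap_threads decorator
# below): tests that spawn threads bracket the test body so leftover threads
# are accounted for before refleak checking:
#
#   key = threading_setup()
#   try:
#       run_threaded_test()      # hypothetical test body
#   finally:
#       threading_cleanup(*key)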
# def reap_threads(func):
# """Use this function when threads are being used. This will
# ensure that the threads are cleaned up even when the test fails.
# If threading is unavailable this function does nothing.
# """
# if not thread:
# return func
# @functools.wraps(func)
# def decorator(*args):
# key = threading_setup()
# try:
# return func(*args)
# finally:
# threading_cleanup(*key)
# return decorator
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
# @contextlib.contextmanager
# def start_threads(threads, unlock=None):
# threads = list(threads)
# started = []
# try:
# try:
# for t in threads:
# t.start()
# started.append(t)
# except:
# if verbose:
# print("Can't start %d threads, only %d threads started" %
# (len(threads), len(started)))
# raise
# yield
# finally:
# if unlock:
# unlock()
# endtime = starttime = time.time()
# for timeout in range(1, 16):
# endtime += 60
# for t in started:
# t.join(max(endtime - time.time(), 0.01))
# started = [t for t in started if t.isAlive()]
# if not started:
# break
# if verbose:
# print('Unable to join %d threads during a period of '
# '%d minutes' % (len(started), timeout))
# started = [t for t in started if t.isAlive()]
# if started:
# raise AssertionError('Unable to join %d threads' % len(started))
# @contextlib.contextmanager
# def swap_attr(obj, attr, new_val):
# """Temporary swap out an attribute with a new object.
# Usage:
# with swap_attr(obj, "attr", 5):
# ...
# This will set obj.attr to 5 for the duration of the with: block,
# restoring the old value at the end of the block. If `attr` doesn't
# exist on `obj`, it will be created and then deleted at the end of the
# block.
# """
# if hasattr(obj, attr):
# real_val = getattr(obj, attr)
# setattr(obj, attr, new_val)
# try:
# yield
# finally:
# setattr(obj, attr, real_val)
# else:
# setattr(obj, attr, new_val)
# try:
# yield
# finally:
# delattr(obj, attr)
# def py3k_bytes(b):
# """Emulate the py3k bytes() constructor.
# NOTE: This is only a best effort function.
# """
# try:
# # memoryview?
# return b.tobytes()
# except AttributeError:
# try:
# # iterable of ints?
# return b"".join(chr(x) for x in b)
# except TypeError:
# return bytes(b)
# def args_from_interpreter_flags():
# """Return a list of command-line arguments reproducing the current
# settings in sys.flags."""
# import subprocess
# return subprocess._args_from_interpreter_flags()
# def strip_python_stderr(stderr):
# """Strip the stderr of a Python process from potential debug output
# emitted by the interpreter.
# This will typically be run on the result of the communicate() method
# of a subprocess.Popen object.
# """
# stderr = re.sub(br"\[\d+ refs\]\r?\n?$", b"", stderr).strip()
# return stderr
# def check_free_after_iterating(test, iter, cls, args=()):
# class A(cls):
# def __del__(self):
# done[0] = True
# try:
# next(it)
# except StopIteration:
# pass
# done = [False]
# it = iter(A(*args))
# # Issue 26494: Shouldn't crash
# test.assertRaises(StopIteration, next, it)
# # The sequence should be deallocated just after the end of iterating
# gc_collect()
# test.assertTrue(done[0])
| {
"content_hash": "724634087778ee38365cc765a6266bfb",
"timestamp": "",
"source": "github",
"line_count": 1687,
"max_line_length": 97,
"avg_line_length": 36.84113811499704,
"alnum_prop": 0.5814065743109523,
"repo_name": "pombredanne/grumpy",
"id": "906ace0c0b7db6d923d06eb4a19ee0980f081ee1",
"size": "62151",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "grumpy-runtime-src/third_party/stdlib/test/test_support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "1015748"
},
{
"name": "Makefile",
"bytes": "13386"
},
{
"name": "Python",
"bytes": "352996"
}
],
"symlink_target": ""
} |
"""
This is a general function for all broadening. Importing this
will let the user do rotational broadening, macroturbulent
broadening, and Gaussian broadening (reducing the resolution)
"""
from __future__ import print_function, division, absolute_import
from scipy.special import erf # Error function
from scipy.signal import fftconvolve
import numpy as np
from astropy import constants, units
from .RotBroad_Fast import Broaden as RotBroad
def MacroBroad(data, vmacro, extend=True):
"""
This broadens the data by a given macroturbulent velocity.
It works for small wavelength ranges. I need to make a better
version that is accurate for large wavelength ranges! Sorry
    for the terrible variable names; they were copied from
convol.pro in AnalyseBstar (Karolien Lefever)
Parameters:
===========
-data: kglib.utils.DataStructures.xypoint instance
Stores the data to be broadened. The data MUST
be equally-spaced before calling this!
-vmacro: float
The macroturbulent velocity, in km/s
-extend: boolean
If true, the y-axis will be extended to avoid edge-effects
Returns:
========
A broadened version of data.
"""
# Make the kernel
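    # This appears to be the radial-tangential macroturbulence profile of
    # Gray ("The Observation and Analysis of Stellar Photospheres"), assuming
    # equal radial and tangential components: with m = vmacro * lambda0 / c,
    # Theta(dl) ~ exp(-(dl/m)**2) + sqrt(pi) * (dl/m) * (erf(dl/m) - 1).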
c = constants.c.cgs.value * units.cm.to(units.km)
sq_pi = np.sqrt(np.pi)
lambda0 = np.median(data.x)
xspacing = data.x[1] - data.x[0]
mr = vmacro * lambda0 / c
ccr = 2 / (sq_pi * mr)
px = np.arange(-data.size() / 2, data.size() / 2 + 1) * xspacing
pxmr = abs(px) / mr
profile = ccr * (np.exp(-pxmr ** 2) + sq_pi * pxmr * (erf(pxmr) - 1.0))
# Extend the xy axes to avoid edge-effects, if desired
if extend:
        # use floor division: true division is in effect (__future__ import),
        # and slice indices must be integers
        before = data.y[-profile.size // 2 + 1:]
        after = data.y[:profile.size // 2]
extended = np.r_[before, data.y, after]
first = data.x[0] - float(int(profile.size / 2.0 + 0.5)) * xspacing
last = data.x[-1] + float(int(profile.size / 2.0 + 0.5)) * xspacing
x2 = np.linspace(first, last, extended.size)
conv_mode = "valid"
else:
extended = data.y.copy()
x2 = data.x.copy()
conv_mode = "same"
# Do the convolution
newdata = data.copy()
newdata.y = fftconvolve(extended, profile / profile.sum(), mode=conv_mode)
return newdata
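# Hedged usage sketch (not part of the original module; assumes that
# kglib.utils.DataStructures.xypoint accepts x/y keyword arguments, which is
# an assumption about the constructor, and that the wavelength grid is
# equally spaced, as the docstring requires):
#
#   import numpy as np
#   from kglib.utils import DataStructures
#
#   x = np.linspace(5000.0, 5010.0, 1000)               # Angstroms, even grid
#   y = 1.0 - 0.5 * np.exp(-(x - 5005.0) ** 2 / 0.01)   # toy absorption line
#   data = DataStructures.xypoint(x=x, y=y)
#   broadened = MacroBroad(data, vmacro=10.0)           # 10 km/s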
| {
"content_hash": "ab8c5edfdc5ea6a6f6b8962d41e0c3a4",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 30.256410256410255,
"alnum_prop": 0.627542372881356,
"repo_name": "kgullikson88/gullikson-scripts",
"id": "26f29b76eefc85acff68e087f2d46fd91b8b6d77",
"size": "2360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kglib/stellar_models/Broaden.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "450023"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import itertools
import os
import re
from django import forms
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.admin import helpers
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db import router, models
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext, ugettext_lazy
from filer import settings
from filer.admin.forms import (CopyFilesAndFoldersForm, ResizeImagesForm,
RenameFilesForm)
from filer.admin.permissions import PrimitivePermissionAwareModelAdmin
from filer.admin.patched.admin_utils import get_deleted_objects
from filer.admin.tools import (userperms_for_request,
check_folder_edit_permissions,
check_files_edit_permissions,
check_files_read_permissions,
check_folder_read_permissions,
admin_each_context)
from filer.models import (Folder, FolderRoot, UnfiledImages, File, tools,
ImagesWithMissingData, FolderPermission, Image)
from filer.settings import FILER_PAGINATE_BY
from filer.thumbnail_processors import normalize_subject_location
from filer.utils.compatibility import (
get_delete_permission, quote, unquote, capfirst)
from filer.utils.filer_easy_thumbnails import FilerActionThumbnailer
from filer.views import (popup_status, popup_param, selectfolder_status,
selectfolder_param)
class AddFolderPopupForm(forms.ModelForm):
folder = forms.HiddenInput()
class Meta(object):
model = Folder
fields = ('name',)
class FolderAdmin(PrimitivePermissionAwareModelAdmin):
list_display = ('name',)
exclude = ('parent',)
list_per_page = 20
list_filter = ('owner',)
search_fields = ['name', ]
raw_id_fields = ('owner',)
save_as = True # see ImageAdmin
actions = ['files_set_public', 'files_set_private',
'delete_files_or_folders', 'move_files_and_folders',
'copy_files_and_folders', 'resize_images', 'rename_files']
directory_listing_template = 'admin/filer/folder/directory_listing.html'
order_by_file_fields = ('_file_size', 'original_filename', 'name', 'owner',
'uploaded_at', 'modified_at')
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
parent_id = request.GET.get('parent_id', None)
if not parent_id:
parent_id = request.POST.get('parent_id', None)
if parent_id:
return AddFolderPopupForm
else:
folder_form = super(FolderAdmin, self).get_form(
request, obj=None, **kwargs)
def folder_form_clean(form_obj):
cleaned_data = form_obj.cleaned_data
folders_with_same_name = Folder.objects.filter(
parent=form_obj.instance.parent,
name=cleaned_data['name'])
if form_obj.instance.pk:
folders_with_same_name = folders_with_same_name.exclude(
pk=form_obj.instance.pk)
if folders_with_same_name.exists():
raise ValidationError(
'Folder with this name already exists.')
return cleaned_data
# attach clean to the default form rather than defining a new form class
folder_form.clean = folder_form_clean
return folder_form
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
r = form.save(commit=False)
parent_id = request.GET.get('parent_id', None)
if not parent_id:
parent_id = request.POST.get('parent_id', None)
if parent_id:
parent = Folder.objects.get(id=parent_id)
r.parent = parent
return r
def response_change(self, request, obj):
"""
Overrides the default to be able to forward to the directory listing
instead of the default change_list_view
"""
r = super(FolderAdmin, self).response_change(request, obj)
# Code from django ModelAdmin to determine changelist on the fly
if 'Location' in r and r['Location']:
# it was a successful save
if (r['Location'] in ['../'] or
r['Location'] == self._get_post_url(obj)):
if obj.parent:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': obj.parent.id})
else:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
else:
# this means it probably was a save_and_continue_editing
pass
return r
def render_change_form(self, request, context, add=False, change=False,
form_url='', obj=None):
extra_context = {'show_delete': True,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request), }
context.update(extra_context)
return super(FolderAdmin, self).render_change_form(
request=request, context=context, add=False,
change=False, form_url=form_url, obj=obj)
def delete_view(self, request, object_id, extra_context=None):
"""
        Overrides the default to enable redirecting to the directory view
        after deletion of a folder.
        We need to fetch the object and find its parent before calling
        super(), because super() deletes the object and makes it impossible
        to determine the parent folder to redirect to.
"""
parent_folder = None
try:
obj = self.get_queryset(request).get(pk=unquote(object_id))
parent_folder = obj.parent
except self.model.DoesNotExist:
obj = None
r = super(FolderAdmin, self).delete_view(
request=request, object_id=object_id,
extra_context=extra_context)
url = r.get("Location", None)
if url in ["../../../../", "../../"] or url == self._get_post_url(obj):
if parent_folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': parent_folder.id})
else:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
return r
def icon_img(self, xs):
        return mark_safe(('<img src="%simg/icons/plainfolder_32x32.png" '
                          'alt="Folder Icon" />') % django_settings.MEDIA_URL)
icon_img.allow_tags = True
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(FolderAdmin, self).get_urls()
from filer import views
url_patterns = patterns('',
# we override the default list view with our own directory listing
# of the root directories
url(r'^$',
self.admin_site.admin_view(self.directory_listing),
name='filer-directory_listing-root'),
url(r'^last/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'last'},
name='filer-directory_listing-last'),
url(r'^(?P<folder_id>\d+)/list/$',
self.admin_site.admin_view(self.directory_listing),
name='filer-directory_listing'),
url(r'^(?P<folder_id>\d+)/make_folder/$',
self.admin_site.admin_view(views.make_folder),
name='filer-directory_listing-make_folder'),
url(r'^make_folder/$',
self.admin_site.admin_view(views.make_folder),
name='filer-directory_listing-make_root_folder'),
url(r'^images_with_missing_data/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'images_with_missing_data'},
name='filer-directory_listing-images_with_missing_data'),
url(r'^unfiled_images/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'unfiled_images'},
name='filer-directory_listing-unfiled_images'),
)
url_patterns.extend(urls)
return url_patterns
# custom views
def directory_listing(self, request, folder_id=None, viewtype=None):
clipboard = tools.get_user_clipboard(request.user)
if viewtype == 'images_with_missing_data':
folder = ImagesWithMissingData()
elif viewtype == 'unfiled_images':
folder = UnfiledImages()
elif viewtype == 'last':
last_folder_id = request.session.get('filer_last_folder_id')
try:
Folder.objects.get(id=last_folder_id)
except Folder.DoesNotExist:
url = reverse('admin:filer-directory_listing-root')
url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request, "&"))
else:
url = reverse('admin:filer-directory_listing', kwargs={'folder_id': last_folder_id})
url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
elif folder_id is None:
folder = FolderRoot()
else:
folder = get_object_or_404(Folder, id=folder_id)
request.session['filer_last_folder_id'] = folder_id
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except ValueError:
pass
# search
q = request.GET.get('q', None)
if q:
search_terms = unquote(q).split(" ")
else:
search_terms = []
q = ''
# Limit search results to current folder.
limit_search_to_folder = request.GET.get('limit_search_to_folder',
False) in (True, 'on')
if len(search_terms) > 0:
if folder and limit_search_to_folder and not folder.is_root:
# Do not include current folder itself in search results.
folder_qs = folder.get_descendants(include_self=False)
# Limit search results to files in the current folder or any
# nested folder.
file_qs = File.objects.filter(
folder__in=folder.get_descendants(include_self=True))
else:
folder_qs = Folder.objects.all()
file_qs = File.objects.all()
folder_qs = self.filter_folder(folder_qs, search_terms)
file_qs = self.filter_file(file_qs, search_terms)
show_result_count = True
else:
folder_qs = folder.children.all()
file_qs = folder.files.all()
show_result_count = False
folder_qs = folder_qs.order_by('name')
order_by = request.GET.get('order_by', None)
if order_by is not None:
order_by = order_by.split(',')
order_by = [field for field in order_by
if re.sub(r'^-', '', field) in self.order_by_file_fields]
if len(order_by) > 0:
file_qs = file_qs.order_by(*order_by)
folder_children = []
folder_files = []
if folder.is_root:
folder_children += folder.virtual_folders
perms = FolderPermission.objects.get_read_id_list(request.user)
root_exclude_kw = {'parent__isnull': False, 'parent__id__in': perms}
if perms != 'All':
file_qs = file_qs.filter(models.Q(folder__id__in=perms) | models.Q(owner=request.user))
folder_qs = folder_qs.filter(models.Q(id__in=perms) | models.Q(owner=request.user))
else:
root_exclude_kw.pop('parent__id__in')
if folder.is_root:
folder_qs = folder_qs.exclude(**root_exclude_kw)
folder_children += folder_qs
folder_files += file_qs
try:
permissions = {
'has_edit_permission': folder.has_edit_permission(request),
'has_read_permission': folder.has_read_permission(request),
'has_add_children_permission':
folder.has_add_children_permission(request),
}
except:
permissions = {}
if order_by is None or len(order_by) == 0:
folder_files.sort()
items = folder_children + folder_files
items_permissions = [(item, {'change': self.has_change_permission(request, item)}) for item in items]
paginator = Paginator(items_permissions, FILER_PAGINATE_BY)
# Are we moving to clipboard?
if request.method == 'POST' and '_save' not in request.POST:
# TODO: Refactor/remove clipboard parts
for f in folder_files:
if "move-to-clipboard-%d" % (f.id,) in request.POST:
clipboard = tools.get_user_clipboard(request.user)
if f.has_edit_permission(request):
tools.move_file_to_clipboard([f], clipboard)
return HttpResponseRedirect(request.get_full_path())
else:
raise PermissionDenied
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, files_queryset=file_qs, folders_queryset=folder_qs)
if response:
return response
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, files_queryset=file_qs, folders_queryset=folder_qs)
if response:
return response
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', paginator.count)
        # If the requested page is out of range, deliver the last page of results.
try:
paginated_items = paginator.page(request.GET.get('page', 1))
except PageNotAnInteger:
paginated_items = paginator.page(1)
except EmptyPage:
paginated_items = paginator.page(paginator.num_pages)
context = admin_each_context(self.admin_site, request)
context.update({
'folder': folder,
'clipboard_files': File.objects.filter(
in_clipboards__clipboarditem__clipboard__user=request.user
).distinct(),
'paginator': paginator,
'paginated_items': paginated_items, # [(item, item_perms), ]
'uploader_connections': settings.FILER_UPLOADER_CONNECTIONS,
'permissions': permissions,
'permstest': userperms_for_request(folder, request),
'current_url': request.path,
'title': 'Directory listing for %s' % folder.name,
'search_string': ' '.join(search_terms),
'q': urlquote(q),
'show_result_count': show_result_count,
'folder_children': folder_children,
'folder_files': folder_files,
'limit_search_to_folder': limit_search_to_folder,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),
# needed in the admin/base.html template for logout links
'root_path': reverse('admin:index'),
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(paginated_items.object_list)},
'selection_note_all': selection_note_all % {'total_count': paginator.count},
'media': self.media,
'enable_permissions': settings.FILER_ENABLE_PERMISSIONS,
'can_make_folder': request.user.is_superuser or (folder.is_root and settings.FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS) or permissions.get("has_add_children_permission"),
})
return render(request, self.directory_listing_template, context)
def filter_folder(self, qs, terms=()):
# Source: https://github.com/django/django/blob/1.7.1/django/contrib/admin/options.py#L939-L947 flake8: noqa
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
for term in terms:
filters = models.Q()
for filter_ in self.search_fields:
filters |= models.Q(**{construct_search(filter_): term})
for filter_ in self.get_owner_filter_lookups():
filters |= models.Q(**{filter_: term})
qs = qs.filter(filters)
return qs
def filter_file(self, qs, terms=()):
for term in terms:
filters = (models.Q(name__icontains=term) |
models.Q(description__icontains=term) |
models.Q(original_filename__icontains=term))
for filter_ in self.get_owner_filter_lookups():
filters |= models.Q(**{filter_: term})
qs = qs.filter(filters)
return qs
@property
def owner_search_fields(self):
"""
Returns all the fields that are CharFields except for password from the
User model. For the built-in User model, that means username,
first_name, last_name, and email.
"""
try:
from django.contrib.auth import get_user_model
except ImportError: # Django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
return [
field.name for field in User._meta.fields
if isinstance(field, models.CharField) and field.name != 'password'
]
def get_owner_filter_lookups(self):
return [
'owner__{field}__icontains'.format(field=field)
for field in self.owner_search_fields
]
def response_action(self, request, files_queryset, folders_queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
# If we didn't get an action from the chosen form that's invalid
# POST data, so by deleting action it'll fail the validation check
# below. So no need to do anything here
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing
# will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
return None
if not select_across:
selected_files = []
selected_folders = []
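                # Checkbox values are namespaced as "file-<pk>" or
                # "folder-<pk>"; strip the prefix to recover the raw pks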
for pk in selected:
if pk[:5] == "file-":
selected_files.append(pk[5:])
else:
selected_folders.append(pk[7:])
# Perform the action only on the selected objects
files_queryset = files_queryset.filter(pk__in=selected_files)
folders_queryset = folders_queryset.filter(
pk__in=selected_folders)
response = func(self, request, files_queryset, folders_queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg)
return None
def get_actions(self, request):
actions = super(FolderAdmin, self).get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def move_to_clipboard(self, request, files_queryset, folders_queryset):
"""
Action which moves the selected files and files in selected folders
to clipboard.
"""
if not self.has_change_permission(request):
raise PermissionDenied
if request.method != 'POST':
return None
clipboard = tools.get_user_clipboard(request.user)
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
# TODO: Display a confirmation page if moving more than X files to
# clipboard?
        # files_count is a one-element list so the nested move_files closure
        # can mutate it (Python 2 has no nonlocal statement)
files_count = [0]
def move_files(files):
files_count[0] += tools.move_file_to_clipboard(files, clipboard)
def move_folders(folders):
for f in folders:
move_files(f.files)
move_folders(f.children.all())
move_files(files_queryset)
move_folders(folders_queryset)
self.message_user(request, _("Successfully moved %(count)d files to "
"clipboard.") % {"count": files_count[0]})
return None
move_to_clipboard.short_description = ugettext_lazy(
"Move selected files to clipboard")
def files_set_public_or_private(self, request, set_public, files_queryset,
folders_queryset):
"""
        Action which enables or disables permissions for the selected files
        and for the files in the selected folders (sets them private or public).
"""
if not self.has_change_permission(request):
raise PermissionDenied
if request.method != 'POST':
return None
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
        # files_count is a one-element list so the nested set_files closure
        # can mutate it (Python 2 has no nonlocal statement)
files_count = [0]
def set_files(files):
for f in files:
if f.is_public != set_public:
f.is_public = set_public
f.save()
files_count[0] += 1
def set_folders(folders):
for f in folders:
set_files(f.files)
set_folders(f.children.all())
set_files(files_queryset)
set_folders(folders_queryset)
if set_public:
self.message_user(request, _("Successfully disabled permissions for %(count)d files.") % {"count": files_count[0], })
else:
self.message_user(request, _("Successfully enabled permissions for %(count)d files.") % {"count": files_count[0], })
return None
def files_set_private(self, request, files_queryset, folders_queryset):
return self.files_set_public_or_private(request, False, files_queryset,
folders_queryset)
files_set_private.short_description = ugettext_lazy(
"Enable permissions for selected files")
def files_set_public(self, request, files_queryset, folders_queryset):
return self.files_set_public_or_private(request, True, files_queryset,
folders_queryset)
files_set_public.short_description = ugettext_lazy(
"Disable permissions for selected files")
def delete_files_or_folders(self, request, files_queryset, folders_queryset):
"""
Action which deletes the selected files and/or folders.
        This action first displays a confirmation page which shows all the
        deletable files and/or folders or, if the user lacks permission on
        one of the related children (foreign keys), a "permission denied" message.
Next, it deletes all selected files and/or folders and redirects back to
the folder.
"""
opts = self.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not self.has_delete_permission(request):
raise PermissionDenied
current_folder = self._get_current_action_folder(
request, files_queryset, folders_queryset)
all_protected = []
# Populate deletable_objects, a data structure of all related objects
# that will also be deleted. Hopefully this also checks for necessary
# permissions.
# TODO: Check if permissions are really verified
using = router.db_for_write(self.model)
deletable_files, perms_needed_files, protected_files = get_deleted_objects(files_queryset, files_queryset.model._meta, request.user, self.admin_site, using)
deletable_folders, perms_needed_folders, protected_folders = get_deleted_objects(folders_queryset, folders_queryset.model._meta, request.user, self.admin_site, using)
all_protected.extend(protected_files)
all_protected.extend(protected_folders)
all_deletable_objects = [deletable_files, deletable_folders]
all_perms_needed = perms_needed_files.union(perms_needed_folders)
# The user has already confirmed the deletion. Do the deletion and
# return a None to display the change list view again.
if request.POST.get('post'):
if all_perms_needed:
raise PermissionDenied
n = files_queryset.count() + folders_queryset.count()
if n:
# delete all explicitly selected files
for f in files_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all files in all selected folders and their children
# This would happen automatically by ways of the delete
# cascade, but then the individual .delete() methods won't be
# called and the files won't be deleted from the filesystem.
folder_ids = set()
for folder in folders_queryset:
folder_ids.add(folder.id)
folder_ids.update(
folder.get_descendants().values_list('id', flat=True))
for f in File.objects.filter(folder__in=folder_ids):
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all folders
for f in folders_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
self.message_user(request, _("Successfully deleted %(count)d files and/or folders.") % {"count": n, })
# Return None to display the change list page again.
return None
if all_perms_needed or all_protected:
title = _("Cannot delete files and/or folders")
else:
title = _("Are you sure?")
context = admin_each_context(self.admin_site, request)
context.update({
"title": title,
"instance": current_folder,
"breadcrumbs_action": _("Delete files and/or folders"),
"deletable_objects": all_deletable_objects,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": all_perms_needed,
"protected": all_protected,
"opts": opts,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(
request,
"admin/filer/delete_selected_files_confirmation.html",
context
)
delete_files_or_folders.short_description = ugettext_lazy(
"Delete selected files and/or folders")
# Copied from django.contrib.admin.util
def _format_callback(self, obj, user, admin_site, perms_needed):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = get_delete_permission(opts)
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe('%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name), force_text(obj))
def _check_copy_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
except PermissionDenied:
return True
return False
def _check_move_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
check_files_edit_permissions(request, files_queryset)
check_folder_edit_permissions(request, folders_queryset)
except PermissionDenied:
return True
return False
def _get_current_action_folder(self, request, files_queryset,
folders_queryset):
if files_queryset:
return files_queryset[0].folder
elif folders_queryset:
return folders_queryset[0].parent
else:
return None
def _list_folders_to_copy_or_move(self, request, folders):
for fo in folders:
yield self._format_callback(fo, request.user, self.admin_site, set())
children = list(self._list_folders_to_copy_or_move(request, fo.children.all()))
children.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(fo.files)])
if children:
yield children
def _list_all_to_copy_or_move(self, request, files_queryset, folders_queryset):
to_copy_or_move = list(self._list_folders_to_copy_or_move(request, folders_queryset))
to_copy_or_move.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(files_queryset)])
return to_copy_or_move
def _list_all_destination_folders_recursive(self, request, folders_queryset, current_folder, folders, allow_self, level):
for fo in folders:
if not allow_self and fo in folders_queryset:
# We do not allow moving to selected folders or their descendants
continue
if not fo.has_read_permission(request):
continue
# We do not allow copying/moving back to the folder itself
enabled = (allow_self or fo != current_folder) and fo.has_add_children_permission(request)
            yield (fo, (mark_safe(("&nbsp;&nbsp;" * level) + force_text(fo)), enabled))
for c in self._list_all_destination_folders_recursive(request, folders_queryset, current_folder, fo.children.all(), allow_self, level + 1):
yield c
def _list_all_destination_folders(self, request, folders_queryset, current_folder, allow_self):
root_folders = Folder.objects.filter(parent__isnull=True).order_by('name')
return list(self._list_all_destination_folders_recursive(request, folders_queryset, current_folder, root_folders, allow_self, 0))
def _move_files_and_folders_impl(self, files_queryset, folders_queryset, destination):
for f in files_queryset:
f.folder = destination
f.save()
for f in folders_queryset:
f.move_to(destination, 'last-child')
f.save()
def move_files_and_folders(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_move_perms(request, files_queryset, folders_queryset)
to_move = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
folders = self._list_all_destination_folders(request, folders_queryset, current_folder, False)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
try:
destination = Folder.objects.get(pk=request.POST.get('destination'))
except Folder.DoesNotExist:
raise PermissionDenied
folders_dict = dict(folders)
if destination not in folders_dict or not folders_dict[destination][1]:
raise PermissionDenied
# We count only topmost files and folders here
n = files_queryset.count() + folders_queryset.count()
conflicting_names = [folder.name for folder in Folder.objects.filter(parent=destination, name__in=folders_queryset.values('name'))]
if conflicting_names:
messages.error(request, _("Folders with names %s already exist at the selected "
"destination") % ", ".join(conflicting_names))
elif n:
self._move_files_and_folders_impl(files_queryset, folders_queryset, destination)
self.message_user(request, _("Successfully moved %(count)d files and/or folders to folder '%(destination)s'.") % {
"count": n,
"destination": destination,
})
return None
context = admin_each_context(self.admin_site, request)
context.update({
"title": _("Move files and/or folders"),
"instance": current_folder,
"breadcrumbs_action": _("Move files and/or folders"),
"to_move": to_move,
"destination_folders": folders,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(request, "admin/filer/folder/choose_move_destination.html", context)
move_files_and_folders.short_description = ugettext_lazy("Move selected files and/or folders")
def _rename_file(self, file_obj, form_data, counter, global_counter):
original_basename, original_extension = os.path.splitext(file_obj.original_filename)
if file_obj.name:
current_basename, current_extension = os.path.splitext(file_obj.name)
else:
current_basename = ""
current_extension = ""
file_obj.name = form_data['rename_format'] % {
'original_filename': file_obj.original_filename,
'original_basename': original_basename,
'original_extension': original_extension,
'current_filename': file_obj.name or "",
'current_basename': current_basename,
'current_extension': current_extension,
'current_folder': file_obj.folder.name,
'counter': counter + 1, # 1-based
'global_counter': global_counter + 1, # 1-based
}
file_obj.save()
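    # Hedged example (the format string below is hypothetical, not from the
    # original source): with rename_format set to
    #   '%(current_basename)s_%(counter)03d%(current_extension)s'
    # a file named "photo.jpg" is renamed to "photo_001.jpg", the next file
    # in the same folder to "photo_002.jpg", and so on.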
def _rename_files(self, files, form_data, global_counter):
n = 0
for f in sorted(files):
self._rename_file(f, form_data, n, global_counter + n)
n += 1
return n
def _rename_folder(self, folder, form_data, global_counter):
return self._rename_files_impl(folder.files.all(), folder.children.all(), form_data, global_counter)
def _rename_files_impl(self, files_queryset, folders_queryset, form_data, global_counter):
n = 0
for f in folders_queryset:
n += self._rename_folder(f, form_data, global_counter + n)
n += self._rename_files(files_queryset, form_data, global_counter + n)
return n
def rename_files(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_move_perms(request, files_queryset, folders_queryset)
to_rename = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = RenameFilesForm(request.POST)
if form.is_valid():
if files_queryset.count() + folders_queryset.count():
n = self._rename_files_impl(files_queryset, folders_queryset, form.cleaned_data, 0)
self.message_user(request, _("Successfully renamed %(count)d files.") % {
"count": n,
})
return None
else:
form = RenameFilesForm()
context = admin_each_context(self.admin_site, request)
context.update({
"title": _("Rename files"),
"instance": current_folder,
"breadcrumbs_action": _("Rename files"),
"to_rename": to_rename,
"rename_form": form,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the rename format selection page
return render(request, "admin/filer/folder/choose_rename_format.html", context)
rename_files.short_description = ugettext_lazy("Rename files")
def _generate_new_filename(self, filename, suffix):
basename, extension = os.path.splitext(filename)
return basename + suffix + extension
def _copy_file(self, file_obj, destination, suffix, overwrite):
if overwrite:
# Not yet implemented as we have to find a portable (for different storage backends) way to overwrite files
raise NotImplementedError
        # We assume here that we are operating on already-saved database
        # objects with the current database state available
filename = self._generate_new_filename(file_obj.file.name, suffix)
        # Due to how multi-table model inheritance works, both pk and id must
        # be set to None so that save() inserts a new row
file_obj.pk = None
file_obj.id = None
file_obj.save()
file_obj.folder = destination
file_obj.file = file_obj._copy_file(filename)
file_obj.original_filename = self._generate_new_filename(file_obj.original_filename, suffix)
file_obj.save()
def _copy_files(self, files, destination, suffix, overwrite):
for f in files:
self._copy_file(f, destination, suffix, overwrite)
return len(files)
def _get_available_name(self, destination, name):
count = itertools.count(1)
original = name
while destination.contains_folder(name):
name = "%s_%s" % (original, next(count))
return name
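    # e.g. copying "Pictures" into a destination that already contains
    # "Pictures" and "Pictures_1" yields the available name "Pictures_2"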
def _copy_folder(self, folder, destination, suffix, overwrite):
if overwrite:
# Not yet implemented as we have to find a portable (for different storage backends) way to overwrite files
raise NotImplementedError
        # TODO: Should we also support merging into an existing folder of the
        # same name, rather than only creating a renamed copy?
# TODO: Is this a race-condition? Would this be a problem?
foldername = self._get_available_name(destination, folder.name)
old_folder = Folder.objects.get(pk=folder.pk)
        # Due to how multi-table model inheritance works, both pk and id must
        # be set to None so that save() inserts a new row
folder.pk = None
folder.id = None
folder.name = foldername
folder.insert_at(destination, 'last-child', True) # We save folder here
for perm in FolderPermission.objects.filter(folder=old_folder):
perm.pk = None
perm.id = None
perm.folder = folder
perm.save()
return 1 + self._copy_files_and_folders_impl(old_folder.files.all(), old_folder.children.all(), folder, suffix, overwrite)
def _copy_files_and_folders_impl(self, files_queryset, folders_queryset, destination, suffix, overwrite):
n = self._copy_files(files_queryset, destination, suffix, overwrite)
for f in folders_queryset:
n += self._copy_folder(f, destination, suffix, overwrite)
return n
def copy_files_and_folders(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_copy_perms(request, files_queryset, folders_queryset)
to_copy = self._list_all_to_copy_or_move(request, files_queryset, folders_queryset)
folders = self._list_all_destination_folders(request, folders_queryset, current_folder, False)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = CopyFilesAndFoldersForm(request.POST)
if form.is_valid():
try:
destination = Folder.objects.get(pk=request.POST.get('destination'))
except Folder.DoesNotExist:
raise PermissionDenied
folders_dict = dict(folders)
if destination not in folders_dict or not folders_dict[destination][1]:
raise PermissionDenied
if files_queryset.count() + folders_queryset.count():
                    # We count all files and folders here (recursively)
n = self._copy_files_and_folders_impl(files_queryset, folders_queryset, destination, form.cleaned_data['suffix'], False)
self.message_user(request, _("Successfully copied %(count)d files and/or folders to folder '%(destination)s'.") % {
"count": n,
"destination": destination,
})
return None
else:
form = CopyFilesAndFoldersForm()
try:
selected_destination_folder = int(request.POST.get('destination', 0))
except ValueError:
if current_folder:
selected_destination_folder = current_folder.pk
else:
selected_destination_folder = 0
context = admin_each_context(self.admin_site, request)
context.update({
"title": _("Copy files and/or folders"),
"instance": current_folder,
"breadcrumbs_action": _("Copy files and/or folders"),
"to_copy": to_copy,
"destination_folders": folders,
"selected_destination_folder": selected_destination_folder,
"copy_form": form,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(request, "admin/filer/folder/choose_copy_destination.html", context)
copy_files_and_folders.short_description = ugettext_lazy("Copy selected files and/or folders")
def _check_resize_perms(self, request, files_queryset, folders_queryset):
try:
check_files_read_permissions(request, files_queryset)
check_folder_read_permissions(request, folders_queryset)
check_files_edit_permissions(request, files_queryset)
except PermissionDenied:
return True
return False
def _list_folders_to_resize(self, request, folders):
for fo in folders:
children = list(self._list_folders_to_resize(request, fo.children.all()))
children.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(fo.files) if isinstance(f, Image)])
if children:
yield self._format_callback(fo, request.user, self.admin_site, set())
yield children
def _list_all_to_resize(self, request, files_queryset, folders_queryset):
to_resize = list(self._list_folders_to_resize(request, folders_queryset))
to_resize.extend([self._format_callback(f, request.user, self.admin_site, set()) for f in sorted(files_queryset) if isinstance(f, Image)])
return to_resize
def _new_subject_location(self, original_width, original_height, new_width, new_height, x, y, crop):
# TODO: We could probably do better
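        # For now simply recenter: the original (x, y) and the crop flag are
        # ignored and the subject is placed at the middle of the resized image.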
return (round(new_width / 2), round(new_height / 2))
def _resize_image(self, image, form_data):
original_width = float(image.width)
original_height = float(image.height)
thumbnailer = FilerActionThumbnailer(file=image.file.file, name=image.file.name, source_storage=image.file.source_storage, thumbnail_storage=image.file.source_storage)
# This should overwrite the original image
new_image = thumbnailer.get_thumbnail({
'size': tuple(int(form_data[d] or 0) for d in ('width', 'height')),
'crop': form_data['crop'],
'upscale': form_data['upscale'],
'subject_location': image.subject_location,
})
image.file.file = new_image.file
image.generate_sha1()
image.save() # Also gets new width and height
subject_location = normalize_subject_location(image.subject_location)
if subject_location:
(x, y) = subject_location
x = float(x)
y = float(y)
new_width = float(image.width)
new_height = float(image.height)
(new_x, new_y) = self._new_subject_location(original_width, original_height, new_width, new_height, x, y, form_data['crop'])
image.subject_location = "%d,%d" % (new_x, new_y)
image.save()
def _resize_images(self, files, form_data):
n = 0
for f in files:
if isinstance(f, Image):
self._resize_image(f, form_data)
n += 1
return n
def _resize_folder(self, folder, form_data):
return self._resize_images_impl(folder.files.all(), folder.children.all(), form_data)
def _resize_images_impl(self, files_queryset, folders_queryset, form_data):
n = self._resize_images(files_queryset, form_data)
for f in folders_queryset:
n += self._resize_folder(f, form_data)
return n
def resize_images(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(request, files_queryset, folders_queryset)
perms_needed = self._check_resize_perms(request, files_queryset, folders_queryset)
to_resize = self._list_all_to_resize(request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
if perms_needed:
raise PermissionDenied
form = ResizeImagesForm(request.POST)
if form.is_valid():
if form.cleaned_data.get('thumbnail_option'):
form.cleaned_data['width'] = form.cleaned_data['thumbnail_option'].width
form.cleaned_data['height'] = form.cleaned_data['thumbnail_option'].height
form.cleaned_data['crop'] = form.cleaned_data['thumbnail_option'].crop
form.cleaned_data['upscale'] = form.cleaned_data['thumbnail_option'].upscale
if files_queryset.count() + folders_queryset.count():
                    # We count all files here (recursively)
n = self._resize_images_impl(files_queryset, folders_queryset, form.cleaned_data)
self.message_user(request, _("Successfully resized %(count)d images.") % {"count": n, })
return None
else:
form = ResizeImagesForm()
context = admin_each_context(self.admin_site, request)
context.update({
"title": _("Resize images"),
"instance": current_folder,
"breadcrumbs_action": _("Resize images"),
"to_resize": to_resize,
"resize_form": form,
"cmsplugin_enabled": 'cmsplugin_filer_image' in django_settings.INSTALLED_APPS,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the resize options page
return render(request, "admin/filer/folder/choose_images_resize_options.html", context)
resize_images.short_description = ugettext_lazy("Resize selected images")
| {
"content_hash": "1c383d0c3ae46548c89d10772e9ee357",
"timestamp": "",
"source": "github",
"line_count": 1238,
"max_line_length": 186,
"avg_line_length": 44.11066235864297,
"alnum_prop": 0.5892801552857587,
"repo_name": "DylannCordel/django-filer",
"id": "2d167de17275706c22d02e5796251670b46d2269",
"size": "54634",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "filer/admin/folderadmin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84475"
},
{
"name": "HTML",
"bytes": "72813"
},
{
"name": "JavaScript",
"bytes": "46053"
},
{
"name": "Python",
"bytes": "465559"
},
{
"name": "Ruby",
"bytes": "1157"
},
{
"name": "Shell",
"bytes": "139"
}
],
"symlink_target": ""
} |
import unittest
import re
import os
import os.path
import shutil
import tempfile
import tarfile
import xml.etree.ElementTree as ET
import io
import stat
import mimetypes
import flask
from werkzeug.utils import cached_property
import browsepy
import browsepy.file
import browsepy.manager
import browsepy.__main__
import browsepy.compat
import browsepy.tests.utils as test_utils
PY_LEGACY = browsepy.compat.PY_LEGACY
range = browsepy.compat.range # noqa
class FileMock(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class PluginMock(object):
registered_arguments_manager = None
registered_arguments = False
registered_plugin_manager = None
registered_plugin = False
def register_arguments(self, manager):
self.registered_arguments_manager = manager
self.registered_arguments = True
manager.register_argument('--pluginmock', action='store_true')
def register_plugin(self, manager):
self.registered_plugin_manager = manager
self.registered_plugin = True
class AppMock(object):
config = browsepy.app.config.copy()
class Page(object):
if hasattr(ET.Element, 'itertext'):
@classmethod
def itertext(cls, element):
return element.itertext()
else:
        # Older Python 2.7 minor versions lack Element.itertext
@classmethod
def itertext(cls, element):
yield element.text or ''
for child in element:
for text in cls.itertext(child):
yield text
yield child.tail or ''
def __init__(self, data, response=None):
self.data = data
self.response = response
@classmethod
def innerText(cls, element):
return ''.join(cls.itertext(element))
@classmethod
def from_source(cls, source, response=None):
return cls(source, response)
class ListPage(Page):
    path_strip_re = re.compile(r'\s+/\s+')
def __init__(self, path, directories, files, removable, upload, source,
response=None):
self.path = path
self.directories = directories
self.files = files
self.removable = removable
self.upload = upload
self.source = source
self.response = response
@classmethod
def from_source(cls, source, response=None):
html = ET.fromstring(source)
rows = [
(
row[0].attrib.get('class') == 'icon inode',
row[1].find('.//a').attrib['href'],
any(button.attrib.get('class') == 'button remove'
for button in row[2].findall('.//a'))
)
for row in html.findall('.//table/tbody/tr')
]
return cls(
cls.path_strip_re.sub(
'/',
cls.innerText(html.find('.//h1'))
).strip(),
[url for isdir, url, removable in rows if isdir],
[url for isdir, url, removable in rows if not isdir],
all(removable
for isdir, url, removable in rows
) if rows else False,
html.find('.//form//input[@type=\'file\']') is not None,
source,
response
)
class ConfirmPage(Page):
def __init__(self, path, name, back, source, response=None):
self.path = path
self.name = name
self.back = back
self.source = source
self.response = response
@classmethod
def from_source(cls, source, response=None):
html = ET.fromstring(source)
name = cls.innerText(html.find('.//strong')).strip()
prefix = html.find('.//strong').attrib.get('data-prefix', '')
return cls(
prefix + name,
name,
html.find('.//form[@method=\'get\']').attrib['action'],
source,
response
)
class PageException(Exception):
def __init__(self, status, *args):
self.status = status
super(PageException, self).__init__(status, *args)
class Page404Exception(PageException):
pass
class Page302Exception(PageException):
pass
class TestCompat(unittest.TestCase):
module = browsepy.compat
def _warn(self, message, category=None, stacklevel=None):
if not hasattr(self, '_warnings'):
self._warnings = []
self._warnings.append({
'message': message,
'category': category,
'stacklevel': stacklevel
})
@cached_property
def assertWarnsRegex(self):
supa = super(TestCompat, self)
if hasattr(supa, 'assertWarnsRegex'):
return supa.assertWarnsRegex
return self.customAssertWarnsRegex
def customAssertWarnsRegex(self, expected_warning, expected_regex, fnc,
*args, **kwargs):
import warnings
old_warn = warnings.warn
warnings.warn = self._warn
try:
fnc(*args, **kwargs)
finally:
warnings.warn = old_warn
warnings = ()
if hasattr(self, '_warnings'):
warnings = self._warnings
del self._warnings
regex = re.compile(expected_regex)
self.assertTrue(any(
warn['category'] == expected_warning and
regex.match(warn['message'])
for warn in warnings
))
def test_which(self):
self.assertTrue(self.module.which('python'))
self.assertIsNone(self.module.which('lets-put-a-wrong-executable'))
def test_fsdecode(self):
path = b'/a/\xc3\xb1'
self.assertEqual(
self.module.fsdecode(path, os_name='posix', fs_encoding='utf-8'),
path.decode('utf-8')
)
path = b'/a/\xf1'
self.assertEqual(
self.module.fsdecode(path, os_name='nt', fs_encoding='latin-1'),
path.decode('latin-1')
)
path = b'/a/\xf1'
self.assertRaises(
UnicodeDecodeError,
self.module.fsdecode,
path,
fs_encoding='utf-8',
errors='strict'
)
def test_fsencode(self):
path = b'/a/\xc3\xb1'
self.assertEqual(
self.module.fsencode(
path.decode('utf-8'),
fs_encoding='utf-8'
),
path
)
path = b'/a/\xf1'
self.assertEqual(
self.module.fsencode(
path.decode('latin-1'),
fs_encoding='latin-1'
),
path
)
path = b'/a/\xf1'
self.assertEqual(
self.module.fsencode(path, fs_encoding='utf-8'),
path
)
def test_getcwd(self):
self.assertIsInstance(self.module.getcwd(), self.module.unicode)
self.assertIsInstance(
self.module.getcwd(
fs_encoding='latin-1',
cwd_fnc=lambda: b'\xf1'
),
self.module.unicode
)
self.assertIsInstance(
self.module.getcwd(
fs_encoding='utf-8',
cwd_fnc=lambda: b'\xc3\xb1'
),
self.module.unicode
)
def test_getdebug(self):
enabled = ('TRUE', 'true', 'True', '1', 'yes', 'enabled')
for case in enabled:
self.assertTrue(self.module.getdebug({'DEBUG': case}))
disabled = ('FALSE', 'false', 'False', '', '0', 'no', 'disabled')
for case in disabled:
self.assertFalse(self.module.getdebug({'DEBUG': case}))
def test_deprecated(self):
environ = {'DEBUG': 'true'}
self.assertWarnsRegex(
DeprecationWarning,
'DEPRECATED',
self.module.deprecated('DEPRECATED', environ)(lambda: None)
)
class TestApp(unittest.TestCase):
module = browsepy
generic_page_class = Page
list_page_class = ListPage
confirm_page_class = ConfirmPage
page_exceptions = {
404: Page404Exception,
302: Page302Exception,
None: PageException
}
def setUp(self):
self.app = self.module.app
self.base = tempfile.mkdtemp()
self.start = os.path.join(self.base, 'start')
self.remove = os.path.join(self.base, 'remove')
self.upload = os.path.join(self.base, 'upload')
os.mkdir(self.start)
os.mkdir(self.remove)
os.mkdir(self.upload)
open(os.path.join(self.start, 'testfile.txt'), 'w').close()
open(os.path.join(self.remove, 'testfile.txt'), 'w').close()
self.app.config.update(
directory_base=self.base,
directory_start=self.start,
directory_remove=self.remove,
directory_upload=self.upload,
SERVER_NAME='test',
)
self.base_directories = [
self.url_for('browse', path='remove'),
self.url_for('browse', path='start'),
self.url_for('browse', path='upload'),
]
self.start_files = [self.url_for('open', path='start/testfile.txt')]
self.remove_files = [self.url_for('open', path='remove/testfile.txt')]
self.upload_files = []
def clear(self, path):
assert path.startswith(self.base + os.sep), \
'Cannot clear directories out of base'
for sub in os.listdir(path):
sub = os.path.join(path, sub)
if os.path.isdir(sub):
shutil.rmtree(sub)
else:
os.remove(sub)
def tearDown(self):
shutil.rmtree(self.base)
test_utils.clear_flask_context()
def get(self, endpoint, **kwargs):
status_code = kwargs.pop('status_code', 200)
follow_redirects = kwargs.pop('follow_redirects', False)
if endpoint in ('index', 'browse'):
page_class = self.list_page_class
elif endpoint == 'remove':
page_class = self.confirm_page_class
elif endpoint == 'sort' and follow_redirects:
page_class = self.list_page_class
else:
page_class = self.generic_page_class
with kwargs.pop('client', None) or self.app.test_client() as client:
response = client.get(
self.url_for(endpoint, **kwargs),
follow_redirects=follow_redirects
)
if response.status_code != status_code:
raise self.page_exceptions.get(
response.status_code,
self.page_exceptions[None]
)(response.status_code)
result = page_class.from_source(response.data, response)
response.close()
test_utils.clear_flask_context()
return result
def post(self, endpoint, **kwargs):
status_code = kwargs.pop('status_code', 200)
data = kwargs.pop('data') if 'data' in kwargs else {}
with kwargs.pop('client', None) or self.app.test_client() as client:
response = client.post(
self.url_for(endpoint, **kwargs),
data=data,
follow_redirects=True
)
if response.status_code != status_code:
raise self.page_exceptions.get(
response.status_code,
self.page_exceptions[None]
)(response.status_code)
result = self.list_page_class.from_source(response.data, response)
test_utils.clear_flask_context()
return result
def url_for(self, endpoint, **kwargs):
with self.app.app_context():
return flask.url_for(endpoint, _external=False, **kwargs)
def test_index(self):
page = self.get('index')
self.assertEqual(page.path, '%s/start' % os.path.basename(self.base))
self.app.config['directory_start'] = os.path.join(self.base, '..')
self.assertRaises(
Page404Exception,
self.get, 'index'
)
self.app.config['directory_start'] = self.start
def test_browse(self):
basename = os.path.basename(self.base)
page = self.get('browse')
self.assertEqual(page.path, basename)
self.assertEqual(page.directories, self.base_directories)
self.assertFalse(page.removable)
self.assertFalse(page.upload)
page = self.get('browse', path='start')
self.assertEqual(page.path, '%s/start' % basename)
self.assertEqual(page.files, self.start_files)
self.assertFalse(page.removable)
self.assertFalse(page.upload)
page = self.get('browse', path='remove')
self.assertEqual(page.path, '%s/remove' % basename)
self.assertEqual(page.files, self.remove_files)
self.assertTrue(page.removable)
self.assertFalse(page.upload)
page = self.get('browse', path='upload')
self.assertEqual(page.path, '%s/upload' % basename)
self.assertEqual(page.files, self.upload_files)
self.assertFalse(page.removable)
self.assertTrue(page.upload)
self.assertRaises(
Page404Exception,
self.get, 'browse', path='..'
)
def test_open(self):
content = b'hello world'
with open(os.path.join(self.start, 'testfile3.txt'), 'wb') as f:
f.write(content)
page = self.get('open', path='start/testfile3.txt')
self.assertEqual(page.data, content)
self.assertRaises(
Page404Exception,
self.get, 'open', path='../shall_not_pass.txt'
)
def test_remove(self):
open(os.path.join(self.remove, 'testfile2.txt'), 'w').close()
page = self.get('remove', path='remove/testfile2.txt')
self.assertEqual(page.name, 'testfile2.txt')
self.assertEqual(page.path, 'remove/testfile2.txt')
self.assertEqual(page.back, self.url_for('browse', path='remove'))
basename = os.path.basename(self.base)
page = self.post('remove', path='remove/testfile2.txt')
self.assertEqual(page.path, '%s/remove' % basename)
self.assertEqual(page.files, self.remove_files)
os.mkdir(os.path.join(self.remove, 'directory'))
page = self.post('remove', path='remove/directory')
self.assertEqual(page.path, '%s/remove' % basename)
self.assertEqual(page.files, self.remove_files)
self.assertRaises(
Page404Exception,
self.get, 'remove', path='start/testfile.txt'
)
self.assertRaises(
Page404Exception,
self.post, 'remove', path='start/testfile.txt'
)
self.app.config['directory_remove'] = None
self.assertRaises(
Page404Exception,
self.get, 'remove', path='remove/testfile.txt'
)
self.app.config['directory_remove'] = self.remove
self.assertRaises(
Page404Exception,
self.get, 'remove', path='../shall_not_pass.txt'
)
def test_download_file(self):
binfile = os.path.join(self.base, 'testfile.bin')
bindata = bytes(range(256))
with open(binfile, 'wb') as f:
f.write(bindata)
page = self.get('download_file', path='testfile.bin')
os.remove(binfile)
self.assertEqual(page.data, bindata)
self.assertRaises(
Page404Exception,
self.get, 'download_file', path='../shall_not_pass.txt'
)
def test_download_directory(self):
binfile = os.path.join(self.start, 'testfile.bin')
bindata = bytes(range(256))
with open(binfile, 'wb') as f:
f.write(bindata)
page = self.get('download_directory', path='start')
os.remove(binfile)
iodata = io.BytesIO(page.data)
with tarfile.open('start.tgz', mode="r:gz", fileobj=iodata) as tgz:
tgz_files = [
member.name
for member in tgz.getmembers()
if member.name
]
tgz_files.sort()
self.assertEqual(tgz_files, ['testfile.bin', 'testfile.txt'])
self.assertRaises(
Page404Exception,
self.get, 'download_directory', path='../../shall_not_pass'
)
def test_upload(self):
def genbytesio(nbytes, encoding):
c = unichr if PY_LEGACY else chr # noqa
return io.BytesIO(''.join(map(c, range(nbytes))).encode(encoding))
files = {
'testfile.txt': genbytesio(127, 'ascii'),
'testfile.bin': genbytesio(255, 'utf-8'),
}
output = self.post(
'upload',
path='upload',
data={
'file%d' % n: (data, name)
for n, (name, data) in enumerate(files.items())
}
)
expected_links = sorted(
self.url_for('open', path='upload/%s' % i)
for i in files
)
self.assertEqual(sorted(output.files), expected_links)
self.clear(self.upload)
def test_upload_duplicate(self):
files = (
('testfile.txt', 'something'),
('testfile.txt', 'something_new'),
)
output = self.post(
'upload',
path='upload',
data={
'file%d' % n: (io.BytesIO(data.encode('ascii')), name)
for n, (name, data) in enumerate(files)
}
)
self.assertEqual(len(files), len(output.files))
first_file_url = self.url_for('open', path='upload/%s' % files[0][0])
self.assertIn(first_file_url, output.files)
file_contents = []
for filename in os.listdir(self.upload):
with open(os.path.join(self.upload, filename), 'r') as f:
file_contents.append(f.read())
file_contents.sort()
expected_file_contents = sorted(content for filename, content in files)
self.assertEqual(file_contents, expected_file_contents)
self.clear(self.upload)
def test_sort(self):
files = {
'a.txt': 'aaa',
'b.png': 'aa',
'c.zip': 'a'
}
by_name = [
self.url_for('open', path=name)
for name in sorted(files)
]
by_name_desc = list(reversed(by_name))
by_type = [
self.url_for('open', path=name)
for name in sorted(files, key=lambda x: mimetypes.guess_type(x)[0])
]
by_type_desc = list(reversed(by_type))
by_size = [
self.url_for('open', path=name)
for name in sorted(files, key=lambda x: len(files[x]))
]
by_size_desc = list(reversed(by_size))
for name, content in files.items():
path = os.path.join(self.base, name)
with open(path, 'wb') as f:
f.write(content.encode('ascii'))
client = self.app.test_client()
page = self.get('browse', client=client)
self.assertListEqual(page.files, by_name)
self.assertRaises(
Page302Exception,
self.get, 'sort', property='text', client=client
)
page = self.get('browse', client=client)
self.assertListEqual(page.files, by_name)
page = self.get('sort', property='-text', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_name_desc)
page = self.get('sort', property='type', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_type)
page = self.get('sort', property='-type', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_type_desc)
page = self.get('sort', property='size', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_size)
page = self.get('sort', property='-size', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_size_desc)
        # We're unable to test modified sorting due to filesystem time resolution
page = self.get('sort', property='modified', client=client,
follow_redirects=True)
page = self.get('sort', property='-modified', client=client,
follow_redirects=True)
def test_sort_cookie_size(self):
files = [chr(i) * 255 for i in range(97, 123)]
for name in files:
path = os.path.join(self.base, name)
os.mkdir(path)
client = self.app.test_client()
for name in files:
page = self.get('sort', property='modified', path=name,
client=client, status_code=302)
for cookie in page.response.headers.getlist('set-cookie'):
if cookie.startswith('browse-sorting='):
self.assertLessEqual(len(cookie), 4000)
class TestFile(unittest.TestCase):
module = browsepy.file
def setUp(self):
self.app = browsepy.app # FIXME
self.workbench = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workbench)
test_utils.clear_flask_context()
def textfile(self, name, text):
tmp_txt = os.path.join(self.workbench, name)
with open(tmp_txt, 'w') as f:
f.write(text)
return tmp_txt
def test_iter_listdir(self):
directory = self.module.Directory(path=self.workbench, app=self.app)
tmp_txt = self.textfile('somefile.txt', 'a')
content = list(directory._listdir(precomputed_stats=True))
self.assertEqual(len(content), 1)
self.assertEqual(content[0].size, '1 B')
self.assertEqual(content[0].path, tmp_txt)
content = list(directory._listdir(precomputed_stats=False))
self.assertEqual(len(content), 1)
self.assertEqual(content[0].size, '1 B')
self.assertEqual(content[0].path, tmp_txt)
def test_check_forbidden_filename(self):
cff = self.module.check_forbidden_filename
self.assertFalse(cff('myfilename', destiny_os='posix'))
self.assertTrue(cff('.', destiny_os='posix'))
self.assertTrue(cff('..', destiny_os='posix'))
self.assertTrue(cff('::', destiny_os='posix'))
self.assertTrue(cff('/', destiny_os='posix'))
self.assertTrue(cff('com1', destiny_os='nt'))
self.assertTrue(cff('LPT2', destiny_os='nt'))
self.assertTrue(cff('nul', destiny_os='nt'))
self.assertFalse(cff('com1', destiny_os='posix'))
def test_secure_filename(self):
sf = self.module.secure_filename
self.assertEqual(sf('a/a'), 'a')
self.assertEqual(sf('//'), '')
self.assertEqual(sf('c:\\', destiny_os='nt'), '')
self.assertEqual(sf('c:\\COM1', destiny_os='nt'), '')
self.assertEqual(sf('COM1', destiny_os='nt'), '')
self.assertEqual(sf('COM1', destiny_os='posix'), 'COM1')
def test_mime(self):
f = self.module.File('non_working_path', app=self.app)
self.assertEqual(f.mimetype, 'application/octet-stream')
f = self.module.File('non_working_path_with_ext.txt', app=self.app)
self.assertEqual(f.mimetype, 'text/plain')
tmp_txt = self.textfile('ascii_text_file', 'ascii text')
# test file command
f = self.module.File(tmp_txt, app=self.app)
self.assertEqual(f.mimetype, 'text/plain; charset=us-ascii')
self.assertEqual(f.type, 'text/plain')
self.assertEqual(f.encoding, 'us-ascii')
# test non-working file command
bad_path = os.path.join(self.workbench, 'path')
os.mkdir(bad_path)
bad_file = os.path.join(bad_path, 'file')
with open(bad_file, 'w') as f:
f.write('#!/usr/bin/env bash\nexit 1\n')
os.chmod(bad_file, os.stat(bad_file).st_mode | stat.S_IEXEC)
old_path = os.environ['PATH']
os.environ['PATH'] = bad_path + os.pathsep + old_path
f = self.module.File(tmp_txt, app=self.app)
self.assertEqual(f.mimetype, 'application/octet-stream')
os.environ['PATH'] = old_path
def test_size(self):
test_file = os.path.join(self.workbench, 'test.csv')
with open(test_file, 'w') as f:
f.write(',\n' * 512)
f = self.module.File(test_file, app=self.app)
default = self.app.config['use_binary_multiples']
self.app.config['use_binary_multiples'] = True
self.assertEqual(f.size, '1.00 KiB')
self.app.config['use_binary_multiples'] = False
self.assertEqual(f.size, '1.02 KB')
self.app.config['use_binary_multiples'] = default
self.assertEqual(f.encoding, 'default')
def test_properties(self):
empty_file = os.path.join(self.workbench, 'empty.txt')
open(empty_file, 'w').close()
f = self.module.File(empty_file, app=self.app)
self.assertEqual(f.name, 'empty.txt')
self.assertEqual(f.can_download, True)
self.assertEqual(f.can_remove, False)
self.assertEqual(f.can_upload, False)
self.assertEqual(f.parent.path, self.workbench)
self.assertEqual(f.is_directory, False)
def test_choose_filename(self):
f = self.module.Directory(self.workbench, app=self.app)
first_file = os.path.join(self.workbench, 'testfile.txt')
filename = f.choose_filename('testfile.txt', attempts=0)
self.assertEqual(filename, 'testfile.txt')
open(first_file, 'w').close()
filename = f.choose_filename('testfile.txt', attempts=0)
self.assertNotEqual(filename, 'testfile (2).txt')
filename = f.choose_filename('testfile.txt', attempts=2)
self.assertEqual(filename, 'testfile (2).txt')
second_file = os.path.join(self.workbench, filename)
open(second_file, 'w').close()
filename = f.choose_filename('testfile.txt', attempts=3)
self.assertEqual(filename, 'testfile (3).txt')
filename = f.choose_filename('testfile.txt', attempts=2)
self.assertNotEqual(filename, 'testfile (2).txt')
class TestFileFunctions(unittest.TestCase):
module = browsepy.file
def test_fmt_size(self):
fnc = self.module.fmt_size
for n, unit in enumerate(self.module.binary_units):
self.assertEqual(fnc(2**(10 * n)), (1, unit))
for n, unit in enumerate(self.module.standard_units):
self.assertEqual(fnc(1000**n, False), (1, unit))
def test_secure_filename(self):
self.assertEqual(self.module.secure_filename('/path'), 'path')
self.assertEqual(self.module.secure_filename('..'), '')
self.assertEqual(self.module.secure_filename('::'), '')
self.assertEqual(self.module.secure_filename('\0'), '_')
self.assertEqual(self.module.secure_filename('/'), '')
self.assertEqual(self.module.secure_filename('C:\\'), '')
self.assertEqual(
self.module.secure_filename('COM1.asdf', destiny_os='nt'),
'')
self.assertEqual(
self.module.secure_filename('\xf1', fs_encoding='ascii'),
'_')
if PY_LEGACY:
expected = unicode('\xf1', encoding='latin-1') # noqa
self.assertEqual(
self.module.secure_filename('\xf1', fs_encoding='utf-8'),
expected)
self.assertEqual(
self.module.secure_filename(expected, fs_encoding='utf-8'),
expected)
else:
self.assertEqual(
self.module.secure_filename('\xf1', fs_encoding='utf-8'),
'\xf1')
def test_alternative_filename(self):
self.assertEqual(
self.module.alternative_filename('test', 2),
'test (2)')
self.assertEqual(
self.module.alternative_filename('test.txt', 2),
'test (2).txt')
self.assertEqual(
self.module.alternative_filename('test.tar.gz', 2),
'test (2).tar.gz')
self.assertEqual(
self.module.alternative_filename('test.longextension', 2),
'test (2).longextension')
self.assertEqual(
self.module.alternative_filename('test.tar.tar.tar', 2),
'test.tar (2).tar.tar')
self.assertNotEqual(
self.module.alternative_filename('test'),
'test')
def test_relativize_path(self):
self.assertEqual(
self.module.relativize_path('/parent/child', '/parent'),
'child')
self.assertEqual(
self.module.relativize_path(
'/grandpa/parent/child',
'/grandpa/parent'),
'child')
self.assertEqual(
self.module.relativize_path('/grandpa/parent/child', '/grandpa'),
'parent/child')
self.assertRaises(
browsepy.OutsideDirectoryBase,
self.module.relativize_path, '/other', '/parent'
)
def test_under_base(self):
self.assertTrue(
self.module.check_under_base('C:\\as\\df\\gf', 'C:\\as\\df', '\\'))
self.assertTrue(self.module.check_under_base('/as/df', '/as', '/'))
self.assertFalse(
self.module.check_under_base('C:\\cc\\df\\gf', 'C:\\as\\df', '\\'))
self.assertFalse(self.module.check_under_base('/cc/df', '/as', '/'))
class TestMain(unittest.TestCase):
module = browsepy.__main__
def setUp(self):
self.app = browsepy.app
self.parser = self.module.ArgParse()
self.base = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.base)
def test_defaults(self):
result = self.parser.parse_args([])
self.assertEqual(result.host, '127.0.0.1')
self.assertEqual(result.port, 8080)
self.assertEqual(result.directory, os.getcwd())
self.assertEqual(result.initial, None)
self.assertEqual(result.removable, None)
self.assertEqual(result.upload, None)
self.assertEqual(result.plugin, [])
def test_params(self):
plugins = ['plugin_1', 'plugin_2', 'namespace.plugin_3']
result = self.parser.parse_args([
'127.1.1.1',
'5000',
'--directory=%s' % self.base,
'--initial=%s' % self.base,
'--removable=%s' % self.base,
'--upload=%s' % self.base,
'--plugin=%s' % ','.join(plugins),
])
self.assertEqual(result.host, '127.1.1.1')
self.assertEqual(result.port, 5000)
self.assertEqual(result.directory, self.base)
self.assertEqual(result.initial, self.base)
self.assertEqual(result.removable, self.base)
self.assertEqual(result.upload, self.base)
self.assertEqual(result.plugin, plugins)
result = self.parser.parse_args([
'--directory=%s' % self.base,
'--initial='
])
self.assertEqual(result.host, '127.0.0.1')
self.assertEqual(result.port, 8080)
self.assertEqual(result.directory, self.base)
self.assertIsNone(result.initial)
self.assertIsNone(result.removable)
self.assertIsNone(result.upload)
self.assertListEqual(result.plugin, [])
self.assertRaises(
SystemExit,
self.parser.parse_args,
['--directory=%s' % __file__]
)
def test_main(self):
params = {}
self.module.main(
argv=[],
run_fnc=lambda app, **kwargs: params.update(kwargs)
)
defaults = {
'host': '127.0.0.1',
'port': 8080,
'debug': False,
'threaded': True
}
params_subset = {k: v for k, v in params.items() if k in defaults}
self.assertEqual(defaults, params_subset)
class TestMimetypePluginManager(unittest.TestCase):
module = browsepy.manager
def test_mimetype(self):
manager = self.module.MimetypePluginManager()
self.assertEqual(
manager.get_mimetype('potato'),
'application/octet-stream'
)
self.assertEqual(
manager.get_mimetype('potato.txt'),
'text/plain'
)
manager.register_mimetype_function(
lambda x: 'application/xml' if x == 'potato' else None
)
self.assertEqual(
manager.get_mimetype('potato.txt'),
'text/plain'
)
self.assertEqual(
manager.get_mimetype('potato'),
'application/xml'
)
class TestPlugins(unittest.TestCase):
app_module = browsepy
manager_module = browsepy.manager
def setUp(self):
self.app = self.app_module.app
self.original_namespaces = self.app.config['plugin_namespaces']
self.plugin_namespace, self.plugin_name = __name__.rsplit('.', 1)
self.app.config['plugin_namespaces'] = (self.plugin_namespace,)
self.manager = self.manager_module.PluginManager(self.app)
def tearDown(self):
self.app.config['plugin_namespaces'] = self.original_namespaces
self.manager.clear()
test_utils.clear_flask_context()
def test_manager(self):
self.manager.load_plugin(self.plugin_name)
self.assertTrue(self.manager._plugin_loaded)
endpoints = sorted(
action.endpoint
for action in self.manager.get_widgets(FileMock(mimetype='a/a'))
)
self.assertEqual(
endpoints,
sorted(('test_x_x', 'test_a_x', 'test_x_a', 'test_a_a'))
)
self.assertEqual(
self.app.view_functions['test_plugin.root'](),
'test_plugin_root'
)
self.assertIn('test_plugin', self.app.blueprints)
self.assertRaises(
self.manager_module.PluginNotFoundError,
self.manager.load_plugin,
'non_existent_plugin_module'
)
self.assertRaises(
self.manager_module.InvalidArgumentError,
self.manager.register_widget
)
def test_namespace_prefix(self):
self.assertTrue(self.manager.import_plugin(self.plugin_name))
self.app.config['plugin_namespaces'] = (
self.plugin_namespace + '.test_',
)
self.assertTrue(self.manager.import_plugin('module'))
def register_plugin(manager):
manager._plugin_loaded = True
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_x_x',
filter=lambda f: True
)
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_a_x',
filter=lambda f: f.mimetype.startswith('a/')
)
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_x_a',
filter=lambda f: f.mimetype.endswith('/a')
)
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_a_a',
filter=lambda f: f.mimetype == 'a/a'
)
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_b_x',
filter=lambda f: f.mimetype.startswith('b/')
)
test_plugin_blueprint = flask.Blueprint(
'test_plugin',
__name__,
url_prefix='/test_plugin_blueprint')
test_plugin_blueprint.add_url_rule(
'/',
endpoint='root',
view_func=lambda: 'test_plugin_root')
manager.register_blueprint(test_plugin_blueprint)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "22e52ea033b4923109caa003172d5e5d",
"timestamp": "",
"source": "github",
"line_count": 1089,
"max_line_length": 79,
"avg_line_length": 32.97337006427915,
"alnum_prop": 0.563774089339423,
"repo_name": "dolphx/browsepy",
"id": "93ecc206c46decd59275f88736b9d7acf95ea7fe",
"size": "35955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "browsepy/tests/test_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19053"
},
{
"name": "HTML",
"bytes": "79062"
},
{
"name": "JavaScript",
"bytes": "2119"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "Python",
"bytes": "156285"
}
],
"symlink_target": ""
} |
"""
Module for working with Data Observatory tokens
.. module:: carto.DoToken
:platform: Unix, Windows
:synopsis: Module for working with Data Observatory tokens
.. moduleauthor:: Simon Martin <simon@carto.com>
"""
from pyrestcli.fields import CharField, BooleanField
from .paginators import CartoPaginator
from .resources import WarnResource, Manager
API_VERSION = "v4"
API_ENDPOINT = "api/{api_version}/do/token"
class DoToken(WarnResource):
"""
Represents a Data Observatory token in CARTO.
    .. warning:: Non-public API. It may change without prior notice
"""
access_token = CharField()
bq_public_project = CharField()
gcp_execution_project = CharField()
bq_project = CharField()
bq_dataset = CharField()
gcs_bucket = CharField()
instant_licensing = BooleanField()
class Meta:
collection_endpoint = API_ENDPOINT.format(api_version=API_VERSION)
name_field = "access_token"
class DoTokenManager(Manager):
"""
Manager for the DoToken class.
    .. warning:: Non-public API. It may change without prior notice
"""
resource_class = DoToken
json_collection_attribute = None
paginator_class = CartoPaginator
def get(self):
return super(DoTokenManager, self).get('token')
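# Illustrative usage sketch (not part of the original module). The auth
# client below comes from carto-python's public API; the account values are
# placeholders, and the DO token endpoint itself is non-public and may
# change without prior notice.
#
#   from carto.auth import APIKeyAuthClient
#   from carto.do_token import DoTokenManager
#
#   auth_client = APIKeyAuthClient(api_key='YOUR_API_KEY',
#                                  base_url='https://YOUR_USER.carto.com/')
#   do_token = DoTokenManager(auth_client).get()
#   print(do_token.access_token, do_token.instant_licensing)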
| {
"content_hash": "9a7e52547c663932d41ed7090cdac85d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 74,
"avg_line_length": 24.865384615384617,
"alnum_prop": 0.6937354988399071,
"repo_name": "CartoDB/carto-python",
"id": "48d78b91d7a6a53ee5a2b9dbb36ad8b842bdf78b",
"size": "1293",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "carto/do_token.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "148350"
}
],
"symlink_target": ""
} |
import functools
import IECore
import Gaffer
import GafferUI
from . import _Algo
from ._RowsPlugValueWidget import _RowsPlugValueWidget
from ._SectionChooser import _SectionChooser
# _Menus
# ------
#
# Code that provides extensions to standard Gaffer menus.
# Plug context menu
# =================
def __spreadsheetSubMenu( plug, command, showSections = True ) :
menuDefinition = IECore.MenuDefinition()
if isinstance( plug.node(), Gaffer.ScriptNode ) :
spreadsheetParent = plug.node()
else :
spreadsheetParent = plug.node().parent()
alreadyConnected = []
other = []
for spreadsheet in Gaffer.Spreadsheet.Range( spreadsheetParent ) :
if spreadsheet == plug.ancestor( Gaffer.Spreadsheet ) :
continue
connected = False
for output in spreadsheet["out"] :
for destination in output.outputs() :
if destination.node() == plug.node() :
connected = True
break
if connected :
break
if connected :
alreadyConnected.append( spreadsheet )
else :
other.append( spreadsheet )
if not alreadyConnected and not other :
menuDefinition.append(
"/No Spreadsheets Available",
{
"active" : False,
}
)
return menuDefinition
alreadyConnected.sort( key = Gaffer.GraphComponent.getName )
other.sort( key = Gaffer.GraphComponent.getName )
def addItems( spreadsheet ) :
sectionNames = _SectionChooser.sectionNames( spreadsheet["rows"].source() ) if showSections else None
if sectionNames :
for sectionName in sectionNames :
menuDefinition.append(
"/{}/{}".format( spreadsheet.getName(), sectionName ),
{
"command" : functools.partial( command, spreadsheet, sectionName )
}
)
else :
menuDefinition.append(
"/" + spreadsheet.getName(),
{
"command" : functools.partial( command, spreadsheet )
}
)
if alreadyConnected and other :
menuDefinition.append( "/__ConnectedDivider__", { "divider" : True, "label" : "Connected" } )
for spreadsheet in alreadyConnected :
addItems( spreadsheet )
if alreadyConnected and other :
menuDefinition.append( "/__OtherDivider__", { "divider" : True, "label" : "Other" } )
for spreadsheet in other :
addItems( spreadsheet )
return menuDefinition
def __prependSpreadsheetCreationMenuItems( menuDefinition, plugValueWidget ) :
plug = plugValueWidget.getPlug()
if not isinstance( plug, Gaffer.ValuePlug ) :
return
node = plug.node()
if node is None or node.parent() is None :
return
if plug.getInput() is not None or not plugValueWidget._editable() or Gaffer.MetadataAlgo.readOnly( plug ) :
return
plugsAndSuffixes = [ ( plug, "" ) ]
ancestorPlug = plug.parent()
while isinstance( ancestorPlug, Gaffer.Plug ) :
if any( p.getInput() is not None for p in Gaffer.Plug.RecursiveRange( ancestorPlug ) ) :
break
if Gaffer.Metadata.value( ancestorPlug, "spreadsheet:plugMenu:includeAsAncestor" ) :
label = Gaffer.Metadata.value( ancestorPlug, "spreadsheet:plugMenu:ancestorLabel" )
label = label or ancestorPlug.typeName().rpartition( ":" )[2]
plugsAndSuffixes.append( ( ancestorPlug, " ({})".format( label ) ) )
ancestorPlug = ancestorPlug.parent()
for plug, suffix in reversed( plugsAndSuffixes ) :
menuDefinition.prepend( "/__SpreadsheetCreationDivider__" + suffix, { "divider" : True } )
menuDefinition.prepend(
"/Add to Spreadsheet{}".format( suffix ),
{
"subMenu" : functools.partial( __spreadsheetSubMenu, plug, functools.partial( _Algo.addToSpreadsheet, plug ) )
}
)
menuDefinition.prepend(
"/Create Spreadsheet{}...".format( suffix ),
{
"command" : functools.partial( _Algo.createSpreadsheet, plug )
}
)
def __plugPopupMenu( menuDefinition, plugValueWidget ) :
## \todo We're prepending rather than appending so that we get the ordering we
# want with respect to the Expression menu items. Really we need external control
# over this ordering.
__prependSpreadsheetCreationMenuItems( menuDefinition, plugValueWidget )
GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu, scoped = False )
for plugType in ( Gaffer.TransformPlug, Gaffer.Transform2DPlug ) :
Gaffer.Metadata.registerValue( plugType, "spreadsheet:plugMenu:includeAsAncestor", True )
Gaffer.Metadata.registerValue( plugType, "spreadsheet:plugMenu:ancestorLabel", "Transform" )
# NodeEditor tool menu
# ====================
def __nodeEditorToolMenu( nodeEditor, node, menuDefinition ) :
if node.parent() is None :
return
activeRowNamesConnection = Gaffer.Metadata.value( node, "ui:spreadsheet:activeRowNamesConnection" )
if not activeRowNamesConnection :
return
else :
activeRowNamesConnection = node.descendant( activeRowNamesConnection )
assert( activeRowNamesConnection is not None )
selectorContextVariablePlug = Gaffer.Metadata.value( node, "ui:spreadsheet:selectorContextVariablePlug" )
if selectorContextVariablePlug :
selectorContextVariablePlug = node.descendant( selectorContextVariablePlug )
assert( selectorContextVariablePlug is not None )
selectorValue = Gaffer.Metadata.value( node, "ui:spreadsheet:selectorValue" )
assert( not ( selectorValue and selectorContextVariablePlug ) )
menuDefinition.append( "/SpreadsheetDivider", { "divider" : True } )
itemsActive = (
not nodeEditor.getReadOnly()
and not Gaffer.MetadataAlgo.readOnly( node )
and not Gaffer.MetadataAlgo.readOnly( activeRowNamesConnection )
and activeRowNamesConnection.getInput() is None
)
menuDefinition.append(
"/Create Spreadsheet...",
{
"command" : functools.partial( _Algo.createSpreadsheetForNode, node, activeRowNamesConnection, selectorContextVariablePlug, selectorValue ),
"active" : itemsActive
}
)
connectCommand = functools.partial( _Algo.connectPlugToRowNames, activeRowNamesConnection, selectorContextVariablePlug, selectorValue )
menuDefinition.append(
"/Connect to Spreadsheet",
{
"subMenu" : functools.partial( __spreadsheetSubMenu, activeRowNamesConnection, connectCommand, showSections = False ),
"active" : itemsActive
}
)
GafferUI.NodeEditor.toolMenuSignal().connect( __nodeEditorToolMenu, scoped = False )
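# Illustrative sketch (not part of the original module): a node advertises
# support for the NodeEditor tool-menu items above by registering metadata
# that names the plug receiving the row names. The node type and plug names
# here are hypothetical.
#
#   Gaffer.Metadata.registerValue(
#       MyNode, "ui:spreadsheet:activeRowNamesConnection", "rowNames"
#   )
#   Gaffer.Metadata.registerValue(
#       MyNode, "ui:spreadsheet:selectorValue", "${someContextVariable}"
#   )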
| {
"content_hash": "fd93609c0a0727160a95bf422b5d5470",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 143,
"avg_line_length": 29.70873786407767,
"alnum_prop": 0.7271241830065359,
"repo_name": "ImageEngine/gaffer",
"id": "4d5467dcefe92f7af63e0866db2fef122a1fc94d",
"size": "7923",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/GafferUI/SpreadsheetUI/_Menus.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4486"
},
{
"name": "C++",
"bytes": "5353598"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Python",
"bytes": "5296193"
},
{
"name": "Shell",
"bytes": "8008"
},
{
"name": "Slash",
"bytes": "41159"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.conf import settings
from django.core.mail import mail_admins
import cronjobs
import requests
from kitsune.sumo.tasks import measure_queue_lag
@cronjobs.register
def enqueue_lag_monitor_task():
"""Fires a task that measures the queue lag."""
measure_queue_lag.delay(datetime.now())
@cronjobs.register
def send_postatus_errors():
"""Looks at postatus file and sends an email with errors"""
# Gah! Don't do this on stage!
if settings.STAGE:
return
def new_section(line):
return (line.startswith('dennis ') or
line.startswith('Totals') or
line.startswith('BUSTED') or
line.startswith('COMPILED'))
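    # Illustrative sketch of the file shape this parser assumes (inferred
    # from the checks above and below; the real postatus format may differ):
    #
    #   2015-01-01 12:00:00        <- datestamp, first line
    #   dennis version ...         <- section header, skipped
    #   >>> some/locale/file.po    <- starts an error block
    #   Error: ...                 <- collected into errordata
    #   Totals ...                 <- ends the error block
    #   COMPILED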
# Download the postatus file
postatus = requests.get('https://support.mozilla.org/media/postatus.txt')
# Parse it to see which locales have issues
lines = postatus.content.splitlines()
datestamp = lines.pop(0)
errordata = []
while lines:
line = lines.pop(0)
if line.startswith('>>> '):
while lines and not new_section(line):
errordata.append(line)
line = lines.pop(0)
# If we have errors to send, send them
if errordata:
mail_admins(
subject='[SUMO] postatus errors %s' % datestamp,
message=(
'These are the errors in the SUMO postatus file.\n' +
'See http://postatus.paas.allizom.org/p/SUMO for details\n' +
'and bug generation links.\n\n' +
'\n'.join(errordata)
)
)
| {
"content_hash": "ebaaafe8f954f8dae42ae078c5d9cd98",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 28.03448275862069,
"alnum_prop": 0.6014760147601476,
"repo_name": "brittanystoroz/kitsune",
"id": "177cf7601a8a909c92e4420bb40e0ac168aafeda",
"size": "1626",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kitsune/sumo/cron.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2502"
},
{
"name": "CSS",
"bytes": "288314"
},
{
"name": "HTML",
"bytes": "619431"
},
{
"name": "JavaScript",
"bytes": "773968"
},
{
"name": "Python",
"bytes": "2824944"
},
{
"name": "Shell",
"bytes": "12697"
},
{
"name": "Smarty",
"bytes": "1957"
}
],
"symlink_target": ""
} |
import unittest
from integrationtest_support import IntegrationTestSupport
class Test (IntegrationTestSupport):
def test(self):
self.write_build_file("""
from pybuilder.core import init, task
name = "integration-test"
default_task = "any_task"
@init(environments="test_environment")
def initialize (project):
setattr(project, "INITIALIZER_EXECUTED", True)
@task
def any_task (project):
if not hasattr(project, "INITIALIZER_EXECUTED"):
raise Exception("Initializer has not been executed")
""")
reactor = self.prepare_reactor()
reactor.build(environments=["test_environment"])
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "04055e530e20c5ba48294c110ff204d4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 60,
"avg_line_length": 22.533333333333335,
"alnum_prop": 0.6982248520710059,
"repo_name": "onesfreedom/pybuilder",
"id": "c324a73a1e957d10089de6748f2f45dbbeaf080e",
"size": "1345",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/integrationtest/python/should_invoke_initializer_when_environments_match_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "3144"
},
{
"name": "Python",
"bytes": "550265"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class IsValid(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the IsValid Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(IsValid, self).__init__(temboo_session, '/Library/Utilities/TokenStorage/IsValid')
def new_input_set(self):
return IsValidInputSet()
def _make_result_set(self, result, path):
return IsValidResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return IsValidChoreographyExecution(session, exec_id, path)
class IsValidInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the IsValid
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((required, string) The name of the token to check.)
"""
super(IsValidInputSet, self)._set_input('Name', value)
class IsValidResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the IsValid Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Valid(self):
"""
Retrieve the value for the "Valid" output from this Choreo execution. ((boolean) Returns true or false depending on whether the token is valid or not.)
"""
return self._output.get('Valid', None)
class IsValidChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return IsValidResultSet(response, path)
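# Illustrative usage sketch (not part of the original module). The session
# credentials and stored token name below are placeholders.
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = IsValid(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Name('myStoredToken')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Valid())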
| {
"content_hash": "0d1c0f44dbcd70585a03bd891c6a9bd0",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 159,
"avg_line_length": 35.589285714285715,
"alnum_prop": 0.6984445559458103,
"repo_name": "jordanemedlock/psychtruths",
"id": "2ee735cda6219e43f52ca957f37f31ae6b7cd1df",
"size": "2875",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "temboo/Library/Utilities/TokenStorage/IsValid.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
from random import *
from voronoi import *
import wx
class Canvas(wx.Panel):
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, name = "Canvas"):
wx.Panel.__init__(self, parent, id, pos, size, wx.NO_BORDER, name)
self.Bind(wx.EVT_PAINT, self._OnPaint)
self.Bind(wx.EVT_LEFT_DOWN, self._OnClick)
self.Bind(wx.EVT_KEY_UP, self._OnChar)
self.delaunay = None
self.voronoi = None
self.initPoints(10, size)
self.initTriangulation()
def initPoints(self, count, limits = (200, 200)):
self.points = []
## for i in range(count):
## x = random()*limits[0]
## y = random()*limits[1]
## self.points.append(Point(x,y))
self.points += [Point(100, 120), Point(100,180), Point(50, 150)]
def initTriangulation(self):
self.triangulation = Triangulation(self.points)
self.delaunay = Delaunay(self.triangulation)
self.voronoi = Voronoi(self.triangulation, (0,0, self.GetSize()[0], self.GetSize()[1]))
def _OnChar(self, evt):
if evt.GetKeyCode() == wx.WXK_RETURN:
saveFileDialog = wx.FileDialog(self, "Save SVG file", "", "",
"SVG files (*.svg)|*.svg", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if saveFileDialog.ShowModal() == wx.ID_CANCEL:
return
self.voronoi.saveSVG(saveFileDialog.GetPath(), self.points)
def _OnPaint(self, evt):
dc = wx.BufferedPaintDC(self)
dc.Clear()
if self.delaunay is None or self.voronoi is None:
return
dc.SetPen(wx.Pen(wx.LIGHT_GREY, 1))
for e in self.triangulation.edges:
a = self.points[e.a]
b = self.points[e.b]
dc.DrawLine(a.x, a.y, b.x, b.y)
#dc.SetPen(wx.Pen(wx.GREEN, 3))
#for e in self.voronoi.edges:
# a = self.voronoi.points[e.a]
# b = self.voronoi.points[e.b]
# dc.DrawLine(a.x, a.y, b.x, b.y)
dc.SetPen(wx.Pen(wx.RED, 1))
dc.SetBrush(wx.Brush(wx.RED))
for p in self.points:
dc.DrawCircle(p.x, p.y, 2)
def _OnClick(self, evt):
pos = evt.GetPosition()
self.points.append(Point(pos.x, pos.y))
self.initTriangulation()
self.Refresh(False, wx.RectS(self.GetSize()))
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "Voronoi/Delaunay Test", size = (400, 400))
whatever = Canvas(self, wx.ID_ANY, pos = (5,5), size = self.GetSize())
whatever.SetFocus()
app = wx.PySimpleApp()
frame = MyFrame()
frame.Show(True)
app.MainLoop()
| {
"content_hash": "5ff7ef88677ca054fe1326ce1df0bfcf",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 98,
"avg_line_length": 34.51851851851852,
"alnum_prop": 0.5507868383404864,
"repo_name": "shinjin-cr/voronoi",
"id": "8e9d3e69c56d9aea6cd539c1a8ebda1251ca4228",
"size": "2814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wxVoronoi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12674"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.interpolate as interpolate
import scipy.optimize as optimize
import kcorrect.response
import multiprocessing
class Fitter(object):
"""Nonnegative SED fitting object
Parameters
----------
abcorrect : bool
correct maggies to AB (default False)
responses : list of str
names of responses to use
templates : kcorrect.template.Template object
templates to use
redshift_range : list of np.float32
minimum and maximum redshifts (default [0., 1.])
nredshift : int or np.int32
number of redshifts in interpolation grid
Attributes
----------
abcorrect : bool
correct maggies to AB
Amatrix : scipy.interpolate.interp1d object
interpolator to produce A matrix (set to None until set_Amatrix called)
responses : list of str
names of responses to use
nredshift : int or np.int32
number of redshifts in interpolation grid
redshift_range : list of np.float32
minimum and maximum redshifts (default [0., 1.])
redshifts : ndarray of np.float32
redshifts in grid
templates : kcorrect.template.Template object
templates to use
"""
def __init__(self, responses=None, templates=None, redshift_range=[0., 1.],
nredshift=2000, abcorrect=False):
self.abcorrect = abcorrect
self.Amatrix = None
self.responses = responses
self.templates = templates
self.nredshift = nredshift
self.redshift_range = np.float32(redshift_range)
self.redshifts = (self.redshift_range[0] +
(self.redshift_range[1] - self.redshift_range[0]) *
np.arange(nredshift, dtype=np.float32) /
np.float32(nredshift - 1))
return
def _calc_Amatrix(self, responses=None):
"""Create an A matrix and return it (don't set attribute)"""
# Make sure responses are loaded
f = kcorrect.response.ResponseDict()
for response in responses:
f.load_response(response)
# Create rmatrix
rmatrix = np.zeros((self.nredshift,
len(responses),
self.templates.nsed), dtype=np.float32)
for iz, z in enumerate(self.redshifts):
self.templates.set_redshift(redshift=z)
for ir, response in enumerate(responses):
rmatrix[iz, ir, :] = f[response].project(sed=self.templates)
# Now create Amatrix interpolator
Amatrix = interpolate.interp1d(self.redshifts, rmatrix, axis=0)
# Return templates to z=0
self.templates.set_redshift(redshift=0.)
return(Amatrix)
def to_ab(self, maggies=None, ivar=None):
"""Convert input maggies to AB
Parameters
----------
maggies : ndarray of np.float32
array of fluxes in standard SDSS system
ivar : ndarray of np.float32
inverse variances in standard SDSS system (optional)
Returns
-------
ab_maggies : ndarray of np.float32
array of fluxes converted to AB
ab_ivar : ndarray of np.float32
inverse variances converted to AB (if ivar input)
Notes
-----
This method just returns maggies and/or ivar unchanged,
as for this object we expect AB maggies on input.
fit_coeffs() calls this on its inputs.
"""
if(ivar is not None):
return(maggies, ivar)
else:
return(maggies)
def set_Amatrix(self):
"""Set Amatrix, interpolator for the design matrix"""
self.Amatrix = self._calc_Amatrix(responses=self.responses)
return
def _fit_coeffs(self, redshift=None, maggies=None, ivar=None):
"""Fit coefficients to single object
Parameters
----------
redshift : np.float32
redshift
maggies : ndarray of np.float32
fluxes of each band in AB maggies
ivar : ndarray of np.float32
inverse variance of each band
Returns
-------
coeffs : ndarray of np.float32
coefficients for each template
"""
default_zeros = np.zeros(self.templates.nsed, dtype=np.float32)
inverr = np.sqrt(ivar)
inverr_matrix = np.transpose(np.tile(inverr, (self.templates.nsed, 1)))
try:
A = inverr_matrix * self.Amatrix(redshift)
except ValueError:
return(default_zeros)
b = maggies * inverr
try:
coeffs, rnorm = optimize.nnls(A, b)
except RuntimeError:
coeffs, rnorm = optimize.nnls(A, b, maxiter=A.shape[1] * 100)
return(coeffs)
def _process_inputs(self, redshift=None, maggies=None, ivar=None,
coeffs=None):
"""Returns whether input should be an array, and casts everything right
Parameters
----------
redshift : a quantity or ndarray
input redshift defining whether we have an array or scalar
maggies : a quantity or ndarray, or None
input maggies
ivar : a quantity or ndarray, or None
input ivar
coeffs : a quantity or ndarray, or None
input coeffs
Returns
-------
array : bool
True if it is an array
n : int
number of redshifts (1 if not an array)
redshift : np.float32 or [n] ndarray thereof
redshift to use
maggies : ndarray of np.float32 or None
AB maggies to use
ivar : ndarray of np.float32 or None
ivar to use
coeffs : ndarray of np.float32 or None
coeffs to use
Notes
-----
Uses redshift to determine whether we should treat the calculation
as an array or scalar. Then checks the appropriate sizes (based
on the number of responses and the number of seds in the object).
If maggies, ivar, or coeffs are None, then the corresponding
output is None.
Applies this class's to_ab() function on maggies and ivar to
convert to return AB maggies.
"""
if(redshift is None):
raise ValueError("redshift must be defined")
redshift = np.float32(redshift)
if(len(redshift.shape) == 0):
array = False
n = 1
elif(len(redshift.shape) == 1):
array = True
n = redshift.size
else:
raise TypeError("redshift must be 0- or 1-dimensional")
if(maggies is not None):
maggies = np.float32(maggies)
if(array):
if(len(maggies.shape) != 2):
raise ValueError("maggies must be 2-dimensional if redshift is 1-dimensional")
if(maggies.shape[0] != n):
raise ValueError("maggies must have values for each redshift")
if(maggies.shape[1] != len(self.responses)):
raise ValueError("maggies must have one value for each band")
else:
if(len(maggies.shape) != 1):
raise ValueError("maggies must be 1-dimensional if redshift is 0-dimensional")
if(maggies.shape[0] != len(self.responses)):
raise ValueError("maggies must have values for each band")
if(ivar is not None):
ivar = np.float32(ivar)
if(array):
if(len(ivar.shape) != 2):
raise ValueError("ivar must be 2-dimensional if redshift is 1-dimensional")
if(ivar.shape[0] != n):
raise ValueError("ivar must have values for each redshift")
if(ivar.shape[1] != len(self.responses)):
raise ValueError("ivar must have values for each band")
else:
if(len(ivar.shape) != 1):
raise ValueError("ivar must be 1-dimensional if redshift is 0-dimensional")
if(ivar.shape[0] != len(self.responses)):
raise ValueError("ivar must have values for each band")
if(coeffs is not None):
coeffs = np.float32(coeffs)
if(array):
if(len(coeffs.shape) != 2):
raise ValueError("coeffs must be 2-dimensional if redshift is 1-dimensional")
if(coeffs.shape[0] != n):
raise ValueError("ivar must have values for each redshift")
if(coeffs.shape[1] != self.templates.nsed):
raise ValueError("ivar must have values for each template")
else:
if(len(coeffs.shape) != 1):
raise ValueError("coeffs must be 1-dimensional if redshift is 0-dimensional")
if(coeffs.shape[0] != self.templates.nsed):
raise ValueError("ivar must have values for each band")
if(self.abcorrect):
if(maggies is not None):
if(ivar is not None):
maggies, ivar = self.to_ab(maggies=maggies, ivar=ivar)
else:
maggies = self.to_ab(maggies=maggies)
return(array, n, redshift, maggies, ivar, coeffs)
def fit_coeffs(self, redshift=None, maggies=None, ivar=None):
"""Fit coefficients
Parameters
----------
redshift : np.float32 or ndarray of np.float32
redshift(s)
maggies : ndarray of np.float32
fluxes of each band in AB maggies
ivar : ndarray of np.float32
inverse variance of each band
Returns
-------
coeffs : ndarray of np.float32
coefficients for each template
Notes
-----
maggies are assumed to be Galactic-extinction corrected already.
Calls this class's to_ab() method on input maggies.
If redshift is an array, even with just one element, coeffs is
returned as an [nredshift, ntemplate] array.
Otherwise coeffs is returned as an [ntemplate] array.
Occasionally the optimizer will report "NNLS quitting on
iteration count." This indicates that the default number of
iterations for scipy.optimize.nnls was not enough. Under these
conditions, this code tries a much larger number of
iterations. If that still fails, you will receive a traceback.
"""
if(redshift is None):
raise TypeError("Must specify redshift to fit coefficients")
# Check a bunch of things about the input arrays
(array, n, redshift, maggies, ivar,
dumdum) = self._process_inputs(redshift=redshift, maggies=maggies,
ivar=ivar)
# Call single
if(n == 1):
coeffs = self._fit_coeffs(redshift=np.squeeze(redshift),
maggies=np.squeeze(maggies),
ivar=np.squeeze(ivar))
if(array):
coeffs = coeffs.reshape(1, len(coeffs))
return(coeffs)
# Loop for multiple
coeffs = np.zeros((n, self.templates.nsed), dtype=np.float32)
for i, r in enumerate(redshift):
coeffs[i, :] = self._fit_coeffs(redshift=r, maggies=maggies[i, :],
ivar=ivar[i, :])
return(coeffs)
def _reconstruct(self, Amatrix=None, redshift=None, coeffs=None,
band_shift=0.):
"""Reconstruct maggies associated with coefficients
Parameters
----------
Amatrix : scipy.interpolate.interp1d
interpolator to use for Amatrix
redshift : np.float32
redshift
coeffs : ndarray of np.float32
coefficients
band_shift : np.float32
blueshift to apply to reconstructed bandpasses
Returns
-------
maggies : ndarray of np.float32
maggies in each band
Notes
-----
Amatrix should be an interpolator over redshift that returns
an array that is number of responses by number of templates.
"""
default_zeros = np.zeros(len(self.responses), dtype=np.float32)
# Check a bunch of things about the input arrays
(array, n, redshift, d1, d2,
coeffs) = self._process_inputs(redshift=redshift, coeffs=coeffs)
# Consider blueward shift of bandpass due to redshift
# of observation and due to band_shift
shift = (1. + redshift) * (1. + band_shift) - 1.
# Calculate maggies
try:
A = Amatrix(shift)
except ValueError:
return(default_zeros)
if(array):
maggies = np.einsum('ijk,ki->ij', A,
coeffs.T.reshape(self.templates.nsed, n))
else:
maggies = A.dot(coeffs)
        # For band_shift != 0, this normalization is required so that an
        # AB-standard source is left unaltered.
maggies = maggies / (1. + band_shift)
return(maggies)
def reconstruct(self, redshift=None, coeffs=None, band_shift=0.):
"""Reconstruct AB maggies associated with coefficients
Parameters
----------
redshift : np.float32 or ndarray of np.float32
redshift
coeffs : ndarray of np.float32
coefficients
band_shift : np.float32
blueshift to apply to reconstructed bandpasses
Returns
-------
maggies : ndarray of np.float32
AB maggies in each band
Notes
-----
Returns AB maggies, but note that if to_ab() is non-trivial,
these may not be directly comparable to the input maggies.
"""
return(self._reconstruct(Amatrix=self.Amatrix, redshift=redshift,
coeffs=coeffs, band_shift=band_shift))
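    # Usage sketch (illustrative): given coefficients from fit_coeffs(),
    # reconstruct the fitted AB maggies, optionally for blueshifted
    # bandpasses (e.g. band_shift=0.1 for SDSS-style 0.1-shifted bands):
    #
    #   maggies = f.reconstruct(redshift=0.1, coeffs=coeffs)
    #   maggies01 = f.reconstruct(redshift=0.1, coeffs=coeffs, band_shift=0.1)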
| {
"content_hash": "aafdacbd51dc0569b4abd118f053464b",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 98,
"avg_line_length": 31.988636363636363,
"alnum_prop": 0.5686678507992895,
"repo_name": "blanton144/kcorrect",
"id": "774c884f75f0006fbb6783f8ba1be16877725e13",
"size": "14221",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/kcorrect/fitter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1172"
},
{
"name": "Fortran",
"bytes": "1949"
},
{
"name": "HTML",
"bytes": "24302"
},
{
"name": "IDL",
"bytes": "2990"
},
{
"name": "Perl",
"bytes": "11151"
},
{
"name": "PostScript",
"bytes": "198128"
},
{
"name": "Prolog",
"bytes": "623"
},
{
"name": "PureBasic",
"bytes": "34165"
},
{
"name": "Python",
"bytes": "118518"
},
{
"name": "Shell",
"bytes": "2039"
}
],
"symlink_target": ""
} |
"""
protoc.py: Protoc Builder for SCons
This Builder invokes protoc to generate C++ and Python
from a .proto file.
NOTE: Java is not currently supported.
Based off https://bitbucket.org/scons/scons/wiki/ProtocBuilder
"""
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
from SCons.Script import File, Dir, Action, Builder
import os.path
def ProtocGenerator(com, comstr, postfixes, deps=[]):
_protoc_builder = Builder(
action = Action(com, comstr),
src_suffix='$PROTOCSRCSUFFIX',
)
def Protoc(env, target_dir, source, genfiles=None):
target_dir = Dir(target_dir)
targets = []
# TODO: multiple sources
if genfiles:
            # Allow manually-specified generated files, for when the system
            # can't determine them automatically
targets = [File(genfile) for genfile in genfiles]
else:
filename = os.path.basename(str(source))
modulename = os.path.splitext(filename)[0]
for postfix in postfixes:
targets.append(target_dir.File(modulename + postfix))
# must run in source dir, otherwise protoc creates source dir hierarchy in target dir
pb_env = env.Clone()
pb_env['PBTARGET'] = target_dir
targets = _protoc_builder.__call__(pb_env, target=targets, source=source)
for target in targets:
for dep in deps:
env.Depends(target, dep)
return targets
return Protoc
def generate(env):
"""Add Builders and construction variables for protoc to an Environment."""
env['PROTOC_GENERATOR'] = ProtocGenerator # allow other tools the use of this feature
env['PROTOC'] = 'protoc'
env['PROTOCFLAGS'] = SCons.Util.CLVar('')
env['PROTOCPROTOPATH'] = SCons.Util.CLVar('')
env['PROTOCSRCSUFFIX'] = '.proto'
# protoc must run in source dir, otherwise protoc creates source dir hierarchy in target dir
# Scons's native chdir=1 isn't used since that breaks parallel builds
env['PROTOCCOM'] = '$PROTOC -I${SOURCES.dir} ${["-I%s"%x for x in PROTOCPROTOPATH]} $PROTOCFLAGS ${SOURCES}'
env.AddMethod(ProtocGenerator('$PROTOCPYTHONCOM', '$PROTOCPYTHONCOMSTR', ['_pb2.py']), 'ProtocPython')
env['PROTOCPYTHONCOM'] = '$PROTOCCOM --python_out=$PBTARGET'
env.AddMethod(ProtocGenerator('$PROTOCCPPCOM', '$PROTOCCPPCOMSTR', ['.pb.cc', '.pb.h']), 'ProtocCpp')
env['PROTOCCPPCOM'] = '$PROTOCCOM --cpp_out=$PBTARGET'
env.AddMethod(ProtocGenerator('$PROTOCJAVACOM', '$PROTOCJAVACOMSTR', ['.java']), 'ProtocJava')
env['PROTOCJAVACOM'] = '$PROTOCCOM --java_out=$PBTARGET'
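    # Usage sketch in an SConstruct (hypothetical file names):
    #
    #   env = Environment(tools=['default', 'protoc'])
    #   env.ProtocCpp('build/proto', 'messages.proto')     # messages.pb.cc/.pb.h
    #   env.ProtocPython('build/proto', 'messages.proto')  # messages_pb2.py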
def exists(env):
    return env.Detect('protoc')
| {
"content_hash": "28eef6e3df36b4dda402f70b7f235d2c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 116,
"avg_line_length": 34.02597402597402,
"alnum_prop": 0.6870229007633588,
"repo_name": "CALISCO/common-proto",
"id": "20647665af186c95a0b358d9968dd6e892509498",
"size": "2629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "site_scons/site_tools/protoc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "29003"
},
{
"name": "Protocol Buffer",
"bytes": "95958"
},
{
"name": "Python",
"bytes": "11425"
}
],
"symlink_target": ""
} |
import json
import time
import urllib
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_grayhatwarfare(SpiderFootPlugin):
meta = {
'name': "Grayhat Warfare",
'summary': "Find bucket names matching the keyword extracted from a domain from Grayhat API.",
'flags': ["apikey"],
'useCases': ["Footprint", "Investigate", "Passive"],
'categories': ["Reputation Systems"],
'dataSource': {
'website': "https://buckets.grayhatwarfare.com/",
'model': "FREE_AUTH_LIMITED",
'references': [
"https://buckets.grayhatwarfare.com/docs/api/v1"
],
'apiKeyInstructions': [
"Visit https://grayhatwarfare.com/register",
"Register an account",
"Visit https://grayhatwarfare.com/account/settings",
"Your API key is listed under 'Api Key'",
],
'favIcon': "https://buckets.grayhatwarfare.com/assets/template/images/favicon.png",
'logo': "https://buckets.grayhatwarfare.com/assets/images/logo/logo-sm.png",
            'description': "It is a searchable database of open buckets. "
                "Has up to a million results for each bucket. "
                "Full text search with binary logic (can search for keywords and also stopwords).",
}
}
# Default options
opts = {
'api_key': '',
'per_page': 1000,
'max_pages': 2,
'pause': 1
}
# Option descriptions
optdescs = {
'api_key': 'Grayhat Warfare API key.',
'per_page': 'Maximum number of results per page (Max: 1000).',
'max_pages': 'Maximum number of pages to fetch.',
'pause': 'Number of seconds to wait between each API call.'
}
results = None
errorState = False
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return [
"DOMAIN_NAME",
]
# What events this module produces
def producedEvents(self):
return [
'CLOUD_STORAGE_BUCKET',
'CLOUD_STORAGE_BUCKET_OPEN',
'RAW_RIR_DATA'
]
# Query Grayhat Warfare
def query(self, keyword, start):
params = urllib.parse.urlencode({
'keywords': keyword.encode('raw_unicode_escape'),
'access_token': self.opts['api_key']
})
headers = {
'Accept': 'application/json',
}
res = self.sf.fetchUrl(
f"https://buckets.grayhatwarfare.com/api/v1/buckets/{start}/{self.opts['per_page']}?{params}",
headers=headers,
timeout=15,
useragent=self.opts['_useragent'],
verify=True
)
time.sleep(self.opts['pause'])
if res['code'] != "200":
self.error("Unable to fetch data from Grayhat Warfare API.")
self.errorState = True
return None
if res['content'] is None:
self.debug('No response from Grayhat Warfare API.')
return None
try:
return json.loads(res['content'])
except Exception as e:
self.debug(f"Error processing JSON response: {e}")
return None
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
return
if self.errorState:
return
self.results[eventData] = True
self.debug(f"Received event, {eventName}, from {srcModuleName}")
if self.opts['api_key'] == "":
self.error("You enabled sfp_grayhatwarfare but did not set an API key!")
self.errorState = True
return
currentIndex = 0
currentPage = 0
maxPages = self.opts['max_pages']
perPage = self.opts['per_page']
keyword = self.sf.domainKeyword(eventData, self.opts['_internettlds'])
while currentPage < maxPages:
currentIndex = currentPage * perPage
if self.checkForStop():
return
if self.errorState:
break
data = self.query(keyword=keyword, start=currentIndex)
if not data:
return
for row in data.get('buckets'):
bucketName = row.get('bucket')
bucketKeyword = bucketName.split('.')[0]
self.debug(bucketKeyword)
if bucketKeyword.startswith(keyword) or bucketKeyword.endswith(keyword):
evt = SpiderFootEvent('CLOUD_STORAGE_BUCKET', bucketName, self.__name__, event)
self.notifyListeners(evt)
evt = SpiderFootEvent('CLOUD_STORAGE_BUCKET_OPEN', f"{bucketName}: {row.get('fileCount')} files found.", self.__name__, event)
self.notifyListeners(evt)
evt = SpiderFootEvent('RAW_RIR_DATA', str(row), self.__name__, event)
self.notifyListeners(evt)
currentPage += 1
if data.get('buckets_count') < perPage:
break
# End of sfp_grayhatwarfare class
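# Pagination sketch for the query() calls above (behavior inferred from
# this module's code, not from the Grayhat Warfare API docs). With the
# default per_page=1000 and max_pages=2:
#
#   page 0 -> start index 0    -> /api/v1/buckets/0/1000?...
#   page 1 -> start index 1000 -> /api/v1/buckets/1000/1000?...
#
# The loop also stops early once the reported 'buckets_count' drops
# below 'per_page'.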
| {
"content_hash": "1a0eea97ca6fca805c82860e38294f84",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 146,
"avg_line_length": 31.970760233918128,
"alnum_prop": 0.5560636546552039,
"repo_name": "smicallef/spiderfoot",
"id": "7fc2ef53853bab78032f138f48bd93c95a124800",
"size": "5885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/sfp_grayhatwarfare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
from pyquery import PyQuery
from django.core.urlresolvers import reverse
import debug # pyflakes:ignore
from ietf.utils.test_utils import TestCase
from ietf.doc.models import StateType
class StateHelpTest(TestCase):
def test_state_index(self):
url = reverse('ietf.help.views.state_index')
r = self.client.get(url)
q = PyQuery(r.content)
content = [ e.text for e in q('#content table td a ') ]
names = StateType.objects.values_list('slug', flat=True)
# The following doesn't cover all doc types, only a selection
for name in names:
            if '-' not in name:
self.assertIn(name, content)
| {
"content_hash": "b341f0c8d15dcee9f731f844819d0f18",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 69,
"avg_line_length": 30.869565217391305,
"alnum_prop": 0.6225352112676056,
"repo_name": "wpjesus/codematch",
"id": "6f03e433eb660f770bad4450aa6622d6f46e474c",
"size": "710",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ietf/help/tests_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "139492"
},
{
"name": "CSS",
"bytes": "733662"
},
{
"name": "Groff",
"bytes": "2349"
},
{
"name": "HTML",
"bytes": "2149789"
},
{
"name": "JavaScript",
"bytes": "1003699"
},
{
"name": "Makefile",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "17323"
},
{
"name": "PostScript",
"bytes": "35"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "4536908"
},
{
"name": "Shell",
"bytes": "74113"
},
{
"name": "TeX",
"bytes": "2556"
}
],
"symlink_target": ""
} |
from django.contrib.auth import logout
from django_roa.db.exceptions import ROAException
from django_roa.remoteauth.backends import reset_api_auth
from .models import Profile
def get_profile(request):
if not hasattr(request, '_cached_profile'):
try:
request._cached_profile = Profile.objects.get(user_id=request.user.id)
except Profile.DoesNotExist:
request._cached_profile = None
return request._cached_profile
class ProfileMiddleware(object):
def process_request(self, request):
try:
user = request.user
if user.is_authenticated():
request.profile = get_profile(request)
request.user.profile = request.profile
except ROAException as exc:
            # The user is authenticated, so an API request was attempted,
            # but no API auth credentials were provided. We can't use the
            # API, so log the user out.
if exc.status_code == 401:
reset_api_auth()
logout(request)
else:
raise exc
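# To enable this middleware, it would be listed in the Django settings
# (dotted path inferred from this module's location and may differ):
#
#   MIDDLEWARE_CLASSES = (
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'aaee_front.apps.main.middleware.ProfileMiddleware',
#   )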
| {
"content_hash": "0412e35712678eec5a2b3ff6165db3af",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 82,
"avg_line_length": 34.54838709677419,
"alnum_prop": 0.6237161531279178,
"repo_name": "aaee-enscachan/aaee-front",
"id": "3de8ea7d1f8fed2053e2c4d2334139d5d15d8eb4",
"size": "1071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aaee_front/apps/main/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "70101"
},
{
"name": "JavaScript",
"bytes": "11861"
},
{
"name": "Python",
"bytes": "50245"
},
{
"name": "Shell",
"bytes": "233"
}
],
"symlink_target": ""
} |
import climate
import matplotlib.pyplot as plt
import numpy as np
import theanets
from utils import load_cifar, plot_layers, plot_images
logging = climate.get_logger('cifar')
g = climate.add_group('CIFAR Example')
g.add_argument('--features', type=int, default=0, metavar='N',
help='train a model using N^2 hidden-layer features')
K = 655 # this retains 99% of the variance in the cifar images.
def pca(dataset):
mean = dataset[:3000].mean(axis=0)
logging.info('computing whitening transform')
x = dataset[:3000] - mean
vals, vecs = np.linalg.eigh(np.dot(x.T, x) / len(x))
vals = vals[::-1]
vecs = vecs[:, ::-1]
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]
def whiten(x):
return np.dot(x, np.dot(vecs, np.diag(1. / vals)))
def color(z):
return np.dot(z, np.dot(np.diag(vals), vecs.T))
return whiten, color
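# Round-trip sketch (illustrative): whiten() projects data onto the top-K
# eigenvectors with unit variance, and color() approximately inverts it:
#
#   whiten, color = pca(train)
#   z = whiten(valid)   # [n, K] whitened codes
#   x_hat = color(z)    # approximate reconstruction in pixel space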
def main(args):
train, valid, _ = load_cifar()
whiten, color = pca(train)
feat = args.features or int(np.sqrt(2 * K))
n = theanets.Autoencoder([K, feat ** 2, K])
n.train(whiten(train), whiten(valid), input_noise=1, train_batches=313)
plot_layers([
color(n.find('hid1', 'w').get_value().T).T,
color(n.find('out', 'w').get_value())], channels=3)
plt.tight_layout()
plt.show()
valid = whiten(valid[:100])
plot_images(color(valid), 121, 'Sample data', channels=3)
plot_images(color(n.predict(valid)), 122,
'Reconstructed data', channels=3)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
climate.call(main)
| {
"content_hash": "854f2b6a5288248bb04b043239e5b21d",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 75,
"avg_line_length": 25.41269841269841,
"alnum_prop": 0.6083697688944409,
"repo_name": "devdoer/theanets",
"id": "0e65436564a632c98d6fd44c1fe832ed19387f8e",
"size": "1624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/cifar-autoencoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "227443"
}
],
"symlink_target": ""
} |
"""
Django settings for milo_test project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+$-q9n8elkwlc3fjh80ijom(h3%yr!x9v+=hj##(_+v!80i)^t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'app.accounts'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'files', 'media')
MEDIA_URL = '/media/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'files', 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# REST settings
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',
'rest_framework.permissions.AllowAny',
),
'PAGE_SIZE': 10
}
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = (
'GET',
'POST',
'PUT',
'PATCH',
'DELETE',
'OPTIONS'
) | {
"content_hash": "c214abfb37e8ac63d9cda6084413fbf6",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 91,
"avg_line_length": 24.71698113207547,
"alnum_prop": 0.6737913486005089,
"repo_name": "klekhaav/django-rest-custom-user",
"id": "9b5364c6a154a73247726c39a614325e36287ad6",
"size": "3930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1938"
},
{
"name": "Python",
"bytes": "14200"
}
],
"symlink_target": ""
} |
from calvin.actor.actor import Actor, manage, condition
class Identity(Actor):
"""
forward a token unchanged
Inputs:
token : a token
Outputs:
token : the same token
"""
@manage(['dump'])
def init(self, dump=False):
self.dump = dump
def log(self, data):
print "%s<%s>: %s" % (self.__class__.__name__, self.id, data)
@condition(['token'], ['token'])
def donothing(self, input):
if self.dump:
self.log(input)
return (input, )
action_priority = (donothing, )
test_set = [
{
'in': {'token': [1, 2, 3]},
'out': {'token': [1, 2, 3]}
}
]
| {
"content_hash": "956667930992421f56cfaec4ac0213f4",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 69,
"avg_line_length": 21.375,
"alnum_prop": 0.49415204678362573,
"repo_name": "EricssonResearch/calvin-base",
"id": "e071e8f040bdf582004f39746a4acd5bb5f0e5cc",
"size": "1289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calvin/tests/security_test/dht_store/std/Identity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
} |
"""Test Mongo Connector's behavior when its source MongoDB system is
experiencing a rollback.
"""
import os
import sys
import time
from pymongo.read_preferences import ReadPreference
from pymongo import MongoClient
sys.path[0:0] = [""]
from mongo_connector.util import retry_until_ok
from mongo_connector.locking_dict import LockingDict
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.oplog_manager import OplogThread
from tests import unittest, STRESS_COUNT
from tests.util import assert_soon
from tests.setup_cluster import ReplicaSet
class TestRollbacks(unittest.TestCase):
def tearDown(self):
self.repl_set.stop()
def setUp(self):
# Create a new oplog progress file
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
# Start a replica set
self.repl_set = ReplicaSet().start()
# Connection to the replica set as a whole
self.main_conn = self.repl_set.client()
# Connection to the primary specifically
self.primary_conn = self.repl_set.primary.client()
# Connection to the secondary specifically
self.secondary_conn = self.repl_set.secondary.client(
read_preference=ReadPreference.SECONDARY_PREFERRED)
# Wipe any test data
self.main_conn["test"]["mc"].drop()
# Oplog thread
doc_manager = DocManager()
oplog_progress = LockingDict()
self.opman = OplogThread(
primary_client=self.main_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
ns_set=["test.mc"]
)
def test_single_target(self):
"""Test with a single replication target"""
self.opman.start()
# Insert first document with primary up
self.main_conn["test"]["mc"].insert({"i": 0})
self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
# Make sure the insert is replicated
secondary = self.secondary_conn
assert_soon(lambda: secondary["test"]["mc"].count() == 1,
"first write didn't replicate to secondary")
# Kill the primary
self.repl_set.primary.stop(destroy=False)
# Wait for the secondary to be promoted
assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"])
# Insert another document. This will be rolled back later
retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
self.assertEqual(secondary["test"]["mc"].count(), 2)
# Wait for replication to doc manager
assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
"not all writes were replicated to doc manager")
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
"restarted primary never resumed primary status")
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(secondary.admin.command,
'replSetGetStatus')['myState'] == 2,
"restarted secondary never resumed secondary status")
assert_soon(lambda:
retry_until_ok(self.main_conn.test.mc.find().count) > 0,
"documents not found after primary/secondary restarted")
# Only first document should exist in MongoDB
self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)
# Same case should hold for the doc manager
doc_manager = self.opman.doc_managers[0]
assert_soon(lambda: len(doc_manager._search()) == 1,
'documents never rolled back in doc manager.')
self.assertEqual(doc_manager._search()[0]["i"], 0)
# cleanup
self.opman.join()
def test_many_targets(self):
"""Test with several replication targets"""
# OplogThread has multiple doc managers
doc_managers = [DocManager(), DocManager(), DocManager()]
self.opman.doc_managers = doc_managers
self.opman.start()
# Insert a document into each namespace
self.main_conn["test"]["mc"].insert({"i": 0})
self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
# Make sure the insert is replicated
secondary = self.secondary_conn
assert_soon(lambda: secondary["test"]["mc"].count() == 1,
"first write didn't replicate to secondary")
# Kill the primary
self.repl_set.primary.stop(destroy=False)
# Wait for the secondary to be promoted
assert_soon(lambda: secondary.admin.command("isMaster")['ismaster'],
'secondary was never promoted')
# Insert more documents. This will be rolled back later
# Some of these documents will be manually removed from
# certain doc managers, to emulate the effect of certain
# target systems being ahead/behind others
secondary_ids = []
for i in range(1, 10):
secondary_ids.append(
retry_until_ok(self.main_conn["test"]["mc"].insert,
{"i": i}))
self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)
# Wait for replication to the doc managers
def docmans_done():
for dm in self.opman.doc_managers:
if len(dm._search()) != 10:
return False
return True
assert_soon(docmans_done,
"not all writes were replicated to doc managers")
# Remove some documents from the doc managers to simulate
# uneven replication
ts = self.opman.doc_managers[0].get_last_doc()['_ts']
for id in secondary_ids[8:]:
self.opman.doc_managers[1].remove(id, 'test.mc', ts)
for id in secondary_ids[2:]:
self.opman.doc_managers[2].remove(id, 'test.mc', ts)
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(lambda: primary_admin.command("isMaster")['ismaster'],
'restarted primary never resumed primary status')
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(secondary.admin.command,
'replSetGetStatus')['myState'] == 2,
"restarted secondary never resumed secondary status")
assert_soon(lambda:
retry_until_ok(self.primary_conn.test.mc.find().count) > 0,
"documents not found after primary/secondary restarted")
# Only first document should exist in MongoDB
self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)
# Give OplogThread some time to catch up
time.sleep(10)
# Same case should hold for the doc managers
for dm in self.opman.doc_managers:
self.assertEqual(len(dm._search()), 1)
self.assertEqual(dm._search()[0]["i"], 0)
self.opman.join()
def test_deletions(self):
"""Test rolling back 'd' operations"""
self.opman.start()
# Insert a document, wait till it replicates to secondary
self.main_conn["test"]["mc"].insert({"i": 0})
self.main_conn["test"]["mc"].insert({"i": 1})
self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 2)
assert_soon(lambda: self.secondary_conn["test"]["mc"].count() == 2,
"first write didn't replicate to secondary")
# Kill the primary, wait for secondary to be promoted
self.repl_set.primary.stop(destroy=False)
assert_soon(lambda: self.secondary_conn["admin"]
.command("isMaster")["ismaster"])
# Delete first document
retry_until_ok(self.main_conn["test"]["mc"].remove, {"i": 0})
self.assertEqual(self.secondary_conn["test"]["mc"].count(), 1)
# Wait for replication to doc manager
assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 1,
"delete was not replicated to doc manager")
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
"restarted primary never resumed primary status")
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(self.secondary_conn.admin.command,
'replSetGetStatus')['myState'] == 2,
"restarted secondary never resumed secondary status")
# Both documents should exist in mongo
assert_soon(lambda: retry_until_ok(
self.main_conn["test"]["mc"].count) == 2)
# Both document should exist in doc manager
doc_manager = self.opman.doc_managers[0]
docs = list(doc_manager._search())
self.assertEqual(len(docs), 2,
"Expected two documents, but got %r" % docs)
self.opman.join()
def test_stressed_rollback(self):
"""Stress test for a rollback with many documents."""
self.opman.start()
c = self.main_conn.test.mc
docman = self.opman.doc_managers[0]
c.insert({'i': i} for i in range(STRESS_COUNT))
assert_soon(lambda: c.count() == STRESS_COUNT)
condition = lambda: len(docman._search()) == STRESS_COUNT
assert_soon(condition, ("Was expecting %d documents in DocManager, "
"but %d found instead."
% (STRESS_COUNT, len(docman._search()))))
primary_conn = self.repl_set.primary.client()
self.repl_set.primary.stop(destroy=False)
new_primary_conn = self.repl_set.secondary.client()
admin = new_primary_conn.admin
assert_soon(
lambda: retry_until_ok(admin.command, "isMaster")['ismaster'])
retry_until_ok(c.insert,
[{'i': str(STRESS_COUNT + i)}
for i in range(STRESS_COUNT)])
assert_soon(lambda: len(docman._search()) == c.count())
self.repl_set.secondary.stop(destroy=False)
self.repl_set.primary.start()
admin = primary_conn.admin
assert_soon(
lambda: retry_until_ok(admin.command, "isMaster")['ismaster'])
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(c.count) == STRESS_COUNT)
assert_soon(condition, ("Was expecting %d documents in DocManager, "
"but %d found instead."
% (STRESS_COUNT, len(docman._search()))))
self.opman.join()
| {
"content_hash": "b5a468c4c262670a080e00ed5cb89542",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 79,
"avg_line_length": 39.21649484536083,
"alnum_prop": 0.5932351910269892,
"repo_name": "keithhigbee/mongo-connector",
"id": "57686555da808833f697477fb28a1c06acfd98c2",
"size": "11412",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "tests/test_rollbacks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "318298"
},
{
"name": "Shell",
"bytes": "2029"
}
],
"symlink_target": ""
} |
import itertools
import os
def getDefaultFlags():
return [
'-Wall',
'-Wextra',
'-Wno-unused-result',
'-Weffc++',
'--pipe',
'-std=c++11',
'-x', 'c++',
]
def getSystemIncludeFlags():
return getIncludePaths('-isystem', [
'/usr/include',
'/usr/local/include',
'/usr/include/eigen3',
'/opt/ros/indigo/include',
])
def getBazelWorkspace(current):
while len(current) > 1:
current = os.path.dirname(current)
if os.path.exists(os.path.join(current, 'WORKSPACE')):
return current
return None
def getLocalIncludeFlags(filename):
paths = [
'.',
'./include',
]
bazel_workspace = getBazelWorkspace(filename)
if bazel_workspace != None:
paths += [
bazel_workspace,
os.path.join(bazel_workspace, 'bazel-genfiles'),
]
return getIncludePaths('-I', paths)
def getIncludePaths(prefix, paths):
paths = filter(lambda path: os.path.exists(path), set(paths))
return list(itertools.chain.from_iterable(
itertools.izip([prefix] * len(paths), paths)))
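# e.g. getIncludePaths('-I', ['/usr/include', '/does/not/exist']) keeps
# only the existing paths and interleaves the prefix: ['-I', '/usr/include']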
def IsHeaderFile(filename):
extension = os.path.splitext(filename)[1]
return extension in ['.hpp', '.hxx', '.hh', '.h', '.inl', '.impl']
def FlagsForFile(filename, **kwargs):
return {
'flags': getDefaultFlags() + getSystemIncludeFlags() + \
getLocalIncludeFlags(filename),
'do_cache': True
}
# import itertools
# import os
# import rospkg
# rospack = rospkg.RosPack()
# def getDefaultFlags():
# return [
# '-Wall',
# '-Wextra',
# '-Wno-unused-result',
# '-Weffc++',
# '--pipe',
# '-std=c++11',
# '-x', 'c++',
# ]
# def getSystemIncludeFlags():
# return getIncludePaths('-isystem', [
# '/usr/include',
# '/usr/local/include',
# ])
# def getRosIncludeFlags():
# paths = []
# ros_workspace = os.path.expandvars('$ROS_WORKSPACE') + '/devel/include'
# if os.path.isdir(ros_workspace):
# paths += [ros_workspace]
# paths += [rospack.get_path(path) + '/include' for path in rospack.list()]
# if os.path.isdir('/opt/ros'):
# paths += [
# os.path.join(path + 'include')
# for path in reversed(os.listdir('/opt/ros'))
# if os.path.isdir(path) and os.path.isdir(path + '/include')
# ]
# return getIncludePaths('-isystem', paths)
# def getDefaultFlags():
# return [
# '-Wall',
# '-Wextra',
# '-Wno-unused-result',
# '-Weffc++',
# '--pipe',
# '-std=c++11',
# '-x', 'c++',
# ]
# def getSystemIncludeFlags():
# return getIncludePaths('-isystem', [
# '/usr/include',
# '/usr/local/include',
# '/usr/include/eigen3',
# '/opt/ros/indigo/include',
# ])
# def getBazelWorkspace(current):
# while len(current) > 0:
# current = os.path.dirname(current)
# if os.path.exists(os.path.join(current, 'WORKSPACE')):
# return current
# return None
# def getLocalIncludeFlags(filename):
# return getIncludePaths('-I', [
# '.',
# './include',
# getBazelWorkspace(filename),
# os.path.join(getBazelWorkspace(filename), 'bazel-genfiles'),
# ])
# def getIncludePaths(prefix, paths):
# paths = filter(lambda path: os.path.exists(path), set(paths))
# return list(itertools.chain.from_iterable(
# itertools.izip([prefix] * len(paths), paths)))
# def IsHeaderFile(filename):
# extension = os.path.splitext(filename)[1]
# return extension in ['.hpp', '.hxx', '.hh', '.h', '.inl', '.impl']
# def FlagsForFile(filename, **kwargs):
# return {
# 'flags': \
# getDefaultFlags() + \
## getSystemIncludeFlags() + \
## getRosIncludeFlags() + \
# getLocalIncludeFlags(filename),
# 'do_cache': True
# }
| {
"content_hash": "707aacefcac487e9e1346d6eea80b7bf",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 79,
"avg_line_length": 26.424836601307188,
"alnum_prop": 0.5419243136284937,
"repo_name": "Peaches491/dotfiles",
"id": "018234910a60708bebf342c2b24a0b4c8a0c6126",
"size": "4067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vim/ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "40"
},
{
"name": "Python",
"bytes": "16920"
},
{
"name": "Shell",
"bytes": "104175"
},
{
"name": "Vim Snippet",
"bytes": "5841"
},
{
"name": "Vim script",
"bytes": "65438"
}
],
"symlink_target": ""
} |
import unittest
import transaction
from pyramid import testing
from .models import DBSession
class TestMyViewSuccessCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
from .models import (
Base,
MyModel,
)
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
model = MyModel(name='one', value=55)
DBSession.add(model)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_passing_view(self):
from .views import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info['one'].name, 'one')
self.assertEqual(info['project'], 'SessionTest')
class TestMyViewFailureCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
from .models import (
Base,
MyModel,
)
DBSession.configure(bind=engine)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_failing_view(self):
from .views import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info.status_int, 500) | {
"content_hash": "1290f91f8f3d69223dd1335f5c254471",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 56,
"avg_line_length": 27.30909090909091,
"alnum_prop": 0.6091877496671105,
"repo_name": "Akagi201/learning-python",
"id": "caeb74cea3ae1c47044cee1eb9438bac81ee902c",
"size": "1502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyramid/SessionTest/sessiontest/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "125"
},
{
"name": "CSS",
"bytes": "82315"
},
{
"name": "HTML",
"bytes": "16738"
},
{
"name": "JavaScript",
"bytes": "253132"
},
{
"name": "Jupyter Notebook",
"bytes": "3666"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "336950"
},
{
"name": "Rich Text Format",
"bytes": "49342"
},
{
"name": "Shell",
"bytes": "4498"
}
],
"symlink_target": ""
} |
import os
messages_json = os.path.join(os.path.dirname(__file__), 'messages.json')
with open(messages_json, 'r') as message_file:
message_data = message_file.read()
ver = message_data.splitlines()[-2].split(':')[0].strip().replace('"', '')
version = tuple([int(i) for i in ver.split('.')])
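# This assumes the version sits on the second-to-last line of
# messages.json, e.g. a (hypothetical) tail of:
#
#     "2.1.11": "messages/2.1.11.txt"
#   }
#
# which yields ver == '2.1.11' and version == (2, 1, 11).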
| {
"content_hash": "b51a4d0b7f6dfe1b6c3284cf720af499",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 74,
"avg_line_length": 37,
"alnum_prop": 0.6486486486486487,
"repo_name": "j5shi/ST3_Config",
"id": "351f53269b12eb7849de8e957d00bb93859b15c2",
"size": "425",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Packages/Anaconda/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1194072"
}
],
"symlink_target": ""
} |
import sys
from mixbox.binding_utils import *
from . import cybox_common
from . import win_handle_object
class WaitableTimerType(cybox_common.BaseObjectPropertyType):
"""WaitableTimerType specifies Windows waitable timer types via a union
of the WaitableTimerTypeEnum type and the atomic xs:string type.
Its base type is the CybOX Core cybox_common.BaseObjectPropertyType, for
permitting complex (i.e. regular-expression based)
specifications.This attribute is optional and specifies the
expected type for the value of the specified property."""
subclass = None
superclass = cybox_common.BaseObjectPropertyType
def __init__(self, obfuscation_algorithm_ref=None, refanging_transform_type=None, has_changed=None, delimiter='##comma##', pattern_type=None, datatype='string', refanging_transform=None, is_case_sensitive=True, bit_mask=None, appears_random=None, observed_encoding=None, defanging_algorithm_ref=None, is_obfuscated=None, regex_syntax=None, apply_condition='ANY', trend=None, idref=None, is_defanged=None, id=None, condition=None, valueOf_=None):
super(WaitableTimerType, self).__init__(obfuscation_algorithm_ref, refanging_transform_type, has_changed, delimiter, pattern_type, datatype, refanging_transform, is_case_sensitive, bit_mask, appears_random, observed_encoding, defanging_algorithm_ref, is_obfuscated, regex_syntax, apply_condition, trend, idref, is_defanged, id, condition, valueOf_)
self.datatype = _cast(None, datatype)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if WaitableTimerType.subclass:
return WaitableTimerType.subclass(*args_, **kwargs_)
else:
return WaitableTimerType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_datatype(self): return self.datatype
def set_datatype(self, datatype): self.datatype = datatype
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_ or
super(WaitableTimerType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinWaitableTimerObj:', name_='WaitableTimerType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WaitableTimerType')
if self.hasContent_():
lwrite('>')
lwrite(quote_xml(self.valueOf_))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinWaitableTimerObj:', name_='WaitableTimerType'):
super(WaitableTimerType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WaitableTimerType')
if self.datatype is not None:
lwrite(' datatype=%s' % (quote_attrib(self.datatype), ))
def exportChildren(self, lwrite, level, namespace_='WinWaitableTimerObj:', name_='WaitableTimerType', fromsubclass_=False, pretty_print=True):
super(WaitableTimerType, self).exportChildren(lwrite, level, 'WinWaitableTimerObj:', name_, True, pretty_print=pretty_print)
pass
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('datatype', node)
if value is not None:
self.datatype = value
super(WaitableTimerType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class WaitableTimerType
class WindowsWaitableTimerObjectType(cybox_common.ObjectPropertiesType):
"""The WindowsWaitableTimerObjectType is intended to characterize
Windows waitable timer (synchronization) objects."""
subclass = None
superclass = cybox_common.ObjectPropertiesType
def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, Handle=None, Name=None, Security_Attributes=None, Type=None):
super(WindowsWaitableTimerObjectType, self).__init__(object_reference, Custom_Properties, xsi_type )
self.Handle = Handle
self.Name = Name
self.Security_Attributes = Security_Attributes
self.Type = Type
def factory(*args_, **kwargs_):
if WindowsWaitableTimerObjectType.subclass:
return WindowsWaitableTimerObjectType.subclass(*args_, **kwargs_)
else:
return WindowsWaitableTimerObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Handle(self): return self.Handle
def set_Handle(self, Handle): self.Handle = Handle
def get_Name(self): return self.Name
def set_Name(self, Name): self.Name = Name
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Security_Attributes(self): return self.Security_Attributes
def set_Security_Attributes(self, Security_Attributes): self.Security_Attributes = Security_Attributes
def get_Type(self): return self.Type
def set_Type(self, Type): self.Type = Type
def validate_WaitableTimerType(self, value):
# Validate type WaitableTimerType, a restriction on None.
pass
def hasContent_(self):
if (
self.Handle is not None or
self.Name is not None or
self.Security_Attributes is not None or
self.Type is not None or
super(WindowsWaitableTimerObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinWaitableTimerObj:', name_='WindowsWaitableTimerObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsWaitableTimerObjectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinWaitableTimerObj:', name_='WindowsWaitableTimerObjectType'):
super(WindowsWaitableTimerObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsWaitableTimerObjectType')
def exportChildren(self, lwrite, level, namespace_='WinWaitableTimerObj:', name_='WindowsWaitableTimerObjectType', fromsubclass_=False, pretty_print=True):
super(WindowsWaitableTimerObjectType, self).exportChildren(lwrite, level, 'WinWaitableTimerObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Handle is not None:
self.Handle.export(lwrite, level, 'WinWaitableTimerObj:', name_='Handle', pretty_print=pretty_print)
if self.Name is not None:
self.Name.export(lwrite, level, 'WinWaitableTimerObj:', name_='Name', pretty_print=pretty_print)
if self.Security_Attributes is not None:
self.Security_Attributes.export(lwrite, level, 'WinWaitableTimerObj:', name_='Security_Attributes', pretty_print=pretty_print)
if self.Type is not None:
self.Type.export(lwrite, level, 'WinWaitableTimerObj:', name_='Type', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(WindowsWaitableTimerObjectType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Handle':
obj_ = win_handle_object.WindowsHandleObjectType.factory()
obj_.build(child_)
self.set_Handle(obj_)
elif nodeName_ == 'Name':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Name(obj_)
elif nodeName_ == 'Security_Attributes':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Security_Attributes(obj_)
elif nodeName_ == 'Type':
obj_ = WaitableTimerType.factory()
obj_.build(child_)
self.set_Type(obj_)
super(WindowsWaitableTimerObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class WindowsWaitableTimerObjectType
GDSClassesMapping = {
'Build_Utility': cybox_common.BuildUtilityType,
'Errors': cybox_common.ErrorsType,
'Time': cybox_common.TimeType,
'Certificate_Issuer': cybox_common.StringObjectPropertyType,
'Metadata': cybox_common.MetadataType,
'Hash': cybox_common.HashType,
'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
'Block_Hash_Value': cybox_common.HashValueType,
'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
'SubDatum': cybox_common.MetadataType,
'Segment_Hash': cybox_common.HashValueType,
'Digital_Signature': cybox_common.DigitalSignatureInfoType,
'Code_Snippets': cybox_common.CodeSnippetsType,
'Value': cybox_common.StringObjectPropertyType,
'Length': cybox_common.IntegerObjectPropertyType,
'Encoding': cybox_common.ControlledVocabularyStringType,
'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
'Tool_Configuration': cybox_common.ToolConfigurationType,
'Security_Attributes': cybox_common.StringObjectPropertyType,
'Object_Address': cybox_common.UnsignedLongObjectPropertyType,
'English_Translation': cybox_common.StringObjectPropertyType,
'Functions': cybox_common.FunctionsType,
'String_Value': cybox_common.StringObjectPropertyType,
'Pointer_Count': cybox_common.UnsignedLongObjectPropertyType,
'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
'System': cybox_common.ObjectPropertiesType,
'Platform': cybox_common.PlatformSpecificationType,
'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
'Type': win_handle_object.HandleType,
'Compilers': cybox_common.CompilersType,
'Tool_Type': cybox_common.ControlledVocabularyStringType,
'String': cybox_common.ExtractedStringType,
'Tool': cybox_common.ToolInformationType,
'Build_Information': cybox_common.BuildInformationType,
'Tool_Hashes': cybox_common.HashListType,
'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
'Error_Instances': cybox_common.ErrorInstancesType,
'Data_Segment': cybox_common.StringObjectPropertyType,
'Certificate_Subject': cybox_common.StringObjectPropertyType,
'Language': cybox_common.StringObjectPropertyType,
'Property': cybox_common.PropertyType,
'Strings': cybox_common.ExtractedStringsType,
'File_System_Offset': cybox_common.IntegerObjectPropertyType,
'Reference_Description': cybox_common.StructuredTextType,
'User_Account_Info': cybox_common.ObjectPropertiesType,
'Configuration_Settings': cybox_common.ConfigurationSettingsType,
'Simple_Hash_Value': cybox_common.SimpleHashValueType,
'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
'Instance': cybox_common.ObjectPropertiesType,
'Import': cybox_common.StringObjectPropertyType,
'Access_Mask': cybox_common.UnsignedLongObjectPropertyType,
'Identifier': cybox_common.PlatformIdentifierType,
'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
'Execution_Environment': cybox_common.ExecutionEnvironmentType,
'ID': cybox_common.UnsignedIntegerObjectPropertyType,
'Dependencies': cybox_common.DependenciesType,
'Offset': cybox_common.IntegerObjectPropertyType,
'Date': cybox_common.DateRangeType,
'Hashes': cybox_common.HashListType,
'Segments': cybox_common.HashSegmentsType,
'Segment_Count': cybox_common.IntegerObjectPropertyType,
'Usage_Context_Assumption': cybox_common.StructuredTextType,
'Block_Hash': cybox_common.FuzzyHashBlockType,
'Dependency': cybox_common.DependencyType,
'Error': cybox_common.ErrorType,
'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
'Environment_Variable': cybox_common.EnvironmentVariableType,
'Byte_Run': cybox_common.ByteRunType,
'Contributors': cybox_common.PersonnelType,
'Image_Offset': cybox_common.IntegerObjectPropertyType,
'Imports': cybox_common.ImportsType,
'Library': cybox_common.LibraryType,
'References': cybox_common.ToolReferencesType,
'Windows_Handle': win_handle_object.WindowsHandleObjectType,
'Internal_Strings': cybox_common.InternalStringsType,
'Custom_Properties': cybox_common.CustomPropertiesType,
'Configuration_Setting': cybox_common.ConfigurationSettingType,
'Libraries': cybox_common.LibrariesType,
'Function': cybox_common.StringObjectPropertyType,
'Handle': win_handle_object.WindowsHandleObjectType,
'Description': cybox_common.StructuredTextType,
'Code_Snippet': cybox_common.ObjectPropertiesType,
'Build_Configuration': cybox_common.BuildConfigurationType,
'Address': cybox_common.HexBinaryObjectPropertyType,
'Search_Within': cybox_common.IntegerObjectPropertyType,
'Segment': cybox_common.HashSegmentType,
'Compiler': cybox_common.CompilerType,
'Name': cybox_common.StringObjectPropertyType,
'Signature_Description': cybox_common.StringObjectPropertyType,
'Block_Size': cybox_common.IntegerObjectPropertyType,
'Search_Distance': cybox_common.IntegerObjectPropertyType,
'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
'Dependency_Description': cybox_common.StructuredTextType,
'Contributor': cybox_common.ContributorType,
'Tools': cybox_common.ToolsInformationType,
'Data_Size': cybox_common.DataSizeType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Waitable_Timer'
rootClass = WindowsWaitableTimerObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_=rootTag,
# namespacedef_='',
# pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Waitable_Timer'
rootClass = WindowsWaitableTimerObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from mixbox.vendor.six import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Waitable_Timer'
rootClass = WindowsWaitableTimerObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_="Windows_Waitable_Timer",
# namespacedef_='')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"WindowsWaitableTimerObjectType",
"WaitableTimerType"
]
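# Usage sketch (the XML payload is hypothetical):
#
#   rootObj = parseString('<Windows_Waitable_Timer>...</Windows_Waitable_Timer>')
#   rootObj.export(sys.stdout.write, 0, name_='Windows_Waitable_Timer')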
| {
"content_hash": "261fa231c0760752115a21b87173be03",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 449,
"avg_line_length": 48.689373297002724,
"alnum_prop": 0.6948905926464828,
"repo_name": "CybOXProject/python-cybox",
"id": "9f0af718262a25e2f6e4eb2199608a458466321f",
"size": "17974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cybox/bindings/win_waitable_timer_object.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4610747"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
from setuptools import setup
import os
import sys
import glob
from distutils.core import Command
from distutils.command.build import build as build_
from distutils.command.clean import clean as clean_
from setuptools.command.develop import develop as develop_
from setuptools.command.egg_info import egg_info as egg_info_
from setuptools.command.install import install as install_
from distutils.errors import DistutilsError
import gettext
LOCALE_DIR = os.path.join(os.path.dirname(sys.argv[0]), 'src/webilder/locale')
gettext.install('webilder', LOCALE_DIR)
if sys.argv[-1] == 'setup.py':
print _("To install, run '%s'") % 'python setup.py install'
print
def GetBonoboPath():
"""Extract the bonono path from the command line."""
for flag in sys.argv[1:]:
if flag.startswith('--bonobo_path'):
sys.argv.remove(flag)
return flag.split('=', 1)[1]
else:
return 'lib/bonobo/servers'
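# e.g. `python setup.py install --bonobo_path=/usr/lib/bonobo/servers`
# overrides the relative default of 'lib/bonobo/servers'.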
class file_build_command(Command):
def initialize_options(self):
self.build_lib = None
self.install_scripts = None
self.install_data = None
def finalize_options(self):
self.set_undefined_options('build',
('build_lib', 'build_lib'))
self.set_undefined_options('install',
('install_scripts', 'install_scripts'),
('install_data', 'install_data'),
)
inst_cmd = self.get_finalized_command('install')
if inst_cmd.root is not None:
self.install_scripts = inst_cmd._original_install_scripts
self.install_data = inst_cmd._original_install_data
def run(self):
dest_dir = self.get_dest_dir()
self.mkpath(dest_dir)
fc = file(os.path.join(self.dir, self.filename + '.in'), 'r').read()
fw = file(os.path.join(dest_dir, self.filename), 'w')
fw.write(fc % dict(
bin_dir = self.install_scripts,
data_dir = os.path.join(self.install_data, 'share', 'pixmaps'),
version = self.distribution.get_version()))
fw.close()
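    # The corresponding '.in' template is expected to use %()s-style
    # placeholders matching the dict above (hypothetical example line):
    #
    #   location="%(bin_dir)s/webilder_applet" version="%(version)s"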
class build_server(file_build_command):
description = _('Builds the bonobo server file representing the applet.')
dir = 'servers'
filename = 'GNOME_WebilderApplet.server'
def get_dest_dir(self): return 'servers'
class egg_info(egg_info_):
def find_sources(self):
egg_info_.find_sources(self)
# Prune debian/ control directory.
self.filelist.exclude_pattern(None, prefix='debian')
class build(build_):
sub_commands = build_.sub_commands[:]
sub_commands.append(('build_server', None))
sub_commands.append(('build_i18n', None))
class CompileTranslationsMixin(object):
def compile_mo(self):
for po in glob.glob(os.path.join(LOCALE_DIR, '*/*/*.po')):
self.spawn([
'msgfmt', po,
'-o', po[:-3]+'.mo'])
class develop(develop_, CompileTranslationsMixin):
def install_for_development(self):
self.compile_mo()
return develop_.install_for_development(self)
sub_commands = develop_.sub_commands[:]
sub_commands.append(('build_i18n', None))
class build_i18n(Command, CompileTranslationsMixin):
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.compile_mo()
def check_modules(*modules):
for module in modules:
import imp
try:
imp.find_module(module)
except ImportError, e:
raise DistutilsError, _('Could not find module %s. Make sure all dependencies are installed.') % e
class install(install_):
user_options = install_.user_options[:]
sub_commands = install_.sub_commands[:]
def run(self):
check_modules('gtk', 'pygtk', 'gnome', 'appindicator')
install_.run(self)
print _("""
Installation completed successfully.
GNOME Users: Right-click on the GNOME panel, choose "Add to panel",
and select "Webilder Webshots Applet". If it
is not in the list - log off and log in again.
If you prefer the command line, you can run webilder_desktop
to configure Webilder and manage your photos. It is also
possible to start photo downloading from the command line by
starting webilder_downloader.
Please report any problem to thesamet at gmail.com.
""")
def change_roots(self, *names):
# in case we are going to perform a rooted install, store the original
# path names, so we can use them in file_build_command's.
for name in names:
attr = 'install_' + name
backup_attr = '_original_install_' + name
setattr(self, backup_attr, getattr(self, attr))
install_.change_roots(self, *names)
class clean(clean_):
def run(self):
if self.dry_run:
return
for mo in glob.glob(os.path.join(LOCALE_DIR, '*/*/*.mo')):
os.unlink(mo)
bonobo_server = os.path.join(
os.path.dirname(sys.argv[0]),
'servers/GNOME_WebilderApplet.server')
if os.path.exists(bonobo_server):
os.unlink(bonobo_server)
clean_.run(self)
setup(name='Webilder',
version='0.7.3',
description='Webilder Desktop',
author='Nadav Samet',
author_email='thesamet@gmail.com',
url='http://www.webilder.org',
packages = ['webilder', 'webilder.webshots', 'webilder.flickr'],
package_dir = {'': 'src'},
package_data = {
'': ['ui/*.glade', 'ui/*.png', 'ui/*.xpm', 'locale/*/*/*.mo'],
},
exclude_package_data = {
'': ['debian/*',],
},
data_files = [
(os.path.join('share', 'pixmaps'),
['src/webilder/ui/camera48.png']),
(os.path.join('share', 'applications'),
['desktop/webilder_desktop.desktop']),
(os.path.join('share', 'applications'),
['desktop/webilder_indicator.desktop']),
(os.path.join('share', 'gnome', 'autostart'),
['autostart/webilder_indicator.desktop']),
(GetBonoboPath(),
['servers/GNOME_WebilderApplet.server'])
],
cmdclass = {
'build': build,
'build_server': build_server,
'build_i18n': build_i18n,
'clean': clean,
'develop': develop,
'egg_info': egg_info,
'install': install},
entry_points = {
'console_scripts': [
'webilder_downloader = webilder.downloader:main',
'wbz_handler = webilder.wbz_handler:main',
'webilder_applet = webilder.webilder_gnome_applet:main',
'webilder_unity_indicator = webilder.webilder_unity_indicator:main',
],
'gui_scripts': [
'webilder_desktop = webilder.WebilderDesktop:main'
]
}
)
| {
"content_hash": "6d524b1833012a4126f874d5e458a939",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 110,
"avg_line_length": 32.181395348837206,
"alnum_prop": 0.6002312472900708,
"repo_name": "thesamet/webilder",
"id": "ac8b35e204b9b10b3edc0fb8bba5d7b88acddcf8",
"size": "6919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2464"
},
{
"name": "Python",
"bytes": "125231"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
} |
from sparts.tasks.file import DirectoryWatcherTask
from sparts.vservice import VService
class DevWatcher(DirectoryWatcherTask):
INTERVAL = 1.0
PATH = '/dev'
DevWatcher.register()
if __name__ == '__main__':
VService.initFromCLI()
| {
"content_hash": "7752a027d9c63e8529c56b44b4f0df1f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 50,
"avg_line_length": 18.923076923076923,
"alnum_prop": 0.7154471544715447,
"repo_name": "bboozzoo/sparts",
"id": "40671276164c7001bc8a5927b8b009b93b2dd89d",
"size": "534",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "demo/dev_watcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "285429"
},
{
"name": "Shell",
"bytes": "742"
},
{
"name": "Thrift",
"bytes": "2439"
}
],
"symlink_target": ""
} |
from numpy import array, ndim, argsort
import sys
from ut.util.pobj import inject_method
__author__ = 'thor'
non_methods_set = set(dir(sys.modules[__name__]))
def cumul_before_partial_fit(self, min_data_len):
raise DeprecationWarning("Don't use. Dangerous.")
if not isinstance(min_data_len, (int, float)):
if isinstance(min_data_len, str):
if min_data_len in self.__dict__:
min_data_len = self.__dict__[min_data_len]
else:
raise AttributeError(
"Your {} doesn't have attribute {}".format(
self.__class__, min_data_len
)
)
elif callable(min_data_len):
min_data_len = min_data_len(self)
else:
raise ValueError("Couldn't figure out the min_data_len")
original_partial_fit = self.partial_fit
cumul = list()
got_enough_data = [False]
def _cumul_before_partial_fit(X, *args, **kwargs):
if got_enough_data[0]:
original_partial_fit(X, *args, **kwargs)
else:
cumul.extend(list(map(list, X)))
if len(cumul) >= min_data_len:
got_enough_data[0] = True
original_partial_fit(array(cumul), *args, **kwargs)
# self = inject_method(self, _cumul_before_partial_fit, 'partial_fit')
self.partial_fit = _cumul_before_partial_fit
return self
def predict_proba_with_labels(self, X):
""" returns a list of dicts of {label: predict_proba_score} entries """
if ndim(X) == 1:
pred = self.predict_proba(X.reshape(1, -1))
return dict(list(zip(self.classes_, array(pred)[0])))
else:
pred = self.predict_proba(X)
return [dict(list(zip(self.classes_, row))) for row in array(pred)]
def predict_proba_of_label(self, X, label, normalize_preds=False):
"""
If X is a single (ndim==1) feature vector, returns the probability of a given label according to the
(predict_proba method of)
If X is a observations x features matrix (ndarray), will return an array of probabilities for each observation
If the label is not in self.classes_ will raise a LookupError
"""
label_lidx = array(self.classes_) == label
if any(label_lidx):
if ndim(X) == 1:
pred = self.predict_proba(X.reshape(1, -1))
if normalize_preds:
pred = (
pred.T / pred.sum(axis=1).T
).T # added later on: Normalizing the preds (untested for ndim(X)==1)
return array(pred)[0, label_lidx][0]
else:
pred = self.predict_proba(X)
if normalize_preds:
pred = (
pred.T / pred.sum(axis=1).T
).T # added later on: Normalizing the preds
return array(pred[:, label_lidx]).reshape(-1)
else:
        raise LookupError("The label {} wasn't found in the model".format(label))
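# Minimal usage sketch (assuming a fitted sklearn classifier ``clf`` and a
# feature matrix ``X``; these functions take the model as first argument):
#
#     probs = predict_proba_of_label(clf, X, clf.classes_[0])
#     assert probs.shape == (len(X),)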
def label_prob_argsort(self, X, label):
"""
    X is an observations x features matrix (ndarray) and label is one of the labels the model was fit on.
The function will return an "argsort" array idx which will indicate how the input X can be sorted by decreasing
probability of the given label.
That is, such that
self.predict_proba(X[label_prob_argsort(self, X, label), :])[:, self.classes_ == label].reshape(-1))
will be monotone decreasing.
>>> from sklearn.datasets import make_blobs
>>> from sklearn.linear_model import LogisticRegressionCV
>>> from numpy import diff, all
>>> X, y = make_blobs()
>>> clf = LogisticRegressionCV().fit(X, y)
>>> label = clf.classes_[0]
>>> permutation_idx = label_prob_argsort(clf, X, label)
>>> sorted_predict_proba_matrix = clf.predict_proba(X)[permutation_idx, :]
>>> sorted_predict_proba_matrix = sorted_predict_proba_matrix / sorted_predict_proba_matrix.sum(axis=1)[:, None]
>>> assert all(diff(sorted_predict_proba_matrix[:, clf.classes_ == label].reshape(-1)) <= 0)
"""
return argsort(predict_proba_of_label(self, X, label))[::-1]
def true_positive_rate(self, X, y):
return sum(self.predict(X) == y) / float(len(y))
######### This is so that we can get a set of the methods outside...
methods_set = (
set(dir(sys.modules[__name__]))
.difference(non_methods_set)
.difference({'non_methods_set'})
)
######### This is so that we can get an object that has these methods as attributes
######### (to make it easier to see what's available)
class Struct(object):
def __init__(self, method_names):
for method_name in method_names:
setattr(self, method_name, getattr(sys.modules[__name__], method_name))
model_methods = Struct(methods_set)
| {
"content_hash": "00db3a581ca5503850e9d56a42d9a634",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 116,
"avg_line_length": 36.883720930232556,
"alnum_prop": 0.6025641025641025,
"repo_name": "thorwhalen/ut",
"id": "a28ad3ad9f3b11853ac7795bfaee4f6a9e9ed6b0",
"size": "4758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ut/ml/sk/utils/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1174"
},
{
"name": "Python",
"bytes": "2258941"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from elvanto_subgroups.elvanto import refresh_elvanto_data
class Command(BaseCommand):
help = 'Pulls info from elvanto'
def handle(self, *args, **options):
refresh_elvanto_data()
| {
"content_hash": "d7c60a75b493b93f88a0dabf721d8a7f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 58,
"avg_line_length": 25.1,
"alnum_prop": 0.7370517928286853,
"repo_name": "monty5811/elvanto_subgroups",
"id": "31a2e9452b9a0d5359259461ce24fd962f51a5f2",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elvanto_subgroups/management/commands/pull_from_elvanto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1556"
},
{
"name": "HTML",
"bytes": "5998"
},
{
"name": "JavaScript",
"bytes": "7726"
},
{
"name": "Python",
"bytes": "21258"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from flowgen.graph import Graph
from flowgen.language import Code
from flowgen.options import parser
from pypeg2 import parse
from pypeg2.xmlast import thing2xml
class FlowGen(object):
def __init__(self, args):
self.args = parser.parse_args(args)
def any_output(self):
return any([self.args.dump_source, self.args.dump_xml])
def safe_print(self, *args, **kwargs):
if not self.any_output():
print(*args, **kwargs)
def run(self):
data_input = self.args.infile.read()
tree = parse(data_input, Code)
if self.args.dump_xml:
print(thing2xml(tree, pretty=True).decode())
graph = Graph(tree)
graph.render()
if self.args.dump_source:
print(graph.get_source())
if self.args.preview:
graph.dot.view()
if self.args.outfile:
graph.save(self.args.outfile.name)
self.safe_print("Saved graph to %s successfull" % (self.args.outfile.name))
| {
"content_hash": "9cee64d02191c30c76a7ceb82f319ad0",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 87,
"avg_line_length": 28.27027027027027,
"alnum_prop": 0.6195028680688337,
"repo_name": "ad-m/flowgen",
"id": "bff201afbc30f4216d47c3e59f5126d04d7628ec",
"size": "1070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowgen/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2295"
},
{
"name": "Python",
"bytes": "17369"
}
],
"symlink_target": ""
} |
import unittest
from aarhus.aarhus import custom_stopwords
class TestStopwords(unittest.TestCase):
def test_basic(self):
stopwords = custom_stopwords.get_stopwords()
        print(stopwords)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "022df6ae054c1883a3f41ccdef14db07",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 19.692307692307693,
"alnum_prop": 0.67578125,
"repo_name": "mikedelong/aarhus",
"id": "87505ceed1e5d1c38ff9aff8ea4612846e2570bf",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aarhus/tests/stopword_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "146893"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
'''
Collection of useful statistics routines
'''
def simpSample(f, numTest, xMin, xMax, M = None, verb = False):
'''
    Use the rejection sampling method to generate samples distributed
    according to the given (unnormalized) density function f,
    between some range xMin and xMax.
If xMin==xMax, return an array where all values are equal to
this value.
'''
if xMin==xMax:
return np.zeros(numTest)+xMin
#find max value if not provided
if M is None:
M = calcM(f,xMin,xMax)
#initialize
n = 0
    X = np.zeros(numTest)
    numIter = 0
    maxIter = 1000
nSamp = int(np.max([2*numTest,1e6]))
while n < numTest and numIter < maxIter:
xd = np.random.random(nSamp) * (xMax - xMin) + xMin
yd = np.random.random(nSamp) * M
pd = f(xd)
xd = xd[yd < pd]
X[n:min(n+len(xd), numTest)] = xd[:min(len(xd),numTest-n)]
n += len(xd)
numIter += 1
if numIter == maxIter:
raise Exception("Failed to converge.")
if verb:
print 'Finished in '+repr(numIter)+' iterations.'
return X
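# Minimal usage sketch (illustrative names): draw 1000 samples from a
# truncated standard normal on [-2, 2] by rejection sampling.
#
#     pdf = lambda x: np.exp(-x**2 / 2.)
#     samples = simpSample(pdf, 1000, -2., 2.)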
def calcM(f,xMin,xMax):
#first do a coarse grid to get ic
dx = np.linspace(xMin,xMax,1000000)
ic = np.argmax(f(dx))
#now optimize
g = lambda x: -f(x)
M = fmin_l_bfgs_b(g,[dx[ic]],approx_grad=True,bounds=[(xMin,xMax)])
M = f(M[0])
    return M
| {
"content_hash": "507039dd53fab731473bcda33753f1e7",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 71,
"avg_line_length": 24.610169491525422,
"alnum_prop": 0.5888429752066116,
"repo_name": "dgarrett622/FuncComp",
"id": "72c698c53222e9eb39bbd69bded44ffce468c26f",
"size": "1452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FuncComp/statsFun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48797"
}
],
"symlink_target": ""
} |
"""Jerod Gawne, 2018.06.28
https://github.com/jerodg/hackerrank
"""
import sys
import traceback
def capitalize(string) -> str:
"""Jerod Gawne, 2017.10.13
print(input().title()) will not work because the question is
asking to capitalise firse letter of each word keeping in mind
that "if it is a letter". Title and Capitalise are different in function as:
'abcd'.title() results in 'Abcd' but
'12abcd'.title() results in '12Abcd'. This is not what we want.
We just want to capitalise first letter of each word, not the first occuring letter of a word.
:param string: str
:return: str
"""
return ' '.join((word.capitalize() for word in string.split(' ')))
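# For example (hypothetical inputs):
#   capitalize('hello world') -> 'Hello World'
#   capitalize('12abcd ef')   -> '12abcd Ef'   (title() would give '12Abcd Ef')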
if __name__ == '__main__':
try:
        print(capitalize(input()))
except Exception:
print(traceback.print_exception(*sys.exc_info()))
| {
"content_hash": "01cc2b89d70be57628775ca736d13308",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 98,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.6611764705882353,
"repo_name": "jerodg/hackerrank-python",
"id": "182e7a835029cf323f8b705f8fd8e9ceec740ec8",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/02.Strings/11.Capitalize/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39802"
}
],
"symlink_target": ""
} |
"""
Created on Tue Feb 28 12:48:16 2017
@author: pconsidine
"""
def convert_to_celsius(fahrenheit):
'''(number) -> number
Return the number of Celsius degrees equivalent to fahrenheit degrees
>>> convert_to_celsius(32)
0.0
>>> convert_to_celsius(212)
100.0
'''
return (fahrenheit - 32) * 5 / 9
| {
"content_hash": "ecb8650bc16e4fbb33a6ead85500b163",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 19.38888888888889,
"alnum_prop": 0.5931232091690545,
"repo_name": "pjconsidine/codingclass",
"id": "a832dbf1b648f355a6a68a771a29a7c08dc923d8",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "temperatur.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8178"
},
{
"name": "HTML",
"bytes": "12855"
},
{
"name": "JavaScript",
"bytes": "5031361"
},
{
"name": "Python",
"bytes": "305"
},
{
"name": "Shell",
"bytes": "2810"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from glob import glob
import os
import time
from xml.etree.ElementTree import ElementTree
# project
from checks import AgentCheck
from util import get_hostname
class Skip(Exception):
"""
Raised by :class:`Jenkins` when it comes across
a build or job that should be excluded from being checked.
"""
def __init__(self, reason, dir_name):
message = 'skipping build or job at %s because %s' % (dir_name, reason)
Exception.__init__(self, message)
class Jenkins(AgentCheck):
datetime_format = '%Y-%m-%d_%H-%M-%S'
def __init__(self, name, init_config, agentConfig):
AgentCheck.__init__(self, name, init_config, agentConfig)
self.high_watermarks = {}
def _timestamp_from_build_file(self, dir_name, tree):
timestamp = tree.find('timestamp')
if timestamp is None or not timestamp.text:
raise Skip('the timestamp cannot be found', dir_name)
else:
return int(timestamp.text) / 1000.0
def _timestamp_from_dirname(self, dir_name):
if not os.path.isdir(dir_name):
raise Skip('its not a build directory', dir_name)
try:
# Parse the timestamp from the directory name
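            # e.g. '2015-03-10_19-59-29' (pre-1.597 Jenkins layout)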
date_str = os.path.basename(dir_name)
time_tuple = time.strptime(date_str, self.datetime_format)
return time.mktime(time_tuple)
except ValueError:
return None
def _get_build_metadata(self, dir_name, watermark):
if os.path.exists(os.path.join(dir_name, 'jenkins_build.tar.gz')):
raise Skip('the build has already been archived', dir_name)
timestamp = self._timestamp_from_dirname(dir_name)
# This is not the latest build
if timestamp is not None and timestamp <= watermark:
return None
# Read the build.xml metadata file that Jenkins generates
build_metadata = os.path.join(dir_name, 'build.xml')
if not os.access(build_metadata, os.R_OK):
self.log.debug("Can't read build file at %s" % (build_metadata))
raise Exception("Can't access build.xml at %s" % (build_metadata))
else:
tree = ElementTree()
tree.parse(build_metadata)
if timestamp is None:
try:
timestamp = self._timestamp_from_build_file(dir_name, tree)
# This is not the latest build
if timestamp <= watermark:
return None
except ValueError:
return None
keys = ['result', 'number', 'duration']
kv_pairs = ((k, tree.find(k)) for k in keys)
d = dict([(k, v.text) for k, v in kv_pairs if v is not None])
d['timestamp'] = timestamp
try:
d['branch'] = tree.find('actions')\
.find('hudson.plugins.git.util.BuildData')\
.find('buildsByBranchName')\
.find('entry')\
.find('hudson.plugins.git.util.Build')\
.find('revision')\
.find('branches')\
.find('hudson.plugins.git.Branch')\
.find('name')\
.text
except Exception:
pass
return d
def _get_build_results(self, instance_key, job_dir):
job_name = os.path.basename(job_dir)
try:
dirs = glob(os.path.join(job_dir, 'builds', '*_*'))
# Before Jenkins v1.597 the build folders were named with a timestamp (eg: 2015-03-10_19-59-29)
# Starting from Jenkins v1.597 they are named after the build ID (1, 2, 3...)
# So we need try both format when trying to find the latest build and parsing build.xml
if len(dirs) == 0:
dirs = glob(os.path.join(job_dir, 'builds', '[0-9]*'))
if len(dirs) > 0:
# versions of Jenkins > 1.597 need to be sorted by build number (integer)
try:
dirs = sorted(dirs, key=lambda x: int(x.split('/')[-1]), reverse=True)
except ValueError:
dirs = sorted(dirs, reverse=True)
# We try to get the last valid build
for dir_name in dirs:
watermark = self.high_watermarks[instance_key][job_name]
try:
build_metadata = self._get_build_metadata(dir_name, watermark)
except Exception:
build_metadata = None
if build_metadata is not None:
build_result = build_metadata.get('result')
if build_result is None:
break
output = {
'job_name': job_name,
'event_type': 'build result'
}
output.update(build_metadata)
if 'number' not in output:
output['number'] = dir_name.split('/')[-1]
self.high_watermarks[instance_key][job_name] = output.get('timestamp')
self.log.debug("Processing %s results '%s'" % (job_name, output))
yield output
# If it not a new build, stop here
else:
break
except Exception as e:
self.log.error("Error while working on job %s, exception: %s" % (job_name, e))
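    # Sorting with key=int above matters because lexicographic order
    # misranks numeric build dirs: sorted(['9', '10'], reverse=True)
    # gives ['9', '10'], while key=int gives ['10', '9'].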
def check(self, instance, create_event=True):
"""
DEPRECATED:
This Jenkins check is deprecated and not actively developed anymore. It will be
removed in a future version of the Datadog Agent. Please move to using the Datadog
plugin for Jenkins. More information can be found on the Jenkins Integration panel
under the Configuration tab (https://app.datadoghq.com/account/settings#integrations/jenkins)
"""
self.warning("This check is deprecated in favor of our Jenkins Datadog plugin."
" It will be removed in a future version of the Datadog Agent."
" More information can be found on the Jenkins Integration panel"
" under the Configuration tab"
" (https://app.datadoghq.com/account/settings#integrations/jenkins)")
if self.high_watermarks.get(instance.get('name'), None) is None:
# On the first run of check(), prime the high_watermarks dict
            # so that we only send events that occurred after the agent
# started.
# (Setting high_watermarks in the next statement prevents
# any kind of infinite loop (assuming nothing ever sets
# high_watermarks to None again!))
self.high_watermarks[instance.get('name')] = defaultdict(lambda: 0)
self.check(instance, create_event=False)
jenkins_home = instance.get('jenkins_home')
if not jenkins_home:
raise Exception("No jenkins_home directory set in the config file")
jenkins_jobs_dir = os.path.join(jenkins_home, 'jobs', '*')
job_dirs = glob(jenkins_jobs_dir)
if not job_dirs:
raise Exception('No jobs found in `%s`! '
'Check `jenkins_home` in your config' % (jenkins_jobs_dir))
for job_dir in job_dirs:
for output in self._get_build_results(instance.get('name'), job_dir):
output['host'] = get_hostname(self.agentConfig)
if create_event:
self.log.debug("Creating event for job: %s" % output['job_name'])
self.event(output)
tags = [
'job_name:%s' % output['job_name'],
'result:%s' % output['result'],
'build_number:%s' % output['number']
]
if 'branch' in output:
tags.append('branch:%s' % output['branch'])
self.gauge("jenkins.job.duration", float(output['duration'])/1000.0, tags=tags)
if output['result'] == 'SUCCESS':
self.increment('jenkins.job.success', tags=tags)
else:
self.increment('jenkins.job.failure', tags=tags)
| {
"content_hash": "4d99696ce90d0c24620f5aab1c2338a6",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 107,
"avg_line_length": 43.98453608247423,
"alnum_prop": 0.5356849876948319,
"repo_name": "indeedops/dd-agent",
"id": "4278d307efe931110004088d856fe29f5f90ba81",
"size": "8649",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "checks.d/jenkins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8553"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "2300561"
},
{
"name": "Ruby",
"bytes": "102896"
},
{
"name": "Shell",
"bytes": "61965"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, session, redirect, url_for, flash
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
# the toolbar is only enabled in debug mode:
app.debug = True
toolbar = DebugToolbarExtension(app)
class NameForm(Form):
name = StringField('What is your name?', validators=[Required()])
submit = SubmitField('Submit')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/data', methods=['GET', 'POST'])
def data():
return render_template('data.html')
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/user')
def user():
return render_template('user.html')
if __name__ == '__main__':
manager.run()
| {
"content_hash": "28d0056c83d9b6a41dcb0d1f4c7b0dd0",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 25.185185185185187,
"alnum_prop": 0.7110294117647059,
"repo_name": "Craicerjack/socdems",
"id": "3fab1f18b985c3b3fec4c2557548e9263eb84e2d",
"size": "1360",
"binary": false,
"copies": "1",
"ref": "refs/heads/tempsform",
"path": "hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47327"
},
{
"name": "HTML",
"bytes": "74490"
},
{
"name": "JavaScript",
"bytes": "473696"
},
{
"name": "Python",
"bytes": "3043"
}
],
"symlink_target": ""
} |
import crm_claim_report
| {
"content_hash": "fcdcf0a023e5e676e04900314874fc40",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.8333333333333334,
"repo_name": "vileopratama/vitech",
"id": "869d301e20cc08c00780f67628dbfa6c599df953",
"size": "124",
"binary": false,
"copies": "44",
"ref": "refs/heads/master",
"path": "src/addons/crm_claim/report/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from invoker import ReplyObject
from commands import URL
from room import Room
from user import User
from app import PSBot
import re
psb = PSBot()
test_room = Room('test')
test_user = User('user')
""" Tests the commands that are within the CommandInvoker
"""
def testInvalidCommand():
reply = psb.invoker.execute(psb, 'test_command', '', test_user, test_room)
assert reply == ReplyObject('test_command is not a valid command.'), 'Invalid command not properly recognized; {}'.format(reply.text)
def testExternalLoader():
reply = psb.invoker.execute(psb, 'source', '', test_user, test_room)
assert reply == ReplyObject('Source code can be found at: {}'.format(URL()), True), 'Commands not properly loaded'
| {
"content_hash": "c74af20fadab58a5fc6dd4bde7aca6e5",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 137,
"avg_line_length": 32.95454545454545,
"alnum_prop": 0.7186206896551725,
"repo_name": "QuiteQuiet/PokemonShowdownBot",
"id": "76945a30c3f4563f781c2e48b22ff693b1a0e473",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1119704"
}
],
"symlink_target": ""
} |
import atexit
import logging
import os
import re
import readline
import Stemmer
import sys
import time
from .bot import Runner
from .brain import Brain
log = logging.getLogger("cobe")
class InitCommand:
@classmethod
def add_subparser(cls, parser):
subparser = parser.add_parser("init", help="Initialize a new brain")
subparser.add_argument("--force", action="store_true")
subparser.add_argument("--order", type=int, default=3)
subparser.add_argument("--megahal", action="store_true",
help="Use MegaHAL-compatible tokenizer")
subparser.set_defaults(run=cls.run)
@staticmethod
def run(args):
filename = args.brain
if os.path.exists(filename):
if args.force:
os.remove(filename)
else:
log.error("%s already exists!", filename)
return
tokenizer = None
if args.megahal:
tokenizer = "MegaHAL"
Brain.init(filename, order=args.order, tokenizer=tokenizer)
def progress_generator(filename):
s = os.stat(filename)
size_left = s.st_size
fd = open(filename, "rb")
for line in fd:
# Try to interpret any binary data as utf-8, but ignore
# errors. This tries to make as good use of corrupt input as
# possible.
line = line.decode("utf-8", errors="ignore")
size_left = size_left - len(line)
progress = 100 * (1. - (float(size_left) / float(s.st_size)))
yield line, progress
fd.close()
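# Typical use (illustrative): stream a corpus while reporting progress.
#
#     for line, pct in progress_generator("corpus.txt"):
#         ...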
class LearnCommand:
@classmethod
def add_subparser(cls, parser):
subparser = parser.add_parser("learn", help="Learn a file of text")
subparser.add_argument("file", nargs="+")
subparser.set_defaults(run=cls.run)
@staticmethod
def run(args):
b = Brain(args.brain)
b.start_batch_learning()
for filename in args.file:
now = time.time()
print(filename)
count = 0
for line, progress in progress_generator(filename):
show_progress = ((count % 1000) == 0)
if show_progress:
elapsed = time.time() - now
sys.stdout.write("\r%.0f%% (%d/s)" % (progress,
count / elapsed))
sys.stdout.flush()
b.learn(line.strip())
count = count + 1
if (count % 10000) == 0:
b.graph.commit()
elapsed = time.time() - now
print("\r100%% (%d/s)" % (count / elapsed))
b.stop_batch_learning()
class LearnIrcLogCommand:
@classmethod
def add_subparser(cls, parser):
subparser = parser.add_parser("learn-irc-log",
help="Learn a file of IRC log text")
subparser.add_argument("-i", "--ignore-nick", action="append",
dest="ignored_nicks",
help="Ignore an IRC nick")
subparser.add_argument("-o", "--only-nick", action="append",
dest="only_nicks",
help="Only learn from specified nicks")
subparser.add_argument("-r", "--reply-to", action="append",
help="Reply (invisibly) to things said "
"to specified nick")
subparser.add_argument("file", nargs="+")
subparser.set_defaults(run=cls.run)
@classmethod
def run(cls, args):
b = Brain(args.brain)
b.start_batch_learning()
for filename in args.file:
now = time.time()
print(filename)
count = 0
for line, progress in progress_generator(filename):
show_progress = ((count % 100) == 0)
if show_progress:
elapsed = time.time() - now
sys.stdout.write("\r%.0f%% (%d/s)" % (progress,
count / elapsed))
sys.stdout.flush()
count = count + 1
if (count % 1000) == 0:
b.graph.commit()
parsed = cls._parse_irc_message(line.strip(),
args.ignored_nicks,
args.only_nicks)
if parsed is None:
continue
to, msg = parsed
b.learn(msg)
if args.reply_to is not None and to in args.reply_to:
b.reply(msg)
elapsed = time.time() - now
print("\r100%% (%d/s)" % (count / elapsed))
b.stop_batch_learning()
@staticmethod
def _parse_irc_message(msg, ignored_nicks=None, only_nicks=None):
# only match lines of the form "HH:MM <nick> message"
match = re.match("\d+:\d+\s+<(.+?)>\s+(.*)", msg)
if not match:
return None
nick = match.group(1)
msg = match.group(2)
if ignored_nicks is not None and nick in ignored_nicks:
return None
if only_nicks is not None and nick not in only_nicks:
return None
to = None
# strip "username: " at the beginning of messages
match = re.search("^(\S+)[,:]\s+(\S.*)", msg)
if match:
to = match.group(1)
msg = match.group(2)
# strip kibot style '"asdf" --user, 06-oct-09' quotes
msg = re.sub("\"(.*)\" --\S+,\s+\d+-\S+-\d+",
lambda m: m.group(1), msg)
return to, msg
class ConsoleCommand:
@classmethod
def add_subparser(cls, parser):
subparser = parser.add_parser("console", help="Interactive console")
subparser.set_defaults(run=cls.run)
@staticmethod
def run(args):
b = Brain(args.brain)
history = os.path.expanduser("~/.cobe_history")
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
while True:
try:
cmd = input("> ")
except EOFError:
print()
sys.exit(0)
b.learn(cmd)
            print(b.reply(cmd))
class IrcClientCommand:
@classmethod
def add_subparser(cls, parser):
subparser = parser.add_parser("irc-client",
help="IRC client [requires twisted]")
subparser.add_argument("-s", "--server", required=True,
help="IRC server hostname")
subparser.add_argument("-p", "--port", type=int, default=6667,
help="IRC server port")
subparser.add_argument("-n", "--nick", default="cobe",
help="IRC nick")
subparser.add_argument("-c", "--channel", required=True,
help="IRC channel")
subparser.add_argument("-l", "--log-channel",
help="IRC channel for logging")
subparser.add_argument("-i", "--ignore-nick", action="append",
dest="ignored_nicks",
help="Ignore an IRC nick")
subparser.add_argument("-o", "--only-nick", action="append",
dest="only_nicks",
help="Only learn from a specific IRC nick")
subparser.set_defaults(run=cls.run)
@staticmethod
def run(args):
b = Brain(args.brain)
Runner().run(b, args)
class SetStemmerCommand:
@classmethod
def add_subparser(cls, parser):
subparser = parser.add_parser("set-stemmer",
help="Configure a stemmer")
subparser.set_defaults(run=cls.run)
subparser.add_argument("language", choices=Stemmer.algorithms(),
help="Stemmer language")
@staticmethod
def run(args):
b = Brain(args.brain)
b.set_stemmer(args.language)
class DelStemmerCommand:
@classmethod
def add_subparser(cls, parser):
subparser = parser.add_parser("del-stemmer", help="Delete the stemmer")
subparser.set_defaults(run=cls.run)
@staticmethod
def run(args):
b = Brain(args.brain)
b.del_stemmer()
| {
"content_hash": "f87222c73e70c4cfe5e5bd03f01065b5",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 79,
"avg_line_length": 30.755395683453237,
"alnum_prop": 0.5063157894736842,
"repo_name": "pteichman/cobe",
"id": "68a9b7ecbe70e4a06da0297950bd92c26f7ed0c5",
"size": "8587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cobe/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67044"
},
{
"name": "Shell",
"bytes": "571"
}
],
"symlink_target": ""
} |
from datetime import time
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import Client, TestCase, override_settings
from model_mommy import mommy
from .. models import Course, OfficeHours
TEST_SESSION_ENGINE = 'django.contrib.sessions.backends.db'
TEST_CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
TEST_RQ_QUEUES = settings.RQ_QUEUES.copy()
TEST_RQ_QUEUES['default']['ASYNC'] = False
@override_settings(SESSION_ENGINE=TEST_SESSION_ENGINE)
@override_settings(RQ_QUEUES=TEST_RQ_QUEUES)
@override_settings(CACHES=TEST_CACHES)
class TestIndexView(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
User = get_user_model()
cls.user = User.objects.create_user('teacher', 't@ch.com', 'pass')
cls.hours = mommy.make(
OfficeHours,
user=cls.user,
)
cls.course = mommy.make(
Course,
user=cls.user,
name="TEST1234",
start_time=time(13, 30),
location="Room 123",
days=['Monday', 'Wednesday', 'Friday'],
)
def setUp(self):
self.ua_client = Client() # An Unauthenticated client
self.client.login(username="teacher", password="pass")
def test_get_index(self):
self.url = reverse("officehours:index")
# Un-authed should redirect to login
resp = self.ua_client.get(self.url)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp.get('Location'), reverse('officehours:login'))
# Authed should redirect to schedule
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp.get('Location'), reverse('officehours:schedule'))
def test_get_login(self):
self.url = reverse("officehours:login")
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['Location'], reverse('officehours:schedule'))
def test_get_add_code(self):
self.url = reverse("officehours:add-code")
# Un-authed
resp = self.ua_client.get(self.url)
self.assertEqual(resp.status_code, 302)
# TODO: needs more view tests
| {
"content_hash": "a43a60858b89e2ec6e074d46c20789a9",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 31.06578947368421,
"alnum_prop": 0.6429479034307497,
"repo_name": "tndatacommons/tndata_backend",
"id": "c5cf4d6b6599be1db4488ef1fab6a081c9ab62e8",
"size": "2361",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/officehours/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
} |
import os.path
from graphite_api._vendor import whisper
from . import TestCase, WHISPER_DIR
class MetricsTests(TestCase):
def _create_dbs(self):
for db in (
('test', 'foo.wsp'),
('test', 'wat', 'welp.wsp'),
('test', 'bar', 'baz.wsp'),
):
db_path = os.path.join(WHISPER_DIR, *db)
os.makedirs(os.path.dirname(db_path))
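            # retention spec: (seconds_per_point, points) -> 1s resolution, 60 points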
whisper.create(db_path, [(1, 60)])
def test_find(self):
url = '/metrics/find'
response = self.app.get(url)
self.assertEqual(response.status_code, 400)
response = self.app.get(url, query_string={'query': 'test'})
self.assertJSON(response, [])
response = self.app.get(url, query_string={'query': 'test',
'format': 'completer'})
self.assertJSON(response, {'metrics': []})
self._create_dbs()
for _url in ['/metrics/find', '/metrics']:
response = self.app.get(_url, query_string={'query': 'test.*',
'format': 'treejson'})
self.assertJSON(response, [{
'allowChildren': 1,
'expandable': 1,
'id': 'test.bar',
'leaf': 0,
'text': 'bar',
}, {
'allowChildren': 1,
'expandable': 1,
'id': 'test.wat',
'leaf': 0,
'text': 'wat',
}, {
'allowChildren': 0,
'expandable': 0,
'id': 'test.foo',
'leaf': 1,
'text': 'foo',
}])
response = self.app.get(url, query_string={'query': 'test.*',
'format': 'treejson',
'wildcards': 1})
self.assertJSON(response, [{
'text': '*',
'expandable': 1,
'leaf': 0,
'id': 'test.*',
'allowChildren': 1,
}, {
'allowChildren': 1,
'expandable': 1,
'id': 'test.bar',
'leaf': 0,
'text': 'bar',
}, {
'allowChildren': 1,
'expandable': 1,
'id': 'test.wat',
'leaf': 0,
'text': 'wat',
}, {
'allowChildren': 0,
'expandable': 0,
'id': 'test.foo',
'leaf': 1,
'text': 'foo',
}])
response = self.app.get(url, query_string={'query': 'test.*',
'format': 'completer'})
self.assertJSON(response, {'metrics': [{
'is_leaf': 0,
'name': 'bar',
'path': 'test.bar.',
}, {
'is_leaf': 1,
'name': 'foo',
'path': 'test.foo',
}, {
'is_leaf': 0,
'name': 'wat',
'path': 'test.wat.',
}]})
response = self.app.get(url, query_string={'query': 'test.*',
'wildcards': 1,
'format': 'completer'})
self.assertJSON(response, {'metrics': [{
'is_leaf': 0,
'name': 'bar',
'path': 'test.bar.',
}, {
'is_leaf': 1,
'name': 'foo',
'path': 'test.foo',
}, {
'is_leaf': 0,
'name': 'wat',
'path': 'test.wat.',
}, {
'name': '*',
}]})
def test_find_validation(self):
url = '/metrics/find'
response = self.app.get(url, query_string={'query': 'foo',
'wildcards': 'aaa'})
self.assertJSON(response, {'errors': {'wildcards': 'must be 0 or 1.'}},
status_code=400)
response = self.app.get(url, query_string={'query': 'foo',
'from': 'aaa',
'until': 'bbb'})
self.assertJSON(response, {'errors': {
'from': 'must be an epoch timestamp.',
'until': 'must be an epoch timestamp.',
}}, status_code=400)
response = self.app.get(url, query_string={'query': 'foo',
'format': 'other'})
self.assertJSON(response, {'errors': {
'format': 'unrecognized format: "other".',
}}, status_code=400)
def test_expand(self):
url = '/metrics/expand'
response = self.app.get(url)
self.assertJSON(response, {'errors':
{'query': 'this parameter is required.'}},
status_code=400)
response = self.app.get(url, query_string={'query': 'test'})
self.assertJSON(response, {'results': []})
self._create_dbs()
response = self.app.get(url, query_string={'query': 'test'})
self.assertJSON(response, {'results': ['test']})
response = self.app.get(url, query_string={'query': 'test.*'})
self.assertJSON(response, {'results': ['test.bar', 'test.foo',
'test.wat']})
response = self.app.get(url, query_string={'query': 'test.*',
'leavesOnly': 1})
self.assertJSON(response, {'results': ['test.foo']})
response = self.app.get(url, query_string={'query': 'test.*',
'groupByExpr': 1})
self.assertJSON(response, {'results': {'test.*': ['test.bar',
'test.foo',
'test.wat']}})
def test_expand_validation(self):
url = '/metrics/expand'
response = self.app.get(url, query_string={'query': 'foo',
'leavesOnly': 'bbb',
'groupByExpr': 'aaa'})
self.assertJSON(response, {'errors': {
'groupByExpr': 'must be 0 or 1.',
'leavesOnly': 'must be 0 or 1.',
}}, status_code=400)
def test_noop(self):
url = '/dashboard/find'
response = self.app.get(url)
self.assertJSON(response, {'dashboards': []})
url = '/dashboard/load/foo'
response = self.app.get(url)
self.assertJSON(response, {'error': "Dashboard 'foo' does not exist."},
status_code=404)
url = '/events/get_data'
response = self.app.get(url)
self.assertJSON(response, [])
def test_search(self):
url = '/metrics/search'
response = self.app.get(url, query_string={'max_results': 'a'})
self.assertJSON(response, {'errors': {
'max_results': 'must be an integer.',
'query': 'this parameter is required.'}}, status_code=400)
response = self.app.get(url, query_string={'query': 'test'})
self.assertJSON(response, {'metrics': []})
def test_search_index(self):
response = self.app.get('/metrics/search',
query_string={'query': 'collectd.*'})
self.assertJSON(response, {'metrics': []})
parent = os.path.join(WHISPER_DIR, 'collectd')
os.makedirs(parent)
for metric in ['load', 'memory', 'cpu']:
db = os.path.join(parent, '{0}.wsp'.format(metric))
whisper.create(db, [(1, 60)])
response = self.app.put('/index')
self.assertJSON(response, {'success': True, 'entries': 3})
response = self.app.get('/metrics/search',
query_string={'query': 'collectd.*'})
self.assertJSON(response, {'metrics': [
{'is_leaf': False, 'path': None},
{'is_leaf': True, 'path': 'collectd.cpu'},
{'is_leaf': True, 'path': 'collectd.load'},
{'is_leaf': True, 'path': 'collectd.memory'},
]})
def test_metrics_index(self):
url = '/metrics/index.json'
response = self.app.get(url)
self.assertJSON(response, [])
self.assertEqual(response.headers['Content-Type'], 'application/json')
response = self.app.get(url, query_string={'jsonp': 'foo'})
self.assertEqual(response.data, b'foo([])')
self.assertEqual(response.headers['Content-Type'], 'text/javascript')
parent = os.path.join(WHISPER_DIR, 'collectd')
os.makedirs(parent)
for metric in ['load', 'memory', 'cpu']:
db = os.path.join(parent, '{0}.wsp'.format(metric))
whisper.create(db, [(1, 60)])
response = self.app.get(url)
self.assertJSON(response, [
u'collectd.cpu',
u'collectd.load',
u'collectd.memory',
])
response = self.app.get(url, query_string={'jsonp': 'bar'})
self.assertEqual(
response.data,
b'bar(["collectd.cpu", "collectd.load", "collectd.memory"])')
| {
"content_hash": "6ce1708e090de35c1604689fcad363f2",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 79,
"avg_line_length": 36.5098814229249,
"alnum_prop": 0.4387788242936018,
"repo_name": "alphapigger/graphite-api",
"id": "18c195274d4106b2227fe5f8ef1c5ce4b6e48675",
"size": "9237",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/test_metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "343793"
},
{
"name": "Shell",
"bytes": "6916"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Alarm_Item_Details'
db.delete_table(u'ddsc_core_alarm_item_details')
# Deleting model 'Alarm_Property'
db.delete_table(u'ddsc_core_alarm_property')
# Deleting field 'Alarm_Item.value'
db.delete_column(u'ddsc_core_alarm_item', 'value')
# Deleting field 'Alarm_Item.property'
db.delete_column(u'ddsc_core_alarm_item', 'property_id')
# Adding field 'Alarm_Item.value_type'
db.add_column(u'ddsc_core_alarm_item', 'value_type',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Adding field 'Alarm_Item.value_number'
db.add_column(u'ddsc_core_alarm_item', 'value_number',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'Alarm_Item.value_text'
db.add_column(u'ddsc_core_alarm_item', 'value_text',
self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True),
keep_default=False)
# Adding field 'Alarm_Item.timeseries'
db.add_column(u'ddsc_core_alarm_item', 'timeseries',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ddsc_core.Timeseries'], null=True, blank=True),
keep_default=False)
# Adding field 'Alarm_Item.logicalgroup'
db.add_column(u'ddsc_core_alarm_item', 'logicalgroup',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ddsc_core.LogicalGroup'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding model 'Alarm_Item_Details'
db.create_table(u'ddsc_core_alarm_item_details', (
('alarm_details', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['ddsc_core.Alarm_Item'], unique=True, primary_key=True)),
('logicalgroup', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ddsc_core.LogicalGroup'])),
('location', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ddsc_core.Location'])),
('timeseries', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ddsc_core.Timeseries'])),
))
db.send_create_signal(u'ddsc_core', ['Alarm_Item_Details'])
# Adding model 'Alarm_Property'
db.create_table(u'ddsc_core_alarm_property', (
('value_type', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=80)),
))
db.send_create_signal(u'ddsc_core', ['Alarm_Property'])
# Adding field 'Alarm_Item.value'
db.add_column(u'ddsc_core_alarm_item', 'value',
self.gf('django.db.models.fields.FloatField')(default=0.0),
keep_default=False)
# Adding field 'Alarm_Item.property'
db.add_column(u'ddsc_core_alarm_item', 'property',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['ddsc_core.Alarm_Property']),
keep_default=False)
# Deleting field 'Alarm_Item.value_type'
db.delete_column(u'ddsc_core_alarm_item', 'value_type')
# Deleting field 'Alarm_Item.value_number'
db.delete_column(u'ddsc_core_alarm_item', 'value_number')
# Deleting field 'Alarm_Item.value_text'
db.delete_column(u'ddsc_core_alarm_item', 'value_text')
# Deleting field 'Alarm_Item.timeseries'
db.delete_column(u'ddsc_core_alarm_item', 'timeseries_id')
# Deleting field 'Alarm_Item.logicalgroup'
db.delete_column(u'ddsc_core_alarm_item', 'logicalgroup_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ddsc_core.alarm': {
'Meta': {'object_name': 'Alarm'},
'active_stutus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_cr': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.UserGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logical_check': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'message_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'previous_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'single_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'default': "u'this is a alarm message template'"}),
'urgency': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
u'ddsc_core.alarm_item': {
'Meta': {'object_name': 'Alarm_Item'},
'alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']"}),
'comparision': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logicalgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.LogicalGroup']", 'null': 'True', 'blank': 'True'}),
'timeseries': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Timeseries']", 'null': 'True', 'blank': 'True'}),
'value_number': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'ddsc_core.compartment': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
},
u'ddsc_core.folder': {
'Meta': {'object_name': 'Folder'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.idmapping': {
'Meta': {'object_name': 'IdMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'timeseries': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Timeseries']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.ipaddress': {
'Meta': {'object_name': 'IPAddress'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.location': {
'Meta': {'object_name': 'Location'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geometry_precision': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'point_geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '4258', 'null': 'True', 'blank': 'True'}),
'real_geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': '4258', 'null': 'True', 'blank': 'True'}),
'relative_location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
u'ddsc_core.locationtype': {
'Meta': {'object_name': 'LocationType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'location_types'", 'blank': 'True', 'to': u"orm['ddsc_core.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
},
u'ddsc_core.logicalgroup': {
'Meta': {'ordering': "[u'owner', u'name']", 'unique_together': "((u'owner', u'name'),)", 'object_name': 'LogicalGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']"}),
'timeseries': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Timeseries']", 'symmetrical': 'False', 'blank': 'True'})
},
u'ddsc_core.logicalgroupedge': {
'Meta': {'unique_together': "((u'child', u'parent'),)", 'object_name': 'LogicalGroupEdge'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'childs'", 'to': u"orm['ddsc_core.LogicalGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parents'", 'to': u"orm['ddsc_core.LogicalGroup']"})
},
u'ddsc_core.manufacturer': {
'Meta': {'object_name': 'Manufacturer'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'ddsc_core.measuringdevice': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringDevice'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.measuringmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titel': ('django.db.models.fields.CharField', [], {'max_length': '600', 'null': 'True'})
},
u'ddsc_core.parameter': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Parameter'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'cas_number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sikb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True'})
},
u'ddsc_core.processingmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ProcessingMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.referenceframe': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ReferenceFrame'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.source': {
'Meta': {'object_name': 'Source'},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Manufacturer']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'source_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
u'ddsc_core.sourcegroup': {
'Meta': {'object_name': 'SourceGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Source']", 'symmetrical': 'False'})
},
u'ddsc_core.timeseries': {
'Meta': {'object_name': 'Timeseries'},
'compartment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Compartment']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'timeseries'", 'symmetrical': 'False', 'to': "orm['lizard_security.DataSet']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'first_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_value_number': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'latest_value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'latest_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'timeseries'", 'null': 'True', 'to': u"orm['ddsc_core.Location']"}),
'measuring_device': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringDevice']", 'null': 'True', 'blank': 'True'}),
'measuring_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringMethod']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']", 'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Parameter']"}),
'processing_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ProcessingMethod']", 'null': 'True', 'blank': 'True'}),
'reference_frame': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ReferenceFrame']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Source']", 'null': 'True', 'blank': 'True'}),
'supplying_systems': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'through': u"orm['ddsc_core.IdMapping']", 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Unit']"}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'}),
'validate_diff_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_diff_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_max_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_max_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_min_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_min_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'ddsc_core.timeseriesgroup': {
'Meta': {'object_name': 'TimeseriesGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'parameters': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Parameter']", 'symmetrical': 'False'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Source']", 'symmetrical': 'False'})
},
u'ddsc_core.unit': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Unit'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'conversion_factor': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'lizard_security.dataowner': {
'Meta': {'ordering': "['name']", 'object_name': 'DataOwner'},
'data_managers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'remarks': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['owner', 'name']", 'unique_together': "(('owner', 'name'),)", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']", 'null': 'True', 'blank': 'True'})
},
'lizard_security.usergroup': {
'Meta': {'object_name': 'UserGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'managed_user_groups'", 'blank': 'True', 'to': "orm['auth.User']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_group_memberships'", 'blank': 'True', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
}
}
complete_apps = ['ddsc_core'] | {
"content_hash": "6d95948ab62dd4e88547ccdca86218bb",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 200,
"avg_line_length": 74.7,
"alnum_prop": 0.5544771679309832,
"repo_name": "ddsc/ddsc-core",
"id": "dd15a84c7c97cb83582f7b4a023c5a16c38e8996",
"size": "26916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddsc_core/migrations/0052_auto__del_alarm_item_details__del_alarm_property__del_field_alarm_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1242"
},
{
"name": "Python",
"bytes": "1832893"
}
],
"symlink_target": ""
} |
from flask import Flask
import requests
requests_service = requests.Session()
server = Flask(__name__)
server.config['SERVER_NAME'] = '127.0.0.1:5000'
# from app.simple_blueprint import simple_page
from app import home, clicktrack, performance, sql, twitter_api
# server.register_blueprint(simple_page, url_prefix='/nicole')
server.register_blueprint(home.blueprint, url_prefix='')
server.register_blueprint(clicktrack.blueprint, url_prefix='/click')
server.register_blueprint(performance.blueprint, url_prefix='/performance')
server.register_blueprint(sql.blueprint, url_prefix='')
server.register_blueprint(twitter_api.blueprint, url_prefix='/twitter')
# print('app.url_map: %s' % (server.url_map,))
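# Illustrative run sketch (not part of the original module; assumes this
# package is importable as ``app``):
#
#     from app import server
#     if __name__ == '__main__':
#         server.run(host='127.0.0.1', port=5000)
#
# SERVER_NAME above pins URL generation to 127.0.0.1:5000, so a dev server
# should bind to the same host and port.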
| {
"content_hash": "c8a011abf777c115e77c4197efae01da",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 39.22222222222222,
"alnum_prop": 0.7662889518413598,
"repo_name": "buckbaskin/Insight",
"id": "2b0aece51e5c36f16a0af35f12f2a0ba3f7572f5",
"size": "706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskserver/app/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1160"
},
{
"name": "HTML",
"bytes": "4536"
},
{
"name": "JavaScript",
"bytes": "12454"
},
{
"name": "Python",
"bytes": "56284"
},
{
"name": "Shell",
"bytes": "934"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from stripe.stripe_object import StripeObject
class CreditNoteLineItem(StripeObject):
OBJECT_NAME = "credit_note_line_item"
| {
"content_hash": "f6408c4766f0c5f4c1f0d7f3ed7ad7e0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 64,
"avg_line_length": 28,
"alnum_prop": 0.7857142857142857,
"repo_name": "stripe/stripe-python",
"id": "6e5eab5993c7956f19b45d354bf400a56f12569c",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stripe/api_resources/credit_note_line_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1198"
},
{
"name": "Python",
"bytes": "748390"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui
class Ui_RAddNuclideDialog(object):
def setupUi(self, RAddNuclideDialog):
RAddNuclideDialog.setObjectName("RAddNuclideDialog")
RAddNuclideDialog.setWindowModality(QtCore.Qt.NonModal)
RAddNuclideDialog.resize(640, 480)
RAddNuclideDialog.setMinimumSize(QtCore.QSize(640, 480))
RAddNuclideDialog.setSizeGripEnabled(True)
self.verticalLayout_3 = QtGui.QVBoxLayout(RAddNuclideDialog)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.topLayout = QtGui.QHBoxLayout()
self.topLayout.setObjectName("topLayout")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName("formLayout_2")
self.elementLabel = QtGui.QLabel(RAddNuclideDialog)
self.elementLabel.setObjectName("elementLabel")
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.elementLabel)
self.elementComboBox = QtGui.QComboBox(RAddNuclideDialog)
self.elementComboBox.setEditable(True)
self.elementComboBox.setMaxVisibleItems(20)
self.elementComboBox.setInsertPolicy(QtGui.QComboBox.InsertAtBottom)
self.elementComboBox.setMinimumContentsLength(15)
self.elementComboBox.setFrame(True)
self.elementComboBox.setObjectName("elementComboBox")
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.elementComboBox)
self.atomicMassLabel = QtGui.QLabel(RAddNuclideDialog)
self.atomicMassLabel.setObjectName("atomicMassLabel")
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.atomicMassLabel)
self.atomicMassSpinBox = QtGui.QSpinBox(RAddNuclideDialog)
self.atomicMassSpinBox.setAccelerated(True)
self.atomicMassSpinBox.setMinimum(1)
self.atomicMassSpinBox.setMaximum(999)
self.atomicMassSpinBox.setProperty("value", 1)
self.atomicMassSpinBox.setObjectName("atomicMassSpinBox")
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.atomicMassSpinBox)
self.atomicNumberLabel = QtGui.QLabel(RAddNuclideDialog)
self.atomicNumberLabel.setObjectName("atomicNumberLabel")
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.atomicNumberLabel)
self.atomicNumberSpinBox = QtGui.QSpinBox(RAddNuclideDialog)
self.atomicNumberSpinBox.setEnabled(False)
self.atomicNumberSpinBox.setMaximum(999)
self.atomicNumberSpinBox.setObjectName("atomicNumberSpinBox")
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.atomicNumberSpinBox)
self.verticalLayout.addLayout(self.formLayout_2)
self.errorLabel = QtGui.QLabel(RAddNuclideDialog)
self.errorLabel.setEnabled(True)
self.errorLabel.setStyleSheet("QLabel {\n"
" background: url(:/Radiance/icons/error.png);\n"
" background-position: center left;\n"
" background-repeat: no-repeat;\n"
" padding-left: 20px;\n"
"}")
self.errorLabel.setFrameShape(QtGui.QFrame.NoFrame)
self.errorLabel.setObjectName("errorLabel")
self.verticalLayout.addWidget(self.errorLabel)
self.topLayout.addLayout(self.verticalLayout)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.topLayout.addItem(spacerItem)
self.verticalLayout_3.addLayout(self.topLayout)
self.nuclideToolBox = QtGui.QToolBox(RAddNuclideDialog)
self.nuclideToolBox.setFrameShape(QtGui.QFrame.StyledPanel)
self.nuclideToolBox.setObjectName("nuclideToolBox")
self.gammaSpectrumPageWidget = QtGui.QWidget()
self.gammaSpectrumPageWidget.setGeometry(QtCore.QRect(0, 0, 620, 298))
self.gammaSpectrumPageWidget.setObjectName("gammaSpectrumPageWidget")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.gammaSpectrumPageWidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.gammaSpectrumTableView = QtGui.QTableView(self.gammaSpectrumPageWidget)
self.gammaSpectrumTableView.setEditTriggers(QtGui.QAbstractItemView.AllEditTriggers)
self.gammaSpectrumTableView.setTabKeyNavigation(False)
self.gammaSpectrumTableView.setAlternatingRowColors(True)
self.gammaSpectrumTableView.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.gammaSpectrumTableView.setCornerButtonEnabled(False)
self.gammaSpectrumTableView.setObjectName("gammaSpectrumTableView")
self.verticalLayout_2.addWidget(self.gammaSpectrumTableView)
self.nuclideToolBox.addItem(self.gammaSpectrumPageWidget, "")
self.verticalLayout_3.addWidget(self.nuclideToolBox)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.correctnessErrorLabel = QtGui.QLabel(RAddNuclideDialog)
self.correctnessErrorLabel.setEnabled(True)
self.correctnessErrorLabel.setStyleSheet("QLabel {\n"
" background: url(:/Radiance/icons/error.png);\n"
" background-position: center left;\n"
" background-repeat: no-repeat;\n"
" padding-left: 20px;\n"
"}")
self.correctnessErrorLabel.setFrameShape(QtGui.QFrame.NoFrame)
self.correctnessErrorLabel.setObjectName("correctnessErrorLabel")
self.horizontalLayout.addWidget(self.correctnessErrorLabel)
self.addButton = QtGui.QPushButton(RAddNuclideDialog)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/Radiance/icons/disk.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.addButton.setIcon(icon)
self.addButton.setAutoDefault(False)
self.addButton.setObjectName("addButton")
self.horizontalLayout.addWidget(self.addButton)
self.cancelButton = QtGui.QPushButton(RAddNuclideDialog)
self.cancelButton.setAutoDefault(False)
self.cancelButton.setObjectName("cancelButton")
self.horizontalLayout.addWidget(self.cancelButton)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.retranslateUi(RAddNuclideDialog)
self.nuclideToolBox.setCurrentIndex(0)
QtCore.QObject.connect(self.cancelButton, QtCore.SIGNAL("clicked()"), RAddNuclideDialog.reject)
QtCore.QMetaObject.connectSlotsByName(RAddNuclideDialog)
def retranslateUi(self, RAddNuclideDialog):
RAddNuclideDialog.setWindowTitle(QtGui.QApplication.translate("RAddNuclideDialog", "Add nuclide", None, QtGui.QApplication.UnicodeUTF8))
self.elementLabel.setText(QtGui.QApplication.translate("RAddNuclideDialog", "Element", None, QtGui.QApplication.UnicodeUTF8))
self.atomicMassLabel.setText(QtGui.QApplication.translate("RAddNuclideDialog", "Atomic mass", None, QtGui.QApplication.UnicodeUTF8))
self.atomicNumberLabel.setText(QtGui.QApplication.translate("RAddNuclideDialog", "Atomic number", None, QtGui.QApplication.UnicodeUTF8))
self.errorLabel.setText(QtGui.QApplication.translate("RAddNuclideDialog", "Already exists.", None, QtGui.QApplication.UnicodeUTF8))
self.nuclideToolBox.setItemText(self.nuclideToolBox.indexOf(self.gammaSpectrumPageWidget), QtGui.QApplication.translate("RAddNuclideDialog", "Gamma spectrum", None, QtGui.QApplication.UnicodeUTF8))
self.correctnessErrorLabel.setText(QtGui.QApplication.translate("RAddNuclideDialog", "Check the correctness of data input.", None, QtGui.QApplication.UnicodeUTF8))
self.addButton.setText(QtGui.QApplication.translate("RAddNuclideDialog", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.cancelButton.setText(QtGui.QApplication.translate("RAddNuclideDialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
import radiance_rc
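# Usage sketch (illustrative; not part of the generated file): classes
# produced by pyuic4 are plain setup helpers, so a dialog built from this
# class would typically be wired up like:
#
#     app = QtGui.QApplication([])
#     dialog = QtGui.QDialog()
#     ui = Ui_RAddNuclideDialog()
#     ui.setupUi(dialog)
#     dialog.show()
#     app.exec_()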
| {
"content_hash": "ea5c21e2a44acb3658c6128e5415faf1",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 205,
"avg_line_length": 64.15873015873017,
"alnum_prop": 0.7542058386937159,
"repo_name": "tetra5/radiance",
"id": "c16b600c48d06e3364c89582e10c39b2c6a50f43",
"size": "8336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/gui/radianceaddnuclidedialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2953"
},
{
"name": "Python",
"bytes": "465629"
},
{
"name": "Shell",
"bytes": "635"
},
{
"name": "TypeScript",
"bytes": "37354"
}
],
"symlink_target": ""
} |
from models.models import Topic, TopicInTopic
import json
def visual(vis, params):
    """Serialize the topic hierarchy of ``vis.model`` as nested JSON circles."""
    root_topic = Topic.objects.get(model=vis.model, layer=0)
    return json.dumps({"children": build_circles(vis.model, root_topic)})
def build_circles(model, topic):
    """Recursively collect child topics, or leaf documents, below ``topic``."""
answer = []
if topic.layer == model.layers_count:
for document in topic.get_documents():
answer.append({"id": document.id, "size": 1})
else:
relations = TopicInTopic.objects.filter(parent=topic)
for relation in relations:
child = relation.child
answer.append(
{"name": child.title, "children": build_circles(model, child)})
return answer
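# Illustrative example (derived from the recursion above): for a two-layer
# model, visual() serializes one dict per child topic and one leaf per
# document, e.g.
#
#     {"children": [
#         {"name": "Topic A", "children": [
#             {"id": 42, "size": 1},
#             {"id": 43, "size": 1}
#         ]}
#     ]}
#
# which matches the input format expected by circle-packing layouts such as
# the ones in d3.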
| {
"content_hash": "af7f16721d76ded7d3055434b3fde833",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 32.61904761904762,
"alnum_prop": 0.637956204379562,
"repo_name": "bigartm/visartm",
"id": "f1f74d0594936b459fa5a9762ae3231fff610a6a",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algo/visualizations/circles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2753"
},
{
"name": "CSS",
"bytes": "3626"
},
{
"name": "HTML",
"bytes": "202313"
},
{
"name": "JavaScript",
"bytes": "74658"
},
{
"name": "Python",
"bytes": "332341"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
urlpatterns = patterns('contact.views',
(r'^contactForm', 'contactForm')
) | {
"content_hash": "e32674f04b7ee302686189bb34426860",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 23.8,
"alnum_prop": 0.7226890756302521,
"repo_name": "pombredanne/algos-urv",
"id": "4d69848d584eb196af7fe987fd813e5733bbebea",
"size": "119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contact/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from flask import request
from .core import db
from .model import PersonalAccessToken
class PersonalAccessTokenManager(object):
    def __init__(self, app=None):
        self.app = app
        self.before_request_funcs = []
        self.current_user_id = None
        # initialize so load_user() can raise its intended NotImplementedError
        # instead of an AttributeError when no loader was registered
        self.user_loader_callback = None
        if app is not None:
            self.init_app(app)
def init_app(self, app):
db.app = app
db.init_app(app)
        app.config.setdefault('PERSONAL_ACCESS_TOKEN_ADMIN_API_PREFIX', '/personal_access_token/api')
        app.config.setdefault('PERSONAL_ACCESS_TOKEN_ADMIN_PREFIX', '/personal_access_token')
        from .api import bp as api_bp
        api_bp.app = self
        # honor the configurable prefixes instead of hard-coding them
        app.register_blueprint(api_bp, url_prefix=app.config['PERSONAL_ACCESS_TOKEN_ADMIN_API_PREFIX'])
        from .admin import bp as admin_bp
        admin_bp.app = self
        app.register_blueprint(admin_bp, url_prefix=app.config['PERSONAL_ACCESS_TOKEN_ADMIN_PREFIX'])
def create_all(self):
db.create_all()
def call_before_request_funcs(self):
for func in self.before_request_funcs:
func()
def before_request(self, f):
self.before_request_funcs.append(f)
return f
def user_loader(self, f):
self.user_loader_callback = f
return f
def load_user(self, user_id):
if not self.user_loader_callback:
raise NotImplementedError('You must implement `user_loader`.')
return self.user_loader_callback(user_id)
def load_user_by_token(self, token):
token = PersonalAccessToken.query.filter_by(token=token).first()
if not token:
return
token.use()
return self.load_user(token.user_id)
def load_user_from_request(self):
headers = request.headers
authorization = headers.get('Authorization')
if not authorization or not authorization.startswith('Bearer '):
return
token = authorization[len('Bearer '):]
return self.load_user_by_token(token)
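# Illustrative wiring sketch (not part of this module; the User model and the
# import path are assumptions based on this file living in
# flask_personal_access_token/app.py):
#
#     from flask import Flask, g
#     from flask_personal_access_token.app import PersonalAccessTokenManager
#
#     app = Flask(__name__)
#     manager = PersonalAccessTokenManager(app)
#
#     @manager.user_loader
#     def load_user(user_id):
#         return User.query.get(user_id)  # hypothetical User model
#
#     @app.before_request
#     def authenticate():
#         # picks up "Authorization: Bearer <token>" headers
#         g.current_user = manager.load_user_from_request()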
| {
"content_hash": "ab41333fdbf49aa5297b0df08de7f437",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 101,
"avg_line_length": 31.661290322580644,
"alnum_prop": 0.6265919510952623,
"repo_name": "soasme/flask-personal-access-token",
"id": "aa122c9da7246b487ca8beb596fd375d8568d0b6",
"size": "1988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_personal_access_token/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1502"
},
{
"name": "JavaScript",
"bytes": "1923"
},
{
"name": "Python",
"bytes": "9683"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
def try_parse_int(value, default=None):
try:
return int(value)
except (TypeError, ValueError):
return default
def parse_sg_attributes_from_dict(sg_attributes):
ip_protocol = sg_attributes.get('IpProtocol', [None])[0]
from_port = sg_attributes.get('FromPort', [None])[0]
to_port = sg_attributes.get('ToPort', [None])[0]
ip_ranges = []
ip_ranges_tree = sg_attributes.get('IpRanges') or {}
for ip_range_idx in sorted(ip_ranges_tree.keys()):
ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])
source_groups = []
source_group_ids = []
groups_tree = sg_attributes.get('Groups') or {}
for group_idx in sorted(groups_tree.keys()):
group_dict = groups_tree[group_idx]
if 'GroupId' in group_dict:
source_group_ids.append(group_dict['GroupId'][0])
elif 'GroupName' in group_dict:
source_groups.append(group_dict['GroupName'][0])
return ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids
class SecurityGroups(BaseResponse):
def _process_rules_from_querystring(self):
group_name_or_id = (self._get_param('GroupName') or
self._get_param('GroupId'))
querytree = {}
for key, value in self.querystring.items():
key_splitted = key.split('.')
key_splitted = [try_parse_int(e, e) for e in key_splitted]
d = querytree
for subkey in key_splitted[:-1]:
if subkey not in d:
d[subkey] = {}
d = d[subkey]
d[key_splitted[-1]] = value
if 'IpPermissions' not in querytree:
# Handle single rule syntax
ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(querytree)
yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,
source_groups, source_group_ids)
ip_permissions = querytree.get('IpPermissions') or {}
for ip_permission_idx in sorted(ip_permissions.keys()):
ip_permission = ip_permissions[ip_permission_idx]
ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(ip_permission)
yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,
source_groups, source_group_ids)
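    # Illustrative example (derived from the parsing above): a request such as
    #
    #   Action=AuthorizeSecurityGroupIngress
    #   &GroupId=sg-123
    #   &IpPermissions.1.IpProtocol=tcp
    #   &IpPermissions.1.FromPort=22
    #   &IpPermissions.1.ToPort=22
    #   &IpPermissions.1.IpRanges.1.CidrIp=10.0.0.0/8
    #
    # produces a querytree like
    #
    #   {'IpPermissions': {1: {'IpProtocol': ['tcp'], 'FromPort': ['22'],
    #                          'ToPort': ['22'],
    #                          'IpRanges': {1: {'CidrIp': ['10.0.0.0/8']}}}},
    #    'GroupId': ['sg-123'], ...}
    #
    # and the generator yields one rule tuple per numbered IpPermissions entry.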
def authorize_security_group_egress(self):
if self.is_not_dryrun('GrantSecurityGroupEgress'):
for args in self._process_rules_from_querystring():
self.ec2_backend.authorize_security_group_egress(*args)
return AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE
def authorize_security_group_ingress(self):
if self.is_not_dryrun('GrantSecurityGroupIngress'):
for args in self._process_rules_from_querystring():
self.ec2_backend.authorize_security_group_ingress(*args)
            return AUTHORIZE_SECURITY_GROUP_INGRESS_RESPONSE
def create_security_group(self):
name = self._get_param('GroupName')
description = self._get_param('GroupDescription')
vpc_id = self._get_param('VpcId')
if self.is_not_dryrun('CreateSecurityGroup'):
group = self.ec2_backend.create_security_group(
name, description, vpc_id=vpc_id)
template = self.response_template(CREATE_SECURITY_GROUP_RESPONSE)
return template.render(group=group)
def delete_security_group(self):
# TODO this should raise an error if there are instances in the group.
# See
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html
name = self._get_param('GroupName')
sg_id = self._get_param('GroupId')
if self.is_not_dryrun('DeleteSecurityGroup'):
if name:
self.ec2_backend.delete_security_group(name)
elif sg_id:
self.ec2_backend.delete_security_group(group_id=sg_id)
return DELETE_GROUP_RESPONSE
def describe_security_groups(self):
groupnames = self._get_multi_param("GroupName")
group_ids = self._get_multi_param("GroupId")
filters = filters_from_querystring(self.querystring)
groups = self.ec2_backend.describe_security_groups(
group_ids=group_ids,
groupnames=groupnames,
filters=filters
)
template = self.response_template(DESCRIBE_SECURITY_GROUPS_RESPONSE)
return template.render(groups=groups)
def revoke_security_group_egress(self):
if self.is_not_dryrun('RevokeSecurityGroupEgress'):
for args in self._process_rules_from_querystring():
success = self.ec2_backend.revoke_security_group_egress(*args)
if not success:
return "Could not find a matching egress rule", dict(status=404)
return REVOKE_SECURITY_GROUP_EGRESS_RESPONSE
def revoke_security_group_ingress(self):
if self.is_not_dryrun('RevokeSecurityGroupIngress'):
for args in self._process_rules_from_querystring():
self.ec2_backend.revoke_security_group_ingress(*args)
            return REVOKE_SECURITY_GROUP_INGRESS_RESPONSE
CREATE_SECURITY_GROUP_RESPONSE = """<CreateSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
<groupId>{{ group.id }}</groupId>
</CreateSecurityGroupResponse>"""
DELETE_GROUP_RESPONSE = """<DeleteSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteSecurityGroupResponse>"""
DESCRIBE_SECURITY_GROUPS_RESPONSE = """<DescribeSecurityGroupsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<securityGroupInfo>
{% for group in groups %}
<item>
<ownerId>123456789012</ownerId>
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
<groupDescription>{{ group.description }}</groupDescription>
{% if group.vpc_id %}
<vpcId>{{ group.vpc_id }}</vpcId>
{% endif %}
<ipPermissions>
{% for rule in group.ingress_rules %}
<item>
<ipProtocol>{{ rule.ip_protocol }}</ipProtocol>
{% if rule.from_port %}
<fromPort>{{ rule.from_port }}</fromPort>
{% endif %}
{% if rule.to_port %}
<toPort>{{ rule.to_port }}</toPort>
{% endif %}
<groups>
{% for source_group in rule.source_groups %}
<item>
<userId>123456789012</userId>
<groupId>{{ source_group.id }}</groupId>
<groupName>{{ source_group.name }}</groupName>
</item>
{% endfor %}
</groups>
<ipRanges>
{% for ip_range in rule.ip_ranges %}
<item>
<cidrIp>{{ ip_range }}</cidrIp>
</item>
{% endfor %}
</ipRanges>
</item>
{% endfor %}
</ipPermissions>
<ipPermissionsEgress>
{% for rule in group.egress_rules %}
<item>
<ipProtocol>{{ rule.ip_protocol }}</ipProtocol>
{% if rule.from_port %}
<fromPort>{{ rule.from_port }}</fromPort>
{% endif %}
{% if rule.to_port %}
<toPort>{{ rule.to_port }}</toPort>
{% endif %}
<groups>
{% for source_group in rule.source_groups %}
<item>
<userId>123456789012</userId>
<groupId>{{ source_group.id }}</groupId>
<groupName>{{ source_group.name }}</groupName>
</item>
{% endfor %}
</groups>
<ipRanges>
{% for ip_range in rule.ip_ranges %}
<item>
<cidrIp>{{ ip_range }}</cidrIp>
</item>
{% endfor %}
</ipRanges>
</item>
{% endfor %}
</ipPermissionsEgress>
<tagSet>
{% for tag in group.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</securityGroupInfo>
</DescribeSecurityGroupsResponse>"""
AUTHORIZE_SECURITY_GROUP_INGRESS_RESPONSE = """<AuthorizeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</AuthorizeSecurityGroupIngressResponse>"""
REVOKE_SECURITY_GROUP_INGRESS_RESPONSE = """<RevokeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</RevokeSecurityGroupIngressResponse>"""
AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE = """
<AuthorizeSecurityGroupEgressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</AuthorizeSecurityGroupEgressResponse>"""
REVOKE_SECURITY_GROUP_EGRESS_RESPONSE = """<RevokeSecurityGroupEgressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</RevokeSecurityGroupEgressResponse>"""
| {
"content_hash": "9fd42951b74fdab96e51e928775fd92c",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 134,
"avg_line_length": 42.691699604743086,
"alnum_prop": 0.5566151282288677,
"repo_name": "botify-labs/moto",
"id": "4aecfcf78b81ada34cd11352974a4ab79a1edf6f",
"size": "10801",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "moto/ec2/responses/security_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1143"
},
{
"name": "Python",
"bytes": "4578457"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
import os
import json
import logging
import re
import requests
import sys
import traceback
from flask import Flask, request, abort
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
# Setup Logging
logging.basicConfig(level="INFO", format='%(levelname)s: %(message)s')
LOG = logging.getLogger("deploy_seldon")
def apply_oid_token_monkey_patch():
LOG.warning("applying monkey-patch for https://github.com/kubernetes-client/python/issues/525")
import base64
import json
import kubernetes
from datetime import datetime, timezone
from kubernetes.config.kube_config import _is_expired
def load_oid_token_patched(self, provider):
if 'auth-provider' not in self._user:
return
provider = self._user['auth-provider']
if 'name' not in provider or 'config' not in provider or provider['name'] != 'oidc':
return
parts = provider['config']['id-token'].split('.')
if len(parts) != 3: # Not a valid JWT
return None
padding = (4 - len(parts[1]) % 4) * '='
jwt_attributes = json.loads(base64.b64decode(parts[1] + padding).decode('utf-8'))
expire = jwt_attributes.get('exp')
if (expire is not None) and _is_expired(datetime.fromtimestamp(expire, tz=timezone.utc)):
self._refresh_oidc(provider)
if self._config_persister:
self._config_persister(self._config.value)
self.token = "Bearer %s" % provider['config']['id-token']
return self.token
kubernetes.config.kube_config.KubeConfigLoader._load_oid_token = load_oid_token_patched
def load_kube_config(params):
# from six import PY3
# PY3 = sys.version_info.major == 3
#
# # apply monkey-patch for kubernetes client OIDC authentication issue 525 ("binascii.Error: Incorrect padding")
# # before importing client and config from kubernetes
# if PY3:
# apply_oid_token_monkey_patch()
from kubernetes import config
# kube_config_file = "kube/%s/kube-config.yml" % params["public_ip"]
config.load_incluster_config()
def get_api_client_v1():
import kubernetes
api_client_v1 = kubernetes.client.CoreV1Api()
return api_client_v1
def get_custom_objects_api_client():
import kubernetes
api_client = kubernetes.client.CustomObjectsApi()
return api_client
def get_seldon_spec(params):
with open("kube/seldon.json") as f:
spec = json.load(f)
# override the 'SELDON_DEPLOYMENT_ID' and the kubernetes service name with the 'deployment_name' from the parameters
deployment_name = get_deployment_name(params)
spec["metadata"]["name"] = deployment_name # 'fashion-deployment-id' ... SELDON_DEPLOYMENT_ID
spec["spec"]["name"] = deployment_name # 'fashion-service-name'
return spec
def update_seldon_spec(params):
spec = get_seldon_spec(params)
if "container_image" in params:
spec["spec"]["predictors"][0]["componentSpecs"][0]["spec"]["containers"][0]["image"] = params["container_image"]
env_list = spec["spec"]["predictors"][0]["componentSpecs"][0]["spec"]["containers"][0]["env"]
env_dict = {var["name"]: var["value"] for var in env_list}
env_dict["MODEL_FILE_NAME"] = params["model_file_name"]
env_dict["TRAINING_ID"] = params["training_id"]
env_dict["BUCKET_NAME"] = params["training_results_bucket"]
env_dict["BUCKET_ENDPOINT_URL"] = params["aws_endpoint_url"]
env_dict["BUCKET_KEY"] = params['aws_access_key_id']
env_dict["BUCKET_SECRET"] = params['aws_secret_access_key']
env_dict["MODEL_CLASS_NAME"] = params['model_class_name']
env_dict["MODEL_CLASS_FILE"] = params['model_class_file']
env_updated = [{"name": key, "value": value} for key, value in env_dict.items()]
spec["spec"]["predictors"][0]["componentSpecs"][0]["spec"]["containers"][0]["env"] = env_updated
return spec
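# Illustrative params payload (assumption; the values are placeholders):
# update_seldon_spec() expects at least the following keys, mirroring the
# environment variables written above, plus "public_ip" for URL reporting:
#
#     params = {
#         "deployment_name": "fashion-mnist",
#         "model_file_name": "model.pt",
#         "training_id": "training-abc123",
#         "training_results_bucket": "results-bucket",
#         "aws_endpoint_url": "https://s3.example.com",
#         "aws_access_key_id": "...",
#         "aws_secret_access_key": "...",
#         "model_class_name": "ModelClass",
#         "model_class_file": "model_class.py",
#         "container_image": "example/serve:latest",  # optional
#         "public_ip": "1.2.3.4",
#     }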
def deploy_seldon_spec(spec):
name = spec["metadata"]["name"]
namespace = "default" # TODO: the namespace should be configured or be figured out dynamically
plural = spec["kind"].lower()+"s" # TODO: verify the "rule" for constructing plural
group, version = spec["apiVersion"].split("/")
api_client = get_custom_objects_api_client()
api_response = api_client.list_namespaced_custom_object(group, version, namespace, plural)
if name in [deployment["metadata"]["name"] for deployment in api_response["items"]]:
api_response = api_client.patch_namespaced_custom_object(group, version, namespace, plural, name, spec)
else:
api_response = api_client.create_namespaced_custom_object(group, version, namespace, plural, spec)
# api_response_filtered = {key: api_response[key] for key in ["apiVersion", "kind"]}
LOG.info("%s ..." % str(api_response)[:160])
return api_response
def delete_deployment(params):
from kubernetes.client import V1DeleteOptions
spec = get_seldon_spec(params)
name = get_deployment_name(params) # spec["metadata"]["name"]
namespace = "default" # TODO: the namespace should be configured or be figured out dynamically
plural = spec["kind"].lower()+"s" # TODO: verify the "rule" for constructing plural
group, version = spec["apiVersion"].split("/")
del_opts = V1DeleteOptions()
api_client = get_custom_objects_api_client()
api_response = api_client.list_namespaced_custom_object(group, version, namespace, plural)
if name in [deployment["metadata"]["name"] for deployment in api_response["items"]]:
api_response = api_client.delete_namespaced_custom_object(group, version, namespace, plural, name, del_opts)
else:
LOG.error("Could not find the Seldon deployment '%s'" % name)
return {
"status": "Error",
"details": "Could not find a Seldon deployment with name '%s'" % name
}
# api_response_filtered = {key: api_response[key] for key in ["apiVersion", "kind"]}
LOG.info("%s ..." % str(api_response)[:160])
return api_response
def get_service_name(params):
# 'SELDON_DEPLOYMENT_ID': 'fashion-mnist'
# 'PREDICTOR_ID': 'single-model'
# 'PREDICTIVE_UNIT_ID': 'classifier'
seldon_spec = get_seldon_spec(params)
spec_name = get_deployment_name(params) # seldon_spec["spec"]["name"]) # 'fashion-mnist'
predictor_name = seldon_spec["spec"]["predictors"][0]["name"] # 'single-model'
graph_name = seldon_spec["spec"]["predictors"][0]["graph"]["name"] # 'classifier' (== containers[0].name)
pod_name_prefix = "%s-%s-%s" % (spec_name, predictor_name, graph_name)
return pod_name_prefix # 'fashion-mnist-single-model-classifier'
def get_pods(params):
api_client_v1 = get_api_client_v1()
pods = api_client_v1.list_namespaced_pod(namespace="default", watch=False)
pod_name_prefix = get_service_name(params) # 'fashion-mnist-single-model-classifier'
deployment_name = get_deployment_name(params)
training_id = params["training_id"]
def match_seldon_deployment(pod):
if not pod.metadata.name.startswith(pod_name_prefix):
return False
env = {var.name: var.value for var in pod.spec.containers[0].env}
return env["SELDON_DEPLOYMENT_ID"] == deployment_name and \
env["TRAINING_ID"] == training_id
return list(filter(match_seldon_deployment, pods.items))
def get_deployment_status(params):
# AVAILABLE (classifier URL actually available)
# READY (pod status, not url availability)
# UNKNOWN (no pods)
# ERROR (CrashLoopBackOff, Succeeded - if pod terminated, will not be restarted, this should not happen)
# PENDING (Creating..., ContainerCreating, ContainersReady, PodScheduled, Pending, Initialized, Running)
pods = get_pods(params)
if not pods:
status = get_deployment_state(params) or "Unknown"
else:
status_conditions = sorted(pods[0].status.conditions, key=lambda status: status.last_transition_time, reverse=True)
status = status_conditions[0].type
if status in ["Creating...", "ContainerCreating", "ContainersReady", "PodScheduled", "Initialized", "Running"]:
status = "Pending"
if status in ["CrashLoopBackOff", "Unschedulable", "Failed", "Succeeded"]:
status = "Error"
if status == "Ready":
status = "Available"
return status.upper()
def get_deployment_state(params):
deployment_name = get_deployment_name(params)
spec = get_seldon_spec(params)
group, version = spec["apiVersion"].split("/")
namespace = "default" # TODO: the namespace should be configured or be figured out dynamically
plural = spec["kind"].lower() + "s" # TODO: verify the "rule" for constructing plural
api_client = get_custom_objects_api_client()
api_response = api_client.list_namespaced_custom_object(group, version, namespace, plural)
if deployment_name in [deployment["metadata"]["name"] for deployment in api_response["items"]]:
deployed_spec = api_client.get_namespaced_custom_object(group, version, namespace, plural, deployment_name)
env_list = deployed_spec["spec"]["predictors"][0]["componentSpecs"][0]["spec"]["containers"][0]["env"]
env_dict = {var["name"]: var["value"] for var in env_list}
deployed_training_id = env_dict["TRAINING_ID"]
if params["training_id"] == deployed_training_id and "status" in deployed_spec:
return deployed_spec["status"]["state"].upper() # "CREATING...", "FAILED", ...
else:
LOG.info("Could not find a Seldon deployment with name '%s'" % deployment_name)
return None
def get_ambassador_port():
from kubernetes.client.rest import ApiException
api_client_v1 = get_api_client_v1()
try:
svc = api_client_v1.read_namespaced_service(namespace="default", name="seldon-core-ambassador")
except ApiException:
svc = api_client_v1.read_namespaced_service(namespace="default", name="ambassador")
port = svc.spec.ports[0].node_port
return port
def get_deployment_name(params):
# DNS-1123 sub-domain must consist of lower case alphanumeric characters (or Seldon will raise an exception)
regex = r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'
deployment_name = params["deployment_name"]
if not re.match(regex, deployment_name):
LOG.error("deployment name '%s' does not pass Seldon regex filter '%s'" % (deployment_name, regex))
params["deployment_name"] = deployment_name\
.replace("_", "-")\
.replace(" ", "-")\
.lower()
return params["deployment_name"]
def get_deployment_url(params):
# "http://${PUBLIC_IP}:${SELDON_AMBASSADOR_PORT}/seldon/${deployment_name}/api/v0.1/predictions"
ip = params["public_ip"]
port = get_ambassador_port()
name = get_deployment_name(params)
url = "http://%s:%s/seldon/%s/api/v0.1/predictions" % (ip, port, name)
return url
def is_deployment_available(params):
url = get_deployment_url(params)
response = requests.options(url)
return response.status_code == 200
def get_http_method(params):
# GET get deployment status
# POST create or patch existing deployment
# PUT patch existing deployment
# PATCH patch existing deployment
# DELETE delete deployment
# return params.get("__ow_method", "POST").upper() # TODO: default for local testing only, remove
if params.get("check_status_only", False):
return "GET"
if params.get("delete_deployment", False):
return "DELETE"
return params.get("__ow_method", "POST").upper()
def run_safe(params, method):
try:
load_kube_config(params)
# method = get_http_method(params)
if method in ("POST", "PATCH", "PUT"):
# if set(deployment_parameters).issubset(params.keys()):
LOG.info("deploying '%s' on cluster '%s'" % (params["deployment_name"], params["public_ip"]))
spec = update_seldon_spec(params)
deploy_result = deploy_seldon_spec(spec)
deployment_url = get_deployment_url(params)
deployment_state = deploy_result["status"]["state"].upper() if "status" in deploy_result \
else get_deployment_status(params)
result = {
"deployment_status": deployment_state,
"deployment_url": deployment_url,
"details": deploy_result
}
elif method == "GET":
LOG.info("get deployment status of '%s' on cluster '%s'" % (params["deployment_name"], params["public_ip"]))
deployment_url = get_deployment_url(params)
deployment_state = get_deployment_status(params)
result = {
"deployment_status": deployment_state, # "Error" "Creating Container" "CrashLoopBackOff" "Pending"
"deployment_url": deployment_url
}
elif method == "DELETE":
LOG.info("deleting deployment for '%s' on cluster '%s'" % (params["deployment_name"], params["public_ip"]))
delete_result = delete_deployment(params)
result = {
"status": delete_result["status"],
"details": delete_result["details"]
}
else:
result = {
"status": "Failed",
"message": "could not identify HTTP request method"
}
result["status"] = result.get("status", "Success")
return result
except Exception as e:
LOG.exception('%s: %s' % (e.__class__.__name__, str(e)))
return {
"status": "Error",
"details": {
"error": e.__class__.__name__,
"message": str(e),
"trace": traceback.format_exc()
}
}
@app.route('/', methods=['POST'])
def deployment_api_post():
if not request.json:
abort(400)
    return json.dumps(run_safe(request.json, "POST"))
@app.route('/', methods=['GET'])
def deployment_api_get():
    # request.args is a werkzeug MultiDict; to_dict() flattens it to a plain
    # dict of scalar values, which is what run_safe() expects
    return json.dumps(run_safe(request.args.to_dict(), "GET"))
@app.route('/', methods=['DELETE'])
def deployment_api_delete():
    return json.dumps(run_safe(request.args.to_dict(), "DELETE"))
@app.route('/', methods=['OPTIONS'])
def deployment_api_options():
return "200"
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
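# Example interaction (illustrative; values are placeholders): once running,
# a deployment can be managed through the JSON API defined above, e.g.
#
#   curl -X POST http://localhost:8080/ -H 'Content-Type: application/json' \
#        -d '{"deployment_name": "fashion-mnist", "public_ip": "1.2.3.4", ...}'
#   curl 'http://localhost:8080/?deployment_name=fashion-mnist&public_ip=1.2.3.4&...'
#
# POST deploys or patches, GET reports status, DELETE removes the deployment,
# matching the Flask routes above.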
| {
"content_hash": "0cb4ecf4b658f9387ac3ae9c1721e32b",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 123,
"avg_line_length": 40.3,
"alnum_prop": 0.6358560794044665,
"repo_name": "kubeflow/pipelines",
"id": "2f8c7bbfd119b166082e2936148466c1eb0be719",
"size": "15049",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "components/ibm-components/ffdl/serve/src/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
} |
"""A window that can be used for tools that handle the referene workflow."""
from PySide import QtGui
from PySide import QtCore
from jukeboxcore import reftrack
from jukeboxcore import djadapter
from jukeboxcore.gui.main import JB_MainWindow, get_icon
from jukeboxcore.gui import treemodel
from jukeboxcore.gui import djitemdata
from jukeboxcore.gui.widgetdelegate import WD_TreeView
from jukeboxcore.gui.widgets.reftrackwidget import ReftrackDelegate
from jukeboxcore.gui.widgets.browser import ListBrowser
from jukeboxcore.gui.reftrackitemdata import ReftrackSortFilterModel
from reftrackwin_ui import Ui_reftrack_mwin
from reftrackadder_ui import Ui_reftrackadder_mwin
class ReftrackWin(JB_MainWindow, Ui_reftrack_mwin):
"""Display reftracks in a view that can be filtered, sorted etc.
You can either add your own :class:`reftrack.Reftrack` objects to the root or
call :meth:`ReftrackWin.wrap_scene`.
"""
def __init__(self, refobjinter, root=None, parent=None, flags=0):
"""Initialize a new Reftrack window with the given refobjinter
:param root: the reftrackroot, if None is given, a default one is created
:type root: :class:`jukeboxcore.reftrack.ReftrackRoot`
:param refobjinter: the refobjinterface to use
:type refobjinter: :class:`reftrack.RefobjInterface`
:param parent: Optional - the parent of the window - default is None
:type parent: QWidget
:param flags: the window flags
:type flags: QtCore.Qt.WindowFlags
:raises: None
"""
super(ReftrackWin, self).__init__(parent, flags)
self.refobjinter = refobjinter
"""The :class:`reftrack.RefobjInterface` this window uses."""
self.root = root if root else self.create_root()
"""The :class:`reftrack.ReftrackRoot` this window uses."""
self.reftrackdelegate = ReftrackDelegate(self)
self.typecbmap = {}
"""Map a type to a checkboxes that indicates if the type should be shown"""
self.reftrackadderwin = None # the window to add new reftracks to the root
self.setupUi(self)
self.setup_ui()
self.setup_filter()
self.setup_signals()
def create_root(self, ):
"""Create a default reftrack root.
:returns: a reftrack root
:rtype: :class:`reftrack.ReftrackRoot`
:raises: None
"""
return reftrack.ReftrackRoot()
def setup_ui(self, ):
"""Setup the general ui
:returns: None
:rtype: None
:raises: None
"""
w = QtGui.QWidget(self)
w.setLayout(self.central_widget_vbox)
self.setCentralWidget(w)
self.reftrack_treev = WD_TreeView(parent=self)
self.reftrack_treev.setHeaderHidden(True)
self.central_widget_vbox.insertWidget(1, self.reftrack_treev)
self.setup_icons()
self.model = self.root.get_model()
self.proxy = self.create_proxy_model(self.model)
self.proxy.setFilterKeyColumn(-1) # filter all columns
self.proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.reftrack_treev.setModel(self.proxy)
self.reftrack_treev.setItemDelegate(self.reftrackdelegate)
# hide all columns but the first
cc = self.proxy.columnCount(QtCore.QModelIndex())
for i in range(1, cc):
self.reftrack_treev.setColumnHidden(i, True)
def create_proxy_model(self, model):
"""Create a sort filter proxy model for the given model
:param model: the model to wrap in a proxy
:type model: :class:`QtGui.QAbstractItemModel`
:returns: a new proxy model that can be used for sorting and filtering
:rtype: :class:`QtGui.QAbstractItemModel`
:raises: None
"""
proxy = ReftrackSortFilterModel(self)
proxy.setSourceModel(model)
model.rowsInserted.connect(self.sort_model)
return proxy
def setup_signals(self, ):
"""Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None
"""
self.showfilter_tb.toggled.connect(self.switch_showfilter_icon)
self.addnew_tb.clicked.connect(self.open_addnew_win)
self.search_le.editingFinished.connect(self.update_filter)
for cb in (self.loaded_checkb, self.unloaded_checkb, self.imported_checkb, self.empty_checkb,
self.newest_checkb, self.old_checkb, self.alien_checkb):
cb.toggled.connect(self.update_filter)
def setup_icons(self, ):
"""Set all icons on buttons
:returns: None
:rtype: None
:raises: None
"""
plus_icon = get_icon('glyphicons_433_plus_bright.png', asicon=True)
self.addnew_tb.setIcon(plus_icon)
def setup_filter(self, ):
"""Create a checkbox for every reftrack type so one can filter them
:returns: None
:rtype: None
:raises: None
"""
types = self.refobjinter.types.keys()
for i, t in enumerate(types):
cb = QtGui.QCheckBox("%s" % t)
cb.setChecked(True)
cb.toggled.connect(self.update_filter)
self.typecbmap[t] = cb
self.typefilter_grid.addWidget(cb, int(i / 4), i % 4)
def switch_showfilter_icon(self, toggled):
"""Switch the icon on the showfilter_tb
:param toggled: the state of the button
:type toggled: :class:`bool`
:returns: None
:rtype: None
:raises: None
"""
at = QtCore.Qt.DownArrow if toggled else QtCore.Qt.RightArrow
self.showfilter_tb.setArrowType(at)
def open_addnew_win(self, *args, **kwargs):
"""Open a new window so the use can choose to add new reftracks
:returns: None
:rtype: None
:raises: NotImplementedError
"""
if self.reftrackadderwin:
self.reftrackadderwin.close()
self.reftrackadderwin = ReftrackAdderWin(self.refobjinter, self.root, parent=self)
self.reftrackadderwin.destroyed.connect(self.addnewwin_destroyed)
self.reftrackadderwin.show()
def addnewwin_destroyed(self, *args, **kwargs):
"""Delete the internal reference to the reftrackadderwin
:returns: None
:rtype: None
:raises: None
"""
self.reftrackadderwin = None
def update_filter(self, *args, **kwargs):
"""Update the filter
:returns: None
:rtype: None
        :raises: None
"""
forbidden_statuses = []
if not self.loaded_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.LOADED)
if not self.unloaded_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.UNLOADED)
if not self.imported_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.IMPORTED)
if not self.empty_checkb.isChecked():
forbidden_statuses.append(None)
self.proxy.set_forbidden_statuses(forbidden_statuses)
forbidden_types = []
for typ, cb in self.typecbmap.items():
if not cb.isChecked():
forbidden_types.append(typ)
self.proxy.set_forbidden_types(forbidden_types)
forbidden_uptodate = []
if not self.old_checkb.isChecked():
forbidden_uptodate.append(False)
if not self.newest_checkb.isChecked():
forbidden_uptodate.append(True)
self.proxy.set_forbidden_uptodate(forbidden_uptodate)
forbidden_alien = [] if self.alien_checkb.isChecked() else [True]
self.proxy.set_forbidden_alien(forbidden_alien)
self.proxy.setFilterWildcard(self.search_le.text())
def sort_model(self, *args, **kwargs):
"""Sort the proxy model
:returns: None
:rtype: None
:raises: None
"""
self.proxy.sort(17) # sort the identifier
self.proxy.sort(2) # sort the element
self.proxy.sort(1) # sort the elementgrp
self.proxy.sort(0) # sort the types
def wrap_scene(self, ):
"""Wrap all reftracks in the scenen and get suggestions and display it in the view
:returns: None
:rtype: None
:raises: None
"""
reftrack.Reftrack.wrap_scene(self.root, self.refobjinter)
class ReftrackAdderWin(JB_MainWindow, Ui_reftrackadder_mwin):
    """A window for adding new reftracks to a reftrack treemodel.
"""
def __init__(self, refobjinter, root, parent=None, flags=0):
"""Initialize a new ReftrackAdder window with the given refobjinter that
will add new reftracks to the given root.
        :param refobjinter: the refobjinterface to use
        :type refobjinter: :class:`reftrack.RefobjInterface`
        :param root: the reftrack root the new reftracks are added to
        :type root: :class:`reftrack.ReftrackRoot`
        :param parent: Optional - the parent of the window - default is None
        :type parent: QWidget
        :param flags: the window flags
        :type flags: QtCore.Qt.WindowFlags
:raises: None
"""
super(ReftrackAdderWin, self).__init__(parent, flags)
self.refobjinter = refobjinter
self.root = root
self.setupUi(self)
self.setup_ui()
self.setup_signals()
def setup_ui(self, ):
"""Setup the general ui
:returns: None
:rtype: None
:raises: None
"""
plus_icon = get_icon('glyphicons_433_plus_bright.png', asicon=True)
self.add_tb.setIcon(plus_icon)
self.shot_browser = ListBrowser(4, parent=self, headers=["Project", "Sequence", "Shot", "Type"])
self.asset_browser = ListBrowser(4, parent=self, headers=["Project", "Assettype", "Asset", "Type"])
self.shotmodel = self.create_shot_model()
self.assetmodel = self.create_asset_model()
self.shot_browser.set_model(self.shotmodel)
self.asset_browser.set_model(self.assetmodel)
self.shot_vbox.addWidget(self.shot_browser)
self.asset_vbox.addWidget(self.asset_browser)
def setup_signals(self, ):
"""Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None
"""
self.add_tb.clicked.connect(self.add_selected)
def create_shot_model(self, ):
"""Return a treemodel with the levels: project, sequence, shot and reftrack type
:returns: a treemodel
:rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
:raises: None
"""
rootdata = treemodel.ListItemData(['Name'])
rootitem = treemodel.TreeItem(rootdata)
prjs = djadapter.projects.all()
for prj in prjs:
prjdata = djitemdata.ProjectItemData(prj)
prjitem = treemodel.TreeItem(prjdata, rootitem)
for seq in prj.sequence_set.all():
seqdata = djitemdata.SequenceItemData(seq)
seqitem = treemodel.TreeItem(seqdata, prjitem)
for shot in seq.shot_set.all():
shotdata = djitemdata.ShotItemData(shot)
shotitem = treemodel.TreeItem(shotdata, seqitem)
for typ in self.refobjinter.get_available_types_for_scene(shot):
typdata = treemodel.ListItemData([typ])
treemodel.TreeItem(typdata, shotitem)
return treemodel.TreeModel(rootitem)
def create_asset_model(self, ):
"""Return a treemodel with the levels: project, assettype, asset and reftrack type
:returns: a treemodel
:rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
:raises: None
"""
rootdata = treemodel.ListItemData(['Name'])
rootitem = treemodel.TreeItem(rootdata)
prjs = djadapter.projects.all()
for prj in prjs:
prjdata = djitemdata.ProjectItemData(prj)
prjitem = treemodel.TreeItem(prjdata, rootitem)
for atype in prj.atype_set.all():
atypedata = djitemdata.AtypeItemData(atype)
atypeitem = treemodel.TreeItem(atypedata, prjitem)
for asset in atype.asset_set.filter(project=prj):
assetdata = djitemdata.AssetItemData(asset)
assetitem = treemodel.TreeItem(assetdata, atypeitem)
for typ in self.refobjinter.get_available_types_for_scene(asset):
typdata = treemodel.ListItemData([typ])
treemodel.TreeItem(typdata, assetitem)
return treemodel.TreeModel(rootitem)
def add_selected(self, ):
"""Create a new reftrack with the selected element and type and add it to the root.
:returns: None
:rtype: None
        :raises: None
"""
browser = self.shot_browser if self.browser_tabw.currentIndex() == 1 else self.asset_browser
selelements = browser.selected_indexes(2)
if not selelements:
return
seltypes = browser.selected_indexes(3)
if not seltypes:
return
elementi = selelements[0]
typi = seltypes[0]
if not elementi.isValid() or not typi.isValid():
return
element = elementi.internalPointer().internal_data()
typ = typi.internalPointer().internal_data()[0]
reftrack.Reftrack(self.root, self.refobjinter, typ=typ, element=element)
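# Usage sketch (illustrative; ``MyRefobjInterface`` stands in for a concrete
# host-application subclass of :class:`reftrack.RefobjInterface`):
#
#     refobjinter = MyRefobjInterface()
#     win = ReftrackWin(refobjinter)
#     win.wrap_scene()  # wrap existing scene reftracks and show suggestions
#     win.show()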
| {
"content_hash": "d1b5db6626afe05a43365c7899bfdeee",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 107,
"avg_line_length": 36.850828729281766,
"alnum_prop": 0.6263868065967017,
"repo_name": "JukeboxPipeline/jukebox-core",
"id": "c39523be6237d2b51e9f059ef07b4854bcb6afae",
"size": "13340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jukeboxcore/gui/widgets/reftrackwin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1221"
},
{
"name": "Python",
"bytes": "890248"
},
{
"name": "Shell",
"bytes": "962"
}
],
"symlink_target": ""
} |
"""URL definitions for news stories
"""
try:
from django.conf.urls.defaults import patterns, url
except ImportError:
from django.conf.urls import patterns, url
from django.views.generic import (ArchiveIndexView, YearArchiveView,
MonthArchiveView, WeekArchiveView, DayArchiveView, TodayArchiveView,
DateDetailView)
from models import Story
info_dict = {
'queryset': Story.published.all(),
'date_field': 'publish_date',
'allow_empty': True
}
print_info_dict = dict(info_dict.items() + [('template_name', 'stories/story_print.html')])
print_info_dict.pop('allow_empty')
comment_info_dict = dict(info_dict.items() + [('template_name', 'stories/story_comments.html')])
comment_info_dict.pop('allow_empty')
urlpatterns = patterns('',
# news archive index
url(
r'^$',
ArchiveIndexView.as_view(**info_dict),
name='news_archive_index'
),
# news archive year list
url(
r'^(?P<year>\d{4})/$',
YearArchiveView.as_view(**info_dict),
name='news_archive_year'
),
# news archive month list
url(
r'^(?P<year>\d{4})/(?P<month>\w{3})/$',
MonthArchiveView.as_view(**info_dict),
name='news_archive_month'
),
# news archive week list
url(
r'^(?P<year>\d{4})/(?P<week>\d{1,2})/$',
WeekArchiveView.as_view(**info_dict),
name='news_archive_week'
),
# news archive day list
url(
r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$',
DayArchiveView.as_view(**info_dict),
name='news_archive_day'
),
# news archive today list
url(
r'^today/$',
TodayArchiveView.as_view(**info_dict),
        name='news_archive_today'
),
# story detail
url(
r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
'stories.views.pag_story_detail',
name='news_detail'
),
    # story print detail
url(
r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/print/$',
DateDetailView.as_view(**print_info_dict),
name='news_detail_print',
),
    # story comments
url(
r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/comments/$',
DateDetailView.as_view(**comment_info_dict),
name='news_detail_comments',
),
)
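# Illustrative sketch (an assumption, not from the original file): the named
# patterns above can be resolved with Django's reverse(), e.g.
#
#   from django.core.urlresolvers import reverse
#   url = reverse('news_archive_month',
#                 kwargs={'year': '2013', 'month': 'jan'})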
| {
"content_hash": "78b9a17a7e1cb4f3a7009293937228c9",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 96,
"avg_line_length": 29.012345679012345,
"alnum_prop": 0.5697872340425532,
"repo_name": "callowayproject/django-stories",
"id": "767a219b41ff409a015579e951b9af53916b4d06",
"size": "2396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stories/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40282"
},
{
"name": "CoffeeScript",
"bytes": "736"
},
{
"name": "JavaScript",
"bytes": "57064"
},
{
"name": "Python",
"bytes": "85770"
},
{
"name": "Shell",
"bytes": "3658"
}
],
"symlink_target": ""
} |
import json
import time
import fabric.api
from cloudify_rest_client.executions import Execution
from cosmo_tester.framework.test_cases import MonitoringTestCase
def limit_dpoints(num):
return float("{0:.3f}".format(num))
def num(s):
return int(s)
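# Quick illustration of the helpers above (comments only):
#   limit_dpoints(3.14159) -> 3.142   (float trimmed to three decimals)
#   num("42")              -> 42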
class ManyDeploymentsTest(MonitoringTestCase):
def wait_until_all_deployment_executions_end(self, deployment_id):
while len([execution for execution in self.client.executions.list(
deployment_id=deployment_id)
if execution["status"] not in Execution.END_STATES]) > 0:
time.sleep(1)
return
def many_deployments_stress_test(self):
self._run()
def init_fabric(self):
manager_keypath = self.env._config_reader.management_key_path
fabric_env = fabric.api.env
fabric_env.update({
'timeout': 30,
'user': 'ubuntu',
'key_filename': manager_keypath,
'host_string': self.env.management_ip,
})
    def get_manager_cpu_usage(self):
        self.logger.info('get_manager_cpu_usage with ip {0}'
                         .format(self.env.management_ip))
        # CPU usage is 100 minus the idle percentage reported by top. Note
        # the doubled backslash: '\\1' keeps the literal \1 back-reference
        # for sed instead of becoming a Python escape sequence.
        return (fabric.api.run(
            'top -bn1 | grep \"Cpu(s)\" | \
            sed \"s/.*, *\([0-9.]*\)%* id.*/\\1/\" | \
            awk \'{print 100 - $1\"%\"}\''))
def get_manager_memory_available(self):
self.logger.info('get_manager_memory_available with ip {0}'
.format(self.env.management_ip))
free = int(fabric.api.run(
'free -t -m | egrep Mem | awk \'{print $4}\''))
cache = int(fabric.api.run(
'free -t -m | egrep buffers | awk \'{print $4}\' | sed -n 2p'))
return cache + free
def get_manager_memory_total(self):
self.logger.info('get_manager_memory_total with ip {0}'
.format(self.env.management_ip))
return int(fabric.api.run(
'free -t -m | egrep Mem | awk \'{print $2}\''))
def get_manager_disk_total(self):
self.logger.info('get_manager_disk_total with ip {0}'
.format(self.env.management_ip))
return int(
str(fabric.api.run(
'df -k | grep /var/lib/docker | awk \'{print $2}\'')).replace(
"sudo: unable to resolve host cloudify-manager-server", ""))
def get_manager_disk_available(self):
self.logger.info('get_manager_disk_available with ip {0}'
.format(self.env.management_ip))
return int(
str(fabric.api.run(
'df -k | grep /var/lib/docker | awk \'{print $4}\'')).replace(
"sudo: unable to resolve host cloudify-manager-server", ""))
def get_number_of_total_active_nodes(self):
return len((self.client.nodes.list(
_include=["deploy_number_of_instances",
"deployment_id"])))
def get_number_of_active_nodes_per_deployment(self, deployment_id):
return len((self.client.nodes.list(
deployment_id=deployment_id,
_include=["deploy_number_of_instances",
"deployment_id"])))
def _end_test(self, report, number_of_deployments):
self.logger.info(json.dumps(report, indent=2))
self.assertGreater(number_of_deployments, 60)
return report
def _run(self):
self.number_of_active_nodes = \
self.get_number_of_total_active_nodes()
number_of_deployments = 1
self.init_fabric()
blueprint_path = self.copy_blueprint('mocks')
self.blueprint_yaml = blueprint_path / 'single-node-blueprint.yaml'
manager_disk_space_total = self.get_manager_disk_total()
manager_memory_total = self.get_manager_memory_total()
prev_manager_memory_available = self.get_manager_memory_available()
prev_space_available = self.get_manager_disk_available()
self.upload_blueprint(blueprint_id=self.test_id)
deployment_dict = {"deployment_number": 0,
"manager_memory_available":
prev_manager_memory_available,
"manager_memory_total":
manager_memory_total,
"nodes_active": self.number_of_active_nodes,
"manager_cpu_usage":
self.get_manager_cpu_usage(),
"manager_disk space_available":
prev_space_available,
"manager_disk_space_total":
manager_disk_space_total}
deployments_dict = {0: deployment_dict}
try:
while True:
start_time = time.time()
self.create_deployment(blueprint_id=self.test_id,
deployment_id=self.test_id+str(
number_of_deployments),
inputs='')
self.wait_until_all_deployment_executions_end(
deployment_id=self.test_id+str(number_of_deployments))
end_create_deployment_time = time.time()
start_install_time = time.time()
self.client.executions.start(
deployment_id=self.test_id+str(number_of_deployments),
workflow_id="install")
self.wait_until_all_deployment_executions_end(
deployment_id=self.test_id+str(number_of_deployments))
end_execute_install_time = time.time()
self.logger.debug(
"time to create deployment number {0} : {1}".format(
number_of_deployments,
end_create_deployment_time - start_time))
self.logger.debug(
"time to execute install number {0} : {1}".format(
number_of_deployments,
end_execute_install_time - start_install_time))
manager_disk_space_available = \
self.get_manager_disk_available()
manager_memory_available = self.get_manager_memory_available()
self.number_of_active_nodes = \
self.get_number_of_total_active_nodes()
number_of_my_active_nodes = \
self.get_number_of_active_nodes_per_deployment(
deployment_id=self.test_id+str(number_of_deployments))
deployment_dict = {"deployment_number": number_of_deployments,
"number_of_my_active_nodes":
number_of_my_active_nodes,
"nodes_active": self.number_of_active_nodes,
"time_to_create_deployment": limit_dpoints(
end_create_deployment_time -
start_time),
"time_to_install":
limit_dpoints(
end_execute_install_time -
start_install_time),
"manager_memory_available":
manager_memory_available,
"manager_memory_total":
manager_memory_total,
"manager_disk_space_available":
manager_disk_space_available,
"manager_disk_space_total":
manager_disk_space_total,
"memory_change_in_deployment":
prev_manager_memory_available -
manager_memory_available,
"manager_cpu_usage":
self.get_manager_cpu_usage(),
"disk_change_in_deployment":
prev_space_available -
manager_disk_space_available}
prev_space_available = manager_disk_space_available
prev_manager_memory_available = manager_memory_available
self.logger.debug(deployment_dict)
deployments_dict.update(
{number_of_deployments: deployment_dict})
number_of_deployments += 1
self.logger.debug(deployments_dict)
for i in range(1, number_of_deployments+1):
self.wait_until_all_deployment_executions_end(
deployment_id=self.test_id+str(i))
            self._end_test(deployments_dict, number_of_deployments)
except Exception as e:
self.logger.info(e)
self._end_test(deployments_dict, self.number_of_active_nodes)
| {
"content_hash": "9fbd055891aaa3168ce978de364cace8",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 46.97938144329897,
"alnum_prop": 0.5025235900811937,
"repo_name": "isaac-s/cloudify-system-tests",
"id": "afabc1b2b5cbc126497f2c328c710c136a722242",
"size": "9758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cosmo_tester/test_suites/stress_test_openstack/many_deployments_to_single_manager_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "441"
},
{
"name": "Clojure",
"bytes": "290"
},
{
"name": "Puppet",
"bytes": "154"
},
{
"name": "Python",
"bytes": "336635"
},
{
"name": "Ruby",
"bytes": "1104"
},
{
"name": "Shell",
"bytes": "3795"
}
],
"symlink_target": ""
} |
import os
import sys
import codecs
import platform
try:
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
# Extract distribution meta values, hint taken from Celery <http://celeryproject.org>
import re
re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)')
re_vers = re.compile(r'VERSION\s*=\s*\((.*?)\)')
re_doc = re.compile(r'^"""(.+?)"""')
rq = lambda s: s.strip("\"'")
def add_default(m):
attr_name, attr_value = m.groups()
return ((attr_name, rq(attr_value)), )
def add_version(m):
v = list(map(rq, m.groups()[0].split(", ")))
return (("VERSION", ".".join(v[0:3]) + "".join(v[3:])), )
def add_doc(m):
return (("doc", m.groups()[0]), )
pats = {re_meta: add_default,
re_vers: add_version,
re_doc: add_doc}
here = os.path.abspath(os.path.dirname(__file__))
meta_fh = open(os.path.join(here, "pyes/__init__.py"))
try:
meta = {}
for line in meta_fh:
for pattern, handler in pats.items():
m = pattern.match(line.strip())
if m:
meta.update(handler(m))
finally:
meta_fh.close()
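# Example of what the extraction above yields, assuming typical lines in
# pyes/__init__.py (illustrative values):
#   "__author__ = 'Jane Doe'"  ->  meta['author'] == 'Jane Doe'
#   "VERSION = (0, 99, 5)"     ->  meta['VERSION'] == '0.99.5'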
class QuickRunTests(TestCommand):
extra_env = dict(SKIP_RLIMITS=1, QUICKTEST=1)
def run(self, *args, **kwargs):
for env_name, env_value in self.extra_env.items():
os.environ[env_name] = str(env_value)
TestCommand.run(self, *args, **kwargs)
install_requires = ["urllib3>=1.7", "six>=1.5.2"]
#if not sys.platform.startswith("java"):
# install_requires += [ "thrift", ]
try:
import importlib
except ImportError:
install_requires.append("importlib")
try:
# For Python >= 2.6
import json
except ImportError:
# For Python < 2.6 or people using a newer version of simplejson
install_requires.append("simplejson")
try:
from collections import OrderedDict
except ImportError:
# Python 2.6 or earlier, use backport
#from ordereddict import OrderedDict
install_requires.append("ordereddict")
py_version = sys.version_info
if not sys.platform.startswith("java") and sys.version_info < (2, 6):
install_requires.append("multiprocessing==2.6.2.1")
if os.path.exists("README.rst"):
long_description = codecs.open("README.rst", "r", "utf-8").read()
else:
long_description = "See http://pypi.python.org/pypi/pyes"
setup(
name='pyes',
version=meta['VERSION'],
description="Python Elastic Search driver",
author=meta['author'],
author_email=meta['contact'],
url=meta['homepage'],
platforms=["any"],
license="BSD",
packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*', "docs.*"]),
scripts=[],
zip_safe=False,
install_requires=install_requires,
tests_require=['nose', 'nose-cover3', 'unittest2', 'simplejson'],
cmdclass={"quicktest": QuickRunTests},
test_suite="nose.collector",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search'
],
entry_points={
'console_scripts': [],
},
long_description=long_description,
)
| {
"content_hash": "55f04658e6e51e261d1169f8fea28833",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 85,
"avg_line_length": 28.760330578512395,
"alnum_prop": 0.631896551724138,
"repo_name": "mavarick/pyes",
"id": "941459e9473004eda3df4ad5b0890477b589409b",
"size": "3526",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1143"
},
{
"name": "Python",
"bytes": "602954"
},
{
"name": "Shell",
"bytes": "1438"
}
],
"symlink_target": ""
} |
"""Helper methods for various modules."""
import asyncio
from datetime import datetime, timedelta
from itertools import chain
import threading
import re
import enum
import socket
import random
import string
from functools import wraps
from types import MappingProxyType
from unicodedata import normalize
from typing import (Any, Optional, TypeVar, Callable, KeysView, Union, # noqa
Iterable, List, Dict, Iterator, Coroutine, MutableSet)
from .dt import as_local, utcnow
# pylint: disable=invalid-name
T = TypeVar('T')
U = TypeVar('U')
ENUM_T = TypeVar('ENUM_T', bound=enum.Enum)
# pylint: enable=invalid-name
RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)')
RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)')
RE_SLUGIFY = re.compile(r'[^a-z0-9_]+')
TBL_SLUGIFY = {
ord('ß'): 'ss'
}
def sanitize_filename(filename: str) -> str:
r"""Sanitize a filename by removing .. / and \\."""
return RE_SANITIZE_FILENAME.sub("", filename)
def sanitize_path(path: str) -> str:
"""Sanitize a path by removing ~ and .."""
return RE_SANITIZE_PATH.sub("", path)
def slugify(text: str) -> str:
"""Slugify a given text."""
text = normalize('NFKD', text)
text = text.lower()
text = text.replace(" ", "_")
text = text.translate(TBL_SLUGIFY)
text = RE_SLUGIFY.sub("", text)
return text
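# Worked example (illustrative): slugify("Straße Test") -> "strasse_test"
# (NFKD-normalize, lowercase, spaces to underscores, ß -> ss, then drop
# anything outside [a-z0-9_]).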
def repr_helper(inp: Any) -> str:
"""Help creating a more readable string representation of objects."""
if isinstance(inp, (dict, MappingProxyType)):
return ", ".join(
repr_helper(key)+"="+repr_helper(item) for key, item
in inp.items())
if isinstance(inp, datetime):
return as_local(inp).isoformat()
return str(inp)
def convert(value: T, to_type: Callable[[T], U],
default: Optional[U] = None) -> Optional[U]:
"""Convert value to to_type, returns default if fails."""
try:
return default if value is None else to_type(value)
except (ValueError, TypeError):
# If value could not be converted
return default
def ensure_unique_string(preferred_string: str, current_strings:
Union[Iterable[str], KeysView[str]]) -> str:
"""Return a string that is not present in current_strings.
If preferred string exists will append _2, _3, ..
"""
test_string = preferred_string
current_strings_set = set(current_strings)
tries = 1
while test_string in current_strings_set:
tries += 1
test_string = "{}_{}".format(preferred_string, tries)
return test_string
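# Illustrative: ensure_unique_string('beer', ['beer', 'beer_2']) -> 'beer_3'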
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip() -> str:
"""Try to determine the local IP address of the machine."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Use Google Public DNS server to determine own IP
sock.connect(('8.8.8.8', 80))
return sock.getsockname()[0] # type: ignore
except socket.error:
try:
return socket.gethostbyname(socket.gethostname())
except socket.gaierror:
return '127.0.0.1'
finally:
sock.close()
# Taken from http://stackoverflow.com/a/23728630
def get_random_string(length: int = 10) -> str:
"""Return a random string with letters and digits."""
generator = random.SystemRandom()
source_chars = string.ascii_letters + string.digits
return ''.join(generator.choice(source_chars) for _ in range(length))
class OrderedEnum(enum.Enum):
"""Taken from Python 3.4.0 docs."""
# https://github.com/PyCQA/pylint/issues/2306
# pylint: disable=comparison-with-callable
def __ge__(self, other: ENUM_T) -> bool:
"""Return the greater than element."""
if self.__class__ is other.__class__:
return bool(self.value >= other.value)
return NotImplemented
def __gt__(self, other: ENUM_T) -> bool:
"""Return the greater element."""
if self.__class__ is other.__class__:
return bool(self.value > other.value)
return NotImplemented
def __le__(self, other: ENUM_T) -> bool:
"""Return the lower than element."""
if self.__class__ is other.__class__:
return bool(self.value <= other.value)
return NotImplemented
def __lt__(self, other: ENUM_T) -> bool:
"""Return the lower element."""
if self.__class__ is other.__class__:
return bool(self.value < other.value)
return NotImplemented
class OrderedSet(MutableSet[T]):
"""Ordered set taken from http://code.activestate.com/recipes/576694/."""
def __init__(self, iterable: Optional[Iterable[T]] = None) -> None:
"""Initialize the set."""
self.end = end = [] # type: List[Any]
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # type: Dict[T, List] # key --> [key, prev, next]
if iterable is not None:
self |= iterable # type: ignore
def __len__(self) -> int:
"""Return the length of the set."""
return len(self.map)
def __contains__(self, key: T) -> bool: # type: ignore
"""Check if key is in set."""
return key in self.map
# pylint: disable=arguments-differ
def add(self, key: T) -> None:
"""Add an element to the end of the set."""
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def promote(self, key: T) -> None:
"""Promote element to beginning of the set, add if not there."""
if key in self.map:
self.discard(key)
begin = self.end[2]
curr = begin[1]
curr[2] = begin[1] = self.map[key] = [key, curr, begin]
# pylint: disable=arguments-differ
def discard(self, key: T) -> None:
"""Discard an element from the set."""
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self) -> Iterator[T]:
"""Iterate of the set."""
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self) -> Iterator[T]:
"""Reverse the ordering."""
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# pylint: disable=arguments-differ
def pop(self, last: bool = True) -> T:
"""Pop element of the end of the set.
Set last=False to pop from the beginning.
"""
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key # type: ignore
def update(self, *args: Any) -> None:
"""Add elements from args to the set."""
for item in chain(*args):
self.add(item)
def __repr__(self) -> str:
"""Return the representation."""
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
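# Illustrative usage (not part of the module):
#   s = OrderedSet([3, 1, 2])
#   s.add(1)      # no-op, 1 is already present
#   list(s)       # [3, 1, 2] -- insertion order is preserved
#   s.pop()       # 2 -- pops from the end by default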
class Throttle:
"""A class for throttling the execution of tasks.
This method decorator adds a cooldown to a method to prevent it from being
called more then 1 time within the timedelta interval `min_time` after it
returned its result.
Calling a method a second time during the interval will return None.
Pass keyword argument `no_throttle=True` to the wrapped method to make
the call not throttled.
Decorator takes in an optional second timedelta interval to throttle the
'no_throttle' calls.
Adds a datetime attribute `last_call` to the method.
"""
def __init__(self, min_time: timedelta,
limit_no_throttle: Optional[timedelta] = None) -> None:
"""Initialize the throttle."""
self.min_time = min_time
self.limit_no_throttle = limit_no_throttle
def __call__(self, method: Callable) -> Callable:
"""Caller for the throttle."""
# Make sure we return a coroutine if the method is async.
if asyncio.iscoroutinefunction(method):
async def throttled_value() -> None:
"""Stand-in function for when real func is being throttled."""
return None
else:
def throttled_value() -> None: # type: ignore
"""Stand-in function for when real func is being throttled."""
return None
if self.limit_no_throttle is not None:
method = Throttle(self.limit_no_throttle)(method)
# Different methods that can be passed in:
# - a function
# - an unbound function on a class
# - a method (bound function on a class)
# We want to be able to differentiate between function and unbound
# methods (which are considered functions).
# All methods have the classname in their qualname separated by a '.'
# Functions have a '.' in their qualname if defined inline, but will
# be prefixed by '.<locals>.' so we strip that out.
is_func = (not hasattr(method, '__self__') and
'.' not in method.__qualname__.split('.<locals>.')[-1])
@wraps(method)
def wrapper(*args: Any, **kwargs: Any) -> Union[Callable, Coroutine]:
"""Wrap that allows wrapped to be called only once per min_time.
If we cannot acquire the lock, it is running so return None.
"""
# pylint: disable=protected-access
if hasattr(method, '__self__'):
host = getattr(method, '__self__')
elif is_func:
host = wrapper
else:
host = args[0] if args else wrapper
if not hasattr(host, '_throttle'):
host._throttle = {}
if id(self) not in host._throttle:
host._throttle[id(self)] = [threading.Lock(), None]
throttle = host._throttle[id(self)]
if not throttle[0].acquire(False):
return throttled_value()
# Check if method is never called or no_throttle is given
force = kwargs.pop('no_throttle', False) or not throttle[1]
try:
if force or utcnow() - throttle[1] > self.min_time:
result = method(*args, **kwargs)
throttle[1] = utcnow()
return result # type: ignore
return throttled_value()
finally:
throttle[0].release()
return wrapper
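# Illustrative usage (an assumption, not from the module):
#
#   @Throttle(timedelta(seconds=10))
#   def update(self):
#       ...
#
# A second call within 10 seconds returns None; passing no_throttle=True
# forces execution. State lives in the per-host `_throttle` dict set up by
# the wrapper above.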
| {
"content_hash": "d9fc185a38320c0201226517413f0a9f",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 78,
"avg_line_length": 33.03012048192771,
"alnum_prop": 0.5805216122560642,
"repo_name": "persandstrom/home-assistant",
"id": "17849154ff766c19ff42245166135c315e5ce9d4",
"size": "10967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/util/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
"""Helpers for data entry flows for config entries."""
from typing import Any, Awaitable, Callable, Dict, Optional, Union
from homeassistant import config_entries
from .typing import HomeAssistantType
DiscoveryFunctionType = Callable[[], Union[Awaitable[bool], bool]]
class DiscoveryFlowHandler(config_entries.ConfigFlow):
"""Handle a discovery config flow."""
VERSION = 1
def __init__(
self,
domain: str,
title: str,
discovery_function: DiscoveryFunctionType,
connection_class: str,
) -> None:
"""Initialize the discovery config flow."""
self._domain = domain
self._title = title
self._discovery_function = discovery_function
self.CONNECTION_CLASS = connection_class # pylint: disable=invalid-name
async def async_step_user(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
await self.async_set_unique_id(self._domain, raise_on_progress=False)
return await self.async_step_confirm()
async def async_step_confirm(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Confirm setup."""
if user_input is None:
return self.async_show_form(step_id="confirm")
if self.source == config_entries.SOURCE_USER:
# Get current discovered entries.
in_progress = self._async_in_progress()
has_devices = in_progress
if not has_devices:
has_devices = await self.hass.async_add_job( # type: ignore
self._discovery_function, self.hass
)
if not has_devices:
return self.async_abort(reason="no_devices_found")
# Cancel the discovered one.
assert self.hass is not None
for flow in in_progress:
self.hass.config_entries.flow.async_abort(flow["flow_id"])
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
return self.async_create_entry(title=self._title, data={})
async def async_step_discovery(
self, discovery_info: Dict[str, Any]
) -> Dict[str, Any]:
"""Handle a flow initialized by discovery."""
if self._async_in_progress() or self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
await self.async_set_unique_id(self._domain)
return await self.async_step_confirm()
async_step_zeroconf = async_step_discovery
async_step_ssdp = async_step_discovery
async_step_mqtt = async_step_discovery
async_step_homekit = async_step_discovery
async def async_step_import(self, _: Optional[Dict[str, Any]]) -> Dict[str, Any]:
"""Handle a flow initialized by import."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
# Cancel other flows.
assert self.hass is not None
in_progress = self._async_in_progress()
for flow in in_progress:
self.hass.config_entries.flow.async_abort(flow["flow_id"])
return self.async_create_entry(title=self._title, data={})
def register_discovery_flow(
domain: str,
title: str,
discovery_function: DiscoveryFunctionType,
connection_class: str,
) -> None:
"""Register flow for discovered integrations that not require auth."""
class DiscoveryFlow(DiscoveryFlowHandler):
"""Discovery flow handler."""
def __init__(self) -> None:
super().__init__(domain, title, discovery_function, connection_class)
config_entries.HANDLERS.register(domain)(DiscoveryFlow)
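# Illustrative registration with hypothetical domain/title values:
#
#   register_discovery_flow(
#       "example_domain", "Example Title",
#       lambda hass: True,  # discovery_function; invoked with hass above
#       config_entries.CONN_CLASS_LOCAL_POLL)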
class WebhookFlowHandler(config_entries.ConfigFlow):
"""Handle a webhook config flow."""
VERSION = 1
def __init__(
self,
domain: str,
title: str,
description_placeholder: dict,
allow_multiple: bool,
) -> None:
"""Initialize the discovery config flow."""
self._domain = domain
self._title = title
self._description_placeholder = description_placeholder
self._allow_multiple = allow_multiple
async def async_step_user(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle a user initiated set up flow to create a webhook."""
if not self._allow_multiple and self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if user_input is None:
return self.async_show_form(step_id="user")
assert self.hass is not None
webhook_id = self.hass.components.webhook.async_generate_id()
if (
"cloud" in self.hass.config.components
and self.hass.components.cloud.async_active_subscription()
):
webhook_url = await self.hass.components.cloud.async_create_cloudhook(
webhook_id
)
cloudhook = True
else:
webhook_url = self.hass.components.webhook.async_generate_url(webhook_id)
cloudhook = False
self._description_placeholder["webhook_url"] = webhook_url
return self.async_create_entry(
title=self._title,
data={"webhook_id": webhook_id, "cloudhook": cloudhook},
description_placeholders=self._description_placeholder,
)
def register_webhook_flow(
domain: str, title: str, description_placeholder: dict, allow_multiple: bool = False
) -> None:
"""Register flow for webhook integrations."""
class WebhookFlow(WebhookFlowHandler):
"""Webhook flow handler."""
def __init__(self) -> None:
super().__init__(domain, title, description_placeholder, allow_multiple)
config_entries.HANDLERS.register(domain)(WebhookFlow)
async def webhook_async_remove_entry(
hass: HomeAssistantType, entry: config_entries.ConfigEntry
) -> None:
"""Remove a webhook config entry."""
if not entry.data.get("cloudhook") or "cloud" not in hass.config.components:
return
await hass.components.cloud.async_delete_cloudhook(entry.data["webhook_id"])
| {
"content_hash": "7a569c1e7547ec3f1d72050b47e496e0",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 88,
"avg_line_length": 33.752631578947366,
"alnum_prop": 0.6271635739903322,
"repo_name": "tboyce021/home-assistant",
"id": "6b9df47c4d8ccc6c3cdbd12eb475a35bb2995e3e",
"size": "6413",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/config_entry_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "28861968"
},
{
"name": "Shell",
"bytes": "4815"
}
],
"symlink_target": ""
} |
def check_results_adequacy(results, error_group):
    """Check that `results` carry test data with a discrete class variable.

    Return `results` when adequate; otherwise record an error message on
    `error_group` and return None.
    """
error_group.add_message("invalid_results")
error_group.invalid_results.clear()
if results is None:
return None
if results.data is None:
error_group.invalid_results(
"Results do not include information on test data")
elif not results.data.domain.has_discrete_class:
error_group.invalid_results(
"Discrete outcome variable is required")
else:
return results
| {
"content_hash": "06d1686904f533298c9824716dc54ed4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 37.38461538461539,
"alnum_prop": 0.668724279835391,
"repo_name": "cheral/orange3",
"id": "fd9e7590ae24be9f45017104290b4e8129eaeb90",
"size": "486",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Orange/widgets/evaluate/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12023"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20217"
},
{
"name": "Python",
"bytes": "4139574"
},
{
"name": "Shell",
"bytes": "47441"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2013, Jurriaan Bremer
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the darm developer(s) nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
class Bitsize:
def __init__(self, name, bitsize, comment):
self.name = name
self.bitsize = bitsize
self.comment = comment
def __repr__(self):
return '<%s:%d>' % (self.name, self.bitsize)
class imm(Bitsize):
def __init__(self, bits, args):
self.name = 'imm%d' % (bits)
self.bitsize = bits
self.comment = 'Immediate'
self.args = args
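# Illustrative: repr() encodes name and bit width, e.g.
#   repr(Bitsize('Rd', 4, 'Destination Register')) -> '<Rd:4>'
#   imm(12, None).name -> 'imm12'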
cond = Bitsize('cond', 4, 'Conditional Flags')
Rd = Bitsize('Rd', 4, 'Destination Register')
Rd3 = Bitsize('Rd', 3, 'Destination Register')
Rs = Bitsize('Rs', 3, 'Shift Immediate')
Rn = Bitsize('Rn', 4, 'N Register')
Rn3 = Bitsize('Rn', 3, 'N Register')
Rm = Bitsize('Rm', 4, 'Shift Register')
Rm3 = Bitsize('Rm', 3, 'Shift Register')
Rt = Bitsize('Rt', 4, 'Transferred Register')
Rt3 = Bitsize('Rt', 3, 'Transferred Register')
Rt2 = Bitsize('Rt2', 4, 'Second Ternary Register')
Ra = Bitsize('Ra', 4, 'Accumulate Register')
Rdm = Bitsize('Rdm', 4, 'Destination & M Register')
Rdm3 = Bitsize('Rdm', 3, 'Destination & M Register')
Rdn = Bitsize('Rdn', 4, 'Destination & N Register')
Rdn3 = Bitsize('Rdn', 3, 'Destination & N Register')
S = Bitsize('S', 1, 'Update Conditional Flags')
type_ = Bitsize('type', 2, 'Shift Type')
msb = Bitsize('msb', 5, 'Most Significant Bit')
lsb = Bitsize('lsb', 5, 'Least Significant Bit')
register_list8 = Bitsize('register_list', 8, 'Register List')
register_list13 = Bitsize('register_list', 13, 'Register List')
register_list = Bitsize('register_list', 16, 'Register List')
E = Bitsize('E', 1, 'Endian Specifier')
msr = Bitsize('msr', 2, 'Move to Special Register mask')
rotate = Bitsize('rotate', 2, 'Rotation Type')
H = Bitsize('H', 1, 'Sign Extension Bit for BL(X)')
option = Bitsize('option', 4, 'Option for Debug Hint')
W = Bitsize('W', 1, 'Some Bit for LDM')
widthm1 = Bitsize('widthm1', 5, 'Bit Width Minus One')
M = Bitsize('M', 1, 'High 16bits for Rm')
N = Bitsize('N', 1, 'High 16bits for Rn')
DN = Bitsize('DN', 1, 'High 16bits for Rdn')
RdHi = Bitsize('RdHi', 4, 'High 32bits for Rd')
RdLo = Bitsize('RdLo', 4, 'Low 32bits for Rd')
R = Bitsize('R', 1, 'Round Integer')
sat_imm4 = Bitsize('sat_imm4', 4, 'Saturate Immediate')
sat_imm5 = Bitsize('sat_imm5', 5, 'Saturate Immediate')
sh = Bitsize('sh', 1, 'Immediate Shift Value')
opc1 = Bitsize('opc1', 4, 'Coprocessor Operation Code')
opc2 = Bitsize('opc2', 3, 'Coprocessor Information')
CRn = Bitsize('CRn', 4, 'Coprocessor Operand Register')
CRd = Bitsize('CRd', 4, 'Coprocessor Destination Register')
coproc = Bitsize('coproc', 4, 'Coprocessor Number')
CPOpc = Bitsize('CPOpc', 3, 'Coprocessor Operation Mode')
CRm = Bitsize('CRm', 4, 'Coprocessor Operand Register')
U = Bitsize('U', 1, 'Addition flag for PLD')
P = Bitsize('P', 1, 'Protected Mode Flag?')
D = Bitsize('D', 1, 'User-defined bit')
tb = Bitsize('tb', 1, 'Is PKH in TB form or not?')
imm4H = Bitsize('imm4H', 4, 'High Word Register')
imm4L = Bitsize('imm4L', 4, 'Low Word Register')
im = Bitsize('im', 1, 'CPS interrupt mask')
CP3 = Bitsize('CP3', 3, 'CPS flag affects')
Qd = Bitsize('Qd', 4, 'Quadword Destination Register')
Qn = Bitsize('Qn', 4, 'Quadword First Operand Register')
Qm = Bitsize('Qm', 4, 'Quadword Second Operand Register')
Dd = Bitsize('Dd', 4, 'Doubleword Destination Register')
Dn = Bitsize('Dn', 4, 'Doubleword First Operand Register')
Dm = Bitsize('Dm', 4, 'Doubleword Second Operand Register')
Sd = Bitsize('Sd', 4, 'Single-Precision Destination Register')
Sn = Bitsize('Sn', 4, 'Single-Precision First Operand Register')
Sm = Bitsize('Sm', 4, 'Single-Precision Second Operand Register')
i = Bitsize('imm1', 1, 'Immediate')
J1 = Bitsize('J1', 1, 'Immediate')
J2 = Bitsize('J2', 1, 'Immediate')
imm2 = Bitsize('imm2', 2, 'Immediate')
imm3 = Bitsize('imm3', 3, 'Immediate')
imm4 = Bitsize('imm4', 4, 'Immediate')
imm5 = Bitsize('imm5', 5, 'Immediate')
imm6 = Bitsize('imm6', 6, 'Immediate')
imm7 = Bitsize('imm7', 7, 'Immediate')
imm8 = Bitsize('imm8', 8, 'Immediate')
imm10 = Bitsize('imm10', 10, 'Immediate')
imm10H = Bitsize('imm10H', 10, 'Immediate')
imm10L = Bitsize('imm10L', 10, 'Immediate')
imm11 = Bitsize('imm11', 11, 'Immediate')
imm12 = Bitsize('imm12', 12, 'Immediate')
imm24 = Bitsize('imm24', 24, 'Immediate')
| {
"content_hash": "c2180c5519e04566fe2102af877fdbfe",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 76,
"avg_line_length": 50.47692307692308,
"alnum_prop": 0.6152087778116427,
"repo_name": "epanalytics/darm",
"id": "654f0c43236d862a20ddaa033e10129bcdabe968",
"size": "6562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "darmbits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5726870"
},
{
"name": "Objective-C",
"bytes": "527"
},
{
"name": "Python",
"bytes": "564778"
}
],
"symlink_target": ""
} |
from ConfigParser import ConfigParser, NoOptionError
import os
import settings
class Config(ConfigParser):
"""
``ConfigParser`` subclass that looks into your home folder for a file named
``.gvoice`` and parses configuration data from it.
"""
def __init__(self):
GV_ROOT = os.path.abspath(os.path.dirname(__file__))
self.fname = os.path.join(GV_ROOT, "gvoice")
if not os.path.exists(self.fname):
try:
f = open(self.fname, 'w')
except IOError:
return
f.write(settings.DEFAULT_CONFIG)
f.close()
ConfigParser.__init__(self)
try:
self.read([self.fname])
except IOError:
return
def get(self, option, section='gvoice'):
try:
return ConfigParser.get(self, section, option).strip() or None
except NoOptionError:
return
def set(self, option, value, section='gvoice'):
return ConfigParser.set(self, section, option, value)
def phoneType(self):
try:
return int(self.get('phoneType'))
except TypeError:
return
def save(self):
f = open(self.fname, 'w')
self.write(f)
f.close()
phoneType = property(phoneType)
forwardingNumber = property(lambda self: self.get('forwardingNumber'))
email = property(lambda self: self.get('email','auth'))
password = property(lambda self: self.get('password','auth'))
secret = property(lambda self: self.get('secret'))
config = Config()
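# Illustrative usage (module path assumed from this package layout):
#   from googlevoice.conf import config
#   config.email  # value of 'email' in the [auth] section, or None
#   config.set('forwardingNumber', '+15551234567'); config.save()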
| {
"content_hash": "f1c34edcfe562da0e9a3780c0aae0a78",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 76,
"avg_line_length": 24.160714285714285,
"alnum_prop": 0.6866223207686623,
"repo_name": "kwantopia/shoppley-migrate",
"id": "0b68a113802dc96d152b103398a6130c9e5a0513",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shoppley.com/shoppley/apps/googlevoice/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "129526"
},
{
"name": "C++",
"bytes": "113028"
},
{
"name": "CSS",
"bytes": "150952"
},
{
"name": "Erlang",
"bytes": "3391"
},
{
"name": "Java",
"bytes": "226321"
},
{
"name": "JavaScript",
"bytes": "98748"
},
{
"name": "Objective-C",
"bytes": "2795782"
},
{
"name": "Perl",
"bytes": "8449"
},
{
"name": "Python",
"bytes": "541809"
},
{
"name": "Shell",
"bytes": "26061"
}
],
"symlink_target": ""
} |
"""Helpers for config validation using voluptuous."""
from datetime import timedelta
import jinja2
import voluptuous as vol
from homeassistant.loader import get_platform
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE)
from homeassistant.helpers.entity import valid_entity_id
import homeassistant.util.dt as dt_util
from homeassistant.util import slugify
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"
# Home Assistant types
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
msg='invalid longitude')
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys):
"""Validator that at least one key exists."""
def validate(obj):
"""Test keys exist in dict."""
if not isinstance(obj, dict):
raise vol.Invalid('expected dictionary')
for k in obj.keys():
if k in keys:
return obj
raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))
return validate
def boolean(value):
"""Validate and coerce a boolean value."""
if isinstance(value, str):
value = value.lower()
if value in ('1', 'true', 'yes', 'on', 'enable'):
return True
if value in ('0', 'false', 'no', 'off', 'disable'):
return False
raise vol.Invalid('invalid boolean value {}'.format(value))
return bool(value)
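# Illustrative: boolean('YES') -> True, boolean('off') -> False,
# boolean(1) -> True; boolean('maybe') raises vol.Invalid.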
def isfile(value):
"""Validate that the value is an existing file."""
return vol.IsFile('not a file')(value)
def ensure_list(value):
"""Wrap value in list if it is not one."""
return value if isinstance(value, list) else [value]
def entity_id(value):
"""Validate Entity ID."""
value = string(value).lower()
if valid_entity_id(value):
return value
raise vol.Invalid('Entity ID {} does not match format <domain>.<object_id>'
.format(value))
def entity_ids(value):
"""Validate Entity IDs."""
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(',')]
return [entity_id(ent_id) for ent_id in value]
def icon(value):
"""Validate icon."""
value = str(value)
if value.startswith('mdi:'):
return value
raise vol.Invalid('Icons should start with prefix "mdi:"')
time_period_dict = vol.All(
dict, vol.Schema({
'days': vol.Coerce(int),
'hours': vol.Coerce(int),
'minutes': vol.Coerce(int),
'seconds': vol.Coerce(int),
'milliseconds': vol.Coerce(int),
}),
has_at_least_one_key('days', 'hours', 'minutes',
'seconds', 'milliseconds'),
lambda value: timedelta(**value))
def time_period_str(value):
"""Validate and transform time offset."""
if isinstance(value, int):
raise vol.Invalid('Make sure you wrap time values in quotes')
elif not isinstance(value, str):
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
negative_offset = False
if value.startswith('-'):
negative_offset = True
value = value[1:]
elif value.startswith('+'):
value = value[1:]
try:
parsed = [int(x) for x in value.split(':')]
except ValueError:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
if len(parsed) == 2:
hour, minute = parsed
second = 0
elif len(parsed) == 3:
hour, minute, second = parsed
else:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
offset = timedelta(hours=hour, minutes=minute, seconds=second)
if negative_offset:
offset *= -1
return offset
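# Illustrative: time_period_str('-01:30')   -> timedelta(hours=-1, minutes=-30)
#               time_period_str('00:00:10') -> timedelta(seconds=10)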
time_period = vol.Any(time_period_str, timedelta, time_period_dict)
def log_exception(logger, ex, domain, config):
"""Generate log exception for config validation."""
message = 'Invalid config for [{}]: '.format(domain)
if 'extra keys not allowed' in ex.error_message:
message += '[{}] is an invalid option for [{}]. Check: {}->{}.'\
.format(ex.path[-1], domain, domain,
'->'.join('%s' % m for m in ex.path))
else:
message += str(ex)
if hasattr(config, '__line__'):
message += " (See {}:{})".format(config.__config_file__,
config.__line__ or '?')
logger.error(message)
def match_all(value):
"""Validator that matches all values."""
return value
def platform_validator(domain):
"""Validate if platform exists for given domain."""
def validator(value):
"""Test if platform exists."""
if value is None:
raise vol.Invalid('platform cannot be None')
if get_platform(domain, str(value)):
return value
raise vol.Invalid(
'platform {} does not exist for {}'.format(value, domain))
return validator
def positive_timedelta(value):
"""Validate timedelta is positive."""
if value < timedelta(0):
raise vol.Invalid('Time period should be positive')
return value
def service(value):
"""Validate service."""
# Services use same format as entities so we can use same helper.
if valid_entity_id(value):
return value
raise vol.Invalid('Service {} does not match format <domain>.<name>'
.format(value))
def slug(value):
"""Validate value is a valid slug."""
if value is None:
raise vol.Invalid('Slug should not be None')
value = str(value)
slg = slugify(value)
if value == slg:
return value
raise vol.Invalid('invalid slug {} (try {})'.format(value, slg))
def string(value):
"""Coerce value to string, except for None."""
if value is not None:
return str(value)
raise vol.Invalid('string value is None')
def temperature_unit(value):
"""Validate and transform temperature unit."""
value = str(value).upper()
if value == 'C':
return TEMP_CELSIUS
elif value == 'F':
return TEMP_FAHRENHEIT
raise vol.Invalid('invalid temperature unit (expected C or F)')
def template(value):
"""Validate a jinja2 template."""
if value is None:
raise vol.Invalid('template value is None')
value = str(value)
try:
jinja2.Environment().parse(value)
return value
except jinja2.exceptions.TemplateSyntaxError as ex:
raise vol.Invalid('invalid template ({})'.format(ex))
def time(value):
"""Validate time."""
time_val = dt_util.parse_time(value)
if time_val is None:
raise vol.Invalid('Invalid time specified: {}'.format(value))
return time_val
def time_zone(value):
"""Validate timezone."""
if dt_util.get_time_zone(value) is not None:
return value
raise vol.Invalid(
'Invalid time zone passed in. Valid options can be found here: '
'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
# Validator helpers
def key_dependency(key, dependency):
"""Validate that all dependencies exist for key."""
def validator(value):
"""Test dependencies."""
if not isinstance(value, dict):
raise vol.Invalid('key dependencies require a dict')
if key in value and dependency not in value:
raise vol.Invalid('dependency violation - key "{}" requires '
'key "{}" to exist'.format(key, dependency))
return value
return validator
# Schemas
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): string,
CONF_SCAN_INTERVAL: vol.All(vol.Coerce(int), vol.Range(min=1)),
}, extra=vol.ALLOW_EXTRA)
EVENT_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required('event'): string,
vol.Optional('event_data'): dict,
})
SERVICE_SCHEMA = vol.All(vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Exclusive('service', 'service name'): service,
vol.Exclusive('service_template', 'service name'): template,
vol.Optional('data'): dict,
vol.Optional('data_template'): {match_all: template},
vol.Optional(CONF_ENTITY_ID): entity_ids,
}), has_at_least_one_key('service', 'service_template'))
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'numeric_state',
vol.Required(CONF_ENTITY_ID): entity_id,
CONF_BELOW: vol.Coerce(float),
CONF_ABOVE: vol.Coerce(float),
vol.Optional(CONF_VALUE_TEMPLATE): template,
}), has_at_least_one_key(CONF_BELOW, CONF_ABOVE))
STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'state',
vol.Required(CONF_ENTITY_ID): entity_id,
vol.Required('state'): str,
vol.Optional('for'): vol.All(time_period, positive_timedelta),
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional('from'): str,
}), key_dependency('for', 'state'))
SUN_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'sun',
vol.Optional('before'): sun_event,
vol.Optional('before_offset'): time_period,
vol.Optional('after'): vol.All(vol.Lower, vol.Any('sunset', 'sunrise')),
vol.Optional('after_offset'): time_period,
}), has_at_least_one_key('before', 'after'))
TEMPLATE_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'template',
vol.Required(CONF_VALUE_TEMPLATE): template,
})
TIME_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'time',
'before': time,
'after': time,
'weekday': weekdays,
}), has_at_least_one_key('before', 'after', 'weekday'))
ZONE_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'zone',
vol.Required(CONF_ENTITY_ID): entity_id,
'zone': entity_id,
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional('event'): vol.Any('enter', 'leave'),
})
AND_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'and',
vol.Required('conditions'): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
)
})
OR_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'or',
vol.Required('conditions'): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
)
})
CONDITION_SCHEMA = vol.Any(
NUMERIC_STATE_CONDITION_SCHEMA,
STATE_CONDITION_SCHEMA,
SUN_CONDITION_SCHEMA,
TEMPLATE_CONDITION_SCHEMA,
TIME_CONDITION_SCHEMA,
ZONE_CONDITION_SCHEMA,
AND_CONDITION_SCHEMA,
OR_CONDITION_SCHEMA,
)
_SCRIPT_DELAY_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required("delay"): vol.All(time_period, positive_timedelta)
})
SCRIPT_SCHEMA = vol.All(
ensure_list,
[vol.Any(SERVICE_SCHEMA, _SCRIPT_DELAY_SCHEMA, EVENT_SCHEMA,
CONDITION_SCHEMA)],
)
| {
"content_hash": "7e93e5703883cb8b6eb0cdbc38eecb35",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 79,
"avg_line_length": 30.005194805194805,
"alnum_prop": 0.6314923822714681,
"repo_name": "Zyell/home-assistant",
"id": "031ab5227dcea2656b8ab3c65ae19ae21551ebe3",
"size": "11552",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homeassistant/helpers/config_validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "798938"
},
{
"name": "Python",
"bytes": "771451"
},
{
"name": "Shell",
"bytes": "5097"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import subprocess
import re
from setuptools import setup
from setuptools.command.sdist import sdist as _sdist
from setuptools.command.install import install as _install
VERSION_PY = """
# This file is originally generated from Git information by running 'setup.py
# version'. Distribution tarballs contain a pre-generated copy of this file.
__version__ = '%s'
"""
def update_version_py():
if not os.path.isdir(".git"):
print("This does not appear to be a Git repository.")
return
try:
p = subprocess.Popen(["git", "describe",
"--tags", "--always"],
stdout=subprocess.PIPE)
except EnvironmentError:
print("unable to run git, leaving eden/_version.py alone")
return
stdout = p.communicate()[0]
if p.returncode != 0:
print("unable to run git, leaving eden/_version.py alone")
return
ver = stdout.strip().decode()
f = open("eden/_version.py", "w")
f.write(VERSION_PY % ver)
f.close()
print("set eden/_version.py to '%s'" % ver)
def get_version():
try:
f = open("eden/_version.py")
except EnvironmentError:
return None
for line in f.readlines():
mo = re.match("__version__ = '([^']+)'", line)
if mo:
ver = mo.group(1)
return ver
return None
class sdist(_sdist):
def run(self):
update_version_py()
self.distribution.metadata.version = get_version()
return _sdist.run(self)
class install(_install):
def run(self):
_install.run(self)
def checkProgramIsInstalled(self, program, args, where_to_download,
affected_tools):
try:
subprocess.Popen([program, args], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
return True
except EnvironmentError:
# handle file not found error.
# the config file is installed in:
msg = "\n**{0} not found. This " \
"program is needed for the following "\
"tools to work properly:\n"\
" {1}\n"\
"{0} can be downloaded from here:\n " \
" {2}\n".format(program, affected_tools,
where_to_download)
sys.stderr.write(msg)
except Exception as e:
sys.stderr.write("Error: {}".format(e))
setup(
name='eden',
version=get_version(),
author='Fabrizio Costa',
author_email='graph-eden@googlegroups.com',
packages=['eden',
'eden.ml',
'eden.display',
'eden.io'
],
scripts=[],
include_package_data=True,
package_data={},
url='http://pypi.python.org/pypi/eden/',
license='LICENSE',
    description='The Explicit Decomposition with Neighborhoods (EDeN) is a decompositional kernel \
based on the Neighborhood Subgraph Pairwise Distance Kernel (NSPDK) that can be used to induce \
an explicit feature representation for graphs. This in turn allows the adoption of machine learning \
algorithms to perform supervised and unsupervised learning tasks in a scalable way (e.g. fast \
stochastic gradient descent methods in classification).',
long_description=open('README.md').read(),
install_requires=[
"dill",
"future",
"joblib",
"toolz",
"matplotlib",
"networkx >= 2.0",
"numpy >= 1.10.4",
"requests",
"scikit-learn >= 0.18.2",
"scipy >= 0.14.0",
],
cmdclass={'sdist': sdist, 'install': install}
)
| {
"content_hash": "535227b5d38a9884918056d2500e49e6",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 104,
"avg_line_length": 30.266129032258064,
"alnum_prop": 0.5787370103916867,
"repo_name": "dmaticzka/EDeN",
"id": "46e6ec54ccaa0c75144422dd3ba9430c44f9b653",
"size": "3775",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "238351"
},
{
"name": "Shell",
"bytes": "446"
}
],
"symlink_target": ""
} |
from specparser import HadoopRuntime
from collections import OrderedDict
import json
import os
import sys
import subprocess
def cmd(cmd_str):
ret = subprocess.call(cmd_str, shell=True)
return ret
def get_the_line_of_transaction(path):
    abs_path = os.path.join(path, '*')
    cmd_str = "hadoop fs -text %s | wc -l " % abs_path
    # subprocess.call() only returns the exit status (0 on success), so
    # capture stdout here to obtain the actual line count.
    return int(subprocess.check_output(cmd_str, shell=True).strip())
def main():
hr = HadoopRuntime("spec.json")
settings = hr.settings
print(settings)
# Prepare working directory
hr.hdfs_clean_working_dir()
# allocate temp_path
temp_path = hr.get_hdfs_working_dir("temp")
# allocate output_path
output_path = hr.get_hdfs_working_dir("output_path")
# build parameters for hadoop job
jar_file = "./mahout-core-1.0-SNAPSHOT-job.jar"
hadoop_params = {}
hadoop_params["HADOOP_MAPRED_HOME"] = "/usr/lib/hadoop-mapreduce"
hadoop_params_str = " ".join(["%s=%s" % (k,v) for k,v in hadoop_params.items()])
jar_defs = {}
jar_defs["mapreduce.framework.name"] = "yarn"
jar_defs["yarn.resourcemanager.address"] = settings.Param.yarn_resourcemanager
jar_defs["yarn.resourcemanager.scheduler.address"] = settings.Param.yarn_resourcemanager_scheduler
jar_defs["fs.defaultFS"] = settings.Param.hdfs_root
jar_defs["mapreduce.output.fileoutputformat.compress"] = "false"
jar_defs_str = " ".join(["-D %s=%s" % (k,v) for k,v in jar_defs.items()])
other_args = OrderedDict()
other_args["similarityClassname"] = "SIMILARITY_EUCLIDEAN_DISTANCE"
other_args["input"] = settings.Input.ratings.val
other_args["usersFile"] = settings.Input.usersFile.val
other_args["output"] = output_path
other_args["tempDir"] = temp_path
other_args_str = " ".join(["--%s %s" % (k,v) for k,v in other_args.items()])
line_num =get_the_line_of_transaction(settings.Input.ratings.val)
if line_num >0:
cmd_str = '%s hadoop jar %s org.apache.mahout.cf.taste.hadoop.item.RecommenderJob %s %s' % \
(hadoop_params_str, jar_file, jar_defs_str, other_args_str)
print("Executing:")
print(cmd_str)
ret = cmd(cmd_str)
if ret != 0:
print("Job failed")
sys.exit(ret)
else:
print "Collaborative Input Transaction Matrix is empty. Skip the calcuating."
settings.Output.cl_result.val = output_path
print("Done")
if __name__ == "__main__":
main()
| {
"content_hash": "0fbe6081bc70a5d53e471c639bd5e27d",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 102,
"avg_line_length": 35.14492753623188,
"alnum_prop": 0.6428865979381443,
"repo_name": "DataCanvasIO/example-modules",
"id": "301b1d1620023155de16047faa4fb18f40baeb98",
"size": "2472",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/modeling/CDH4/hero_cl_filter/collaborative_filter/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "8551"
},
{
"name": "PigLatin",
"bytes": "675"
},
{
"name": "Python",
"bytes": "1738196"
},
{
"name": "R",
"bytes": "2273"
},
{
"name": "Shell",
"bytes": "31893"
}
],
"symlink_target": ""
} |
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
domain_apikey = db.Table(
'domain_apikey',
db.Column('domain_id', db.Integer, db.ForeignKey('domain.id')),
db.Column('apikey_id', db.Integer, db.ForeignKey('apikey.id')))
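# Association table for the many-to-many link between domains and API keys.
# Illustrative model-side use (assuming an ApiKey model exists):
#   apikeys = db.relationship('ApiKey', secondary=domain_apikey)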
| {
"content_hash": "cb7f9220a785cca85aa2760cdec895b6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 67,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6942148760330579,
"repo_name": "ngoduykhanh/PowerDNS-Admin",
"id": "7ade5db3d4efd337809d23ebbcaa8936847e3acd",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "powerdnsadmin/models/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "558"
},
{
"name": "Dockerfile",
"bytes": "4237"
},
{
"name": "HTML",
"bytes": "280959"
},
{
"name": "JavaScript",
"bytes": "11375"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "387609"
},
{
"name": "Shell",
"bytes": "1483"
},
{
"name": "TSQL",
"bytes": "3112"
}
],
"symlink_target": ""
} |
import sys
import pandas as pd
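# Usage (illustrative): python view_hdf5.py <store.h5>
# Prints every table in the HDF5 store with its dtypes and summary stats.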
args = sys.argv[1:]
assert len(args) == 1
store = pd.HDFStore(args[0], "r")
print store
for key in store.keys():
print "\n\nTABLENAME", key
print store[key]
print store[key].dtypes
print store[key].describe()
| {
"content_hash": "4a38c73cc62e3dca439a4cf06b8afa77",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 33,
"avg_line_length": 15.222222222222221,
"alnum_prop": 0.6605839416058394,
"repo_name": "bricegnichols/urbansim",
"id": "47e9f0f49e6a4099e4b470c0f8a5f7df9ac7cac5",
"size": "274",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "scripts/view_hdf5.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "27290"
},
{
"name": "Python",
"bytes": "345947"
},
{
"name": "R",
"bytes": "1763"
},
{
"name": "Shell",
"bytes": "2072"
}
],
"symlink_target": ""
} |
import unittest
import io
class TestFileBasedBuffer(unittest.TestCase):
def _makeOne(self, file=None, from_buffer=None):
from waitress.buffers import FileBasedBuffer
return FileBasedBuffer(file, from_buffer=from_buffer)
def test_ctor_from_buffer_None(self):
inst = self._makeOne('file')
self.assertEqual(inst.file, 'file')
def test_ctor_from_buffer(self):
from_buffer = io.BytesIO(b'data')
from_buffer.getfile = lambda *x: from_buffer
f = io.BytesIO()
inst = self._makeOne(f, from_buffer)
self.assertEqual(inst.file, f)
del from_buffer.getfile
self.assertEqual(inst.remain, 4)
from_buffer.close()
def test___len__(self):
inst = self._makeOne()
inst.remain = 10
self.assertEqual(len(inst), 10)
def test___nonzero__(self):
inst = self._makeOne()
inst.remain = 10
self.assertEqual(bool(inst), True)
inst.remain = 0
self.assertEqual(bool(inst), False)
def test_append(self):
f = io.BytesIO(b'data')
inst = self._makeOne(f)
inst.append(b'data2')
self.assertEqual(f.getvalue(), b'datadata2')
self.assertEqual(inst.remain, 5)
def test_get_skip_true(self):
f = io.BytesIO(b'data')
inst = self._makeOne(f)
result = inst.get(100, skip=True)
self.assertEqual(result, b'data')
self.assertEqual(inst.remain, -4)
def test_get_skip_false(self):
f = io.BytesIO(b'data')
inst = self._makeOne(f)
result = inst.get(100, skip=False)
self.assertEqual(result, b'data')
self.assertEqual(inst.remain, 0)
def test_get_skip_bytes_less_than_zero(self):
f = io.BytesIO(b'data')
inst = self._makeOne(f)
result = inst.get(-1, skip=False)
self.assertEqual(result, b'data')
self.assertEqual(inst.remain, 0)
def test_skip_remain_gt_bytes(self):
f = io.BytesIO(b'd')
inst = self._makeOne(f)
inst.remain = 1
inst.skip(1)
self.assertEqual(inst.remain, 0)
def test_skip_remain_lt_bytes(self):
f = io.BytesIO(b'd')
inst = self._makeOne(f)
inst.remain = 1
self.assertRaises(ValueError, inst.skip, 2)
def test_newfile(self):
inst = self._makeOne()
self.assertRaises(NotImplementedError, inst.newfile)
def test_prune_remain_notzero(self):
f = io.BytesIO(b'd')
inst = self._makeOne(f)
inst.remain = 1
nf = io.BytesIO()
inst.newfile = lambda *x: nf
inst.prune()
self.assertTrue(inst.file is not f)
self.assertEqual(nf.getvalue(), b'd')
def test_prune_remain_zero_tell_notzero(self):
f = io.BytesIO(b'd')
inst = self._makeOne(f)
nf = io.BytesIO(b'd')
inst.newfile = lambda *x: nf
inst.remain = 0
inst.prune()
self.assertTrue(inst.file is not f)
self.assertEqual(nf.getvalue(), b'd')
def test_prune_remain_zero_tell_zero(self):
f = io.BytesIO()
inst = self._makeOne(f)
inst.remain = 0
inst.prune()
self.assertTrue(inst.file is f)
def test__close(self):
f = io.BytesIO()
inst = self._makeOne(f)
inst._close()
self.assertTrue(f.closed)
class TestTempfileBasedBuffer(unittest.TestCase):
def _makeOne(self, from_buffer=None):
from waitress.buffers import TempfileBasedBuffer
return TempfileBasedBuffer(from_buffer=from_buffer)
def test_newfile(self):
inst = self._makeOne()
r = inst.newfile()
self.assertTrue(hasattr(r, 'fileno')) # file
class TestBytesIOBasedBuffer(unittest.TestCase):
def _makeOne(self, from_buffer=None):
from waitress.buffers import BytesIOBasedBuffer
return BytesIOBasedBuffer(from_buffer=from_buffer)
def test_ctor_from_buffer_not_None(self):
f = io.BytesIO()
f.getfile = lambda *x: f
inst = self._makeOne(f)
self.assertTrue(hasattr(inst.file, 'read'))
def test_ctor_from_buffer_None(self):
inst = self._makeOne()
self.assertTrue(hasattr(inst.file, 'read'))
def test_newfile(self):
inst = self._makeOne()
r = inst.newfile()
self.assertTrue(hasattr(r, 'read'))
class TestReadOnlyFileBasedBuffer(unittest.TestCase):
def _makeOne(self, file, block_size=8192):
from waitress.buffers import ReadOnlyFileBasedBuffer
return ReadOnlyFileBasedBuffer(file, block_size)
def test_prepare_not_seekable_not_closeable(self):
f = KindaFilelike(b'abc')
inst = self._makeOne(f)
result = inst.prepare()
self.assertEqual(result, False)
self.assertEqual(inst.remain, 0)
self.assertFalse(hasattr(inst, 'close'))
def test_prepare_not_seekable_closeable(self):
f = KindaFilelike(b'abc', close=1)
inst = self._makeOne(f)
result = inst.prepare()
self.assertEqual(result, False)
self.assertEqual(inst.remain, 0)
self.assertEqual(inst.close, f.close)
def test_prepare_seekable_closeable(self):
f = Filelike(b'abc', close=1, tellresults=[0, 10])
inst = self._makeOne(f)
result = inst.prepare()
self.assertEqual(result, 10)
self.assertEqual(inst.remain, 10)
self.assertEqual(inst.file.seeked, 0)
self.assertFalse(hasattr(inst, 'close'))
def test_get_numbytes_neg_one(self):
f = io.BytesIO(b'abcdef')
inst = self._makeOne(f)
inst.remain = 2
result = inst.get(-1)
self.assertEqual(result, b'ab')
self.assertEqual(inst.remain, 2)
self.assertEqual(f.tell(), 0)
def test_get_numbytes_gt_remain(self):
f = io.BytesIO(b'abcdef')
inst = self._makeOne(f)
inst.remain = 2
result = inst.get(3)
self.assertEqual(result, b'ab')
self.assertEqual(inst.remain, 2)
self.assertEqual(f.tell(), 0)
def test_get_numbytes_lt_remain(self):
f = io.BytesIO(b'abcdef')
inst = self._makeOne(f)
inst.remain = 2
result = inst.get(1)
self.assertEqual(result, b'a')
self.assertEqual(inst.remain, 2)
self.assertEqual(f.tell(), 0)
def test_get_numbytes_gt_remain_withskip(self):
f = io.BytesIO(b'abcdef')
inst = self._makeOne(f)
inst.remain = 2
result = inst.get(3, skip=True)
self.assertEqual(result, b'ab')
self.assertEqual(inst.remain, 0)
self.assertEqual(f.tell(), 2)
def test_get_numbytes_lt_remain_withskip(self):
f = io.BytesIO(b'abcdef')
inst = self._makeOne(f)
inst.remain = 2
result = inst.get(1, skip=True)
self.assertEqual(result, b'a')
self.assertEqual(inst.remain, 1)
self.assertEqual(f.tell(), 1)
def test___iter__(self):
data = b'a' * 10000
f = io.BytesIO(data)
inst = self._makeOne(f)
r = b''
for val in inst:
r += val
self.assertEqual(r, data)
def test_append(self):
inst = self._makeOne(None)
self.assertRaises(NotImplementedError, inst.append, 'a')
class TestOverflowableBuffer(unittest.TestCase):
def _makeOne(self, overflow=10):
from waitress.buffers import OverflowableBuffer
return OverflowableBuffer(overflow)
def test___len__buf_is_None(self):
inst = self._makeOne()
self.assertEqual(len(inst), 0)
def test___len__buf_is_not_None(self):
inst = self._makeOne()
inst.buf = b'abc'
self.assertEqual(len(inst), 3)
def test___nonzero__(self):
inst = self._makeOne()
inst.buf = b'abc'
self.assertEqual(bool(inst), True)
inst.buf = b''
self.assertEqual(bool(inst), False)
def test___nonzero___on_int_overflow_buffer(self):
inst = self._makeOne()
class int_overflow_buf(bytes):
def __len__(self):
# maxint + 1
return 0x7fffffffffffffff + 1
inst.buf = int_overflow_buf()
self.assertEqual(bool(inst), True)
inst.buf = b''
self.assertEqual(bool(inst), False)
def test__create_buffer_large(self):
from waitress.buffers import TempfileBasedBuffer
inst = self._makeOne()
inst.strbuf = b'x' * 11
inst._create_buffer()
self.assertEqual(inst.buf.__class__, TempfileBasedBuffer)
self.assertEqual(inst.buf.get(100), b'x' * 11)
self.assertEqual(inst.strbuf, b'')
def test__create_buffer_small(self):
from waitress.buffers import BytesIOBasedBuffer
inst = self._makeOne()
inst.strbuf = b'x' * 5
inst._create_buffer()
self.assertEqual(inst.buf.__class__, BytesIOBasedBuffer)
self.assertEqual(inst.buf.get(100), b'x' * 5)
self.assertEqual(inst.strbuf, b'')
def test_append_with_len_more_than_max_int(self):
from waitress.compat import MAXINT
inst = self._makeOne()
inst.overflowed = True
buf = DummyBuffer(length=MAXINT)
inst.buf = buf
result = inst.append(b'x')
# we don't want this to throw an OverflowError on Python 2 (see
# https://github.com/Pylons/waitress/issues/47)
self.assertEqual(result, None)
def test_append_buf_None_not_longer_than_srtbuf_limit(self):
inst = self._makeOne()
inst.strbuf = b'x' * 5
inst.append(b'hello')
self.assertEqual(inst.strbuf, b'xxxxxhello')
def test_append_buf_None_longer_than_strbuf_limit(self):
inst = self._makeOne(10000)
inst.strbuf = b'x' * 8192
inst.append(b'hello')
self.assertEqual(inst.strbuf, b'')
self.assertEqual(len(inst.buf), 8197)
def test_append_overflow(self):
inst = self._makeOne(10)
inst.strbuf = b'x' * 8192
inst.append(b'hello')
self.assertEqual(inst.strbuf, b'')
self.assertEqual(len(inst.buf), 8197)
def test_append_sz_gt_overflow(self):
from waitress.buffers import BytesIOBasedBuffer
f = io.BytesIO(b'data')
inst = self._makeOne(f)
buf = BytesIOBasedBuffer()
inst.buf = buf
inst.overflow = 2
inst.append(b'data2')
self.assertEqual(f.getvalue(), b'data')
self.assertTrue(inst.overflowed)
self.assertNotEqual(inst.buf, buf)
def test_get_buf_None_skip_False(self):
inst = self._makeOne()
inst.strbuf = b'x' * 5
r = inst.get(5)
self.assertEqual(r, b'xxxxx')
def test_get_buf_None_skip_True(self):
inst = self._makeOne()
inst.strbuf = b'x' * 5
r = inst.get(5, skip=True)
self.assertFalse(inst.buf is None)
self.assertEqual(r, b'xxxxx')
def test_skip_buf_None(self):
inst = self._makeOne()
inst.strbuf = b'data'
inst.skip(4)
self.assertEqual(inst.strbuf, b'')
self.assertNotEqual(inst.buf, None)
def test_skip_buf_None_allow_prune_True(self):
inst = self._makeOne()
inst.strbuf = b'data'
inst.skip(4, True)
self.assertEqual(inst.strbuf, b'')
self.assertEqual(inst.buf, None)
def test_prune_buf_None(self):
inst = self._makeOne()
inst.prune()
self.assertEqual(inst.strbuf, b'')
def test_prune_with_buf(self):
inst = self._makeOne()
class Buf(object):
def prune(self):
self.pruned = True
inst.buf = Buf()
inst.prune()
self.assertEqual(inst.buf.pruned, True)
def test_prune_with_buf_overflow(self):
inst = self._makeOne()
class DummyBuffer(io.BytesIO):
def getfile(self):
return self
def prune(self):
return True
def __len__(self):
return 5
buf = DummyBuffer(b'data')
inst.buf = buf
inst.overflowed = True
inst.overflow = 10
inst.prune()
self.assertNotEqual(inst.buf, buf)
def test_prune_with_buflen_more_than_max_int(self):
from waitress.compat import MAXINT
inst = self._makeOne()
inst.overflowed = True
buf = DummyBuffer(length=MAXINT+1)
inst.buf = buf
result = inst.prune()
# we don't want this to throw an OverflowError on Python 2 (see
# https://github.com/Pylons/waitress/issues/47)
self.assertEqual(result, None)
def test_getfile_buf_None(self):
inst = self._makeOne()
f = inst.getfile()
self.assertTrue(hasattr(f, 'read'))
def test_getfile_buf_not_None(self):
inst = self._makeOne()
buf = io.BytesIO()
buf.getfile = lambda *x: buf
inst.buf = buf
f = inst.getfile()
self.assertEqual(f, buf)
def test__close_nobuf(self):
inst = self._makeOne()
inst.buf = None
        self.assertEqual(inst._close(), None)  # doesn't raise
def test__close_withbuf(self):
class Buffer(object):
def _close(self):
self.closed = True
buf = Buffer()
inst = self._makeOne()
inst.buf = buf
inst._close()
self.assertTrue(buf.closed)
class KindaFilelike(object):
def __init__(self, bytes, close=None, tellresults=None):
self.bytes = bytes
self.tellresults = tellresults
if close is not None:
self.close = close
class Filelike(KindaFilelike):
def seek(self, v, whence=0):
self.seeked = v
def tell(self):
v = self.tellresults.pop(0)
return v
class DummyBuffer(object):
def __init__(self, length=0):
self.length = length
def __len__(self):
return self.length
def append(self, s):
self.length = self.length + len(s)
def prune(self):
pass
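# Taken together, the tests above pin down OverflowableBuffer's contract:
# small appends stay in an in-memory string buffer, and large ones spill to a
# tempfile-backed buffer once `overflow` is exceeded. A minimal usage sketch,
# relying only on the public behavior exercised above (the 8192-byte strbuf
# threshold is inferred from the tests, not a documented constant):
from waitress.buffers import OverflowableBuffer

demo = OverflowableBuffer(overflow=10)
demo.append(b'x' * 5)        # small append: held in the string buffer
demo.append(b'y' * 10000)    # exceeds the strbuf limit and `overflow`
assert demo.overflowed       # now backed by a temporary file
assert demo.get(5, skip=True) == b'xxxxx'
demo._close()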
| {
"content_hash": "57c2c53312f594d2bb23cf90a09efecb",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 71,
"avg_line_length": 30.993392070484582,
"alnum_prop": 0.5843223651481771,
"repo_name": "korbenzhang/vim-ycm-win",
"id": "f7c90b4eaddf1316d7cf89cca9671f60989c1c87",
"size": "14071",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "third_party/ycmd/third_party/waitress/waitress/tests/test_buffers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93345"
},
{
"name": "VimL",
"bytes": "30287"
}
],
"symlink_target": ""
} |
import os
import shutil
from revision.config import Config, read_config
def test_read_config():
#: pass path string
test_revision_path = os.path.join(
os.path.dirname(__file__),
'.revision/config.json'
)
config = read_config(test_revision_path)
assert isinstance(config, Config)
assert config.clients[0].key == "dataset"
#: pass dict object
config = read_config({
"clients": [
{
"key": "dataset",
"module": "revision.client.DummyClient",
"local_path": "./tests/data",
"remote_path": ""
}
]
})
assert isinstance(config, Config)
assert config.clients[0].key == "dataset"
#: pass nothing
rev_dirpath = os.path.normpath(os.path.join(
os.path.dirname(__file__),
'../.revision'
))
if not os.path.isdir(rev_dirpath):
os.mkdir(rev_dirpath)
revision_path = os.path.join(
rev_dirpath,
'config.json'
)
shutil.copy2(test_revision_path, revision_path)
config = read_config()
assert isinstance(config, Config)
assert config.clients[0].key == "dataset"
shutil.rmtree(rev_dirpath)
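# A minimal usage sketch of the API under test, mirroring the dict form the
# test feeds to read_config (DummyClient and the paths are the test suite's
# own fixtures, reused here purely for illustration):
from revision.config import read_config

demo_config = read_config({
    "clients": [{
        "key": "dataset",
        "module": "revision.client.DummyClient",
        "local_path": "./tests/data",
        "remote_path": ""
    }]
})
assert demo_config.clients[0].key == "dataset"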
| {
"content_hash": "c6f4da800fd2854263108b69051560e5",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 56,
"avg_line_length": 21.56140350877193,
"alnum_prop": 0.5663140764849471,
"repo_name": "COLORFULBOARD/revision",
"id": "c29286cd252370512202436bede1391aa81e5781",
"size": "1230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "119"
},
{
"name": "Python",
"bytes": "49567"
}
],
"symlink_target": ""
} |
"""The machinery of importlib: finders, loaders, hooks, etc."""
import _imp
from ._bootstrap import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES,
OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES,
EXTENSION_SUFFIXES)
from ._bootstrap import BuiltinImporter
from ._bootstrap import FrozenImporter
from ._bootstrap import WindowsRegistryFinder
from ._bootstrap import PathFinder
from ._bootstrap import FileFinder
from ._bootstrap import SourceFileLoader
from ._bootstrap import SourcelessFileLoader
from ._bootstrap import ExtensionFileLoader
def all_suffixes():
"""Returns a list of all recognized module suffixes for this process"""
return SOURCE_SUFFIXES + BYTECODE_SUFFIXES + EXTENSION_SUFFIXES
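# For example (a sketch; the exact suffixes vary by platform and by CPython
# version/build):
#
#     >>> from importlib.machinery import all_suffixes
#     >>> all_suffixes()
#     ['.py', '.pyc', '.so']    # plus any platform-specific extension suffixes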
| {
"content_hash": "fc928e02ed3ec6b362cfec4e5d303af9",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 38.7,
"alnum_prop": 0.7299741602067183,
"repo_name": "severb/flowy-website",
"id": "cec86adfc58fe521eb99bf0b52436efa864d72dd",
"size": "774",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "js/Lib/importlib/machinery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "78754"
},
{
"name": "JavaScript",
"bytes": "710652"
},
{
"name": "Python",
"bytes": "3013303"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
} |
def test_config(parse_options):
from pylama.config import get_config
config = get_config()
assert config
options = parse_options()
assert options
assert options.skip
assert options.max_line_length
assert not options.verbose
assert options.paths
assert "pylama" in options.paths[0]
options = parse_options(["-l", "pydocstyle,pycodestyle,unknown", "-i", "E"])
assert set(options.linters) == set(["pydocstyle", "pycodestyle"])
assert options.ignore == {"E"}
options = parse_options("-o dummy dummy.py".split())
assert set(options.linters) == set(["pycodestyle", "mccabe", "pyflakes"])
assert options.skip == []
def test_parse_options(parse_options):
options = parse_options()
assert not options.select
def test_from_stdin(parse_options):
options = parse_options("--from-stdin dummy.py".split())
assert options
assert options.from_stdin is True
assert options.paths
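# These tests receive parse_options as a pytest fixture defined outside this
# file (presumably in the project's conftest). A hypothetical minimal
# reconstruction, assuming it simply wraps pylama.config.parse_options; the
# real fixture may differ:
import pytest
from pylama import config as pylama_config

@pytest.fixture
def parse_options():
    def parse(args=None):
        return pylama_config.parse_options(args or [])
    return parse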
| {
"content_hash": "9610cc0f7fd44a7088cb69017c1c0b06",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 80,
"avg_line_length": 29.03030303030303,
"alnum_prop": 0.675365344467641,
"repo_name": "klen/pylama",
"id": "bef7a631093d91216ce164477e2007f99ce37851",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "209"
},
{
"name": "Makefile",
"bytes": "1798"
},
{
"name": "Python",
"bytes": "80335"
}
],
"symlink_target": ""
} |